Dataset columns (⌀ marks columns that may contain null values):

| Column | Type | Values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 4–1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4–209 |
| max_stars_repo_name | stringlengths | 5–121 |
| max_stars_repo_head_hexsha | stringlengths | 40–40 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_path | stringlengths | 4–209 |
| max_issues_repo_name | stringlengths | 5–121 |
| max_issues_repo_head_hexsha | stringlengths | 40–40 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_path | stringlengths | 4–209 |
| max_forks_repo_name | stringlengths | 5–121 |
| max_forks_repo_head_hexsha | stringlengths | 40–40 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀ |
| content | stringlengths | 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
hexsha: 3e5adc61d48ceb1a66a453a40b2dfbc7e3b71c63 | size: 2,391 | ext: py | lang: Python
path: src/components/roll_padding_2d_layer.py | repo: morgannewellsun/Reverse-Conway @ 1750bf6ab96b8d6ccc1d8905264097e091adb82c | licenses: ["Unlicense"]
max_stars_count: 1 (2022-03-13T22:44:53.000Z – 2022-03-13T22:44:53.000Z) | max_issues_count: null | max_forks_count: null
content:
import tensorflow as tf
class RollPadding2DLayer(tf.keras.layers.Layer):
def __init__(self, padding: int):
super(RollPadding2DLayer, self).__init__()
self._padding = padding
self._left_begin_arg = None
self._left_size_arg = None
self._right_begin_arg = None
self._right_size_arg = None
self._top_begin_arg = None
self._top_size_arg = None
self._bottom_begin_arg = None
self._bottom_size_arg = None
def build(self, input_shape):
if len(input_shape) == 3:
batch_begin_arg, batch_size_arg = [], []
elif len(input_shape) == 4:
batch_begin_arg, batch_size_arg = [0], [-1]
else:
raise ValueError(f"RollPadding2DLayer recieved an input of shape {input_shape}. Dimension must be 3 or 4.")
# x dimension
self._left_begin_arg = tf.constant(batch_begin_arg + [0, 0, 0])
self._left_size_arg = tf.constant(batch_size_arg + [input_shape[-3], self._padding, input_shape[-1]])
self._right_begin_arg = tf.constant(batch_begin_arg + [0, input_shape[-2] - self._padding, 0])
self._right_size_arg = tf.constant(batch_size_arg + [input_shape[-3], self._padding, input_shape[-1]])
# y dimension
self._top_begin_arg = tf.constant(batch_begin_arg + [0, 0, 0])
self._top_size_arg = tf.constant(batch_size_arg + [self._padding, input_shape[-2] + 2 * self._padding, input_shape[-1]])
self._bottom_begin_arg = tf.constant(batch_begin_arg + [input_shape[-3] - self._padding, 0, 0])
self._bottom_size_arg = tf.constant(batch_size_arg + [self._padding, input_shape[-2] + 2 * self._padding, input_shape[-1]])
def call(self, inputs, **kwargs):
left_slice = tf.slice(inputs, self._left_begin_arg, self._left_size_arg)
right_slice = tf.slice(inputs, self._right_begin_arg, self._right_size_arg)
x_dim_padded_inputs = tf.concat([right_slice, inputs, left_slice], axis=-2)
top_slice = tf.slice(x_dim_padded_inputs, self._top_begin_arg, self._top_size_arg)
bottom_slice = tf.slice(x_dim_padded_inputs, self._bottom_begin_arg, self._bottom_size_arg)
xy_dim_padded_inputs = tf.concat([bottom_slice, x_dim_padded_inputs, top_slice], axis=-3)
return xy_dim_padded_inputs
def get_config(self):
return {"padding": self._padding}
avg_line_length: 50.87234 | max_line_length: 131 | alphanum_fraction: 0.679214
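
A minimal usage sketch for the `RollPadding2DLayer` file above (not part of the dataset row): the batch size, 25×25 board and single channel are illustrative assumptions.

```python
# Hypothetical usage of the RollPadding2DLayer defined above; the batch size,
# 25x25 board and single channel are assumptions for illustration only.
import tensorflow as tf

layer = RollPadding2DLayer(padding=1)
boards = tf.random.uniform((8, 25, 25, 1))  # batch of toroidal Game of Life boards
padded = layer(boards)                      # build() runs on the first call
print(padded.shape)                         # (8, 27, 27, 1): one wrapped cell on each side
```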
hexsha: 781de0ee0a125c78df965d3af5495763cc850f0a | size: 5,227 | ext: py | lang: Python
path: assignments/assignment2/layers.py | repo: NadyaStrogankova/dlcourse_ai @ d03e3123b9f801fa3d801ab08e7327df5d48be43 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np
def l2_regularization(W, reg_strength):
"""
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
"""
# print(W.shape)
loss = reg_strength * (W ** 2).sum()
grad = 2 * reg_strength * W
return loss, grad
def softmax_with_cross_entropy(predictions, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
"""
sm = softmax(predictions)
# print("softmax count", softmax, e, "sum", sum(e).sum())
# Your final implementation shouldn't have any loops
target, ti = targets(target_index, predictions.shape)
loss = np.mean(-np.log(sm[ti]))
dpredictions = (sm - target) / sm.shape[0]
# print("predictions", predictions, "softmax", sm, "target", target, "loss", loss, "grad", dpredictions)
return loss, dpredictions.reshape(predictions.shape)
class Param:
"""
Trainable parameter of the model
Captures both parameter value and the gradient
"""
def __init__(self, value):
self.value = value
self.grad = np.zeros_like(value)
class ReLULayer:
def __init__(self):
self.param = None
def forward(self, X):
X_next = np.maximum(X, 0)
self.param = Param(X_next)
# Hint: you'll need to save some information about X
# to use it later in the backward pass
# raise Exception("Not implemented!")
return X_next
def backward(self, d_out):
"""
Backward pass
Arguments:
d_out, np array (batch_size, num_features) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, num_features) - gradient
with respect to input
"""
        d_result = d_out.copy()  # copy so the upstream gradient is not modified in place
        d_result[self.param.value == 0] = 0
        self.grad = d_result
# print("backward", d_result, self.param.value)
# Your final implementation shouldn't have any loops
# raise Exception("Not implemented!")
return d_result
def params(self):
# ReLU Doesn't have any parameters
return {}
class FullyConnectedLayer:
def __init__(self, n_input, n_output):
self.W = Param(0.001 * np.random.randn(n_input, n_output))
self.B = Param(0.001 * np.random.randn(1, n_output))
self.X = None
def forward(self, X):
# print(self.W.value, self.B)
X_next = X.dot(self.W.value) + self.B.value
# print("shapes", X_next.shape, self.W.value.shape, X)
self.param = Param(X_next)
self.X = Param(X)
return X_next
# Your final implementation shouldn't have any loops
def backward(self, d_out):
"""
Backward pass
Computes gradient with respect to input and
accumulates gradients within self.W and self.B
Arguments:
d_out, np array (batch_size, n_output) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, n_input) - gradient
with respect to input
"""
# print(d_out, self.W.value.T)
d_input = d_out.dot(self.W.value.T)
self.grad = d_input
# Compute both gradient with respect to input
# and gradients with respect to W and B
# Add gradients of W and B to their `grad` attribute
self.params()['W'].grad = self.X.value.T.dot(d_out)
self.params()['B'].grad = np.ones((1, d_out.shape[0])).dot(d_out)
# print(d_out.shape, self.params()['B'].grad.shape)
# It should be pretty similar to linear classifier from
# the previous assignment
return d_input
def params(self):
return {'W': self.W, 'B': self.B}
def softmax(predictions):
'''
Computes probabilities from scores
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
Returns:
probs, np array of the same shape as predictions -
probability for every class, 0..1
'''
if predictions.ndim > 1:
pred_scaled = predictions.T - predictions.max(axis=1)
e = np.exp(pred_scaled)
sm = (e / e.sum(axis=0)).T
else:
pred_scaled = predictions - np.max(predictions)
e = np.exp(pred_scaled)
sm = np.array(e / sum(e))
# print(np.array(sm))
# Your final implementation shouldn't have any loops
return sm
def targets(target_index, shape):
target = np.zeros(shape)
ti = np.arange(len(target_index)), target_index.ravel()
target[ti] = 1
return target, ti
avg_line_length: 30.213873 | max_line_length: 109 | alphanum_fraction: 0.619476
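
A short smoke test (not part of the original assignment file) showing how the building blocks above compose into a forward and backward pass; the batch size, layer widths and class labels are made up.

```python
# Made-up example: a two-layer network forward pass, loss, and backward pass
# on random data, using the classes and functions defined above.
import numpy as np

np.random.seed(0)
X = np.random.randn(4, 3)          # 4 samples, 3 features
y = np.array([0, 2, 1, 2])         # true class indices

fc1 = FullyConnectedLayer(n_input=3, n_output=5)
relu = ReLULayer()
fc2 = FullyConnectedLayer(n_input=5, n_output=3)

scores = fc2.forward(relu.forward(fc1.forward(X)))
loss, d_scores = softmax_with_cross_entropy(scores, y)

d_hidden = relu.backward(fc2.backward(d_scores))
d_input = fc1.backward(d_hidden)
print(loss, d_input.shape)         # scalar loss and a (4, 3) gradient w.r.t. X
```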
hexsha: c445c834b4ffdeac10b5abd9a8513d8b99852eec | size: 952 | ext: py | lang: Python
path: ucb_cs61A/homework/hw07/tests/ordered.py | repo: tavaresdong/courses-notes @ 7fb89103bca679f5ef9b14cbc777152daac1402e | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 1 (2017-07-31T08:15:26.000Z – 2017-07-31T08:15:26.000Z) | max_forks_count: 1 (2019-10-06T16:52:31.000Z – 2019-10-06T16:52:31.000Z)
content:
test = {
'name': 'ordered?',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> (ordered? '(1 2 3 4 5)) ; True or False
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> (ordered? '(1 5 2 4 3)) ; True or False
False
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> (ordered? '(2 2)) ; True or False
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> (ordered? '(1 2 2 4 3)) ; True or False
False
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
scm> (load 'hw07)
""",
'teardown': '',
'type': 'scheme'
}
]
}
avg_line_length: 19.833333 | max_line_length: 55 | alphanum_fraction: 0.320378
hexsha: a53a49aeb0420bd952ba0e4c9644602416921aaf | size: 211 | ext: py | lang: Python
path: 30_days/day_9_recursion.py | repo: hsm207/hackerrank @ 776e2f84036b5103c5fbb4d9855568ace0cdd042 | licenses: ["Unlicense"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import sys
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n - 1)
if __name__ == "__main__":
n = int(input().strip())
result = factorial(n)
print(result)
avg_line_length: 14.066667 | max_line_length: 35 | alphanum_fraction: 0.540284
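
A quick check of the recursive solution above (not in the original file), just to show the base case at `n <= 1` and the growth of the results.

```python
# Illustration only: factorial(0) and factorial(1) both hit the base case.
print([factorial(n) for n in range(6)])   # [1, 1, 2, 6, 24, 120]
```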
hexsha: 38cd4cf4c7f8c78775ecfe54cb26d47f43e90e21 | size: 1,695 | ext: py | lang: Python
path: examples/example_with_sound.py | repo: bobatsar/moviepy @ 17028410205a56f2937011e08ae0e91971e49318 | licenses: ["MIT"]
max_stars_count: 3 (2021-03-14T09:33:36.000Z – 2021-03-14T09:33:43.000Z) | max_issues_count: 10 (2016-08-27T04:01:32.000Z – 2017-10-30T06:43:49.000Z) | max_forks_count: 2 (2020-02-19T13:16:49.000Z – 2020-03-11T16:19:57.000Z)
content:
"""
Description of the video:
The screen is split in two parts showing Carry and Audrey at the phone,
talking at the same time, because it is actually two scenes of a same
movie put together.
"""
from moviepy.editor import *
from moviepy.video.tools.drawing import color_split
duration = 6 # duration of the final clip
# LOAD THE MAIN SCENE
# this small video contains the two scenes that we will put together.
main_clip = VideoFileClip("../../videos/charadePhone.mp4")
W,H = main_clip.size
# MAKE THE LEFT CLIP : cut, crop, add a mask
mask = color_split((2*W/3,H),
p1=(W/3,H), p2=(2*W/3,0),
col1=1, col2=0,
grad_width=2)
mask_clip = ImageClip(mask, ismask=True)
clip_left = (main_clip.coreader()
.subclip(0,duration)
.crop( x1=60, x2=60 + 2*W/3)
.set_mask(mask_clip))
# MAKE THE RIGHT CLIP : cut, crop, add a mask
mask = color_split((2*W/3,H),
p1=(2,H), p2=(W/3+2,0),
col1=0, col2=1,
grad_width=2)
mask_clip = ImageClip(mask, ismask=True)
clip_right = (main_clip.coreader()
.subclip(21,21+duration)
.crop(x1=70, x2=70+2*W/3)
.set_mask(mask_clip))
# ASSEMBLE AND WRITE THE MOVIE TO A FILE
cc = CompositeVideoClip([clip_right.set_pos('right').volumex(0.4),
clip_left.set_pos('left').volumex(0.4)],
size = (W,H))
#cc.preview()
cc.write_videofile("../../biphone3.avi",fps=24, codec='mpeg4')
avg_line_length: 27.786885 | max_line_length: 71 | alphanum_fraction: 0.545133
hexsha: 57e9fae5bf205bf80b84635bb9435ae7f109bad8 | size: 31,801 | ext: py | lang: Python
path: cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | repo: pci/kubernetes @ 6a0d3c74949385494e42e5f9142925e851991ac8 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-04-05T20:05:45.000Z – 2020-04-05T20:05:45.000Z) | max_issues_count: null | max_forks_count: 1 (2020-04-05T20:05:48.000Z – 2020-04-05T20:05:48.000Z)
content:
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.destroy('feature-gates')
kubelet_opts.destroy('experimental-nvidia-gpus')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# cleanup old flagmanagers
FlagManager('kubelet').destroy_all()
FlagManager('kube-proxy').destroy_all()
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
archive = hookenv.resource_get('cni')
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
''' Notify to the user we are in a transient state and the application
is still converging. Potentially remotely, or we may be in a detached loop
wait state '''
# During deployment the worker has to start kubelet without cluster dns
# configured. If this is the first unit online in a service pool waiting
# to self host the dns pod, and configure itself to query the dns service
# declared in the kube-system namespace
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = kube_control.get_auth_credentials()
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_worker_services(servers, dns, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress RC enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
''' Scale the number of ingress controller replicas to match the number of
nodes. '''
try:
output = kubectl('get', 'nodes', '-o', 'name')
count = len(output.splitlines())
kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa
except CalledProcessError:
hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
try:
_apply_node_label(label, delete=True)
except CalledProcessError:
hookenv.log('Error removing node label {}'.format(label))
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig('/root/.kube/config', server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
def configure_worker_services(api_servers, dns, cluster_cidr):
''' Add remaining flags for the worker services and configure snaps to use
them '''
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add('require-kubeconfig', 'true')
kubelet_opts.add('kubeconfig', kubeconfig_path)
kubelet_opts.add('network-plugin', 'cni')
kubelet_opts.add('v', '0')
kubelet_opts.add('address', '0.0.0.0')
kubelet_opts.add('port', '10250')
kubelet_opts.add('cluster-dns', dns['sdn-ip'])
kubelet_opts.add('cluster-domain', dns['domain'])
kubelet_opts.add('anonymous-auth', 'false')
kubelet_opts.add('client-ca-file', ca_cert_path)
kubelet_opts.add('tls-cert-file', server_cert_path)
kubelet_opts.add('tls-private-key-file', server_key_path)
kubelet_opts.add('logtostderr', 'true')
kube_proxy_opts = FlagManager('kube-proxy')
kube_proxy_opts.add('cluster-cidr', cluster_cidr)
kube_proxy_opts.add('kubeconfig', kubeconfig_path)
kube_proxy_opts.add('logtostderr', 'true')
kube_proxy_opts.add('v', '0')
kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts.add('conntrack-max-per-core', '0')
cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
check_call(cmd)
cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress replication controller manifest
manifest = addon_path.format('ingress-replication-controller.yaml')
render('ingress-replication-controller.yaml', manifest, context)
hookenv.log('Creating the ingress replication controller.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
# If we already have the definition, its probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
flag = 'allow-privileged'
hookenv.log('Setting {}={}'.format(flag, privileged))
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add(flag, privileged)
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
# `kubectl get nodes -o yaml`
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts.add('experimental-nvidia-gpus', '1')
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts.add('feature-gates', 'Accelerators=true')
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
kubelet_opts.destroy('experimental-nvidia-gpus')
else:
kubelet_opts.remove('feature-gates', 'Accelerators=true')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname())
kube_control.set_auth_request(nodeuser)
@when('kube-control.auth.available')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
creds = kube_control.get_auth_credentials()
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
hostname = gethostname()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, hostname, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
check_call(split(cmd))
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
avg_line_length: 36.344 | max_line_length: 101 | alphanum_fraction: 0.689884
hexsha: 6617105cf3aa67f78b6b9eec23f38b5096a2bc0e | size: 1,706 | ext: py | lang: Python
path: sounds_data/fuck_fuck_data.py | stars repo: MeetDevin/WebCrawler @ 5cd4546c1a25fbe1466fafd0fcc3f050a69b44a8 | issues/forks repo: MeetDevin/ProjectX @ 5cd4546c1a25fbe1466fafd0fcc3f050a69b44a8 | licenses: ["Apache-2.0"]
max_stars_count: 3 (2019-03-07T03:43:17.000Z – 2020-02-25T08:18:11.000Z) | max_issues_count: null | max_forks_count: null
content:
from PIL import Image
import numpy as np
import os
def windows(length, window_size):
start = 0
i = 0
while start < length:
yield start, start + window_size, i
start += int(window_size*0.5)
i += 1
def fuck_data(window_size=600):
files_path = 'images'
for train_class in os.listdir(files_path):
if train_class != 'Pica+pica':
continue
for pic_name in os.listdir(files_path + '/' + train_class):
if os.path.isfile(files_path + '/' + train_class + '/' + pic_name):
filename = files_path + '/' + train_class + '/' + pic_name
save_dir = 'new_images' + '/' + train_class
if not os.path.exists(save_dir):
os.makedirs(save_dir)
                # read the image
old_img = np.asarray(Image.open(filename))
img = old_img.copy()
img.setflags(write=True)
for (start, end, i) in windows(np.shape(img)[1], window_size):
if np.shape(img[:, start:end])[1] < window_size:
end = np.shape(img)[1]
start = end - window_size
if start < 0:
break
save_path = save_dir + '/' + pic_name.replace('.jpg', '(' + str(i) + ')') + '.jpg'
if os.path.exists(save_path):
print('--exist: ', save_path)
continue
else:
Image.fromarray(old_img[:, start:end]).save(save_path)
print('save:', save_path)
if __name__ == '__main__':
fuck_data(600)
avg_line_length: 29.413793 | max_line_length: 102 | alphanum_fraction: 0.481243
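
A small illustration (not in the original script) of what the `windows` generator above yields for an image 1500 pixels wide and a 600-pixel window; note that `fuck_data` clamps the final, overrunning window back inside the image.

```python
# Illustration of the 50%-overlap windows produced by windows() above.
for start, end, i in windows(1500, 600):
    print(i, start, end)
# 0    0   600
# 1  300   900
# 2  600  1200
# 3  900  1500
# 4 1200  1800   <- fuck_data() pulls this last window back inside the image
```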
hexsha: d52ec64321ca0ebe0f9646dff56d9449604ee953 | size: 3,158 | ext: py | lang: Python
path: nikola/plugins/compile/rest/youtube.py | repo: asmeurer/nikola @ ea1c651bfed0fd6337f1d22cf8dd99899722912c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-07-07T11:32:42.000Z – 2021-07-07T11:32:42.000Z)
content:
# -*- coding: utf-8 -*-
# Copyright © 2012-2020 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""YouTube directive for reStructuredText."""
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugins.compile.rest import _align_choice, _align_options_base
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
"""Plugin for the youtube directive."""
name = "rest_youtube"
def set_site(self, site):
"""Set Nikola site."""
self.site = site
directives.register_directive('youtube', Youtube)
return super().set_site(site)
CODE = """\
<div class="youtube-video{align}">
<iframe width="{width}" height="{height}"
src="https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent"
frameborder="0" allow="encrypted-media" allowfullscreen
></iframe>
</div>"""
class Youtube(Directive):
"""reST extension for inserting youtube embedded videos.
Usage:
.. youtube:: lyViVmaBQDg
:height: 400
:width: 600
"""
has_content = True
required_arguments = 1
option_spec = {
"width": directives.unchanged,
"height": directives.unchanged,
"align": _align_choice
}
def run(self):
"""Run the youtube directive."""
self.check_content()
options = {
'yid': self.arguments[0],
'width': 560,
'height': 315,
}
options.update({k: v for k, v in self.options.items() if v})
if self.options.get('align') in _align_options_base:
options['align'] = ' align-' + self.options['align']
else:
options['align'] = ''
return [nodes.raw('', CODE.format(**options), format='html')]
def check_content(self):
"""Check if content exists."""
if self.content: # pragma: no cover
raise self.warning("This directive does not accept content. The "
"'key=value' format for options is deprecated, "
"use ':key: value' instead")
avg_line_length: 32.895833 | max_line_length: 79 | alphanum_fraction: 0.662445
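
As a rough illustration (not part of the plugin), this is how the `CODE` template above gets filled when the directive runs with its default 560×315 size and no `:align:` option.

```python
# Rough illustration of the raw HTML the directive emits for
# ".. youtube:: lyViVmaBQDg" with default options.
options = {'yid': 'lyViVmaBQDg', 'width': 560, 'height': 315, 'align': ''}
print(CODE.format(**options))
# <div class="youtube-video">
# <iframe width="560" height="315"
# src="https://www.youtube-nocookie.com/embed/lyViVmaBQDg?rel=0&wmode=transparent"
# ...
```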
hexsha: 063675fe71336d1faa28ddab3249b327d6aa567f | size: 801 | ext: py | lang: Python
path: Project/proj/blend.py | repo: Zankhana1995/feature-detection-and-matching @ 5a67fd4495232b6f8a38899194a6c87e2275619c | licenses: ["MIT"]
max_stars_count: 1 (2020-10-28T07:46:08.000Z – 2020-10-28T07:46:08.000Z) | max_issues_count: null | max_forks_count: null
content:
from proj.ransac import *
# this function will blend images
def blend(image2, homography, minX, minY, stitchedImage):
h = stitchedImage.shape[0]
w = stitchedImage.shape[1]
for i in range(h):
for j in range(w):
x1 = j - minX
y1 = i - minY
x2, y2 = project(x1, y1, homography)
x2 = int(x2)
y2 = int(y2)
if x2 >= 0 and x2 < image2.shape[1] and y2 >= 0 and y2 < image2.shape[0]:
if stitchedImage[i][j][0] == 0 or stitchedImage[i][j][1] == 0 or stitchedImage[i][j][2] == 0:
stitchedImage[i][j] = image2[y2][x2]
else:
stitchedImage[i][j] = 0.5 * stitchedImage[i][j] + 0.5 * image2[y2][x2]
print("blending done")
return stitchedImage
avg_line_length: 36.409091 | max_line_length: 109 | alphanum_fraction: 0.524345
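
A pixel-level illustration (not in the original file) of the blending rule above: where the stitched canvas is already filled, `blend` averages it 50/50 with the warped second image; otherwise it copies the warped pixel directly.

```python
# Example values only: the 50/50 average applied to one overlapping RGB pixel.
import numpy as np

existing = np.array([100, 150, 200], dtype=float)   # pixel already on the canvas
incoming = np.array([50, 50, 50], dtype=float)      # warped pixel from image2
print(0.5 * existing + 0.5 * incoming)              # [ 75. 100. 125.]
```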
hexsha: 49c7c03e3d7250bcb7d8e8b26d7ca2565fc4dcd7 | size: 2,659 | ext: py | lang: Python
path: {{cookiecutter.python_name}}/setup.py | repo: fcollonval/theme-cookiecutter @ e95554b339bec0b4d44ee265a9d861f27e7db276 | licenses: ["BSD-3-Clause"]
max_stars_count: 50 (2019-02-21T08:11:57.000Z – 2022-03-30T21:22:45.000Z) | max_issues_count: 14 (2019-07-09T11:29:32.000Z – 2022-01-25T10:36:49.000Z) | max_forks_count: 22 (2019-07-09T11:30:08.000Z – 2022-01-12T09:27:46.000Z)
content:
"""
{{ cookiecutter.python_name }} setup
"""
import json
import os
from jupyter_packaging import (
create_cmdclass, install_npm, ensure_targets,
combine_commands, skip_if_exists,
)
import setuptools
HERE = os.path.abspath(os.path.dirname(__file__))
# The name of the project
name = "{{ cookiecutter.python_name }}"
# Get our version
with open(os.path.join(HERE, 'package.json')) as f:
version = json.load(f)['version']
lab_path = os.path.join(HERE, name, "labextension")
# Representative files that should exist after a successful build
jstargets = [
os.path.join(lab_path, "package.json"),
]
package_data_spec = {
name: [
"*"
]
}
labext_name = "{{ cookiecutter.labextension_name }}"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, lab_path, "**"),
("share/jupyter/labextensions/%s" % labext_name, HERE, "install.json"),
]
cmdclass = create_cmdclass(
"jsdeps",
package_data_spec=package_data_spec,
data_files_spec=data_files_spec
)
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = os.path.exists(os.path.join(HERE, '.git'))
if is_repo:
cmdclass['jsdeps'] = js_command
else:
cmdclass['jsdeps'] = skip_if_exists(jstargets, js_command)
with open("README.md", "r") as fh:
long_description = fh.read()
setup_args = dict(
name=name,
version=version,
url="{{ cookiecutter.repository }}",
author="{{ cookiecutter.author_name }}",
description="{{ cookiecutter.project_short_description }}",
long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[
"jupyterlab>=3.0.0,==3.*",
],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
license="BSD-3-Clause",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Framework :: Jupyter",
"Framework :: Jupyter :: JupyterLab :: 3",
"Framework :: Jupyter :: JupyterLab :: Extensions",
"Framework :: Jupyter :: JupyterLab :: Extensions :: Prebuilt",
"Framework :: Jupyter :: JupyterLab :: Extensions :: Themes",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
avg_line_length: 26.59 | max_line_length: 75 | alphanum_fraction: 0.659646
hexsha: 4b82c970c0ad4a34c53bfad7f2d3db53344ec7f6 | size: 3,777 | ext: py | lang: Python
path: contrib/macdeploy/custom_dsstore.py | repo: MCLXI/AceDOrig @ ccd5cfefeaedaf1df18f72855ef304c26c54afd8 | licenses: ["MIT"]
max_stars_count: 5 (2018-07-02T20:00:11.000Z – 2019-08-06T13:34:58.000Z) | max_issues_count: 9 (2018-05-22T20:20:26.000Z – 2020-02-28T09:25:32.000Z) | max_forks_count: 5 (2019-03-28T17:55:21.000Z – 2020-05-05T16:50:08.000Z)
content:
#!/usr/bin/env python
# Copyright (c) 2013-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00acedcoreuser:\x00Documents:\x00acedcore:\x00acedcore:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/acedcoreuser/Documents/acedcore/acedcore/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['AceD-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
avg_line_length: 61.918033 | max_line_length: 1,817 | alphanum_fraction: 0.727562
hexsha: 0e82af05b7446f1c5794f7aee5045491b8ee4f6e | size: 12,915 | ext: py | lang: Python
path: tests/test_geocoder.py | repo: ravik/mapbox-baseSDK @ 2e24ff6e2b69db92f08f435f56021c0b65016277 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# coding=utf-8
import json
import re
import responses
import pytest
import mapbox
def test_geocoder_default_name():
"""Default name is set"""
geocoder = mapbox.Geocoder()
assert geocoder.name == 'mapbox.places'
def test_geocoder_name():
"""Named dataset name is set"""
geocoder = mapbox.Geocoder('mapbox.places-permanent')
assert geocoder.name == 'mapbox.places-permanent'
def _check_coordinate_precision(coord, precision):
"""Coordinate precision is <= specified number of digits"""
if '.' not in coord:
return True
else:
return len(coord.split('.')[-1]) <= precision
@responses.activate
def test_geocoder_forward():
"""Forward geocoding works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?access_token=pk.test',
match_querystring=True,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(access_token='pk.test').forward('1600 pennsylvania ave nw')
assert response.status_code == 200
assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_forward_geojson():
"""Forward geocoding .geojson method works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?access_token=pk.test',
match_querystring=True,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(access_token='pk.test').forward('1600 pennsylvania ave nw')
assert response.status_code == 200
assert response.geojson() == response.json()
@responses.activate
def test_geocoder_reverse():
"""Reverse geocoding works"""
lon, lat = -77.4371, 37.5227
body = json.dumps({"query": [lon, lat]})
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test'.format(lon, lat),
match_querystring=True,
body=body,
status=200,
content_type='application/json')
response = mapbox.Geocoder(access_token='pk.test').reverse(lon=lon, lat=lat)
assert response.status_code == 200
assert response.json()['query'] == [lon, lat]
@responses.activate
def test_geocoder_reverse_geojson():
"""Reverse geocoding geojson works"""
lon, lat = -77.4371, 37.5227
body = json.dumps({"query": [lon, lat]})
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test'.format(lon, lat),
match_querystring=True,
body=body,
status=200,
content_type='application/json')
response = mapbox.Geocoder(access_token='pk.test').reverse(lon=lon, lat=lat)
assert response.status_code == 200
assert response.geojson() == response.json()
def test_geocoder_place_types():
"""Place types are enumerated"""
assert sorted(mapbox.Geocoder().place_types.items()) == [
('address', "A street address with house number. Examples: 1600 Pennsylvania Ave NW, 1051 Market St, Oberbaumstrasse 7."),
('country', "Sovereign states and other political entities. Examples: United States, France, China, Russia."),
('district', "Second order administrative division. Only used when necessary. Examples: Tianjin, Beijing"),
('locality', "A smaller area within a place that possesses official status and boundaries. Examples: Oakleigh (Melbourne)"),
('neighborhood', 'A smaller area within a place, often without formal boundaries. Examples: Montparnasse, Downtown, Haight-Ashbury.'),
('place', "City, town, village or other municipality relevant to a country's address or postal system. Examples: Cleveland, Saratoga Springs, Berlin, Paris."),
('poi', "Places of interest including commercial venues, major landmarks, parks, and other features. Examples: Subway Restaurant, Yosemite National Park, Statue of Liberty."),
('poi.landmark', "Places of interest that are particularly notable or long-lived like parks, places of worship and museums. A strict subset of the poi place type. Examples: Yosemite National Park, Statue of Liberty."),
('postcode', "Postal code, varies by a country's postal system. Examples: 20009, CR0 3RL."),
('region', "First order administrative divisions within a country, usually provinces or states. Examples: California, Ontario, Essonne.")]
def test_validate_country_codes_err():
try:
mapbox.Geocoder()._validate_country_codes(('us', 'bogus'))
except mapbox.InvalidCountryCodeError as err:
assert str(err) == "bogus"
def test_validate_country():
assert mapbox.Geocoder()._validate_country_codes(
('us', 'br')) == {'country': 'us,br'}
def test_validate_place_types_err():
try:
mapbox.Geocoder()._validate_place_types(('address', 'bogus'))
except mapbox.InvalidPlaceTypeError as err:
assert str(err) == "bogus"
def test_validate_place_types():
assert mapbox.Geocoder()._validate_place_types(
('address', 'poi')) == {'types': 'address,poi'}
@responses.activate
def test_geocoder_forward_types():
"""Type filtering of forward geocoding works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?types=address,country,place,poi.landmark,postcode,region&access_token=pk.test',
match_querystring=True,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward(
'1600 pennsylvania ave nw',
types=('address', 'country', 'place', 'poi.landmark', 'postcode', 'region'))
assert response.status_code == 200
assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_reverse_types():
"""Type filtering of reverse geocoding works"""
lon, lat = -77.4371, 37.5227
body = json.dumps({"query": [lon, lat]})
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?types=address,country,place,poi.landmark,postcode,region&access_token=pk.test'.format(lon, lat),
match_querystring=True,
body=body,
status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').reverse(
lon=lon, lat=lat,
types=('address', 'country', 'place', 'poi.landmark', 'postcode', 'region'))
assert response.status_code == 200
assert response.json()['query'] == [lon, lat]
@responses.activate
def test_geocoder_forward_proximity():
"""Proximity parameter works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?proximity=0.0,0.0&access_token=pk.test',
match_querystring=True,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward(
'1600 pennsylvania ave nw', lon=0, lat=0)
assert response.status_code == 200
assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_proximity_rounding():
"""Proximity parameter is rounded to 3 decimal places"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json',
match_querystring=False,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward(
'1600 pennsylvania ave nw', lon=0.123456, lat=0.987654)
# check coordinate precision for proximity flag
match = re.search(r'[&\?]proximity=([^&$]+)', response.url)
assert match is not None
for coord in re.split(r'(%2C|,)', match.group(1)):
assert _check_coordinate_precision(coord, 3)
@responses.activate
def test_geocoder_forward_bbox():
"""Bbox parameter works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/washington.json?bbox=-78.3284%2C38.6039%2C-78.0428%2C38.7841&access_token=pk.test',
match_querystring=True,
body='{"query": ["washington"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward(
'washington', bbox=(-78.3284,38.6039,-78.0428,38.7841))
assert response.status_code == 200
assert response.json()['query'] == ["washington"]
@responses.activate
def test_geocoder_forward_limit():
"""Limit parameter works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/washington.json?limit=3&access_token=pk.test',
match_querystring=True,
body='{"query": ["washington"], "features": [1, 2, 3]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward(
'washington', limit=3)
assert response.status_code == 200
assert len(response.json()['features']) == 3
@responses.activate
def test_geocoder_reverse_limit():
"""Limit parameter works"""
lon, lat = -77.4371, 37.5227
body = json.dumps({"query": [lon, lat],
"features": [{'name': 'place'}]})
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test&limit=1&types=place'.format(lon, lat),
match_querystring=True,
body=body,
status=200,
content_type='application/json')
service = mapbox.Geocoder(access_token='pk.test')
response = service.reverse(lon=lon, lat=lat, limit=1, types=['place'])
assert response.status_code == 200
assert len(response.json()['features']) == 1
@responses.activate
def test_geocoder_reverse_limit_requires_onetype():
"""Limit requires a single type"""
lon, lat = -77.123456789, 37.987654321
service = mapbox.Geocoder(access_token='pk.test')
with pytest.raises(mapbox.InvalidPlaceTypeError):
service.reverse(lon=lon, lat=lat, limit=1)
with pytest.raises(mapbox.InvalidPlaceTypeError):
service.reverse(lon=lon, lat=lat, limit=1, types=['places', 'country'])
@responses.activate
def test_geocoder_reverse_rounding():
"""Reverse geocoding parameters are rounded to 5 decimal places"""
lon, lat = -77.123456789, 37.987654321
body = json.dumps({"query": [lon, lat]})
responses.add(
responses.GET,
        re.compile(r'https://api\.mapbox\.com/geocoding/v5/mapbox\.places/.+\.json'),
match_querystring=False,
body=body,
status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').reverse(
lon=lon, lat=lat)
# check coordinate precision for reverse geocoding coordinates
match = re.search(r'\/([\-\d\.\,]+)\.json', response.url)
assert match is not None
for coord in re.split(r'(%2C|,)', match.group(1)):
assert _check_coordinate_precision(coord, 5)
@responses.activate
def test_geocoder_unicode():
"""Forward geocoding works with non-ascii inputs
Specifically, the URITemplate needs to utf-8 encode all inputs
"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/Florian%C3%B3polis%2C%20Brazil.json?access_token=pk.test',
match_querystring=True,
body='{}', status=200,
content_type='application/json')
query = "Florianópolis, Brazil"
try:
query = query.decode('utf-8') # Python 2
except:
pass # Python 3
response = mapbox.Geocoder(access_token='pk.test').forward(query)
assert response.status_code == 200
@responses.activate
def test_geocoder_forward_country():
"""Country parameter of forward geocoding works"""
responses.add(
responses.GET,
'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?country=us&access_token=pk.test',
match_querystring=True,
body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
content_type='application/json')
response = mapbox.Geocoder(
access_token='pk.test').forward('1600 pennsylvania ave nw', country=['us'])
assert response.status_code == 200
| 36.586402
| 226
| 0.66535
|
389ff68f5c61510cfd937c48b01a5f78bfd31731
| 8,355
|
py
|
Python
|
venv/lib/python3.7/site-packages/nltk/cluster/kmeans.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 10
|
2021-05-31T07:18:08.000Z
|
2022-03-19T09:20:11.000Z
|
venv/lib/python3.7/site-packages/nltk/cluster/kmeans.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
venv/lib/python3.7/site-packages/nltk/cluster/kmeans.py
|
VighneshHarihar/Newsify
|
321f5f65bb6983c0ca5a3864900b27ce36a32717
|
[
"MIT"
] | 7
|
2015-09-30T03:00:44.000Z
|
2021-06-04T05:34:39.000Z
|
# Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import copy
import random
import sys
try:
import numpy
except ImportError:
pass
from nltk.cluster.util import VectorSpaceClusterer
class KMeansClusterer(VectorSpaceClusterer):
"""
The K-means clusterer starts with k arbitrary chosen means then allocates
each vector to the cluster with the closest mean. It then recalculates the
means of each cluster as the centroid of the vectors in the cluster. This
process repeats until the cluster memberships stabilise. This is a
hill-climbing algorithm which may converge to a local maximum. Hence the
clustering is often repeated with random initial means and the most
commonly occurring output means are chosen.
"""
def __init__(
self,
num_means,
distance,
repeats=1,
conv_test=1e-6,
initial_means=None,
normalise=False,
svd_dimensions=None,
rng=None,
avoid_empty_clusters=False,
):
"""
:param num_means: the number of means to use (may use fewer)
:type num_means: int
:param distance: measure of distance between two vectors
        :type distance: function taking two vectors and returning a float
:param repeats: number of randomised clustering trials to use
:type repeats: int
:param conv_test: maximum variation in mean differences before
deemed convergent
:type conv_test: number
:param initial_means: set of k initial means
:type initial_means: sequence of vectors
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
            dimensionality with SVD
:type svd_dimensions: int
:param rng: random number generator (or None)
:type rng: Random
:param avoid_empty_clusters: include current centroid in computation
of next one; avoids undefined behavior
when clusters become empty
:type avoid_empty_clusters: boolean
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._num_means = num_means
self._distance = distance
self._max_difference = conv_test
assert not initial_means or len(initial_means) == num_means
self._means = initial_means
assert repeats >= 1
assert not (initial_means and repeats > 1)
self._repeats = repeats
self._rng = rng if rng else random.Random()
self._avoid_empty_clusters = avoid_empty_clusters
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print("Warning: means will be discarded for subsequent trials")
meanss = []
for trial in range(self._repeats):
if trace:
print("k-means trial", trial)
if not self._means or trial > 1:
self._means = self._rng.sample(list(vectors), self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
            # affect the distance comparison)
for means in meanss:
means.sort(key=sum)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference is None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
def _cluster_vectorspace(self, vectors, trace=False):
if self._num_means < len(vectors):
# perform k-means clustering
converged = False
while not converged:
# assign the tokens to clusters based on minimum distance to
# the cluster means
clusters = [[] for m in range(self._num_means)]
for vector in vectors:
index = self.classify_vectorspace(vector)
clusters[index].append(vector)
if trace:
print("iteration")
# for i in range(self._num_means):
# print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
# recalculate cluster means by computing the centroid of each cluster
new_means = list(map(self._centroid, clusters, self._means))
# measure the degree of change from the previous step for convergence
difference = self._sum_distances(self._means, new_means)
if difference < self._max_difference:
converged = True
# remember the new means
self._means = new_means
def classify_vectorspace(self, vector):
# finds the closest cluster centroid
# returns that cluster's index
best_distance = best_index = None
for index in range(len(self._means)):
mean = self._means[index]
dist = self._distance(vector, mean)
if best_distance is None or dist < best_distance:
best_index, best_distance = index, dist
return best_index
def num_clusters(self):
if self._means:
return len(self._means)
else:
return self._num_means
def means(self):
"""
The means used for clustering.
"""
return self._means
def _sum_distances(self, vectors1, vectors2):
difference = 0.0
for u, v in zip(vectors1, vectors2):
difference += self._distance(u, v)
return difference
def _centroid(self, cluster, mean):
if self._avoid_empty_clusters:
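            # count the previous mean as one extra pseudo-observation so the
            # centroid stays defined even if the cluster received no vectors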
centroid = copy.copy(mean)
for vector in cluster:
centroid += vector
return centroid / (1 + len(cluster))
else:
if not len(cluster):
sys.stderr.write("Error: no centroid defined for empty cluster.\n")
sys.stderr.write(
"Try setting argument 'avoid_empty_clusters' to True\n"
)
assert False
centroid = copy.copy(cluster[0])
for vector in cluster[1:]:
centroid += vector
return centroid / len(cluster)
def __repr__(self):
return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)
#################################################################################
def demo():
# example from figure 14.9, page 517, Manning and Schutze
from nltk.cluster import KMeansClusterer, euclidean_distance
vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
means = [[4, 3], [5, 5]]
clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
clusters = clusterer.cluster(vectors, True, trace=True)
print("Clustered:", vectors)
print("As:", clusters)
print("Means:", clusterer.means())
print()
vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
# test k-means using the euclidean distance metric, 2 means and repeat
# clustering 10 times with random seeds
clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
clusters = clusterer.cluster(vectors, True)
print("Clustered:", vectors)
print("As:", clusters)
print("Means:", clusterer.means())
print()
# classify a new vector
vector = numpy.array([3, 3])
print("classify(%s):" % vector, end=" ")
print(clusterer.classify(vector))
print()
if __name__ == "__main__":
demo()
| 36.012931
| 88
| 0.590664
|
e965f9440012cbe0e542023ae99cce885454848e
| 3,742
|
py
|
Python
|
rqt_speech_status/src/rqt_speech_status/rqt_speech_status.py
|
mhri/mhri_tools
|
329bdf3bd4939aab65262b1135d07c7e866a3892
|
[
"Apache-2.0"
] | null | null | null |
rqt_speech_status/src/rqt_speech_status/rqt_speech_status.py
|
mhri/mhri_tools
|
329bdf3bd4939aab65262b1135d07c7e866a3892
|
[
"Apache-2.0"
] | null | null | null |
rqt_speech_status/src/rqt_speech_status/rqt_speech_status.py
|
mhri/mhri_tools
|
329bdf3bd4939aab65262b1135d07c7e866a3892
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#-*- encoding: utf8 -*-
import rospy
import rospkg
import os
import time
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QWidget, QFrame, QListWidgetItem, QVBoxLayout, QHBoxLayout, QLabel, QLayout, QSizePolicy, QSpacerItem, QShortcut
from python_qt_binding.QtGui import QColor, QKeySequence, QPixmap
from python_qt_binding.QtCore import Qt, pyqtSignal, QSize, QObject
from std_msgs.msg import Bool, String, Float64, Empty
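# rospy delivers subscriber callbacks on its own threads, while Qt widgets must
# only be updated from the GUI thread; emitting a Qt signal from a callback
# queues the update so the connected slot runs safely in the GUI thread.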
class SignalRender(QObject):
renderSignal = pyqtSignal([bool])
class SpeechStatusPlugin(Plugin):
def __init__(self, context):
super(SpeechStatusPlugin, self).__init__(context)
self.setObjectName('SpeechStatusPlugin')
self._widget = QWidget()
if context.serial_number() > 1:
self._widget.setWindowTitle(
self._widget.windowTitle() + (' (%d)' % context.serial_number()))
ui_file = os.path.join(rospkg.RosPack().get_path('rqt_speech_status'), 'resource', 'speech_widget.ui')
loadUi(ui_file, self._widget)
context.add_widget(self._widget)
file_live = os.path.join(rospkg.RosPack().get_path('rqt_speech_status'), 'resource', 'microphone.png')
file_mute = os.path.join(rospkg.RosPack().get_path('rqt_speech_status'), 'resource', 'muted.png')
file_silency = os.path.join(rospkg.RosPack().get_path('rqt_speech_status'), 'resource', 'silence-face.png')
        file_speech = os.path.join(rospkg.RosPack().get_path('rqt_speech_status'), 'resource', 'surprise-face.png')
self.img_mute = QPixmap(file_mute).scaled(160, 160)
self.img_live = QPixmap(file_live).scaled(160, 160)
self.img_silency = QPixmap(file_silency).scaled(160, 160)
self.img_speech = QPixmap(file_speech).scaled(160, 160)
self._widget.imgRecognition.setPixmap(self.img_live)
self._widget.imgSilency.setPixmap(self.img_silency)
self.signal_render_recog_status = SignalRender()
self.signal_render_recog_status.renderSignal.connect(self.render_recog_status)
self.signal_render_silency_status = SignalRender()
self.signal_render_silency_status.renderSignal.connect(self.render_silency_status)
rospy.Subscriber('enable_recognition', Bool, self.handle_enable_recognition)
rospy.Subscriber('start_of_speech', Empty, self.handle_start_speech)
rospy.Subscriber('end_of_speech', Empty, self.handle_end_speech)
rospy.Subscriber('silency_detected', Empty, self.handle_silency_detection)
def handle_enable_recognition(self, msg):
if msg.data:
self.signal_render_recog_status.renderSignal.emit(True)
else:
self.signal_render_recog_status.renderSignal.emit(False)
def handle_silency_detection(self, msg):
self.signal_render_silency_status.renderSignal.emit(True)
def handle_start_speech(self, msg):
self.signal_render_silency_status.renderSignal.emit(False)
def handle_end_speech(self, msg):
self.signal_render_silency_status.renderSignal.emit(True)
def render_recog_status(self, status):
if status:
self._widget.imgRecognition.setPixmap(self.img_live)
else:
self._widget.imgRecognition.setPixmap(self.img_mute)
def render_silency_status(self, status):
if status:
self._widget.imgSilency.setPixmap(self.img_silency)
else:
self._widget.imgSilency.setPixmap(self.img_speech)
def shutdown_plugin(self):
pass
def save_settings(self, plugin_settings, instance_settings):
pass
def restore_settings(self, plugin_settings, instance_settings):
pass
| 39.389474
| 152
| 0.719669
|
9863846d419af1635f56f4e76cfdfe972c716bae
| 2,645
|
py
|
Python
|
accelbyte_py_sdk/core/_config_repository.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/core/_config_repository.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/core/_config_repository.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import json
from abc import ABC, abstractmethod
from os import environ
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
class ConfigRepository(ABC):
@abstractmethod
def get_base_url(self) -> str:
pass
@abstractmethod
def get_client_id(self) -> str:
pass
@abstractmethod
def get_client_secret(self) -> str:
pass
@abstractmethod
def get_namespace(self) -> str:
pass
def get_client_auth(self) -> Tuple[str, str]:
return self.get_client_id(), self.get_client_secret()
class MyConfigRepository(ConfigRepository):
def __init__(
self,
base_url: str,
client_id: str,
client_secret: str,
namespace: Optional[str] = None,
) -> None:
self._base_url = base_url
self._client_id = client_id
self._client_secret = client_secret
self._namespace = namespace if namespace else ""
def get_base_url(self) -> str:
return self._base_url
def get_client_id(self) -> str:
return self._client_id
def get_client_secret(self) -> str:
return self._client_secret
def get_namespace(self) -> str:
return self._namespace
class EnvironmentConfigRepository(ConfigRepository):
def __init__(self):
self._base_url = environ["AB_BASE_URL"]
self._client_id = environ["AB_CLIENT_ID"]
self._client_secret = environ["AB_CLIENT_SECRET"]
self._namespace = environ["AB_NAMESPACE"]
def get_base_url(self) -> str:
return self._base_url
def get_client_id(self) -> str:
return self._client_id
def get_client_secret(self) -> str:
return self._client_secret
def get_namespace(self) -> str:
return self._namespace
class JsonFileConfigRepository(ConfigRepository):
def __init__(self, json_file: Union[str, Path]):
if isinstance(json_file, str):
json_file = Path(json_file)
if not json_file.exists():
            raise FileNotFoundError(str(json_file))
json_obj = json.loads(json_file.read_text())
self._base_url = json_obj.get("baseUrl")
self._client_id = json_obj.get("clientId")
self._client_secret = json_obj.get("clientSecret")
self._namespace = json_obj.get("namespace")
def get_base_url(self) -> str:
return self._base_url
def get_client_id(self) -> str:
return self._client_id
def get_client_secret(self) -> str:
return self._client_secret
def get_namespace(self) -> str:
return self._namespace
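# Illustrative usage sketch, not part of the original SDK module: the endpoint,
# credentials, and namespace below are placeholder values chosen for the example.
if __name__ == "__main__":
    config = MyConfigRepository(
        base_url="https://example.accelbyte.io",  # placeholder endpoint
        client_id="my-client-id",                 # placeholder credential
        client_secret="my-client-secret",         # placeholder credential
        namespace="my-namespace",
    )
    client_id, client_secret = config.get_client_auth()
    print(config.get_base_url(), config.get_namespace(), client_id)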
| 25.679612
| 61
| 0.65293
|
c707b480d2182a0c3dfde1347e3e780eeb0c2da2
| 5,802
|
py
|
Python
|
celer/tests/test_enet.py
|
Badr-MOUFAD/celer
|
75e3a4c05a067574a7a75925e239310e97110e54
|
[
"BSD-3-Clause"
] | null | null | null |
celer/tests/test_enet.py
|
Badr-MOUFAD/celer
|
75e3a4c05a067574a7a75925e239310e97110e54
|
[
"BSD-3-Clause"
] | null | null | null |
celer/tests/test_enet.py
|
Badr-MOUFAD/celer
|
75e3a4c05a067574a7a75925e239310e97110e54
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import product
import pytest
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_allclose, assert_array_less, assert_equal)
from sklearn.linear_model import (
enet_path, ElasticNet as sk_ElasticNet, ElasticNetCV as sk_ElasticNetCV)
from celer import Lasso, ElasticNet, celer_path, ElasticNetCV
from celer.utils.testing import build_dataset
def test_raise_errors_l1_ratio():
with np.testing.assert_raises(ValueError):
ElasticNet(l1_ratio=5.)
with np.testing.assert_raises(NotImplementedError):
ElasticNet(l1_ratio=0.)
with np.testing.assert_raises(NotImplementedError):
X, y = build_dataset(n_samples=30, n_features=50)
y = np.sign(y)
celer_path(X, y, 'logreg', l1_ratio=0.5)
@pytest.mark.parametrize("sparse_X", (True, False))
def test_ElasticNet_Lasso_equivalence(sparse_X):
n_samples, n_features = 50, 100
X, y = build_dataset(n_samples, n_features, sparse_X=sparse_X)
alpha_max = norm(X.T@y, ord=np.inf) / n_samples
alpha = alpha_max / 100.
coef_lasso = Lasso(alpha=alpha).fit(X, y).coef_
coef_enet = ElasticNet(alpha=alpha, l1_ratio=1.0).fit(X, y).coef_
assert_allclose(coef_lasso, coef_enet)
np.random.seed(0)
weights = abs(np.random.randn(n_features))
alpha_max = norm(X.T@y / weights, ord=np.inf) / n_samples
alpha = alpha_max / 100.
coef_lasso = Lasso(alpha=alpha, weights=weights).fit(X, y).coef_
coef_enet = ElasticNet(alpha=alpha, l1_ratio=1.0, weights=weights).fit(X, y).coef_
assert_allclose(coef_lasso, coef_enet)
@pytest.mark.parametrize("prune", (0, 1))
def test_sk_enet_path_equivalence(prune):
"""Test that celer_path matches sklearn enet_path."""
n_samples, n_features = 40, 80
X, y = build_dataset(n_samples, n_features, sparse_X=False)
tol = 1e-14
l1_ratio = 0.7
alpha_max = norm(X.T@y, ord=np.inf) / n_samples
params = dict(eps=1e-3, tol=tol, l1_ratio=l1_ratio)
# one alpha
alpha = alpha_max / 100.
alphas1, coefs1, gaps1 = celer_path(
X, y, "lasso", alphas=[alpha],
prune=prune, max_iter=30, **params)
alphas2, coefs2, _ = enet_path(X, y, max_iter=10000,
alphas=[alpha], **params)
assert_equal(alphas1, alphas2)
assert_array_less(gaps1, tol * norm(y) ** 2 / n_samples)
assert_allclose(coefs1, coefs2, rtol=1e-3, atol=1e-4)
# many alphas
n_alphas = 20
alphas1, coefs1, gaps1 = celer_path(
X, y, "lasso", n_alphas=n_alphas,
prune=prune, max_iter=30, **params)
alphas2, coefs2, _ = enet_path(X, y, max_iter=10000,
n_alphas=n_alphas, **params)
assert_allclose(alphas1, alphas2)
assert_array_less(gaps1, tol * norm(y) ** 2 / n_samples)
assert_allclose(coefs1, coefs2, rtol=1e-3, atol=1e-4)
@pytest.mark.parametrize("sparse_X, fit_intercept, positive",
product([False, True], [False, True], [False, True]))
def test_sk_ElasticNet_equivalence(sparse_X, fit_intercept, positive):
n_samples, n_features = 30, 50
X, y = build_dataset(n_samples, n_features, sparse_X=sparse_X)
params = {'l1_ratio': 0.5, 'tol': 1e-14,
'fit_intercept': fit_intercept, 'positive': positive}
reg_celer = ElasticNet(**params).fit(X, y)
reg_sk = sk_ElasticNet(**params).fit(X, y)
assert_allclose(reg_celer.coef_, reg_sk.coef_, rtol=1e-3, atol=1e-3)
if fit_intercept:
assert_allclose(reg_celer.intercept_, reg_sk.intercept_)
@pytest.mark.parametrize("sparse_X", (True, False))
def test_weighted_ElasticNet(sparse_X):
n_samples, n_features = 30, 50
X, y = build_dataset(n_samples, n_features, sparse_X)
np.random.seed(0)
weights = abs(np.random.randn(n_features))
l1_ratio = .7
params = {'max_iter': 10000, 'tol': 1e-14, 'fit_intercept': False}
alpha_max = norm(X.T@y / weights, ord=np.inf) / n_samples
alpha = alpha_max / 100.
reg_enet = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, **params).fit(X, y)
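    # The elastic net fit is recovered from a Lasso on augmented data: stacking
    # sqrt(n_samples * mu) * I under X and zeros under y turns the l2 part of the
    # penalty into extra residuals, and the l1 level is rescaled by
    # n_samples / (n_samples + n_features) to compensate for the larger sample count.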
lmbda = alpha * l1_ratio * n_samples / (n_samples + n_features)
mu = alpha * (1 - l1_ratio)
X_tilde = np.vstack(
(X, np.sqrt(n_samples*mu) * np.eye(n_features)))
y_tilde = np.hstack((y, np.zeros(n_features)))
reg_lasso = Lasso(alpha=lmbda, **params)
reg_lasso.fit(X_tilde, y_tilde)
assert_allclose(reg_enet.coef_, reg_lasso.coef_, rtol=1e-4, atol=1e-3)
@pytest.mark.parametrize("fit_intercept", (False, True))
def test_infinite_weights(fit_intercept):
n_samples, n_features = 30, 100
X, y = build_dataset(n_samples, n_features, sparse_X=False)
np.random.seed(42)
weights = abs(np.random.rand(n_features))
n_inf = n_features // 5
inf_indices = np.random.choice(n_features, size=n_inf, replace=False)
weights[inf_indices] = np.inf
reg = ElasticNet(l1_ratio=0.5, tol=1e-8,
fit_intercept=fit_intercept, weights=weights)
reg.fit(X, y)
assert_equal(reg.coef_[inf_indices], 0)
@pytest.mark.parametrize("fit_intercept", (False, True))
def test_ElasticNetCV(fit_intercept):
n_samples, n_features = 30, 100
X, y = build_dataset(n_samples, n_features, sparse_X=False)
params = dict(l1_ratio=[0.7, 0.8, 0.5], eps=0.05, n_alphas=10, tol=1e-10, cv=2,
fit_intercept=fit_intercept, n_jobs=-1)
clf = ElasticNetCV(**params)
clf.fit(X, y)
clf2 = sk_ElasticNetCV(**params, max_iter=10000)
clf2.fit(X, y)
assert_allclose(
clf.mse_path_, clf2.mse_path_, rtol=1e-3, atol=1e-4)
assert_allclose(clf.alpha_, clf2.alpha_)
assert_allclose(clf.coef_, clf2.coef_, atol=1e-5)
assert_allclose(clf.l1_ratio_, clf2.l1_ratio_, atol=1e-5)
if __name__ == '__main__':
pass
| 32.965909
| 86
| 0.677008
|
debdbe5c50411acf4416b1fdc303d8d6042add40
| 1,670
|
py
|
Python
|
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 778
|
2017-01-27T16:29:17.000Z
|
2022-03-30T03:01:51.000Z
|
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 6,634
|
2017-01-15T22:56:13.000Z
|
2022-03-31T15:03:36.000Z
|
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 224
|
2017-02-07T14:12:49.000Z
|
2022-03-06T23:09:34.000Z
|
# Importing the Kratos Library
import KratosMultiphysics as KM
# CoSimulation imports
import KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools
import KratosMultiphysics.CoSimulationApplication.colors as colors
class CoSimulationConvergenceCriteria:
"""Baseclass for the convergence criteria used for CoSimulation
Checks if convergence was achieved in a (strongly) coupled simulation
"""
def __init__(self, settings):
self.settings = settings
self.settings.RecursivelyValidateAndAssignDefaults(self._GetDefaultParameters())
self.echo_level = self.settings["echo_level"].GetInt()
def Initialize(self):
pass
def Finalize(self):
pass
def InitializeSolutionStep(self):
pass
def FinalizeSolutionStep(self):
pass
def InitializeNonLinearIteration(self):
pass
def FinalizeNonLinearIteration(self):
pass
def IsConverged(self, residual, current_data):
raise NotImplementedError('"IsConverged" has to be implemented in the derived class!')
def PrintInfo(self):
cs_tools.cs_print_info("Convergence Criteria", colors.bold(self._ClassName()))
def Check(self):
cs_tools.cs_print_warning("Convergence Criteria", colors.bold(self._ClassName()), 'does not implement "Check"')
@classmethod
def _ClassName(cls):
return cls.__name__
@classmethod
def _GetDefaultParameters(cls):
return KM.Parameters("""{
"type" : "UNSPECIFIED",
"solver" : "UNSPECIFIED",
"data_name" : "UNSPECIFIED",
"echo_level" : 0
}""")
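# Illustrative sketch only, not part of the original application: a minimal
# derived criterion showing how IsConverged might be implemented. The class
# name and the hard-coded tolerance are assumptions made for this example.
class ExampleMaxAbsResidualConvergenceCriteria(CoSimulationConvergenceCriteria):
    def IsConverged(self, residual, current_data):
        # converged once the largest residual entry (in absolute value) is small
        max_abs_residual = max(abs(r) for r in residual)
        if self.echo_level > 0:
            cs_tools.cs_print_info(self._ClassName(), "max |residual| = {}".format(max_abs_residual))
        return max_abs_residual < 1e-6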
| 29.298246
| 119
| 0.685629
|
3131d948edb26ef2b59ea8b15f45317018ede508
| 233
|
py
|
Python
|
run.py
|
rafael1717y/dashboard_flask
|
5eaa00d89b7d6ae46649c1e6e90c188025f7cc40
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
rafael1717y/dashboard_flask
|
5eaa00d89b7d6ae46649c1e6e90c188025f7cc40
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
rafael1717y/dashboard_flask
|
5eaa00d89b7d6ae46649c1e6e90c188025f7cc40
|
[
"Apache-2.0"
] | null | null | null |
from config import app_config, app_active
from app import create_app
config = app_config[app_active]
print(app_active)
if __name__ == '__main__':
create_app(config)
config.APP.run(host=config.IP_HOST, port=config.PORT_HOST)
| 25.888889
| 62
| 0.781116
|
ad9da0cc0324bf08a7fdc13e3b3c6ddff6e66da9
| 5,292
|
py
|
Python
|
sdk/python/pulumi_aws/securityhub/invite_accepter.py
|
sibuthomasmathew/pulumi-aws
|
6351f2182eb6f693d4e09e4136c385adfa0ab674
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/securityhub/invite_accepter.py
|
sibuthomasmathew/pulumi-aws
|
6351f2182eb6f693d4e09e4136c385adfa0ab674
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/securityhub/invite_accepter.py
|
sibuthomasmathew/pulumi-aws
|
6351f2182eb6f693d4e09e4136c385adfa0ab674
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['InviteAccepter']
class InviteAccepter(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
master_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
> **Note:** AWS accounts can only be associated with a single Security Hub master account. Destroying this resource will disassociate the member account from the master account.
Accepts a Security Hub invitation.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_account = aws.securityhub.Account("exampleAccount")
example_member = aws.securityhub.Member("exampleMember",
account_id="123456789012",
email="example@example.com",
invite=True)
invitee_account = aws.securityhub.Account("inviteeAccount", opts=pulumi.ResourceOptions(provider="aws.invitee"))
invitee_invite_accepter = aws.securityhub.InviteAccepter("inviteeInviteAccepter", master_id=example_member.master_id,
opts=pulumi.ResourceOptions(provider="aws.invitee",
depends_on=[aws_securityhub_account["accepter"]]))
```
## Import
Security Hub invite acceptance can be imported using the account ID, e.g.
```sh
$ pulumi import aws:securityhub/inviteAccepter:InviteAccepter example 123456789012
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] master_id: The account ID of the master Security Hub account whose invitation you're accepting.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if master_id is None and not opts.urn:
raise TypeError("Missing required property 'master_id'")
__props__['master_id'] = master_id
__props__['invitation_id'] = None
super(InviteAccepter, __self__).__init__(
'aws:securityhub/inviteAccepter:InviteAccepter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
invitation_id: Optional[pulumi.Input[str]] = None,
master_id: Optional[pulumi.Input[str]] = None) -> 'InviteAccepter':
"""
Get an existing InviteAccepter resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] invitation_id: The ID of the invitation.
:param pulumi.Input[str] master_id: The account ID of the master Security Hub account whose invitation you're accepting.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["invitation_id"] = invitation_id
__props__["master_id"] = master_id
return InviteAccepter(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="invitationId")
def invitation_id(self) -> pulumi.Output[str]:
"""
The ID of the invitation.
"""
return pulumi.get(self, "invitation_id")
@property
@pulumi.getter(name="masterId")
def master_id(self) -> pulumi.Output[str]:
"""
The account ID of the master Security Hub account whose invitation you're accepting.
"""
return pulumi.get(self, "master_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.023256
| 185
| 0.654573
|
227c62391caf338482f9203fc4200531b943502d
| 9,016
|
py
|
Python
|
reo/tests/test_tiered_energy_rate_and_profiler.py
|
sakshi-testing-reopt/REopt_Lite_API
|
42040ed96998901d411ae87096420ae48cf5c01a
|
[
"BSD-3-Clause"
] | null | null | null |
reo/tests/test_tiered_energy_rate_and_profiler.py
|
sakshi-testing-reopt/REopt_Lite_API
|
42040ed96998901d411ae87096420ae48cf5c01a
|
[
"BSD-3-Clause"
] | null | null | null |
reo/tests/test_tiered_energy_rate_and_profiler.py
|
sakshi-testing-reopt/REopt_Lite_API
|
42040ed96998901d411ae87096420ae48cf5c01a
|
[
"BSD-3-Clause"
] | 1
|
2021-05-02T05:36:37.000Z
|
2021-05-02T05:36:37.000Z
|
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import json
from django.test import TestCase
from tastypie.test import ResourceTestCaseMixin
class TestEnergyTiers(ResourceTestCaseMixin, TestCase):
"""
    Tariff from Florida Power & Light (residential) with a simple tiered energy rate:
"energyratestructure":
[[{"max": 1000, "rate": 0.07531, "adj": 0.0119, "unit": "kWh"}, {"rate": 0.09613, "adj": 0.0119, "unit": "kWh"}]]
Testing with "annual_kwh": 24,000 such that the "year_one_energy_charges" should be:
12,000 kWh * (0.07531 + 0.0119) $/kWh + 12,000 kWh * (0.09613 + 0.0119) $/kWh = $ 2,342.88
"""
def setUp(self):
super(TestEnergyTiers, self).setUp()
self.submit_url = '/v1/job/'
self.results_url = '/v1/job/<run_uuid>/results/'
self.post = {"Scenario": {"webtool_uuid": None, "description": "", "timeout_seconds": 295,
"Site": {
"land_acres": None,
"longitude": -91.7337,
"roof_squarefeet": None,
"latitude": 35.2468,
"address": "",
"PV": {"max_kw": 0.0},
"Generator": {"max_kw": 0.0},
"LoadProfile": {"critical_loads_kw_is_net": False, "year": 2017, "loads_kw_is_net": True, "outage_start_hour": None, "outage_end_hour": None, "monthly_totals_kwh": [], "critical_load_pct": 0.5, "loads_kw": [], "outage_is_major_event": True, "critical_loads_kw": [], "doe_reference_name": "MidriseApartment", "annual_kwh": 24000.0},
"Storage": {"max_kwh": 0.0, "max_kw": 0.0, },
"ElectricTariff": {"add_blended_rates_to_urdb_rate": False, "wholesale_rate_us_dollars_per_kwh": 0.0, "net_metering_limit_kw": 0.0, "interconnection_limit_kw": 100000000.0, "blended_monthly_demand_charges_us_dollars_per_kw": [], "urdb_utility_name": "Florida Power & Light Co.", "urdb_label": "", "wholesale_rate_above_site_load_us_dollars_per_kwh": 0.0, "urdb_rate_name": "RS-1 Residential Service", "urdb_response": {"sector": "Residential", "startdate": 1433116800, "phasewiring": "Single Phase", "name": "RS-1 Residential Service", "enddate": 1440975600, "source": "https://www.fpl.com/rates/pdf/electric-tariff-section8.pdf", "uri": "https://openei.org/apps/USURDB/rate/view/5550d6fd5457a3187e8b4567", "label": "5550d6fd5457a3187e8b4567", "utility": "Florida Power & Light Co.", "eiaid": 6452, "sourceparent": "https://www.fpl.com/rates.html", "energyratestructure": [[{"max": 1000, "rate": 0.07531, "adj": 0.0119, "unit": "kWh"}, {"rate": 0.09613, "adj": 0.0119, "unit": "kWh"}]], "energyweekdayschedule": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "energyweekendschedule": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "fixedmonthlycharge": 7.57, "energycomments": "Adjustment: Environmental, Storm Charge, Conservation, Capacity\\r\\nRate: Base rate and fuel", "supersedes": "53a455f05257a3ff4a8d8cf2", "revisions": [1431339677, 1431340059, 1431528978, 1431529815, 1431530443, 1448395688]}, "blended_annual_demand_charges_us_dollars_per_kw": 0.0, "blended_annual_rates_us_dollars_per_kwh": 0.0, "blended_monthly_rates_us_dollars_per_kwh": []},
"Financial": {"escalation_pct": 0.026, "offtaker_discount_pct": 0.081, "value_of_lost_load_us_dollars_per_kwh": 100.0, "analysis_years": 20, "microgrid_upgrade_cost_pct": 0.3, "offtaker_tax_pct": 0.26, "om_cost_escalation_pct": 0.025},
"Wind": {"max_kw": 0.0}
}
}
}
def get_response(self, data):
initial_post = self.api_client.post(self.submit_url, format='json', data=data)
uuid = json.loads(initial_post.content)['run_uuid']
response = json.loads(self.api_client.get(self.results_url.replace('<run_uuid>', str(uuid))).content)
return response
def test_tiered_energy_rate_and_profiler(self):
expected_year_one_energy_cost = 2342.88
response = self.get_response(self.post)
tariff_out = response['outputs']['Scenario']['Site']['ElectricTariff']
profile_out = response['outputs']['Scenario']['Profile']
messages = response['messages']
try:
self.assertEqual(tariff_out['year_one_energy_cost_us_dollars'], expected_year_one_energy_cost,
"Year one energy bill ({}) does not equal expected cost ({})."
.format(tariff_out['year_one_energy_cost_us_dollars'], expected_year_one_energy_cost))
self.assertGreater(profile_out['pre_setup_scenario_seconds'], 0, "Profiling results failed")
self.assertGreater(profile_out['setup_scenario_seconds'], 0, "Profiling results failed")
self.assertGreater(profile_out['reopt_seconds'], 0, "Profiling results failed")
self.assertGreater(profile_out['reopt_bau_seconds'], 0, "Profiling results failed")
self.assertGreater(profile_out['parse_run_outputs_seconds'], 0, "Profiling results failed")
except Exception as e:
error_msg = None
if hasattr(messages, "error"):
error_msg = messages.error
print("test_tiered_energy_rate API error message: {}".format(error_msg))
print("Run uuid: {}".format(response['outputs']['Scenario']['run_uuid']))
raise e
| 91.070707
| 3,280
| 0.578971
|
5a2a2f242ff5861aa06b7ac8bedbea59a488a8ee
| 4,660
|
py
|
Python
|
back-propagation/bp-with-weightedCE/bp.py
|
shinmao/ML-_-DL
|
9a378454502635d6b29a7bb8e85d849da154a114
|
[
"MIT"
] | 2
|
2021-09-24T02:05:15.000Z
|
2021-09-24T02:05:21.000Z
|
back-propagation/bp-with-weightedCE/bp.py
|
shinmao/ML-_-DL
|
9a378454502635d6b29a7bb8e85d849da154a114
|
[
"MIT"
] | null | null | null |
back-propagation/bp-with-weightedCE/bp.py
|
shinmao/ML-_-DL
|
9a378454502635d6b29a7bb8e85d849da154a114
|
[
"MIT"
] | null | null | null |
import numpy as np
import sklearn.datasets as sd
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
'''
dataset:
X to be input with two features
y to be k = 2
'''
X, y = sd.make_moons(500, noise=0.4)
def plot(X1, X2, y):
color = {0: 'blue', 1: 'red'}
plt.title('dataset')
plt.xlabel('x1')
plt.ylabel('x2')
for i, j, c in zip(X1, X2, y):
plt.scatter(i, j, c = color[c], marker='o', s = 50, edgecolors='k', cmap = plt.cm.Spectral)
plot(X[:, 0], X[:, 1], y)
plt.show()
def nn_plot(X, y, pfunc):
xmin, xmax = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
ymin, ymax = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
steps = 0.01
xx, yy = np.meshgrid(np.arange(xmin, xmax, steps), np.arange(ymin, ymax, steps))
labels = pfunc(np.c_[xx.ravel(), yy.ravel()])
zz = labels.reshape(xx.shape)
plt.contourf(xx, yy, zz, cmap = plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap = plt.cm.Spectral)
plt.show()
class NN(object):
def __init__(self):
self.input= 2
self.output = 1
self.hidden_units = 2
# weight for false negative and false positive
# false negative is more serious than false positive
self.fn_wt = 50
self.fp_wt = 10
np.random.seed(1)
# w1 matrix from input to hidden layer 2*2
self.w1 = np.random.randn(self.input, self.hidden_units)
# w2 matrix from hidden layer 2*1
self.w2 = np.random.randn(self.hidden_units, self.output)
def sigmoid(self, x):
return 1/(1 + np.exp(-x))
# derivation for sigmoid
def deriv_sigmoid(self, x):
return self.sigmoid(x)*(1 - self.sigmoid(x))
def ff(self, x):
# hidden layer
self.hu_sum = np.dot(self.w1.T, x.T)
self.hu_output = self.sigmoid(self.hu_sum)
# output layer
self.output_sum = np.dot(self.w2.T, self.hu_output)
self.output = self.sigmoid(self.output_sum)
#print("w1: ", self.w1)
#print("w2: ", self.w2)
return self.output
def loss(self, pred, y):
m = y.shape[0]
logprob = self.fn_wt * np.multiply(np.log(pred), y) + self.fp_wt * np.multiply(1-y, np.log(1 - pred))
loss = -np.sum(logprob) / m
return loss
def bp(self, X, y):
pred = self.ff(X)
m = X.shape[0]
# update weight in w2
# chain rule:
#output_delta = pred - y
#output_delta = 100*pred + 1900* np.multiply(y, pred) - 2000*y
output_delta = -50 * y + 40 * np.multiply(pred, y) + 10 * pred
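        # for the weighted CE above (fn_wt=50, fp_wt=10), d(loss)/d(pred) times
        # sigmoid'(z) = pred*(1-pred) expands per sample to
        # -50*y*(1-pred) + 10*(1-y)*pred = -50*y + 40*pred*y + 10*pred,
        # which is the closed form used on the line above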
delta_dz = np.multiply(output_delta, self.deriv_sigmoid(self.output_sum))
self.dw2 = (1/m) * np.sum(np.multiply(self.hu_output, delta_dz), axis = 1).reshape(self.w2.shape)
# update weight in w1
hidden_delta = output_delta * self.w2 * self.deriv_sigmoid(self.hu_sum)
self.dw1 = (1/m) * np.dot(X.T, hidden_delta.T)
def update(self, lr = 1.2):
self.w1 = self.w1 - lr * self.dw1
self.w2 = self.w2 - lr * self.dw2
def train(self, X, y, it = 100):
for i in range(it):
y_hat = self.ff(X)
los = self.loss(y_hat, y)
self.bp(X, y)
# update weight w1, w2
self.update()
if i % 10 == 0:
print("loss: ", los)
def pred(self, X):
y_hat = self.ff(X)
y_hat = [1 if x_[0] >= 0.5 else 0 for x_ in y_hat.T]
return np.array(y_hat)
def score(self, pred, y):
# accuracy
        correct_cnt = np.sum(pred == y)
correct0 = 0
correct1 = 0
for i in range(len(y)):
if pred[i] == y[i]:
if pred[i] == 0:
correct0 += 1
else:
correct1 += 1
        return correct_cnt / len(y), correct0, correct1
if __name__ == '__main__':
    # hold out 25% of the samples as a test set (single train/test split)
tr_X, te_X, tr_y, te_y = train_test_split(X, y, test_size = 0.25)
# compare linear model perceptron with neural network
clf = Perceptron(tol = 1e-3, random_state= 0)
clf.fit(X, y)
nn_plot(X, y, lambda x : clf.predict(x))
print("Perceptron's score: ", clf.score(X, y))
model = NN()
model.train(tr_X, tr_y)
pred_y = model.pred(te_X)
print("after train")
score, score0, score1 = model.score(pred_y, te_y)
nn_plot(X, y, lambda x : model.pred(x))
print("predict: ", pred_y)
print("label: ", te_y)
print("NN's score: ", score, score0, score1)
| 33.049645
| 109
| 0.549356
|
eb84dd42d8add97a2ed6c889cff33a520d102298
| 2,031
|
py
|
Python
|
optimism/test/testDofManager.py
|
btalamini/optimism
|
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
|
[
"MIT"
] | null | null | null |
optimism/test/testDofManager.py
|
btalamini/optimism
|
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
|
[
"MIT"
] | 1
|
2022-03-12T00:01:12.000Z
|
2022-03-12T00:01:12.000Z
|
optimism/test/testDofManager.py
|
btalamini/optimism
|
023e1b2a0b137900a7517e4c7ac5056255cf7bbe
|
[
"MIT"
] | 3
|
2021-12-23T19:53:31.000Z
|
2022-03-27T23:12:03.000Z
|
from optimism.JaxConfig import *
from optimism import Mesh
from optimism.test import MeshFixture
class DofManagerTest(MeshFixture.MeshFixture):
def setUp(self):
self.Nx = 4
self.Ny = 5
xRange = [0.,1.]
yRange = [0.,1.]
#self.targetDispGrad = np.array([[0.1, -0.2],[0.4, -0.1]])
self.mesh, _ = self.create_mesh_and_disp(self.Nx, self.Ny, xRange, yRange,
lambda x : 0*x)
Ebcs = [Mesh.EssentialBC(nodeSet='top', field=0),
Mesh.EssentialBC(nodeSet='right', field=1)]
self.nNodes = self.Nx*self.Ny
self.nFields = 2
fieldShape = (self.nNodes, self.nFields)
self.dofManager = Mesh.DofManager(self.mesh, fieldShape, Ebcs)
self.nDof = self.nFields*self.nNodes
U = np.zeros((self.nNodes, self.nFields))
U = U.at[:,1].set(1.0)
U = U.at[self.mesh.nodeSets['top'],0].set(2.0)
self.U = U.at[self.mesh.nodeSets['right'],1].set(3.0)
def test_get_bc_size(self):
# number of dofs from top, field 0
nEbcs = self.Nx
# number of dofs from right, field 1
nEbcs += self.Ny
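        # the corner node shared by the 'top' and 'right' sets is constrained in
        # a different field by each set, so the two counts add without overlap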
self.assertEqual(self.dofManager.get_bc_size(), nEbcs)
def test_get_unknown_size(self):
# number of dofs from top, field 0
nEbcs = self.Nx
# number of dofs from right, field 1
nEbcs += self.Ny
self.assertEqual(self.dofManager.get_unknown_size(), self.nDof - nEbcs)
def test_slice_unknowns_with_dof_indices(self):
Uu = self.dofManager.get_unknown_values(self.U)
Uu_x = self.dofManager.slice_unknowns_with_dof_indices(Uu, (slice(None),0) )
self.assertArrayEqual(Uu_x, np.zeros(self.Nx*(self.Ny-1)))
Uu_y = self.dofManager.slice_unknowns_with_dof_indices(Uu, (slice(None),1) )
self.assertArrayEqual(Uu_y, np.ones(self.Ny*(self.Nx-1)))
if __name__ == '__main__':
MeshFixture.unittest.main()
| 33.85
| 84
| 0.597243
|
50be3049a3933f4303e30fc9be0b3374df11e241
| 118
|
py
|
Python
|
tests/fixtures/internal/subpackage_a/module_1.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 10
|
2020-01-08T10:42:32.000Z
|
2021-07-08T01:58:08.000Z
|
tests/fixtures/internal/subpackage_a/module_1.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 18
|
2015-03-13T11:55:49.000Z
|
2017-06-20T11:56:46.000Z
|
tests/fixtures/internal/subpackage_a/module_1.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 1
|
2019-12-10T18:32:05.000Z
|
2019-12-10T18:32:05.000Z
|
import internal.subpackage_a.subpackage_1.module_i
class Class1(object):
def function(self):
import sys
| 16.857143
| 50
| 0.737288
|
6ba36e4304275749d37e8000ac203d05025f1129
| 6,578
|
py
|
Python
|
furniture/demo_rl.py
|
clvrai/furniture
|
ed4a905e7beb1daf3af7717d979e0ba93421077a
|
[
"MIT"
] | 364
|
2019-11-20T16:28:39.000Z
|
2022-03-28T23:00:19.000Z
|
furniture/demo_rl.py
|
whiteBerryJ/furniture
|
ed4a905e7beb1daf3af7717d979e0ba93421077a
|
[
"MIT"
] | 22
|
2019-11-24T08:27:22.000Z
|
2022-03-31T23:30:25.000Z
|
furniture/demo_rl.py
|
whiteBerryJ/furniture
|
ed4a905e7beb1daf3af7717d979e0ba93421077a
|
[
"MIT"
] | 52
|
2019-11-21T01:01:01.000Z
|
2022-03-02T11:52:53.000Z
|
"""
Demonstration for RL experiments with new environment design.
This script tells you how to use our IKEA furniture assembly environment for RL
experiments and design your own reward function and task.
First, FurnitureExampleEnv shows you how to define a new task.
* `__init__`: sets environment- and task-specific configurations
* `_reset`: initializes variables when an episode is reset
* `_place_objects`: specifies the initialization of furniture parts
* `_get_obs`: includes more information for your task
* `_step`: simulates the environment and returns observation and reward
* `_compute_reward`: designs your own reward function
We describe how to collect trajectories with a random policy in `main`.
Please refer to `furniture/rl` for more advanced RL implementations.
"""
from collections import OrderedDict
import numpy as np
from env.furniture_baxter import FurnitureBaxterEnv
from env.models import furniture_names, background_names, agent_names, furniture_name2id
import env.transform_utils as T
class FurnitureExampleEnv(FurnitureBaxterEnv):
"""
Baxter robot environment with a reaching task as an example.
"""
def __init__(self, config):
"""
Args:
config: general configuration for the environment.
"""
###################################################
# Change @config before creating a MuJoCo scene #
###################################################
# set the furniture to be always the simple blocks
config.furniture_id = furniture_name2id["block"]
# set subtask_ob for getting target object
config.subtask_ob = True
# set environment- and task-specific configurations
config.max_episode_steps = 50
# create a MuJoCo environment based on @config
super().__init__(config)
def _reset(self, furniture_id=None, background=None):
"""
Resets simulation and variables to compute reward.
Args:
furniture_id: ID of the furniture model to reset.
background: name of the background scene to reset.
"""
super()._reset(furniture_id, background)
##########################################
# Set variables needed to compute reward #
##########################################
# pick an object to reach
assert self._subtask_part1 != -1
self._target_body = self._object_names[self._subtask_part1]
def _place_objects(self):
"""
Returns the initial positions and rotations of furniture parts.
Returns:
xpos((float * 3) * n_obj): x,y,z position of the objects in world frame
xquat((float * 4) * n_obj): quaternion of the objects
"""
######################################################
# Specify initial position and rotation of each part #
######################################################
pos_init = {"1_block_l": [-0.3, -0.2, 0.05], "2_block_r": [0.1, -0.2, 0.05]}
quat_init = {"1_block_l": [1, 0, 0, 0], "2_block_r": [1, 0, 0, 0]}
return pos_init, quat_init
def _get_obs(self):
"""
Returns the current observation.
"""
obs = super()._get_obs()
return obs
def _step(self, a):
"""
Takes a simulation step with action @a.
"""
# zero out left arm's action and only use right arm
a[6:12] = 0
# simulate action @a
ob, _, _, _ = super(FurnitureBaxterEnv, self)._step(a)
# compute your own reward
reward, done, info = self._compute_reward(a)
# store some information for log
info["right_arm_action"] = a[0:6]
info["right_gripper_action"] = a[12]
return ob, reward, done, info
def _compute_reward(self, a):
"""
Computes reward for the task.
"""
info = {}
# control penalty
ctrl_penalty = self._ctrl_penalty(a)
# distance-based reward
hand_pos = np.array(self.sim.data.site_xpos[self.right_eef_site_id])
dist = T.l2_dist(hand_pos, self._get_pos(self._target_body))
distance_reward = -self._config.distance_reward * dist
# reward for successful reaching
success_reward = 0
if dist < 0.05:
success_reward = self._config.success_reward
# add up all rewards
reward = ctrl_penalty + distance_reward + success_reward
done = False
# log each component of reward
info["ctrl_penalty"] = ctrl_penalty
info["distance_reward"] = distance_reward
info["success_reward"] = success_reward
return reward, done, info
def main(args):
"""
Shows basic rollout code for simulating the environment.
"""
print("IKEA Furniture Assembly Environment!")
# make environment following arguments
from env import make_env
env = make_env("FurnitureExampleEnv", args)
# define a random policy
def policy_action(ob):
return env.action_space.sample()
# define policy update
def update_policy(rollout):
pass
# run one episode and collect transitions
rollout = []
done = False
observation = env.reset()
ep_length = 0
# update unity rendering
env.render()
while not done:
ep_length += 1
# sample action from policy
action = policy_action(observation)
# simulate environment
observation, reward, done, info = env.step(action)
print(
"{:3d} step: reward ({:5.3f}) action ({})".format(
ep_length, reward, action["default"]
)
)
# update unity rendering
env.render()
# collect transition
rollout.append({"ob": observation, "reward": reward, "done": done})
# update your network using @rollout
update_policy(rollout)
# close the environment instance
env.close()
def argsparser():
"""
Returns argument parser for furniture assembly environment.
"""
import argparse
import config.furniture as furniture_config
from util import str2bool
parser = argparse.ArgumentParser("Demo for IKEA Furniture Assembly Environment")
parser.add_argument("--seed", type=int, default=123)
parser.add_argument("--debug", type=str2bool, default=False)
furniture_config.add_argument(parser)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = argsparser()
main(args)
| 29.630631
| 88
| 0.610368
|
f535105052079ab65987d0168ab0bea5d78c33e4
| 633
|
py
|
Python
|
information_web/info/modules/admin/__init__.py
|
Fulaiy/information
|
8b98072dffda3cbf4d30145f244ac8bf05e9cd96
|
[
"MIT"
] | null | null | null |
information_web/info/modules/admin/__init__.py
|
Fulaiy/information
|
8b98072dffda3cbf4d30145f244ac8bf05e9cd96
|
[
"MIT"
] | 3
|
2021-03-18T20:46:28.000Z
|
2022-01-13T00:49:07.000Z
|
information_web/info/modules/admin/__init__.py
|
Fulaiy/information
|
8b98072dffda3cbf4d30145f244ac8bf05e9cd96
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from flask import redirect
from flask import request
from flask import session
from flask import url_for
admin_blu = Blueprint("admin",__name__,url_prefix='/admin')
from . import views
@admin_blu.before_request
def before_request():
    # Skip the admin check only for requests to the admin login page
    if not request.url.endswith(url_for("admin.admin_index")):
        user_id = session.get("user_id")
        is_admin = session.get("is_admin", False)
        if not (user_id and is_admin):
            # If no user is logged in, or the user is not an admin, redirect straight to the site home page
            return redirect('/')
| 27.521739
| 59
| 0.693523
|
7f894aabb110b70f083bda11960bbc0d19f78c51
| 7,246
|
py
|
Python
|
validation/scaler_validation.py
|
anonymous-profet2/profet
|
902f8ea03a650f078a21722f5fe44ebfc5cfad41
|
[
"MIT"
] | 1
|
2022-02-24T01:35:10.000Z
|
2022-02-24T01:35:10.000Z
|
validation/scaler_validation.py
|
anonymous-profet/profet
|
cbe4991eb2282db8b94f6697c68434217742dbf7
|
[
"MIT"
] | null | null | null |
validation/scaler_validation.py
|
anonymous-profet/profet
|
cbe4991eb2282db8b94f6697c68434217742dbf7
|
[
"MIT"
] | 13
|
2021-10-14T02:24:43.000Z
|
2022-03-14T03:16:17.000Z
|
# profet: validate scaler prediction model
import pandas as pd
import numpy as np
import pickle
import math
from tqdm import tqdm
from sklearn.metrics import mean_absolute_percentage_error, r2_score, mean_squared_error
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--target_instance', type=str)
args = parser.parse_args()
# Global Variables for validation
ANCHOR_INSTANCE = args.target_instance
INSTANCE_LIST = ['g3s.xlarge', 'g4dn.xlarge', 'p2.xlarge', 'p3.2xlarge']
PRED_INSTANCES = [x for x in INSTANCE_LIST if x != ANCHOR_INSTANCE]
ANCHOR_NAME = ANCHOR_INSTANCE[:2]
SCALER_MODE = 'MinMax-2nd'
# Load Dataset from anchor validation
anchor_pred = pickle.load(open(f'./{ANCHOR_INSTANCE}_anchor_median.pickle', 'rb'))
model_list = sorted(list(anchor_pred[0]['model'].value_counts().index))
dataset_list = sorted(list(anchor_pred[0]['dataset'].value_counts().index))
batchsize_list = sorted(list(anchor_pred[0]['batchsize'].value_counts().index))
exp_list = sorted(list(anchor_pred[0]['exp_name'].value_counts().index))
instance_index = {}
for i in range(len(anchor_pred)):
instance_name = [x.split('_pred')[0] for x in list(anchor_pred[i].columns) if '_pred' in x][0]
instance_index[instance_name] = i
# Function-1: inference_batch_dataset
# This function infers a scaled prediction value (0~1) from the polynomial model,
# then converts that scaled value back into a real latency
def inference_batch_dataset(instance_name, model_name, latency_min, latency_max, size_pred, b_or_d):
if b_or_d == 'batchsize':
scaler_size = 256
scaler = pickle.load(open(f"./model/scaler/{SCALER_MODE}/{instance_name[:2]}/scaler_{instance_name}_{model_name}_{scaler_size}dataset_BSpred.pickle", "rb"))
else: # if b_or_d == 'dataset'
scaler_size = 64
scaler = pickle.load(open(f"./model/scaler/{SCALER_MODE}/{instance_name[:2]}/scaler_{instance_name}_{model_name}_{scaler_size}batchsize_DSpred.pickle", "rb"))
scaled_pred = scaler.predict(np.array([size_pred]).reshape(-1, 1))
latency_pred = (scaled_pred * (latency_max - latency_min) + latency_min)
latency_pred = latency_pred[0][0]
return latency_pred
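# Illustrative sketch (not part of the original validation flow): the rescaling step
# above maps a scaled prediction in [0, 1] back onto a latency using the anchor's
# min/max latencies. The numbers below are made up purely to show the arithmetic.
def _rescale_example():
    latency_min, latency_max = 2000.0, 10000.0  # hypothetical latencies
    scaled_pred = 0.25                          # hypothetical scaler output in [0, 1]
    latency_pred = scaled_pred * (latency_max - latency_min) + latency_min
    assert latency_pred == 4000.0               # a quarter of the way from min to max
    return latency_pred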
# Function-2: scaler_validation
# b_or_d: what you want to predict (batchsize or dataset)
# t_or_p: setting of min-max values (true or pred, pred is anchor prediction value)
# 1. setting variables for scaler model-validation
# 2. loop target instances, models, val_sizes, and exps, get values and inference for validation
# (val_size is the size of the opposite condition of b_or_d, so if b_or_d is batchsize, val_size_list is dataset_list)
# 3. convert values into dataframe, and return it
def scaler_validation(anchor_pred, b_or_d, t_or_p):
if b_or_d == 'batchsize':
size_min = 16
size_max = 256
condition_size = 'dataset'
val_size_list = dataset_list
else: # if b_or_d == 'dataset'
size_min = 32
size_max = 256
condition_size = 'batchsize'
val_size_list = batchsize_list
val_result = {}
key_list = ['instance_name', 'model', 'exp_name', 'true',
'scaler_pred', 'anchor_pred', 'size_min', 'size_max', 'size_pred',
'latency_min', 'latency_max', 'b_or_d', 'b_or_d_size', 't_or_p']
for key in key_list:
val_result.setdefault(key, [])
for val_instance in tqdm(PRED_INSTANCES):
for val_model in model_list:
for val_size in val_size_list:
for val_exp in exp_list:
true_pred_df = anchor_pred[instance_index[val_instance]]
cond = ((true_pred_df['model'] == val_model) &
(true_pred_df[condition_size] == val_size) &
(true_pred_df['exp_name'] == val_exp))
val_df = true_pred_df[cond]
if len(val_df) != 5:
continue
latency_min = val_df[val_df[b_or_d] == size_min][f'{val_instance}_{t_or_p}'].values[0]
latency_max = val_df[val_df[b_or_d] == size_max][f'{val_instance}_{t_or_p}'].values[0]
size_pred_list = [x for x in sorted(list(val_df[b_or_d].values)) if x not in [size_min, size_max]]
for size_pred in size_pred_list:
latency_true = val_df[val_df[b_or_d] == size_pred][f'{val_instance}_true'].values[0]
latency_anchor_pred = val_df[val_df[b_or_d] == size_pred][f'{val_instance}_pred'].values[0]
latency_scaler_pred = inference_batch_dataset(
val_instance, val_model, latency_min, latency_max, size_pred, b_or_d)
val_result['instance_name'].append(val_instance)
val_result['model'].append(val_model)
val_result['exp_name'].append(val_exp)
val_result['true'].append(latency_true)
val_result['anchor_pred'].append(latency_anchor_pred)
val_result['scaler_pred'].append(latency_scaler_pred)
val_result['size_min'].append(size_min)
val_result['size_max'].append(size_max)
val_result['size_pred'].append(size_pred)
val_result['latency_min'].append(latency_min)
val_result['latency_max'].append(latency_max)
val_result['b_or_d'].append(b_or_d)
val_result['b_or_d_size'].append(val_size)
val_result['t_or_p'].append(t_or_p)
val_result_df = pd.DataFrame.from_dict(val_result, orient='columns')
return val_result_df
# Function-3: print_error
# print MAPE, R^2, and RMSE
def print_error(true, pred):
print(f'MAPE: {mean_absolute_percentage_error(true, pred) * 100}')
print(f'R2: {r2_score(true, pred)}')
print(f'RMSE: {math.sqrt(mean_squared_error(true, pred))} us')
print(f'RMSE: {math.sqrt(mean_squared_error(true, pred))/1000} ms')
print()
# 02-2-1. Model Validation of Scaler Prediction
# - setting 1: batchsize prediction with true min-max values
# - setting 2: batchsize prediction with anchor predicted min-max values
# - setting 3: dataset prediction with true min-max values
# - setting 4: dataset prediction with anchor predicted min-max values
result_batchsize_true = scaler_validation(anchor_pred, 'batchsize', 'true')
print_error(result_batchsize_true['true'], result_batchsize_true['scaler_pred'])
result_batchsize_pred = scaler_validation(anchor_pred, 'batchsize', 'pred')
print_error(result_batchsize_pred['true'], result_batchsize_pred['scaler_pred'])
result_dataset_true = scaler_validation(anchor_pred, 'dataset', 'true')
print_error(result_dataset_true['true'], result_dataset_true['scaler_pred'])
result_dataset_pred = scaler_validation(anchor_pred, 'dataset', 'pred')
print_error(result_dataset_pred['true'], result_dataset_pred['scaler_pred'])
pickle.dump([result_batchsize_true, result_batchsize_pred, result_dataset_true, result_dataset_pred],
open(f'scaler_result_{ANCHOR_NAME}_{SCALER_MODE}.pickle', 'wb'))
| 51.757143
| 166
| 0.669197
|
f2262ccf67d9f3f1af5e6938a89d8dfbf49386fe
| 1,332
|
py
|
Python
|
src/debugbar/providers/DebugProvider.py
|
girardinsamuel/debugbar
|
0a528be1b5ec06836a253264f1ae620d33eda42e
|
[
"MIT"
] | null | null | null |
src/debugbar/providers/DebugProvider.py
|
girardinsamuel/debugbar
|
0a528be1b5ec06836a253264f1ae620d33eda42e
|
[
"MIT"
] | null | null | null |
src/debugbar/providers/DebugProvider.py
|
girardinsamuel/debugbar
|
0a528be1b5ec06836a253264f1ae620d33eda42e
|
[
"MIT"
] | null | null | null |
from masonite.providers import Provider
from ..Debugger import Debugger
from ..collectors.MessageCollector import MessageCollector
from ..collectors.PythonCollector import PythonCollector
from ..collectors.QueryCollector import QueryCollector
from ..collectors.KeyValueCollector import KeyValueCollector
from ..collectors.MeasureCollector import MeasureCollector
class DebugProvider(Provider):
def __init__(self, application):
self.application = application
def register(self):
debugger = Debugger()
time = MeasureCollector("Time")
time.start_measure('boot')
debugger.add_collector(MessageCollector())
debugger.add_collector(KeyValueCollector("Environment"))
debugger.add_collector(time)
# debugger.add_collector(KeyValueCollector("Request"))
debugger.add_collector(QueryCollector().start_logging('masoniteorm.connection.queries'))
self.application.bind('debugger', debugger)
def boot(self):
self.application.make('debugger').get_collector('Time').stop_measure('boot')
response = self.application.make('response')
if 'text/html' in response.header('Content-Type'):
response.content += self.application.make('debugger').get_renderer('javascript').render()
response.make_headers()
| 41.625
| 101
| 0.728979
|
c2f869ab9bf6c9baa4eed6140b34d9a67e518e80
| 7,155
|
py
|
Python
|
nltk/chat/eliza.py
|
SamuraiT/nltk3-alpha
|
18a1a0ff8697eaeeb5d3c0bc6dad251d5b8fe931
|
[
"Apache-2.0"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
nltk/chat/eliza.py
|
guker/nltk
|
085399ea9d53318ae6e8568909fa55f0d905ad5a
|
[
"Apache-2.0"
] | null | null | null |
nltk/chat/eliza.py
|
guker/nltk
|
085399ea9d53318ae6e8568909fa55f0d905ad5a
|
[
"Apache-2.0"
] | 2
|
2019-10-28T01:33:22.000Z
|
2019-10-30T06:43:43.000Z
|
# Natural Language Toolkit: Eliza
#
# Copyright (C) 2001-2014 NLTK Project
# Authors: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
# Based on an Eliza implementation by Joe Strout <joe@strout.net>,
# Jeff Epler <jepler@inetnebr.com> and Jez Higgins <mailto:jez@jezuk.co.uk>.
# a translation table used to convert things you say into things the
# computer says back, e.g. "I am" --> "you are"
from __future__ import print_function
from nltk.chat.util import Chat, reflections
# a table of response pairs, where each pair consists of a
# regular expression, and a list of possible responses,
# with group-macros labelled as %1, %2.
pairs = (
(r'I need (.*)',
( "Why do you need %1?",
"Would it really help you to get %1?",
"Are you sure you need %1?")),
(r'Why don\'t you (.*)',
( "Do you really think I don't %1?",
"Perhaps eventually I will %1.",
"Do you really want me to %1?")),
(r'Why can\'t I (.*)',
( "Do you think you should be able to %1?",
"If you could %1, what would you do?",
"I don't know -- why can't you %1?",
"Have you really tried?")),
(r'I can\'t (.*)',
( "How do you know you can't %1?",
"Perhaps you could %1 if you tried.",
"What would it take for you to %1?")),
(r'I am (.*)',
( "Did you come to me because you are %1?",
"How long have you been %1?",
"How do you feel about being %1?")),
(r'I\'m (.*)',
( "How does being %1 make you feel?",
"Do you enjoy being %1?",
"Why do you tell me you're %1?",
"Why do you think you're %1?")),
(r'Are you (.*)',
( "Why does it matter whether I am %1?",
"Would you prefer it if I were not %1?",
"Perhaps you believe I am %1.",
"I may be %1 -- what do you think?")),
(r'What (.*)',
( "Why do you ask?",
"How would an answer to that help you?",
"What do you think?")),
(r'How (.*)',
( "How do you suppose?",
"Perhaps you can answer your own question.",
"What is it you're really asking?")),
(r'Because (.*)',
( "Is that the real reason?",
"What other reasons come to mind?",
"Does that reason apply to anything else?",
"If %1, what else must be true?")),
(r'(.*) sorry (.*)',
( "There are many times when no apology is needed.",
"What feelings do you have when you apologize?")),
(r'Hello(.*)',
( "Hello... I'm glad you could drop by today.",
"Hi there... how are you today?",
"Hello, how are you feeling today?")),
(r'I think (.*)',
( "Do you doubt %1?",
"Do you really think so?",
"But you're not sure %1?")),
(r'(.*) friend (.*)',
( "Tell me more about your friends.",
"When you think of a friend, what comes to mind?",
"Why don't you tell me about a childhood friend?")),
(r'Yes',
( "You seem quite sure.",
"OK, but can you elaborate a bit?")),
(r'(.*) computer(.*)',
( "Are you really talking about me?",
"Does it seem strange to talk to a computer?",
"How do computers make you feel?",
"Do you feel threatened by computers?")),
(r'Is it (.*)',
( "Do you think it is %1?",
"Perhaps it's %1 -- what do you think?",
"If it were %1, what would you do?",
"It could well be that %1.")),
(r'It is (.*)',
( "You seem very certain.",
"If I told you that it probably isn't %1, what would you feel?")),
(r'Can you (.*)',
( "What makes you think I can't %1?",
"If I could %1, then what?",
"Why do you ask if I can %1?")),
(r'Can I (.*)',
( "Perhaps you don't want to %1.",
"Do you want to be able to %1?",
"If you could %1, would you?")),
(r'You are (.*)',
( "Why do you think I am %1?",
"Does it please you to think that I'm %1?",
"Perhaps you would like me to be %1.",
"Perhaps you're really talking about yourself?")),
(r'You\'re (.*)',
( "Why do you say I am %1?",
"Why do you think I am %1?",
"Are we talking about you, or me?")),
(r'I don\'t (.*)',
( "Don't you really %1?",
"Why don't you %1?",
"Do you want to %1?")),
(r'I feel (.*)',
( "Good, tell me more about these feelings.",
"Do you often feel %1?",
"When do you usually feel %1?",
"When you feel %1, what do you do?")),
(r'I have (.*)',
( "Why do you tell me that you've %1?",
"Have you really %1?",
"Now that you have %1, what will you do next?")),
(r'I would (.*)',
( "Could you explain why you would %1?",
"Why would you %1?",
"Who else knows that you would %1?")),
(r'Is there (.*)',
( "Do you think there is %1?",
"It's likely that there is %1.",
"Would you like there to be %1?")),
(r'My (.*)',
( "I see, your %1.",
"Why do you say that your %1?",
"When your %1, how do you feel?")),
(r'You (.*)',
( "We should be discussing you, not me.",
"Why do you say that about me?",
"Why do you care whether I %1?")),
(r'Why (.*)',
( "Why don't you tell me the reason why %1?",
"Why do you think %1?" )),
(r'I want (.*)',
( "What would it mean to you if you got %1?",
"Why do you want %1?",
"What would you do if you got %1?",
"If you got %1, then what would you do?")),
(r'(.*) mother(.*)',
( "Tell me more about your mother.",
"What was your relationship with your mother like?",
"How do you feel about your mother?",
"How does this relate to your feelings today?",
"Good family relations are important.")),
(r'(.*) father(.*)',
( "Tell me more about your father.",
"How did your father make you feel?",
"How do you feel about your father?",
"Does your relationship with your father relate to your feelings today?",
"Do you have trouble showing affection with your family?")),
(r'(.*) child(.*)',
( "Did you have close friends as a child?",
"What is your favorite childhood memory?",
"Do you remember any dreams or nightmares from childhood?",
"Did the other children sometimes tease you?",
"How do you think your childhood experiences relate to your feelings today?")),
(r'(.*)\?',
( "Why do you ask that?",
"Please consider whether you can answer your own question.",
"Perhaps the answer lies within yourself?",
"Why don't you tell me?")),
(r'quit',
( "Thank you for talking with me.",
"Good-bye.",
"Thank you, that will be $150. Have a good day!")),
(r'(.*)',
( "Please tell me more.",
"Let's change focus a bit... Tell me about your family.",
"Can you elaborate on that?",
"Why do you say that %1?",
"I see.",
"Very interesting.",
"%1.",
"I see. And what does that tell you?",
"How does that make you feel?",
"How do you feel when you say that?"))
)
eliza_chatbot = Chat(pairs, reflections)
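# Illustrative sketch (not part of the original module): how one (pattern, responses)
# pair from the table above is used. pairs[4] is assumed to be the r'I am (.*)' entry;
# the text captured by (.*) replaces the %1 macro in whichever response is picked.
# This is a simplified view: the real Chat.respond() also runs the captured text
# through the reflections table before substituting it.
def _pair_substitution_example():
    import random
    import re
    pattern, responses = pairs[4]  # (r'I am (.*)', ...)
    match = re.match(pattern, "I am tired", re.IGNORECASE)
    captured = match.group(1)  # "tired"
    return random.choice(responses).replace("%1", captured)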
def eliza_chat():
print("Therapist\n---------")
print("Talk to the program by typing in plain English, using normal upper-")
print('and lower-case letters and punctuation. Enter "quit" when done.')
print('='*72)
print("Hello. How are you feeling today?")
eliza_chatbot.converse()
def demo():
eliza_chat()
if __name__ == "__main__":
demo()
| 29.204082
| 83
| 0.581831
|
c67dad9ff5cc6a872229f9b752825a6115c12720
| 1,647
|
py
|
Python
|
app/app.py
|
amazingguni/flask-ddd
|
8a789c1db96a8ad4079fdd2957f64b7af710b5b3
|
[
"Apache-2.0"
] | 1
|
2022-02-06T14:00:02.000Z
|
2022-02-06T14:00:02.000Z
|
app/app.py
|
amazingguni/flask-ddd
|
8a789c1db96a8ad4079fdd2957f64b7af710b5b3
|
[
"Apache-2.0"
] | null | null | null |
app/app.py
|
amazingguni/flask-ddd
|
8a789c1db96a8ad4079fdd2957f64b7af710b5b3
|
[
"Apache-2.0"
] | null | null | null |
from contextlib import contextmanager
import os
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from flask_login import LoginManager
from .common.event.event_dispatcher import EventDispatcher
db = SQLAlchemy()
login_manager = LoginManager()
migrate = Migrate(db=db)
dispatcher = EventDispatcher()
def create_app(config_name='config.development.DevelopmentConfig'):
# create and configure the app
app = Flask(__name__)
app.config.from_object(
os.environ.get('APP_SETTINGS', config_name))
CORS(app)
db.init_app(app)
migrate.init_app(app)
login_manager.init_app(app)
@app.route('/')
# pylint: disable=unused-variable
def home():
return render_template('home.html.j2')
from .user import views as user_views
from .catalog import views as catalog_views
from .admin import views as admin_views
from .cart import views as cart_views
from .order import views as order_views
views = [user_views, catalog_views, admin_views, cart_views, order_views]
register_blueprints(app, views)
from .containers import Container
session = db.session
container = Container(app=app, session=session)
app.container = container
with app.app_context():
container.wire(modules=views)
return app
def register_blueprints(app, views):
for view in views:
app.register_blueprint(view.bp)
@login_manager.user_loader
def load_user(user_id):
from .user.domain.user import User
return User.query.filter(User.id == user_id).first()
| 26.564516
| 77
| 0.738312
|
8e1f64f4346da9e2a5303306ca5027b3e8f0acbf
| 2,322
|
py
|
Python
|
webapp/models/mlva.py
|
fasemoreakinyemi/coxbase_webapp
|
6217c095eff02af7356116d1eade316cc6707ca1
|
[
"0BSD"
] | null | null | null |
webapp/models/mlva.py
|
fasemoreakinyemi/coxbase_webapp
|
6217c095eff02af7356116d1eade316cc6707ca1
|
[
"0BSD"
] | null | null | null |
webapp/models/mlva.py
|
fasemoreakinyemi/coxbase_webapp
|
6217c095eff02af7356116d1eade316cc6707ca1
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, DECIMAL, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.dialects.mysql import YEAR
Base = declarative_base()
class plasmid(Base):
__tablename__ = "Plasmid"
ID = Column(Integer, nullable=False, primary_key=True)
PlasmidType = Column(String(15))
class mst(Base):
__tablename__ = "MST"
ID = Column(Integer, nullable=False, primary_key=True)
MSTType = Column(String(30))
class TypingMeta(Base):
__tablename__ = "TypingMetadata"
ID = Column(Integer, nullable=False, primary_key=True)
ClusterType = Column(String(5))
Genotype = Column(String(10))
class SampleMetadata(Base):
__tablename__ = "SampleMetadata"
ID = Column(Integer, nullable=False, autoincrement=True, primary_key=True)
SampleStrain = Column(String(50))
SampleYear = Column(YEAR(4))
SampleHost = Column(String(30))
SampleSource = Column(String(100))
SampleCountry = Column(String(30))
CountryProvince = Column(String(100))
Latitude = Column(DECIMAL(10,8))
Longitude = Column(DECIMAL(11,8))
PubmedID = Column(Integer())
PlasmidID = Column(Integer, ForeignKey("Plasmid.ID"), nullable=False)
MSTID = Column(Integer, ForeignKey("MST.ID"), nullable=False)
TypingID = Column(Integer, ForeignKey("TypingMetadata.ID"), nullable=False)
MLVAID = Column(Integer, ForeignKey("MLVAProfile.ID"), nullable=False)
class mlvaProfile(Base):
__tablename__ = "MLVAProfile"
ID = Column(Integer, nullable=False, primary_key=True)
PanelType = Column(String(30), nullable=False)
ms01 = Column(Float(10,2))
ms03 = Column(Float(10,2))
ms20 = Column(Float(10,2))
ms21 = Column(Float(10,2))
ms22 = Column(Float(10,2))
ms23 = Column(Float(10,2))
ms24 = Column(Float(10,2))
ms26 = Column(Float(10,2))
ms27 = Column(Float(10,2))
ms28 = Column(Float(10,2))
ms30 = Column(Float(10,2))
ms31 = Column(Float(10,2))
ms33 = Column(Float(10,2))
ms34 = Column(Float(10,2))
engine = create_engine('mysql+pymysql://burnetii:crazyburnetii@localhost/MLVA')
Base.metadata.create_all(engine)
| 33.171429
| 79
| 0.702842
|
150b3b7d9330c20b9b59fe09fa5e329bdd34356b
| 4,014
|
py
|
Python
|
airflow/operators/http_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 5
|
2020-07-17T07:33:58.000Z
|
2022-03-02T06:23:47.000Z
|
airflow/operators/http_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 20
|
2017-04-18T19:47:46.000Z
|
2020-01-13T04:19:24.000Z
|
airflow/operators/http_operator.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 12
|
2020-01-09T14:02:39.000Z
|
2022-01-24T07:18:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Callable, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SimpleHttpOperator(BaseOperator):
"""
Calls an endpoint on an HTTP system to execute an action
:param http_conn_id: The connection to run the operator against
:type http_conn_id: str
:param endpoint: The relative part of the full url. (templated)
:type endpoint: str
:param method: The HTTP method to use, default = "POST"
:type method: str
:param data: The data to pass. POST-data in POST/PUT and params
in the URL for a GET request. (templated)
:type data: For POST/PUT, depends on the content-type parameter,
for GET a dictionary of key/value string pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
:param log_response: Log the response (default: False)
:type log_response: bool
"""
template_fields = ['endpoint', 'data', 'headers', ]
template_ext = ()
ui_color = '#f4a460'
@apply_defaults
def __init__(self,
endpoint: str,
method: str = 'POST',
data: Any = None,
headers: Optional[Dict[str, str]] = None,
response_check: Optional[Callable] = None,
extra_options: Optional[Dict[str, Any]] = None,
http_conn_id: str = 'http_default',
log_response: bool = False,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.method = method
self.endpoint = endpoint
self.headers = headers or {}
self.data = data or {}
self.response_check = response_check
self.extra_options = extra_options or {}
self.log_response = log_response
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context):
http = HttpHook(self.method, http_conn_id=self.http_conn_id)
self.log.info("Calling HTTP method")
response = http.run(self.endpoint,
self.data,
self.headers,
self.extra_options)
if self.log_response:
self.log.info(response.text)
if self.response_check:
if not self.response_check(response):
raise AirflowException("Response check returned False.")
return response.text
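# Illustrative usage sketch (not part of the original module): one way this operator
# might be wired into a DAG. The task_id, endpoint and response_check below are
# assumptions made up for the example; only the parameter names come from the
# operator defined above.
def _example_http_task(dag):
    return SimpleHttpOperator(
        task_id='ping_service',             # hypothetical task name
        http_conn_id='http_default',
        endpoint='health',                  # hypothetical relative endpoint
        method='GET',
        response_check=lambda response: response.status_code == 200,
        log_response=True,
        dag=dag,
    )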
| 41.8125
| 105
| 0.665421
|
48641bfd24f313dd595fe1d1fe422b23b780a2fb
| 16,965
|
py
|
Python
|
src/reference_model/poseidon_python/poseidon_ff.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | null | null | null |
src/reference_model/poseidon_python/poseidon_ff.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | null | null | null |
src/reference_model/poseidon_python/poseidon_ff.py
|
datenlord/poseidon-spinalhdl
|
1e4ce3f0ae06d6c264e103629dd28a75c119a548
|
[
"MIT"
] | 1
|
2021-12-18T08:44:10.000Z
|
2021-12-18T08:44:10.000Z
|
import random
import basic
import finite_field as ff
import constants
import os
# poseidon hash function
def transform_array(array):
"""transform array of int into array of PrimeField"""
array_ff = []
for element in array:
array_ff.append(ff.PrimeField(element))
return array_ff
def transform_matrix(matrix):
"""transform int matrix into PrimeField matrix"""
matrix_ff = []
for array in matrix:
matrix_ff.append(transform_array(array))
return matrix_ff
def s_box_ff(op1):
op1.expassign(5)
return op1
def s_boxes_ff(array):
tmp = []
for element in array:
element.expassign(5)
tmp.append(element)
return tmp
def add_round_constants_ff(states_ff, constants_ff):
for i in range(len(states_ff)):
states_ff[i].addassign(constants_ff[i])
return states_ff
def mds_mixing_ff(state_ff, mds_matrix_ff):
new_state_ff = []
for i in range(len(state_ff)):
tmp = ff.PrimeField(0)
for j in range(len(state_ff)):
tmp.addassign(state_ff[j].mul(mds_matrix_ff[j][i]))
new_state_ff.append(tmp)
return new_state_ff
def sparse_mds_mixing(state_ff, w_vec, v_vec):
new_state_ff = []
temp = ff.PrimeField(0)
for i in range(len(state_ff)):
temp.addassign(state_ff[i].mul(w_vec[i]))
new_state_ff.append(temp)
for i in range(len(state_ff) - 1):
temp = state_ff[0].mul(v_vec[i])
temp.addassign(state_ff[i + 1])
new_state_ff.append(temp)
return new_state_ff
def poseidon_hash(preimage):
t = len(preimage) + 1
if t not in basic.T_RANGE:
print("error: the length of preimage is incorrect")
exit()
roundf = basic.ROUNDFULL
roundp = basic.ROUNDPARTIAL[t]
round_constants_ff = transform_array(
constants.generate_constants(t, roundf, roundp)
)
mds_matrix_ff = transform_matrix(basic.PrimeFieldOps.get_mds_matrix(t))
state_ff = transform_array(basic.PrimeFieldOps.init_state(preimage))
for i in range(int(roundf / 2)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2), int(roundf / 2 + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff[0] = s_box_ff(state_ff[0])
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2 + roundp), int(roundf + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
return state_ff[1].fromMont()
def poseidon_hash_ff(state_ff):
t = len(state_ff)
if t not in basic.T_RANGE:
print("error: the length of preimage is incorrect")
exit()
roundf = basic.ROUNDFULL
roundp = basic.ROUNDPARTIAL[t]
round_constants_ff = transform_array(
constants.generate_constants(t, roundf, roundp)
)
mds_matrix_ff = transform_matrix(basic.PrimeFieldOps.get_mds_matrix(t))
for i in range(int(roundf / 2)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2), int(roundf / 2 + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff[0] = s_box_ff(state_ff[0])
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2 + roundp), int(roundf + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * t : (i + 1) * t]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
return state_ff[1]
def poseidon_hash_opt(preimage):
"""optimized poseidon hash function"""
t = len(preimage) + 1
assert t in basic.T_RANGE, "the length of preimage is out of range"
roundf = basic.ROUNDFULL
roundp = basic.ROUNDPARTIAL[t]
state_ff = transform_array(basic.PrimeFieldOps.init_state(preimage))
# read constants used in optimized poseidon from files
cmp_constants, pre_sparse, w_hat, v_rest = read_constants_files(
t, "./poseidon_constants"
)
cmp_constants_ff = transform_array(cmp_constants)
pre_sparse_ff = transform_matrix(pre_sparse)
sparse_w_ff = transform_matrix(w_hat)
sparse_v_ff = transform_matrix(v_rest)
mds_matrix_ff = transform_matrix(basic.PrimeFieldOps.get_mds_matrix(t))
# add pre round constants
state_ff = add_round_constants_ff(state_ff, cmp_constants_ff[0:t])
# implement the first half full round
Rf = int(roundf / 2)
for i in range(Rf):
state_ff = s_boxes_ff(state_ff)
state_ff = add_round_constants_ff(
state_ff, cmp_constants_ff[(i + 1) * t : (i + 2) * t]
)
if i == (Rf - 1):
state_ff = mds_mixing_ff(state_ff, pre_sparse_ff)
else:
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
# implement optimized partial round
for i in range(roundp):
state_ff[0] = s_box_ff(state_ff[0])
state_ff[0].addassign(cmp_constants_ff[(Rf + 1) * t + i])
state_ff = sparse_mds_mixing(state_ff, sparse_w_ff[i], sparse_v_ff[i])
# implement the second half full round
base = (Rf + 1) * t + roundp # compressed round constants base index
for i in range(Rf):
state_ff = s_boxes_ff(state_ff)
if i < (Rf - 1):
state_ff = add_round_constants_ff(
state_ff, cmp_constants_ff[base : base + t]
)
base = base + t
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
return state_ff[1].fromMont()
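# Illustrative sketch (not part of the original module): the optimized path is expected
# to reproduce the reference hash. This check assumes the pre-generated files under
# ./poseidon_constants are present, as required by read_constants_files below.
def _check_opt_matches_reference(preimage=(0, 1, 2, 3)):
    return poseidon_hash(list(preimage)) == poseidon_hash_opt(list(preimage))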
def read_pre_sparse_matrix(arity, path):
file_path = path + f"/pre_sparse_matrix/pre_sparse_matrix_{arity}.txt"
file = open(file_path, mode="r")
pre_sparse_elements = file.readlines()
pre_sparse_matrix = []
for i in range(arity):
pre_sparse_vec = []
for j in range(arity):
pre_sparse_vec.append(int(pre_sparse_elements[i * arity + j], 16))
pre_sparse_matrix.append(pre_sparse_vec)
file.close()
return pre_sparse_matrix
def read_sparse_matrix(arity, path):
file_path = path + f"/sparse_matrix/sparse_matrix_{arity}.txt"
file = open(file_path, mode="r")
sparse_matrix_elements = file.readlines()
    # w_hat is the first column of the sparse matrix. It will be multiplied directly (scalar product) with a row of state elements.
w_hat = []
# v_rest contains all but the first (already included in `w_hat`).
v_rest = []
for i in range(basic.ROUNDPARTIAL[arity]):
w_vec = []
v_vec = []
for j in range(2 * arity - 1):
element = int(sparse_matrix_elements[i * (2 * arity - 1) + j], 16)
if j < arity:
w_vec.append(element)
else:
v_vec.append(element)
w_hat.append(w_vec)
v_rest.append(v_vec)
file.close()
return w_hat, v_rest
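# Illustrative sketch of the layout parsed above, with made-up numbers and arity t = 3:
# each partial round stores 2*t - 1 = 5 values, of which the first t feed w_hat and
# the remaining t - 1 feed v_rest.
def _sparse_row_split_example():
    t = 3
    row = [11, 12, 13, 21, 31]  # hypothetical values for one partial round
    w_vec, v_vec = row[:t], row[t:]
    assert w_vec == [11, 12, 13] and v_vec == [21, 31]
    return w_vec, v_vec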
def read_constants_files(arity, path):
"""read constants used in optimized poseidon"""
assert arity in basic.T_RANGE, "illegal parameter"
# read compressed round constants from file
file_path = (
path + f"/compressed_round_constants/compressed_round_constants_{arity}.txt"
)
file = open(file_path, mode="r")
cmp_round_constants = file.readlines()
for i in range(len(cmp_round_constants)):
cmp_round_constants[i] = int(cmp_round_constants[i], 16)
file.close()
# read pre sparse matrix from file
pre_sparse_matrix = read_pre_sparse_matrix(arity, path)
# read sparse matrix from file
w_hat, v_rest = read_sparse_matrix(arity, path)
return cmp_round_constants, pre_sparse_matrix, w_hat, v_rest
def print_random_cases(cases_num, state_size):
"""generate random test case and print"""
for i in range(cases_num):
state_elements = [ff.PrimeField(pow(2, state_size - 1) - 1)]
for index in range(state_size - 1):
# state_elements.append(ff.PrimeField(random.randint(0, basic.P-1)))
state_elements.append(ff.PrimeField(index))
print(f"random input {i}:")
for element in state_elements:
print(hex(element.value))
ref_output = poseidon_hash_ff(state_elements)
print(f"reference output {i}:")
print(f"ref_outputs[{i}] = 255'h", hex(ref_output.fromMont()))
def write_random_cases(cases_num, state_size):
"""generate random test case and write to file"""
# change directory and open target file
os.mkdir("random_test_cases")
os.chdir("random_test_cases")
input_file = open(f"arity_{state_size}_inputs.txt", "w")
output_file = open(f"arity_{state_size}_outputs.txt", "w")
if state_size not in basic.T_RANGE:
print("error: the length of preimage is incorrect")
exit()
roundf = basic.ROUNDFULL
roundp = basic.ROUNDPARTIAL[state_size]
round_constants_ff = transform_array(
constants.generate_constants(state_size, roundf, roundp)
)
mds_matrix_ff = transform_matrix(basic.PrimeFieldOps.get_mds_matrix(state_size))
for case_index in range(cases_num):
state_ff = []
print(f"random input {case_index}")
for index in range(state_size):
state_ff.append(ff.PrimeField(random.randint(0, basic.P - 1)))
input_str = "{:#066X}".format(state_ff[index].value)
input_file.write(input_str[2:] + "\n")
for i in range(int(roundf / 2)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * state_size : (i + 1) * state_size]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2), int(roundf / 2 + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * state_size : (i + 1) * state_size]
)
state_ff[0] = s_box_ff(state_ff[0])
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
for i in range(int(roundf / 2 + roundp), int(roundf + roundp)):
state_ff = add_round_constants_ff(
state_ff, round_constants_ff[i * state_size : (i + 1) * state_size]
)
state_ff = s_boxes_ff(state_ff)
state_ff = mds_mixing_ff(state_ff, mds_matrix_ff)
print(f"reference output {case_index}")
output_str = "{:#066X}".format(state_ff[1].value)
output_file.write(output_str[2:] + "\n")
input_file.close()
output_file.close()
def output_mds_matrix_ff():
"""get all mds_matrix in Montgomery domain and write to files"""
os.mkdir("mds_matrixs_ff")
os.chdir("mds_matrixs_ff")
for t in basic.T_RANGE:
# get mds matrix and transform to Montgomery domain
mds_matrix = basic.PrimeFieldOps.get_mds_matrix(t)
mds_matrix_ff = []
for element in mds_matrix:
mds_matrix_ff.append(transform_array(element))
# write data to files
fileobject = open("mds_matrix_ff_{}.txt".format(t), "w")
for mds_vec in mds_matrix_ff:
for element in mds_vec:
output_str = hex(element.value)
fileobject.write(output_str[2:] + "\n")
fileobject.close()
def output_round_constants_ff():
"""get all round constants in Montgomery domain and write to files"""
os.mkdir("round_constants_ff")
os.chdir("round_constants_ff")
for t in basic.T_RANGE:
fileobject = open("round_constants_ff_{}.txt".format(t), "w")
round_constants = transform_array(
constants.generate_constants(t, basic.ROUNDFULL, basic.ROUNDPARTIAL[t])
)
for element in round_constants:
output_str = hex(element.value)
fileobject.write(output_str[2:] + "\n")
fileobject.close()
def output_pre_sparse_matrix_ff():
"""get pre sparse matrix in Montgomery domain and write to files"""
os.mkdir("./poseidon_constants/pre_sparse_matrix_ff")
os.chdir("./poseidon_constants/pre_sparse_matrix_ff")
for t in basic.T_RANGE:
src_file = open(f"../pre_sparse_matrix/pre_sparse_matrix_{t}.txt", "r")
dest_file = open(f"pre_sparse_matrix_ff_{t}.txt", "w")
pre_sparse_elements = src_file.readlines()
for element in pre_sparse_elements:
element = ff.PrimeField(int(element, 16))
output_str = hex(element.value)
dest_file.write(output_str[2:] + "\n")
dest_file.close()
def output_sparse_matrix_ff():
os.mkdir("./poseidon_constants/sparse_matrix_ff")
os.chdir("./poseidon_constants/sparse_matrix_ff")
for t in basic.T_RANGE:
src_file = open(f"../sparse_matrix/sparse_matrix_{t}.txt", "r")
sparse_elements = src_file.readlines()
if t <= 5:
dest_file = open(f"sparse_matrix_ff_{t}.txt", "w")
for i in range(basic.ROUNDPARTIAL[t]):
element = ff.PrimeField(int(sparse_elements[i * (2 * t - 1)], 16))
output_str = hex(element.value)
dest_file.write(output_str[2:] + "\n")
for j in range(t, 2 * t - 1):
element = ff.PrimeField(
int(sparse_elements[i * (2 * t - 1) + j], 16)
)
output_str = hex(element.value)
dest_file.write(output_str[2:] + "\n")
for j in range(1, t):
element = ff.PrimeField(
int(sparse_elements[i * (2 * t - 1) + j], 16)
)
output_str = hex(element.value)
dest_file.write(output_str[2:] + "\n")
dest_file.close()
else:
what_file = open(f"sparse_matrix_column_ff_{t}.txt", "w")
vrest_file = open(f"sparse_matrix_row_ff_{t}.txt", "w")
vec_len = 2 * t - 1
for i in range(len(sparse_elements)):
element = ff.PrimeField(int(sparse_elements[i], 16))
output_str = hex(element.value)
if i % vec_len == 0:
what_file.write(output_str[2:] + "\n")
vrest_file.write(output_str[2:] + "\n")
elif i % vec_len < t:
what_file.write(output_str[2:] + "\n")
else:
vrest_file.write(output_str[2:] + "\n")
what_file.close()
vrest_file.close()
def output_compressed_round_constants():
"""get compressed round constants in Montgomery domain and write to files"""
os.mkdir("./poseidon_constants/compressed_round_constants_ff")
os.chdir("./poseidon_constants/compressed_round_constants_ff")
for t in basic.T_RANGE:
Rf = int((basic.ROUNDFULL) / 2)
roundp = basic.ROUNDPARTIAL[t]
src_file = open(
f"../compressed_round_constants/compressed_round_constants_{t}.txt", "r"
)
constants_elements = src_file.readlines()
dest_file1 = open(f"pre_round_constants_ff_{t}.txt", "w")
dest_file2 = open(f"full_round_constants_ff_{t}.txt", "w")
dest_file3 = open(f"partial_round_constants_ff_{t}.txt", "w")
for i in range(len(constants_elements)):
element = ff.PrimeField(int(constants_elements[i], 16))
output_str = hex(element.value)
if i < t:
dest_file1.write(output_str[2:] + "\n")
elif i < (Rf + 1) * t:
dest_file2.write(output_str[2:] + "\n")
elif i < ((Rf + 1) * t + roundp):
dest_file3.write(output_str[2:] + "\n")
else:
dest_file2.write(output_str[2:] + "\n")
for i in range(t):
dest_file2.write("0" * 64 + "\n")
dest_file1.close()
dest_file2.close()
dest_file3.close()
# print(poseidon_hash([0,1,2,3]))
# print(poseidon_hash_opt([0,1,2,3]))
| 35.197095
| 128
| 0.603949
|
47a30479062d8b20a87e2e976dc8b6c2299152b5
| 1,515
|
py
|
Python
|
proteus/tests/cylinder2D/conforming_rans3p/pressureincrement_n.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | null | null | null |
proteus/tests/cylinder2D/conforming_rans3p/pressureincrement_n.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | 13
|
2018-02-08T23:22:59.000Z
|
2020-12-06T19:40:32.000Z
|
proteus/tests/cylinder2D/conforming_rans3p/pressureincrement_n.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | 1
|
2020-02-17T03:25:34.000Z
|
2020-02-17T03:25:34.000Z
|
from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
try:
from .pressureincrement_p import *
except:
from pressureincrement_p import *
triangleOptions = triangleOptions
femSpaces = {0:pbasis}
stepController=FixedStep
numericalFluxType = PresInc.NumericalFlux
matrix = LinearAlgebraTools.SparseMatrix
if openTop:
if useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
else:
multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
parallelPartitioningType = parallelPartitioningType
nLayersOfOverlapForParallel = nLayersOfOverlapForParallel
nonlinearSmoother = None
linearSmoother = None
else:
linearSmoother = LinearSolvers.NavierStokesPressureCorrection # pure neumann laplacian solver
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
linear_solver_options_prefix = 'phi_'
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
#linear solver relative tolerance
linTolFac = 0.0
l_atol_res = phi_nl_atol_res
tolFac = 0.0
nl_atol_res = phi_nl_atol_res
nonlinearSolverConvergenceTest = 'r'
levelNonlinearSolverConvergenceTest = 'r'
linearSolverConvergenceTest = 'r-true'
maxLineSearches=0
periodicDirichletConditions=None
#conservativeFlux = {0:'point-eval'} #'point-eval','pwl-bdm-opt'
conservativeFlux=None
| 28.055556
| 100
| 0.780858
|
6c189e90782a13466ef220250ae8c6def2f980b9
| 6,411
|
py
|
Python
|
low_level_simulation/devel/lib/python2.7/dist-packages/costum_msgs/msg/_GlobalSegmentResultsMsg.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | null | null | null |
low_level_simulation/devel/lib/python2.7/dist-packages/costum_msgs/msg/_GlobalSegmentResultsMsg.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | null | null | null |
low_level_simulation/devel/lib/python2.7/dist-packages/costum_msgs/msg/_GlobalSegmentResultsMsg.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from costum_msgs/GlobalSegmentResultsMsg.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GlobalSegmentResultsMsg(genpy.Message):
_md5sum = "c71ff6a13e6428908623dac9d5859650"
_type = "costum_msgs/GlobalSegmentResultsMsg"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int64 segment_index
int64 n_failures
float64 time_mean
float64 time_stdev
float64 time_max
float64 time_min
float64 distance_mean
float64 distance_stdev
float64 distance_max
float64 distance_min
float64 speed_mean
float64 speed_stdev
float64 speed_max
float64 speed_min
"""
__slots__ = ['segment_index','n_failures','time_mean','time_stdev','time_max','time_min','distance_mean','distance_stdev','distance_max','distance_min','speed_mean','speed_stdev','speed_max','speed_min']
_slot_types = ['int64','int64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
segment_index,n_failures,time_mean,time_stdev,time_max,time_min,distance_mean,distance_stdev,distance_max,distance_min,speed_mean,speed_stdev,speed_max,speed_min
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GlobalSegmentResultsMsg, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.segment_index is None:
self.segment_index = 0
if self.n_failures is None:
self.n_failures = 0
if self.time_mean is None:
self.time_mean = 0.
if self.time_stdev is None:
self.time_stdev = 0.
if self.time_max is None:
self.time_max = 0.
if self.time_min is None:
self.time_min = 0.
if self.distance_mean is None:
self.distance_mean = 0.
if self.distance_stdev is None:
self.distance_stdev = 0.
if self.distance_max is None:
self.distance_max = 0.
if self.distance_min is None:
self.distance_min = 0.
if self.speed_mean is None:
self.speed_mean = 0.
if self.speed_stdev is None:
self.speed_stdev = 0.
if self.speed_max is None:
self.speed_max = 0.
if self.speed_min is None:
self.speed_min = 0.
else:
self.segment_index = 0
self.n_failures = 0
self.time_mean = 0.
self.time_stdev = 0.
self.time_max = 0.
self.time_min = 0.
self.distance_mean = 0.
self.distance_stdev = 0.
self.distance_max = 0.
self.distance_min = 0.
self.speed_mean = 0.
self.speed_stdev = 0.
self.speed_max = 0.
self.speed_min = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2q12d().pack(_x.segment_index, _x.n_failures, _x.time_mean, _x.time_stdev, _x.time_max, _x.time_min, _x.distance_mean, _x.distance_stdev, _x.distance_max, _x.distance_min, _x.speed_mean, _x.speed_stdev, _x.speed_max, _x.speed_min))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 112
(_x.segment_index, _x.n_failures, _x.time_mean, _x.time_stdev, _x.time_max, _x.time_min, _x.distance_mean, _x.distance_stdev, _x.distance_max, _x.distance_min, _x.speed_mean, _x.speed_stdev, _x.speed_max, _x.speed_min,) = _get_struct_2q12d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2q12d().pack(_x.segment_index, _x.n_failures, _x.time_mean, _x.time_stdev, _x.time_max, _x.time_min, _x.distance_mean, _x.distance_stdev, _x.distance_max, _x.distance_min, _x.speed_mean, _x.speed_stdev, _x.speed_max, _x.speed_min))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 112
(_x.segment_index, _x.n_failures, _x.time_mean, _x.time_stdev, _x.time_max, _x.time_min, _x.distance_mean, _x.distance_stdev, _x.distance_max, _x.distance_min, _x.speed_mean, _x.speed_stdev, _x.speed_max, _x.speed_min,) = _get_struct_2q12d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2q12d = None
def _get_struct_2q12d():
global _struct_2q12d
if _struct_2q12d is None:
_struct_2q12d = struct.Struct("<2q12d")
return _struct_2q12d
| 39.331288
| 270
| 0.691468
|
160d2c5d7e4ba03339c130df7ec53a6b82c55332
| 379
|
py
|
Python
|
robots/build/rrbot_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
eiphy/lita
|
262d6ccabde8467db47278dc39574e5ea34abda2
|
[
"BSD-3-Clause"
] | 4
|
2019-01-11T02:56:06.000Z
|
2019-03-27T14:26:25.000Z
|
robots/build/gazebo_ros_demos/rrbot_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
eiphy/lita
|
262d6ccabde8467db47278dc39574e5ea34abda2
|
[
"BSD-3-Clause"
] | 5
|
2019-01-10T11:18:54.000Z
|
2019-03-03T09:33:40.000Z
|
robots/build/rrbot_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
eiphy/lita
|
262d6ccabde8467db47278dc39574e5ea34abda2
|
[
"BSD-3-Clause"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rrbot_gazebo"
PROJECT_SPACE_DIR = "/home/ei/engine/lita/robots/devel"
PROJECT_VERSION = "0.1.0"
| 42.111111
| 68
| 0.704485
|
6402c400e328b52a73dd8360b5f6b9be6c249788
| 418
|
py
|
Python
|
misc/api.py
|
alexpickering/dota_webscraper
|
930c17f5563e6c778a820097517e7bb37e36bfe8
|
[
"MIT"
] | null | null | null |
misc/api.py
|
alexpickering/dota_webscraper
|
930c17f5563e6c778a820097517e7bb37e36bfe8
|
[
"MIT"
] | null | null | null |
misc/api.py
|
alexpickering/dota_webscraper
|
930c17f5563e6c778a820097517e7bb37e36bfe8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return 'This is the home page'
@app.route('/heroes')
def display_hero_data():
with open('heroes.csv', 'r') as f:
csv_data = f.read()
return render_template('heroes.html', csv_data=csv_data)
@app.route('/csv.html')
def display_csv_as_html():
return render_template('csv.html')
| 23.222222
| 60
| 0.684211
|
314534841262420627abaa44d23ce722192d3b0c
| 521
|
py
|
Python
|
user_scripts/merge_IEC.py
|
OceanNuclear/PeakFinding
|
dd82589201496b8c46cbd8ae28c2dabbfba7fed1
|
[
"MIT"
] | 2
|
2021-10-31T08:49:18.000Z
|
2021-11-04T13:12:36.000Z
|
user_scripts/merge_IEC.py
|
OceanNuclear/PeakFinding
|
dd82589201496b8c46cbd8ae28c2dabbfba7fed1
|
[
"MIT"
] | null | null | null |
user_scripts/merge_IEC.py
|
OceanNuclear/PeakFinding
|
dd82589201496b8c46cbd8ae28c2dabbfba7fed1
|
[
"MIT"
] | null | null | null |
from peakfinding.spectrum import RealSpectrumInteractive, RealSpectrum
import sys
print(*sys.argv[1:-1])
print(sys.argv[-1]+".IEC")
"""
Note: inside ipython, using * (wildcard) in sys.argv will give an UNSORTED (disordered!) list of the files matched by the wildcard.
Outside of ipython, however, sys.argv comes back sorted.
Therefore you're encouraged not to use * in ipython.
"""
spectrum = RealSpectrumInteractive.from_multiple_files(*sys.argv[1:-1])
spectrum.show_sqrt_scale()
spectrum.to_IEC(sys.argv[-1]+".IEC")
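# Illustrative sketch of the note above: if the file list may arrive unsorted (for
# example a wildcard expanded inside ipython), sorting it explicitly before merging
# removes the ambiguity. This mirrors the calls above; it is defined but not executed.
def _merge_sorted(file_paths, output_name):
    spectrum = RealSpectrumInteractive.from_multiple_files(*sorted(file_paths))
    spectrum.to_IEC(output_name + ".IEC")
    return spectrum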
| 40.076923
| 130
| 0.765835
|
8c1006bdfcd28c6fff5a39d33d1988f4e7a4a521
| 8,121
|
py
|
Python
|
src/reports/report_configs.py
|
william-cass-wright/twl_bigcommerce_api
|
d674193db7dd46e1922c06d22c2da7a6aa07b8b8
|
[
"Apache-2.0"
] | null | null | null |
src/reports/report_configs.py
|
william-cass-wright/twl_bigcommerce_api
|
d674193db7dd46e1922c06d22c2da7a6aa07b8b8
|
[
"Apache-2.0"
] | null | null | null |
src/reports/report_configs.py
|
william-cass-wright/twl_bigcommerce_api
|
d674193db7dd46e1922c06d22c2da7a6aa07b8b8
|
[
"Apache-2.0"
] | null | null | null |
"""
## (1) MONTHLY REPORT OF SALES FOR SALES TAX PURPOSES BY PAYMENT METHOD
- using [BigCommerce Orders v2 API](https://developer.bigcommerce.com/api-reference/store-management/orders/orders/getallorders) in BigCommOrdersAPI class
## (2) MONTHLY REPORTS FOR INVENTORY VALUATION
- using [BigCommerce Catalog/Products v3 API](https://developer.bigcommerce.com/api-reference/store-management/catalog/products/getproducts) in BigCommProductsAPI class
## (3) NEED A MONTHLY SALES REPORT BY CATEGORY AND BY ITEM.
- using [BigCommerce Orders v2 API](https://developer.bigcommerce.com/api-reference/store-management/orders/orders/getallorders) in BigCommOrdersAPI class
"""
from typing import List
import datetime as dt
from operator import lt, gt, eq
TODAY = str(dt.datetime.today()).split(" ")[0]
REPORT_START_DATE = "2021-08-01"
def sales_tax_report_configs() -> dict:
"""
report type: orders
"""
REPORT_TITLE = "ORDERS SUBTOTAL EX TAX BY PAYMENT METHOD"
file_name = REPORT_TITLE.lower().replace(" ", "_")
input_dict = {}
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["date_created_month"]
inputs["columns"] = ["payment_method"]
input_dict["ytd_pivot_by_month"] = inputs
inputs = {}
inputs["type"] = "data_filter"
inputs["bool_arg"] = REPORT_START_DATE
inputs["column"] = "date_created"
inputs["bool_op"] = gt
input_dict["table_modification"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["date_created_month", "date_created_date"]
inputs["columns"] = ["payment_method"]
input_dict["pivot_by_day_post_aug01"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax", "subtotal_inc_tax", "subtotal_tax"]
inputs["index"] = ["date_created_month"]
inputs["columns"] = ["payment_method"]
input_dict["pivot_by_month_post_aug01"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 1
input_dict["sum_by_row"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 0
input_dict["sum_by_column"] = inputs
configs = {}
configs["report_title"] = REPORT_TITLE
configs["export_file_name"] = f"{TODAY}_{file_name}_post_aug01"
configs["input_dict"] = input_dict
return configs
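# Illustrative sketch (not part of the original module): how downstream code might walk
# the configs dict returned above. Only the keys used here ("report_title",
# "export_file_name", "input_dict" and each step's "type") come from the function itself.
def _walk_sales_tax_report_config():
    configs = sales_tax_report_configs()
    steps = [(name, step["type"]) for name, step in configs["input_dict"].items()]
    return configs["report_title"], configs["export_file_name"], steps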
def sales_by_category_report_configs() -> dict:
"""
report type: orders
DONT INCLUDE BRAND OR CATEGORY COLUMNS
"""
REPORT_TITLE = "ORDERS SUBTOTAL EX TAX BY CATEGORY AND BRAND"
file_name = REPORT_TITLE.lower().replace(" ", "_")
input_dict = {}
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["date_created_month"]
inputs["columns"] = ["category_top"]
input_dict["ytd_pivot_by_brand_and_categories"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["category_top", "category_all"]
inputs["columns"] = ["date_created_month"]
input_dict["ytd_pivot_by_categories"] = inputs
inputs = {}
inputs["type"] = "data_filter"
inputs["bool_arg"] = REPORT_START_DATE
inputs["column"] = "date_created"
inputs["bool_op"] = gt
input_dict["table_modification"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["date_created_month", "date_created_date"]
inputs["columns"] = ["category_top"]
input_dict["pivot_by_brand_and_date"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["category_top", "category_all"]
inputs["columns"] = ["date_created_month"]
input_dict["pivot_by_categories"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 1
input_dict["sum_by_row"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 0
input_dict["sum_by_column"] = inputs
# TODAY = str(dt.datetime.today()).split(" ")[0]
configs = {}
configs["report_title"] = REPORT_TITLE
configs["export_file_name"] = f"{TODAY}_{file_name}_post_aug01"
configs["input_dict"] = input_dict
return configs
def inventory_valuation_report_configs() -> dict:
"""
report type: product
"""
REPORT_TITLE = "INVENTORY VALUATION REPORT"
file_name = REPORT_TITLE.lower().replace(" ", "_")
input_dict = {}
inputs = {}
inputs["type"] = "groupby_table"
inputs["values"] = ["price", "cost_price", "inventory_level", "inventory_value", "inventory_value_by_cost"]
inputs["index"] = ["category_top"]
inputs["aggfuncs"] = ["sum", "mean"]
input_dict["groupby_category"] = inputs
inputs = {}
inputs["type"] = "groupby_table"
inputs["values"] = ["price", "cost_price", "inventory_level", "inventory_value", "inventory_value_by_cost"]
inputs["index"] = ["category_top"]
inputs["aggfuncs"] = ["sum", "mean", "median", "count"]
input_dict["groupby_category"] = inputs
inputs = {}
inputs["type"] = "groupby_table"
inputs["values"] = ["price", "cost_price", "inventory_level", "inventory_value", "inventory_value_by_cost"]
inputs["index"] = ["brand_name"]
inputs["aggfuncs"] = "sum"
input_dict["groupby_brand"] = inputs
inputs = {}
inputs["type"] = "groupby_table"
inputs["values"] = ["price", "cost_price", "inventory_level", "inventory_value", "inventory_value_by_cost"]
inputs["index"] = ["brand_name", "category_top"]
inputs["aggfuncs"] = "sum"
input_dict["groupby_cat_and_brand"] = inputs
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["price"]
inputs["index"] = ["brand_name", "category_top", "category_all"]
inputs["columns"] = ["date_created_month"]
input_dict["pivot_by_price"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 1
input_dict["sum_by_row"] = inputs
inputs = {}
inputs["type"] = "sum_on_previous_table"
inputs["axis"] = 0
input_dict["sum_by_column"] = inputs
# TODAY = str(dt.datetime.today()).split(" ")[0]
configs = {}
configs["report_title"] = REPORT_TITLE
configs["export_file_name"] = f"{TODAY}_{file_name}"
configs["input_dict"] = input_dict
return configs
def collections_report_configs(collections: List[str]) -> dict:
"""
report type: orders
"""
REPORT_TITLE = "COLLECTIONS MONTHLY REPORT"
file_name = REPORT_TITLE.lower().replace(" ", "_")
input_dict = {}
inputs = {}
inputs["type"] = "pivot_table"
inputs["values"] = ["subtotal_ex_tax"]
inputs["index"] = ["date_created_month"]
inputs["columns"] = ["sku_prefix"]
input_dict["ytd_pivot_by_month"] = inputs
for sku_prefix in collections:
inputs = {}
inputs["type"] = "data_filter"
inputs["bool_arg"] = sku_prefix
inputs["column"] = "sku_prefix"
inputs["bool_op"] = eq
input_dict[f"data_filter_{sku_prefix.lower()}"] = inputs
# inputs = {}
# inputs["type"] = "pivot_table"
# inputs["values"] = ["price_ex_tax"]
# inputs["index"] = ["sku_prefix", "sku"]
# inputs["columns"] = ["date_created_month"]
# input_dict[f"pivot_by_month_{sku_prefix.lower()}"] = inputs
inputs = {}
inputs["type"] = "groupby_table"
inputs["values"] = ["price_ex_tax"]
inputs["index"] = ["date_created_month", "sku_prefix", "sku"]
inputs["aggfuncs"] = ["sum", "count"]
input_dict[f"groupby_month_{sku_prefix.lower()}"] = inputs
inputs = {}
inputs["type"] = "data_reset"
input_dict[f"data_reset_{sku_prefix.lower()}"] = inputs
configs = {}
configs["report_title"] = REPORT_TITLE
configs["export_file_name"] = f"{TODAY}_{file_name}"
configs["input_dict"] = input_dict
return configs
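# The sketch below is hypothetical and not part of the original module: it shows one
# way the step dictionaries built by the config functions above could be executed with
# pandas. The function name, the DataFrame argument and the subset of handled step
# types ("pivot_table" and "data_filter") are assumptions.
def _example_consumer(df, configs):
    """Run the configured steps against a pandas DataFrame and collect the tables."""
    import pandas as pd  # local import so the sketch stays self-contained
    tables = {}
    for name, step in configs["input_dict"].items():
        if step["type"] == "pivot_table":
            tables[name] = pd.pivot_table(
                df,
                values=step["values"],
                index=step["index"],
                columns=step["columns"],
                aggfunc="sum",
            )
        elif step["type"] == "data_filter":
            # e.g. operator.gt(df["date_created"], "2021-08-01") keeps rows after the start date
            df = df[step["bool_op"](df[step["column"]], step["bool_arg"])]
    return tables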
| 31.722656
| 168
| 0.643517
|
65e51f301e6f0138e74121c7e450a09ca8640c97
| 805
|
py
|
Python
|
blackbelt/slack.py
|
apiaryio/black-belt
|
d28d7022605bfd0c6814e591da21729f6aa0eecf
|
[
"MIT"
] | 3
|
2015-06-02T20:46:53.000Z
|
2019-07-16T20:15:38.000Z
|
blackbelt/slack.py
|
apiaryio/black-belt
|
d28d7022605bfd0c6814e591da21729f6aa0eecf
|
[
"MIT"
] | 113
|
2015-01-13T15:27:51.000Z
|
2018-09-12T09:06:45.000Z
|
blackbelt/slack.py
|
apiaryio/black-belt
|
d28d7022605bfd0c6814e591da21729f6aa0eecf
|
[
"MIT"
] | 1
|
2015-06-02T20:47:14.000Z
|
2015-06-02T20:47:14.000Z
|
from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
self.slack = Slacker(token)
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#engine-room'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
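# Illustrative usage (assumes `bb init` has stored a Slack access token; the room
# name is an example):
#   post_message("Deployment finished", room="#general")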
| 30.961538
| 111
| 0.636025
|
789c4c8a486a3baf95018464d6d015f34de6367c
| 1,599
|
py
|
Python
|
musi/base.py
|
sixohsix/musi
|
231801895583c19375ad51f3e259c8621b1db923
|
[
"MIT"
] | 1
|
2015-11-14T17:22:53.000Z
|
2015-11-14T17:22:53.000Z
|
musi/base.py
|
sixohsix/musi
|
231801895583c19375ad51f3e259c8621b1db923
|
[
"MIT"
] | null | null | null |
musi/base.py
|
sixohsix/musi
|
231801895583c19375ad51f3e259c8621b1db923
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from types import FunctionType, ClassType
import logging
logging.basicConfig()
log = logging.getLogger("musi")
def Constant(val):
def val_f(now):
return val
return val_f
C = Constant
class Buffer(object):
def __init__(self):
self.val = None
def __call__(self, val):
if val == self.val:
return None
self.val = val
return val
def print_(name, val):
print("{} {}".format(name, val))
def Tap(val_f, name="tap", func=print_):
def tap(now):
val = val_f(now)
func(name, val)
return val
return tap
def If(test_f, true_f, false_f):
def if_(now):
# Note that we do NOT short-circuit
test = test_f(now)
true = true_f(now)
false = false_f(now)
return true if test > 0.0 else false
return if_
def Eval(value_f):
def eval_(now):
return value_f(now)(now)
return eval_
def song_time(now):
return now
class Tempo(object):
def __init__(self, bpm_f, bpb):
def no_zero_bpm(now):
bpm = bpm_f(now)
return bpm if bpm != 0.0 else 0.00000001
self.bpm_f = no_zero_bpm
self.bpb = bpb
def song_beats(self, now):
return int(now / 60.0 * self.bpm_f(now))
def song_bars(self, now):
return self.song_beats(now) / self.bpb
def Duration(self, bars=0, beats=0):
nbeats = float((bars * self.bpb) + beats)
def duration(now):
return 1.0 / (self.bpm_f(now) / nbeats / 60.0)
return duration
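# A minimal illustrative sketch (not part of the original module) of composing the
# combinators above; all concrete numbers are arbitrary examples:
#   tempo = Tempo(Constant(120.0), bpb=4)
#   one_bar = tempo.Duration(bars=1)
#   one_bar(0.0)                        # -> 2.0 seconds for one 4-beat bar at 120 bpm
#   gate = If(Constant(1.0), Constant(440.0), Constant(220.0))
#   gate(0.0)                           # -> 440.0 because the test signal is > 0.0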
| 20.240506
| 58
| 0.587242
|
6edd1bf80b7ff5fa9e9c82ca715154986cd56570
| 1,081
|
py
|
Python
|
legipy/parsers/legislature_list_parser.py
|
vermavinay8948/legipy
|
f82ae8db82a53fe28a193ea16d05b07316ffc5b9
|
[
"MIT"
] | 6
|
2016-05-16T21:08:53.000Z
|
2021-06-28T16:50:08.000Z
|
legipy/parsers/legislature_list_parser.py
|
vermavinay8948/legipy
|
f82ae8db82a53fe28a193ea16d05b07316ffc5b9
|
[
"MIT"
] | 6
|
2018-02-10T23:00:54.000Z
|
2021-11-17T23:09:10.000Z
|
legipy/parsers/legislature_list_parser.py
|
vermavinay8948/legipy
|
f82ae8db82a53fe28a193ea16d05b07316ffc5b9
|
[
"MIT"
] | 9
|
2018-02-03T10:56:37.000Z
|
2021-04-28T12:24:41.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import re
from bs4 import BeautifulSoup
from legipy.common import parse_date
from legipy.common import parse_roman
from legipy.models.legislature import Legislature
def parse_legislature_list(url, html):
soup = BeautifulSoup(html, 'html5lib', from_encoding='utf-8')
results = []
for leg_header in soup.find_all('h2'):
text = leg_header.get_text()
text = re.sub(r'\s+', ' ', text)
num = parse_roman(re.search('^[MDCLXVI]+', text).group(0))
m = re.search(r'à compter du (\d{1,2}(?:er)?\s+[^\s]+\s+\d{4})', text)
if m:
start = parse_date(m.group(1))
end = None
else:
start = None
end = None
m = re.search(r'du (\d{1,2}(?:er)?\s+[^\s]+\s+\d{4}) '
r'au (\d{1,2}(?:er)?\s+[^\s]+\s+\d{4})', text)
if m:
start = parse_date(m.group(1))
end = parse_date(m.group(2))
results.append(Legislature(number=num, start=start, end=end))
return results
| 27.717949
| 78
| 0.565217
|
e34b07ce7ab6a1b37facb27b319a5e5a4a491428
| 22,681
|
py
|
Python
|
contrib/utils/perf/deploy-gce-perf-cluster.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 1,664
|
2015-01-03T09:35:21.000Z
|
2022-03-31T04:55:24.000Z
|
contrib/utils/perf/deploy-gce-perf-cluster.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 3,018
|
2015-02-19T20:16:10.000Z
|
2021-11-13T20:47:48.000Z
|
contrib/utils/perf/deploy-gce-perf-cluster.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 1,673
|
2015-01-06T14:14:42.000Z
|
2022-03-31T07:22:30.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import subprocess
import sys
import pprint
import time
import traceback
import re
import socket
cluster_prefix = "perf"
ambari_repo_file_url = "http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos7/2.x/updates/2.7.1.0/ambaribn.repo"
public_hostname_script = "foo"
hostname_script = "foo"
NUMBER_OF_AGENTS_ON_HOST = 50
class SSH:
"""
Thin SSH wrapper that runs a remote command, retrying a few times before giving up.
"""
def __init__(self, user, sshkey_file, host, command, custom_option='', errorMessage = None):
self.user = user
self.sshkey_file = sshkey_file
self.host = host
self.command = command
self.errorMessage = errorMessage
self.custom_option = custom_option
def run(self):
sshcommand = ["ssh",
"-o", "ConnectTimeOut=180",
"-o", "StrictHostKeyChecking=no",
"-o", "BatchMode=yes",
self.custom_option,
"-i", self.sshkey_file,
self.user + "@" + self.host, self.command]
if not self.custom_option:
del sshcommand[7]
i = 1
while True:
try:
sshstat = subprocess.Popen(sshcommand, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
log = sshstat.communicate()
if sshstat.returncode != 0:
print "Executing SSH command on {0} failed: {1}".format(self.host, log)
print "\nRetrying SSH command one more time!"
if i >= 3:
break
i += 1
time.sleep(10)
continue
break
except:
print "Could not SSH to {0}, waiting for it to start".format(self.host)
i += 1
time.sleep(10)
if i >= 3:
print "Could not execute remote ssh command: " + ' '.join(sshcommand)
raise Exception("Could not connect to {0}. Giving up with erros: {1}".format(self.host, log))
errorMsg = log[1]
if self.errorMessage and sshstat.returncode != 0:
errorMsg = self.errorMessage + "\n" + errorMsg
print "SSH command execution finished"
return {"exitstatus": sshstat.returncode, "log": log, "errormsg": errorMsg}
class SCP:
"""
Thin SCP wrapper that copies a file or directory to a remote host, retrying a few
times before giving up.
"""
def __init__(self, user, sshkey_file, host, inputFile, remote, errorMessage = None):
self.user = user
self.sshkey_file = sshkey_file
self.host = host
self.inputFile = inputFile
self.remote = remote
self.errorMessage = errorMessage
def run(self):
scpcommand = ["scp",
"-r",
"-o", "ConnectTimeout=60",
"-o", "BatchMode=yes",
"-o", "StrictHostKeyChecking=no",
"-i", self.sshkey_file, self.inputFile, self.user + "@" +
self.host + ":" + self.remote]
i = 1
while True:
try:
scpstat = subprocess.Popen(scpcommand, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
log = scpstat.communicate()
if scpstat.returncode != 0:
print "Executing SCP command on {0} failed: {1}".format(self.host, log)
print "\nRetrying SCP command one more time!"
if i >= 3:
break
i += 1
time.sleep(10)
continue
break
except:
print "Could not SCP to {0}, waiting for it to start".format(self.host)
i += 1
time.sleep(10)
if i >= 3:
print "Could not execute remote scp command: " + ' '.join(scpcommand)
raise Exception("Could not connect to {0}. Giving up with erros: {1}".format(self.host, log))
errorMsg = log[1]
if self.errorMessage and scpstat.returncode != 0:
errorMsg = self.errorMessage + "\n" + errorMsg
print "SCP command execution finished"
return {"exitstatus": scpstat.returncode, "log": log, "errormsg": errorMsg}
# main method to parse arguments from user and start work
def main():
parser = argparse.ArgumentParser(
description='This script brings up a cluster with ambari installed, configured and started',
epilog='Only GCE is supported as of now!'
)
# options
parser.add_argument('--controller', type=str,
action='store', help='GCE controller ip address.')
parser.add_argument('--key', type=str,
action='store', help='Path to GCE ssh key.')
parser.add_argument('--cluster-suffix', type=str,
action='store', help='Cluster name suffix.')
parser.add_argument('--agent-prefix', type=str,
action='store', help='Agent name prefix.')
parser.add_argument('--agents-count', type=int,
action='store', help='Agents count for whole cluster (multiples of 50).')
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(-1)
args = parser.parse_args()
do_work(args)
def do_work(args):
"""
Check that all required args are passed in. If so, deploy the cluster.
:param args: Command line args
"""
if not args.controller:
raise Exception("GCE controller ip address is not defined!")
if not args.key:
raise Exception("Path to gce ssh key is not defined!")
if not args.cluster_suffix:
raise Exception("Cluster name suffix is not defined!")
if not args.agent_prefix:
raise Exception("Agent name prefix is not defined!")
if not args.agents_count:
raise Exception("Agents count for whole cluster is not defined (will put 50 Agents per VM)!")
deploy_cluster(args)
def deploy_cluster(args):
"""
Process cluster deployment
:param args: Command line args.
"""
# When dividing, need to get the ceil.
number_of_nodes = ((args.agents_count - 1) / NUMBER_OF_AGENTS_ON_HOST) + 1
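# e.g. 101 agents -> ((101 - 1) / 50) + 1 = 3 VMs, and 100 agents -> 2 VMs; with
# Python 2 integer division this expression rounds up without needing math.ceil.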
# In case of an error after creating VMs, can simply comment out this function to run again without creating VMs.
create_vms(args, number_of_nodes)
# getting list of vms information like hostname and ip address
print "Getting list of virtual machines from cluster..."
# Dictionary from host name to IP
(server_dict, agents_dict) = get_vms_list(args)
# check number of nodes in cluster to be the same as user asked
print "Checking count of created nodes in cluster..."
if not agents_dict or len(agents_dict) < number_of_nodes:
raise Exception("Cannot bring up enough nodes. Requested {0}, but got {1}. Probably not enough resources!".format(number_of_nodes, len(agents_dict)))
print "GCE cluster was successfully created!\n"
# installing/starting ambari-server and ambari-agents on each host
server_item = server_dict.items()[0]
server_host_name = server_item[0]
server_ip = server_item[1]
print "=========================="
print "Server Hostname: %s" % server_host_name
print "Server IP: %s" % server_ip
print "==========================\n"
# Sort the agents by hostname into a list.
sorted_agents = sort_hosts(agents_dict)
pretty_print_vms(sorted_agents)
print "Creating server.sh script (which will be executed on server to install/configure/start ambari-server)..."
create_server_script(server_host_name)
print "Creating agent.sh script (which will be executed on agent hosts to install/configure/start ambari-agent..."
create_agent_script(server_host_name)
time.sleep(10)
prepare_server(args, server_host_name, server_ip)
# If the user asks for a number of agents that is not a multiple of 50, then only create how many are needed instead
# of 50 on every VM.
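# e.g. 120 requested agents spread over 3 VMs become 50, 50 and 20 agents respectively.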
num_agents_left_to_create = args.agents_count
start_num = 1
for (hostname, ip) in sorted_agents:
num_agents_on_this_host = min(num_agents_left_to_create, NUMBER_OF_AGENTS_ON_HOST)
print "=========================="
print "Working on VM {0} that will contain hosts {1} - {2}".format(hostname, start_num, start_num + num_agents_on_this_host - 1)
# The agent multiplier config will be different on each VM.
cmd_generate_multiplier_conf = "mkdir -p /etc/ambari-agent/conf/ ; printf \"start={0}\\nnum={1}\\nprefix={2}\" > /etc/ambari-agent/conf/agent-multiplier.conf".format(start_num, num_agents_on_this_host, args.agent_prefix)
start_num += num_agents_on_this_host
num_agents_left_to_create -= num_agents_on_this_host
prepare_agent(args, hostname, ip, cmd_generate_multiplier_conf)
pass
print "All scripts where successfully copied and started on all hosts. " \
"\nPay attention that server.sh script need 5 minutes to finish and agent.sh need 3 minutes!"
def create_vms(args, number_of_nodes):
"""
Request the server and VMs for the agents from GCE.
:param args: Command line args
:param number_of_nodes: Number of VMs to request.
"""
print "Creating server VM {0}-server-{1} with xxlarge nodes on centos7...".format(cluster_prefix, args.cluster_suffix)
execute_command(args, args.controller, "/opt/gce-utils/gce up {0}-server-{1} 1 --centos7 --xxlarge --ex --disk-xxlarge --ssd".format(cluster_prefix, args.cluster_suffix),
"Failed to create server, probably not enough resources!", "-tt")
time.sleep(10)
# trying to create cluster with needed params
print "Creating agent VMs {0}-agent-{1} with {2} xlarge nodes on centos7...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
execute_command(args, args.controller, "/opt/gce-utils/gce up {0}-agent-{1} {2} --centos7 --xlarge --ex --disk-xlarge".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
"Failed to create cluster VMs, probably not enough resources!", "-tt")
# VMs are not accessible immediately
time.sleep(10)
def prepare_server(args, hostname, ip):
remote_path = "/server.sh"
local_path = "server.sh"
print "Copying server.sh to {0}...".format(hostname)
put_file(args, ip, local_path, remote_path, "Failed to copy file!")
print "Executing remote ssh command (set correct permissions and start executing server.sh in separate process) on {0}...".format(hostname)
execute_command(args, ip, "cd /; chmod 777 server.sh; nohup ./server.sh >/server.log 2>&1 &",
"Install/configure/start server script failed!")
def prepare_agent(args, hostname, ip, cmd_generate_multiplier_conf):
remote_path = "/agent.sh"
local_path = "agent.sh"
print "Copying agent.sh to {0}...".format(hostname)
put_file(args, ip, local_path, remote_path, "Failed to copy file!")
print "Generating agent-multiplier.conf"
execute_command(args, ip, cmd_generate_multiplier_conf, "Failed to generate agent-multiplier.conf on host {0}".format(hostname))
print "Executing remote ssh command (set correct permissions and start executing agent.sh in separate process) on {0}...".format(hostname)
execute_command(args, ip, "cd /; chmod 777 agent.sh; nohup ./agent.sh >/agent.log 2>&1 &",
"Install/configure start agent script failed!")
def create_server_script(server_host_name):
"""
Creating server.sh script in the same dir where current script is located
server.sh script will install, configure and start ambari-server and ambari-agent on host
:param server_host_name: Server host name
"""
# ambari-server setup <options> may not work properly, so doing several calls like
# echo "arg=value" >> .../ambari.properties
contents = "#!/bin/bash\n" + \
"yum install wget -y\n" + \
"wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
"yum clean all; yum install git ambari-server -y\n" + \
"mkdir /home ; cd /home ; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
"cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-server/resources/stacks/PERF\n" + \
"cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
"sed -i -f /home/ambari/ambari-server/src/main/resources/stacks/PERF/install_packages.sed /var/lib/ambari-server/resources/custom_actions/scripts/install_packages.py\n" + \
"sed -i -f /home/ambari/ambari-server/src/main/resources/stacks/PERF/install_packages.sed /var/lib/ambari-agent/cache/custom_actions/scripts/install_packages.py\n" + \
"\n" + \
"\n" + \
"cd /; wget http://central.maven.org/maven2/mysql/mysql-connector-java/5.1.40/mysql-connector-java-5.1.40.jar;\n" + \
"mkdir /usr/share/java; chmod 777 /usr/share/java;" + \
"cp mysql-connector-java-5.1.40.jar /usr/share/java/; chmod 777 /usr/share/java/mysql-connector-java-5.1.40.jar;\n" + \
"ln -s /usr/share/java/mysql-connector-java-5.1.40.jar /usr/share/java/mysql-connector-java.jar;\n" + \
"cd /etc/yum.repos.d/; wget http://repo.mysql.com/mysql-community-release-el6-5.noarch.rpm; rpm -ivh mysql-community-release-el6-5.noarch.rpm;" + \
"yum clean all; yum install mysql-server -y\n" + \
"sed -i -e 's/mysqld]/mysqld]\\nmax_allowed_packet=1024M\\njoin_buffer_size=512M\\nsort_buffer_size=128M\\nread_rnd_buffer_size=128M\\ninnodb_buffer_pool_size=16G" \
"\\ninnodb_file_io_threads=16\\ninnodb_thread_concurrency=32\\nkey_buffer_size=16G\\nquery_cache_limit=16M\\nquery_cache_size=512M\\nthread_cache_size=128\\ninnodb_log_buffer_size=512M/1' /etc/my.cnf\n" + \
"service mysqld start\n" + \
"mysql -uroot -e \"CREATE DATABASE ambari;\"\n" + \
"mysql -uroot -e \"SOURCE /var/lib/ambari-server/resources/Ambari-DDL-MySQL-CREATE.sql;\" ambari\n" + \
"mysql -uroot -e \"CREATE USER 'ambari'@'%' IDENTIFIED BY 'bigdata';\"\n" + \
"mysql -uroot -e \"GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'%%';\"\n" + \
"mysql -uroot -e \"CREATE USER 'ambari'@'localhost' IDENTIFIED BY 'bigdata';\"\n" + \
"mysql -uroot -e \"GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'localhost';\"\n" + \
"mysql -uroot -e \"CREATE USER 'ambari'@'{0}' IDENTIFIED BY 'bigdata';\"\n".format(server_host_name) + \
"mysql -uroot -e \"GRANT ALL PRIVILEGES ON *.* TO 'ambari'@'{0}';\"\n".format(server_host_name) + \
"mysql -uroot -e \"FLUSH PRIVILEGES;\"\n" + \
"\n" + \
"\n" + \
"ambari-server setup -s\n" + \
"ambari-server setup --database mysql --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar --databasehost=localhost --databaseport=3306 --databasename=ambari --databaseusername=ambari --databasepassword=bigdata\n" + \
"sed -i -e 's/=postgres/=mysql/g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/server.persistence.type=local/server.persistence.type=remote/g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/local.database.user=postgres//g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/server.jdbc.postgres.schema=ambari//g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/agent.threadpool.size.max=25/agent.threadpool.size.max=100/g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/client.threadpool.size.max=25/client.threadpool.size.max=65/g' /etc/ambari-server/conf/ambari.properties\n" + \
"sed -i -e 's/false/true/g' /var/lib/ambari-server/resources/stacks/PERF/1.0/metainfo.xml\n" + \
"sed -i -e 's/false/true/g' /var/lib/ambari-server/resources/stacks/PERF/2.0/metainfo.xml\n" + \
"sed -i -e 's/-Xmx2048m/-Xmx16384m/g' /var/lib/ambari-server/ambari-env.sh\n" + \
"\n" + \
"echo 'server.jdbc.driver=com.mysql.jdbc.Driver' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'server.jdbc.rca.url=jdbc:mysql://{0}:3306/ambari' >> /etc/ambari-server/conf/ambari.properties\n".format(server_host_name) + \
"echo 'server.jdbc.rca.driver=com.mysql.jdbc.Driver' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'server.jdbc.url=jdbc:mysql://{0}:3306/ambari' >> /etc/ambari-server/conf/ambari.properties\n".format(server_host_name) + \
"echo 'server.jdbc.port=3306' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'server.jdbc.hostname=localhost' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'server.jdbc.driver.path=/usr/share/java/mysql-connector-java.jar' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'alerts.cache.enabled=true' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'alerts.cache.size=100000' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'alerts.execution.scheduler.maxThreads=4' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'security.temporary.keystore.retention.minutes=180' >> /etc/ambari-server/conf/ambari.properties\n" + \
"echo 'stack.hooks.folder=stacks/PERF/1.0/hooks' >> /etc/ambari-server/conf/ambari.properties\n" + \
"\n" + \
"ambari-server start --skip-database-check\n" + \
"exit 0"
with open("server.sh", "w") as f:
f.write(contents)
def create_agent_script(server_host_name):
"""
Creating agent.sh script in the same dir where current script is located
agent.sh script will install, configure and start ambari-agent on host
:param server_host_name: Server host name
"""
# TODO, instead of cloning Ambari repo on each VM, do it on the server once and distribute to all of the agents.
contents = "#!/bin/bash\n" + \
"yum install wget -y\n" + \
"wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
"yum clean all; yum install krb5-workstation git ambari-agent -y\n" + \
"mkdir /home ; cd /home; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
"cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
"sed -i -f /var/lib/ambari-agent/cache/stacks/PERF/PythonExecutor.sed /usr/lib/ambari-agent/lib/ambari_agent/PythonExecutor.py\n" + \
"sed -i -f /var/lib/ambari-agent/cache/stacks/PERF/check_host.sed /var/lib/ambari-agent/cache/custom_actions/scripts/check_host.py\n" + \
"sed -i -e 's/hostname=localhost/hostname={0}/g' /etc/ambari-agent/conf/ambari-agent.ini\n".format(server_host_name) + \
"sed -i -e 's/agent]/agent]\\nhostname_script={0}\\npublic_hostname_script={1}\\n/1' /etc/ambari-agent/conf/ambari-agent.ini\n".format(hostname_script, public_hostname_script) + \
"python /home/ambari/ambari-agent/conf/unix/agent-multiplier.py start\n" + \
"exit 0"
with open("agent.sh", "w") as f:
f.write(contents)
def execute_command(args, ip, cmd, fail_message, custom_option='', login='root'):
"""
Method to execute ssh commands via SSH class
:param args: Command line args
:param ip: IP to ssh to
:param cmd: Command to execute
:param fail_message: In case of an error, what to report
:param custom_option: Custom flags
:param login: Login user
:return: Return execute log message
"""
ssh = SSH(login, args.key, ip, cmd, custom_option, fail_message)
ssh_result = ssh.run()
status_code = ssh_result["exitstatus"]
if status_code != 0:
raise Exception(ssh_result["errormsg"])
return ssh_result["log"][0]
def put_file(args, ip, local_file, remote_file, fail_message, login='root'):
"""
Method to copy file from local to remote host via SCP class
:param args: Command line args
:param ip: IP to ssh to
:param local_file: Path to local file
:param remote_file: Path to remote file
:param fail_message: In case of an error, what to report
:param login: Login user.
:return: Return copy log message
"""
scp = SCP(login, args.key, ip, local_file,
remote_file, fail_message)
scp_result = scp.run()
status_code = scp_result["exitstatus"]
if status_code != 0:
raise Exception(scp_result["errormsg"])
return scp_result["log"][0]
def get_vms_list(args):
"""
Get tuple of (x, y) where
x = dictionary from single server host name to ip
y = dictionary from multiple agent host names to ip
:param args: Command line arguments
:return: Tuple of dictionaries of hostnames and ip for server and agents.
"""
# Get the server.
server = __get_vms_list_from_name(args, "{0}-server-{1}".format(cluster_prefix, args.cluster_suffix))
# Get the agents
agents = __get_vms_list_from_name(args, "{0}-agent-{1}".format(cluster_prefix, args.cluster_suffix))
return (server, agents)
def __get_vms_list_from_name(args, cluster_name):
"""
Method to parse "gce info {cluster-name}" command output and get hosts and ips pairs for every host in cluster
:param args: Command line args
:return: Mapping of VM host name to ip.
"""
gce_fqdb_cmd = '/opt/gce-utils/gce info {0}'.format(cluster_name)
out = execute_command(args, args.controller, gce_fqdb_cmd, "Failed to get VMs list!", "-tt")
lines = out.split('\n')
#print "LINES=" + str(lines)
if lines[0].startswith("Using profile") and not lines[1].strip():
result = {}
for s in lines[4:]: # Ignore non-meaningful lines
if not s:
continue
match = re.match(r'^ [^ ]+ ([\w\.-]*)\s+([\d\.]*).*$', s, re.M)
if match:
result[match.group(1)] = match.group(2)
else:
raise Exception('Cannot parse "{0}"'.format(s))
return result
else:
raise Exception('Cannot parse "{0}"'.format(lines))
def sort_hosts(hosts):
"""
Sort the hosts by name and take into account the numbers.
:param hosts: Dictionary from host name (e.g., perf-9-test, perf-62-test), to the IP
:return: Sorted list of tuples
"""
host_names = hosts.keys()
sorted_host_tuples = [(None, None),] * len(hosts)
pattern = re.compile(".*?-agent-.*?(\d+)")
for host_name in host_names:
m = pattern.match(host_name)
if m and len(m.groups()) == 1:
number = int(m.group(1))
ip = hosts[host_name]
sorted_host_tuples[number - 1] = (host_name, ip)
return sorted_host_tuples
def pretty_print_vms(vms):
"""
Pretty print the VMs hostnames
:param vms: List of tuples (hostname, ip)
"""
print "=========================="
print "Hostnames of nodes in cluster:"
for (hostname, ip) in vms:
print hostname
print "==========================\n"
if __name__ == "__main__":
main()
| 42.157993
| 237
| 0.679952
|
9a9e5108a78a87be32bc12fa8a574505ad7827cc
| 9,538
|
py
|
Python
|
test.py
|
CheungBH/yolov3-channel-and-layer-pruning
|
457f81386cbc54ace0ad677581e383c516305ba9
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
CheungBH/yolov3-channel-and-layer-pruning
|
457f81386cbc54ace0ad677581e383c516305ba9
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
CheungBH/yolov3-channel-and-layer-pruning
|
457f81386cbc54ace0ad677581e383c516305ba9
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
from torch.utils.data import DataLoader
from models import *
from utils.datasets import *
from utils.utils import *
def test(cfg,
data,
weights=None,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.001,
nms_thres=0.5,
save_json=False,
model=None,
writer=None):
# Initialize/load model and set device
if model is None:
device = torch_utils.select_device(opt.device)
verbose = True
# Initialize model
model = Darknet(cfg, img_size).to(device)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
_ = load_darknet_weights(model, weights)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
else:
device = next(model.parameters()).device # get model device
verbose = False
# Configure run
data = parse_data_cfg(data)
nc = int(data['classes']) # number of classes
test_path = data['valid'] # path to test images
names = load_classes(data['names']) # class names
# Dataloader
dataset = LoadImagesAndLabels(test_path, img_size, batch_size)
dataloader = DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size, 16]),
shuffle=True,
pin_memory=True,
collate_fn=dataset.collate_fn)
seen = 0
model.eval()
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP', 'F1')
p, r, f1, mp, mr, map, mf1 = 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3)
write_tb = True
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
targets = targets.to(device)
imgs = imgs.to(device)
_, _, height, width = imgs.shape # batch size, channels, height, width
# Plot images with bounding boxes
if batch_i == 0 and not os.path.exists('test_batch0.jpg'):
plot_images(imgs=imgs, targets=targets, paths=paths, fname='test_batch0.jpg')
# Run model
inf_out, train_out = model(imgs) # inference and training outputs
# Compute loss
if hasattr(model, 'hyp'): # if model has loss hyperparameters
loss += compute_loss(train_out, targets, model)[1][:3].cpu() # GIoU, obj, cls
# Run NMS
output = non_max_suppression(inf_out, conf_thres=conf_thres, nms_thres=nms_thres)
all_none = [None] * len(output)
#
if writer and write_tb:
# if writer and None not in output and write_tb:
outs = out2ls(output)
write_tb = False
plot_output(imgs, outs, writer)
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
if pred is None:
if nl:
stats.append(([], torch.Tensor(), torch.Tensor(), tcls))
continue
# Append to text file
# with open('test.txt', 'a') as file:
# [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(Path(paths[si]).stem.split('_')[-1])
box = pred[:, :4].clone() # xyxy
scale_coords(imgs[si].shape[1:], box, shapes[si]) # to original shape
box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for di, d in enumerate(pred):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(d[6])],
'bbox': [floatn(x, 3) for x in box[di]],
'score': floatn(d[4], 5)})
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Assign all predictions as incorrect
correct = [0] * len(pred)
if nl:
detected = []
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
tbox[:, [0, 2]] *= width
tbox[:, [1, 3]] *= height
# Search for correct predictions
for i, (*pbox, pconf, pcls_conf, pcls) in enumerate(pred):
# Break if all targets already located in image
if len(detected) == nl:
break
# Continue if predicted class not among image classes
if pcls.item() not in tcls:
continue
# Best iou, index between pred and targets
m = (pcls == tcls_tensor).nonzero().view(-1)
iou, bi = bbox_iou(pbox, tbox[m]).max(0)
# If iou > threshold and class is correct mark as correct
if iou > iou_thres and m[bi] not in detected: # and pcls == tcls[bi]:
correct[i] = 1
detected.append(m[bi])
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls))
# Compute statistics
stats = [np.concatenate(x, 0) for x in list(zip(*stats))] # to numpy
if len(stats):
p, r, ap, f1, ap_class = ap_per_class(*stats)
mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%10.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))
# Print results per class
if verbose and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))
# Save JSON
if save_json and map and len(jdict):
try:
imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataset.img_files]
with open('results.json', 'w') as file:
json.dump(jdict, file)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
cocoGt = COCO('../coco/annotations/instances_val2014.json') # initialize COCO ground truth api
cocoDt = cocoGt.loadRes('results.json') # initialize COCO pred api
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.params.imgIds = imgIds # [:32] # only evaluate these images
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
map = cocoEval.stats[1] # update mAP to pycocotools mAP
except:
print('WARNING: missing dependency pycocotools from requirements.txt. Can not compute official COCO mAP.')
# Return results
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map, mf1, *(loss / len(dataloader)).tolist()), maps
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/coco.data', help='coco.data file path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp.weights', help='path to weights file')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=416, help='inference size (pixels)')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.5, help='iou threshold for non-maximum suppression')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
test(opt.cfg,
opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.save_json)
| 40.760684
| 119
| 0.538373
|
d60a6e3b0a67d742b6a22928bbb0aeead470848d
| 395
|
py
|
Python
|
scheduler/wsgi.py
|
arpit456jain/Interview-Scheduler
|
b8b93e319c3a8a7c2ae72f8082e0ffced6e05f42
|
[
"MIT"
] | null | null | null |
scheduler/wsgi.py
|
arpit456jain/Interview-Scheduler
|
b8b93e319c3a8a7c2ae72f8082e0ffced6e05f42
|
[
"MIT"
] | 1
|
2022-03-16T19:08:43.000Z
|
2022-03-17T15:59:01.000Z
|
scheduler/wsgi.py
|
arpit456jain/Interview-Scheduler
|
b8b93e319c3a8a7c2ae72f8082e0ffced6e05f42
|
[
"MIT"
] | null | null | null |
"""
WSGI config for scheduler project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scheduler.settings')
application = get_wsgi_application()
| 23.235294
| 78
| 0.787342
|
de2570089c438f6383e1ae403d6432866bdebbd2
| 2,602
|
py
|
Python
|
test/testcontrolfile.py
|
Jangzq/u-postgres-tool
|
10578bdf338b14d63b19264a772830ef894a2b91
|
[
"Unlicense"
] | null | null | null |
test/testcontrolfile.py
|
Jangzq/u-postgres-tool
|
10578bdf338b14d63b19264a772830ef894a2b91
|
[
"Unlicense"
] | null | null | null |
test/testcontrolfile.py
|
Jangzq/u-postgres-tool
|
10578bdf338b14d63b19264a772830ef894a2b91
|
[
"Unlicense"
] | null | null | null |
import unittest
import logging.config
import shutil
from upgtool.controlfile import ControlFile
from upgtool.upgexception import UPgException
from upgtool import pgstruct
from upgtool import upgcrc
logging.config.fileConfig('cfg/logging.conf')
class TestControlFile(unittest.TestCase):
def setUp(self):
pgstruct.load_offset_cfg('cfg/pg95-offset.dat')
pgstruct.load_type_size('cfg/pg95-type.dat')
upgcrc.initcrc('lib/pg_crc.so')
def test_load(self):
shutil.copy('test-data/pg_control', 'test-data/global/pg_control')
ctrlfile = ControlFile('test-data');
self.assertTrue(ctrlfile.systemidentifier == 6145043555394211565)
self.assertTrue(ctrlfile.catalog_version_no == 201504291)
self.assertTrue(ctrlfile.dbstate == 1)
self.assertTrue(ctrlfile.checkPoint == 301989928)
self.assertTrue(ctrlfile.minRecoveryPoint == 0)
self.assertTrue(ctrlfile.minRecoveryPointTLI == 0)
self.assertTrue(ctrlfile.xlog_blcksz == 8192)
self.assertTrue(ctrlfile.xlog_seg_size == 16777216)
self.assertTrue(ctrlfile.blcksz == 8192)
self.assertTrue(ctrlfile.relseg_size == 131072)
print(ctrlfile.checkPointCopy.time)
self.assertTrue(ctrlfile.checkPointCopy.redo == 301989928);
self.assertTrue(ctrlfile.checkPointCopy.ThisTimeLineID == 1);
self.assertTrue(ctrlfile.checkPointCopy.PrevTimeLineID == 1);
self.assertTrue(ctrlfile.checkPointCopy.fullPageWrites == 1);
self.assertTrue(ctrlfile.checkPointCopy.nextXidEpoch == 0);
self.assertTrue(ctrlfile.checkPointCopy.nextXid == 789);
self.assertTrue(ctrlfile.checkPointCopy.nextOid == 16437);
self.assertTrue(ctrlfile.checkPointCopy.nextMulti == 1);
self.assertTrue(ctrlfile.checkPointCopy.nextMultiOffset == 0);
self.assertTrue(ctrlfile.checkPointCopy.oldestXid == 658);
self.assertTrue(ctrlfile.checkPointCopy.oldestXidDB == 1);
self.assertTrue(ctrlfile.checkPointCopy.oldestMulti == 1);
self.assertTrue(ctrlfile.checkPointCopy.oldestMultiDB == 1);
self.assertTrue(ctrlfile.checkPointCopy.time == 1434128134);
self.assertTrue(ctrlfile.checkPointCopy.oldestCommitTs == 0);
def test_crcerror(self):
shutil.copy('test-data/pg_control_crcerror', 'test-data/global/pg_control')
try:
ctrlfile = ControlFile('test-data');
self.assertTrue(False)
except UPgException:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 44.101695
| 83
| 0.701768
|
2c4bce2d9c6f988b4802b85123f3b88e1466582e
| 1,166
|
py
|
Python
|
src/tashi/dfs/diskimageinterface.py
|
apache/tashi
|
87f55e4d30800c085ea786bf40c9412b816969e6
|
[
"Apache-2.0"
] | 6
|
2015-02-26T22:52:15.000Z
|
2021-11-10T16:04:40.000Z
|
src/tashi/dfs/diskimageinterface.py
|
stroucki/tashi
|
6888a865c184ea80f10e70981addc73a262a81b7
|
[
"Apache-2.0"
] | null | null | null |
src/tashi/dfs/diskimageinterface.py
|
stroucki/tashi
|
6888a865c184ea80f10e70981addc73a262a81b7
|
[
"Apache-2.0"
] | 6
|
2015-06-29T19:03:20.000Z
|
2021-11-10T16:04:30.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class DiskImageInterface():
def __init__(self, config):
if self.__class__ is DiskImageInterface:
raise NotImplementedError
self.config = config
def cloneImage(self, srcImage, dstImage):
raise NotImplementedError
def rebaseImage(self, srcImage, dstImage):
raise NotImplementedError
def getImageInfo(self, srcImage, dstImage):
raise NotImplementedError
| 36.4375
| 62
| 0.77187
|
5c5d72655ea366d43c3f41a8ac8f7530e656aa09
| 2,911
|
py
|
Python
|
tests/whatsapp/models/test_location_message.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
tests/whatsapp/models/test_location_message.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
tests/whatsapp/models/test_location_message.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
import pytest
from pydantic.error_wrappers import ValidationError
from infobip_channels.whatsapp.models.body.core import MessageBody
from infobip_channels.whatsapp.models.body.location_message import LocationMessageBody
from tests.conftest import get_random_string
from tests.whatsapp.conftest import LocationMessageBodyFactory
def test_location_message_body__is_an_instance_of_message_body():
assert isinstance(LocationMessageBodyFactory.build(), MessageBody) is True
@pytest.mark.parametrize(
"content",
[
None,
"",
{},
{"latitude": 42, "name": "test", "address": "address one"},
{"longitude": 120, "name": "test", "address": "address one"},
],
)
def test_when_content_is_invalid__validation_error_is_raised(content):
with pytest.raises(ValidationError):
LocationMessageBodyFactory.build(**{"content": content})
@pytest.mark.parametrize("latitude", [None, "", {}, -90.001, 90.0001])
def test_when_content_latitude_is_invalid__validation_error_is_raised(latitude):
with pytest.raises(ValidationError):
LocationMessageBodyFactory.build(
**{"content": {"latitude": latitude, "longitude": 120.53}}
)
@pytest.mark.parametrize("longitude", [None, "", {}, -181.0, 181.0])
def test_when_content_longitude_is_invalid__validation_error_is_raised(longitude):
with pytest.raises(ValidationError):
LocationMessageBodyFactory.build(
**{"content": {"longitude": longitude, "latitude": -50.934}}
)
@pytest.mark.parametrize("name", [{}, get_random_string(1001)])
def test_when_content_name_is_invalid__validation_error_is_raised(name):
with pytest.raises(ValidationError):
LocationMessageBodyFactory.build(
**{"content": {"longitude": 130.5541, "latitude": -50.934, "name": name}}
)
@pytest.mark.parametrize("address", [{}, get_random_string(1001)])
def test_when_content_address_is_invalid__validation_error_is_raised(address):
with pytest.raises(ValidationError):
LocationMessageBodyFactory.build(
**{
"content": {
"longitude": -165.33,
"latitude": -89.205,
"address": address,
}
}
)
def test_when_input_data_is_valid__validation_error_is_not_raised():
try:
LocationMessageBody(
**{
"from": "441134960000",
"to": "38598451987",
"messageId": "a28dd97c-1ffb-4fcf-99f1-0b557ed381da",
"content": {
"latitude": 83,
"longitude": -103,
"name": "test",
"address": "test",
},
"callbackData": "Callback data",
}
)
except ValidationError:
pytest.fail("Unexpected ValidationError raised")
| 34.247059
| 86
| 0.63449
|
27e1d835295f6a7aa3d544bbcb1a06eb9c089ba4
| 19,829
|
py
|
Python
|
google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
|
bingatgoogle/python-videointelligence
|
000e9bc7c1d1498eff75fa12e9ee4a111ebcf830
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
|
bingatgoogle/python-videointelligence
|
000e9bc7c1d1498eff75fa12e9ee4a111ebcf830
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py
|
bingatgoogle/python-videointelligence
|
000e9bc7c1d1498eff75fa12e9ee4a111ebcf830
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.videointelligence_v1beta2.types import video_intelligence
from .transports.base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import VideoIntelligenceServiceGrpcTransport
from .transports.grpc_asyncio import VideoIntelligenceServiceGrpcAsyncIOTransport
class VideoIntelligenceServiceClientMeta(type):
"""Metaclass for the VideoIntelligenceService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[VideoIntelligenceServiceTransport]]
_transport_registry["grpc"] = VideoIntelligenceServiceGrpcTransport
_transport_registry["grpc_asyncio"] = VideoIntelligenceServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[VideoIntelligenceServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class VideoIntelligenceServiceClient(metaclass=VideoIntelligenceServiceClientMeta):
"""Service that implements Google Cloud Video Intelligence API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "videointelligence.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
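# Illustrative usage (the key file path is a placeholder):
#   client = VideoIntelligenceServiceClient.from_service_account_file("service-account.json")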
@property
def transport(self) -> VideoIntelligenceServiceTransport:
"""Returns the transport used by the client instance.
Returns:
VideoIntelligenceServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VideoIntelligenceServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the video intelligence service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, VideoIntelligenceServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, VideoIntelligenceServiceTransport):
# transport is a VideoIntelligenceServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
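    # A minimal construction sketch for the options described in the docstring above
    # ("my-api.example.com" is a placeholder endpoint; default credentials are assumed):
    #
    #   from google.api_core.client_options import ClientOptions
    #
    #   client = VideoIntelligenceServiceClient(
    #       client_options=ClientOptions(api_endpoint="my-api.example.com"),
    #   )
    #
    # A pre-built transport instance may be passed instead, in which case its
    # credentials and scopes must be configured on the transport itself (see the
    # checks performed above).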
def annotate_video(
self,
request: video_intelligence.AnnotateVideoRequest = None,
*,
input_uri: str = None,
features: Sequence[video_intelligence.Feature] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Args:
request (google.cloud.videointelligence_v1beta2.types.AnnotateVideoRequest):
The request object. Video annotation request.
input_uri (str):
Input video location. Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are
supported, which must be specified in the following
format: ``gs://bucket-id/object-id`` (other URI formats
return
[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
For more information, see `Request
URIs <https://cloud.google.com/storage/docs/request-endpoints>`__.
A video URI may include wildcards in ``object-id``, and
thus identify multiple videos. Supported wildcards: '*'
to match 0 or more characters; '?' to match 1 character.
If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content``
should be unset.
This corresponds to the ``input_uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
features (Sequence[google.cloud.videointelligence_v1beta2.types.Feature]):
Required. Requested video annotation
features.
This corresponds to the ``features`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.videointelligence_v1beta2.types.AnnotateVideoResponse` Video annotation response. Included in the response
field of the Operation returned by the GetOperation
call of the google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([input_uri, features])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a video_intelligence.AnnotateVideoRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, video_intelligence.AnnotateVideoRequest):
request = video_intelligence.AnnotateVideoRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if input_uri is not None:
request.input_uri = input_uri
if features is not None:
request.features = features
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.annotate_video]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
video_intelligence.AnnotateVideoResponse,
metadata_type=video_intelligence.AnnotateVideoProgress,
)
# Done; return the response.
return response
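    # A minimal usage sketch for annotate_video (bucket and object names are illustrative):
    #
    #   client = VideoIntelligenceServiceClient()
    #   operation = client.annotate_video(
    #       input_uri="gs://my-bucket/my-video.mp4",
    #       features=[video_intelligence.Feature.LABEL_DETECTION],
    #   )
    #   response = operation.result(timeout=600)  # AnnotateVideoResponse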
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-videointelligence",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("VideoIntelligenceServiceClient",)
| 43.772627 | 185 | 0.647032 |
bb991f59079aa0ffb64cbd0df7ba4ea8ffa73a80 | 4,619 | py | Python | sentry/plugins/sentry_redmine/models.py | optimal-outsource/django-sentry | 1b55011f1474cde8707a9febaf0c64c8142496d2 | ["BSD-3-Clause"] | null | null | null | sentry/plugins/sentry_redmine/models.py | optimal-outsource/django-sentry | 1b55011f1474cde8707a9febaf0c64c8142496d2 | ["BSD-3-Clause"] | null | null | null | sentry/plugins/sentry_redmine/models.py | optimal-outsource/django-sentry | 1b55011f1474cde8707a9febaf0c64c8142496d2 | ["BSD-3-Clause"] | null | null | null |
"""
sentry.plugins.sentry_redmine.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import forms
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
from sentry.models import GroupedMessage
from sentry.plugins import GroupActionProvider
from sentry.plugins.sentry_redmine import conf
from sentry.utils import json
import base64
import urllib.request, urllib.parse, urllib.error
class RedmineIssue(models.Model):
group = models.ForeignKey(GroupedMessage)
issue_id = models.PositiveIntegerField()
class RedmineIssueForm(forms.Form):
subject = forms.CharField(max_length=200)
description = forms.CharField(widget=forms.Textarea())
class CreateRedmineIssue(GroupActionProvider):
title = 'Create Redmine Issue'
def actions(self, request, action_list, group):
if 'redmine' not in group.data:
action_list.append((self.title, self.__class__.get_url(group.pk)))
return action_list
def view(self, request, group):
if request.POST:
form = RedmineIssueForm(request.POST)
if form.is_valid():
data = json.dumps({
'key': conf.REDMINE_API_KEY,
'issue': {
'subject': form.cleaned_data['subject'],
'description': form.cleaned_data['description'],
}
})
url = conf.REDMINE_URL + '/projects/' + conf.REDMINE_PROJECT_SLUG + '/issues.json'
req = urllib.request.Request(url, urllib.parse.urlencode({
'key': conf.REDMINE_API_KEY,
}), headers={
'Content-type': 'application/json',
})
if conf.REDMINE_USERNAME and conf.REDMINE_PASSWORD:
                    # base64.encodestring() was removed in Python 3.9; encodebytes() is the
                    # equivalent and still appends a trailing newline, which [:-1] strips.
                    authstring = base64.encodebytes(
                        ('%s:%s' % (conf.REDMINE_USERNAME, conf.REDMINE_PASSWORD)).encode('utf-8')
                    ).decode('ascii')[:-1]
req.add_header("Authorization", "Basic %s" % authstring)
try:
                    # json.dumps() returns str; urlopen() requires bytes for the request body.
                    response = urllib.request.urlopen(req, data.encode('utf-8')).read()
except urllib.error.HTTPError as e:
if e.code == 422:
data = json.loads(e.read())
form.errors['__all__'] = 'Missing or invalid data'
for message in data:
for k, v in message.items():
if k in form.fields:
form.errors.setdefault(k, []).append(v)
else:
form.errors['__all__'] += '; %s: %s' % (k, v)
else:
form.errors['__all__'] = 'Bad response from Redmine: %s %s' % (e.code, e.msg)
except urllib.error.URLError as e:
form.errors['__all__'] = 'Unable to reach Redmine host: %s' % (e.reason,)
else:
data = json.loads(response)
RedmineIssue.objects.create(group=group, issue_id=data['issue']['id'])
group.data['redmine'] = {'issue_id': data['issue']['id']}
group.save()
return HttpResponseRedirect(reverse('sentry-group', args=[group.pk]))
else:
description = 'Sentry Message: %s' % request.build_absolute_uri(group.get_absolute_url())
description += '\n\n<pre>' + (group.traceback or group.message) + '</pre>'
form = RedmineIssueForm(initial={
'subject': group.error(),
'description': description,
})
global_errors = form.errors.get('__all__')
BASE_TEMPLATE = "sentry/group/details.html"
context = locals()
context.update(csrf(request))
return render_to_response('sentry/plugins/redmine/create_issue.html', context)
def tags(self, request, tags, group):
if 'redmine' in group.data:
issue_id = group.data['redmine']['issue_id']
tags.append(mark_safe('<a href="%s">#%s</a>' % (
'%s/issues/%s' % (conf.REDMINE_URL, issue_id),
issue_id,
)))
return tags
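# For reference, the view above POSTs a JSON body of roughly this shape (values are
# illustrative) to <REDMINE_URL>/projects/<REDMINE_PROJECT_SLUG>/issues.json:
#
#   {"key": "<api key>",
#    "issue": {"subject": "Exception: something broke",
#              "description": "Sentry Message: http://sentry.example.com/group/1\n\n<pre>...</pre>"}}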
| 41.241071 | 115 | 0.558129 |
0368ea315314cad76e74e691091a5d364daf62f0 | 38,269 | py | Python | pytests/backuptests.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | ["Apache-2.0"] | 1 | 2020-08-31T18:51:45.000Z | 2020-08-31T18:51:45.000Z | pytests/backuptests.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | ["Apache-2.0"] | null | null | null | pytests/backuptests.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | ["Apache-2.0"] | 2 | 2020-07-24T07:12:01.000Z | 2022-03-17T23:43:28.000Z |
import time
import unittest
import uuid
import crc32
from TestInput import TestInputSingleton
import logger
import mc_bin_client
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
from builds.build_query import BuildQuery
import testconstants
import copy
from basetestcase import BaseTestCase
class BackupRestoreTests(BaseTestCase):
input = None
servers = None
log = None
membase = None
shell = None
remote_tmp_folder = None
master = None
def setUp(self):
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.servers = self.input.servers
self.master = self.servers[0]
self.shell = RemoteMachineShellConnection(self.master)
        # When using custom data_paths (smaller/separate disk sizes), create the
        # backup in those custom paths (helpful when running on EC2).
info = RestConnection(self.master).get_nodes_self()
data_path = info.storage[0].get_data_path()
self.remote_tmp_folder = None
self.remote_tmp_folder = "{2}/{0}-{1}".format("backup", uuid.uuid4(), data_path)
self.is_membase = False
self.perm_command = "mkdir -p {0}".format(self.remote_tmp_folder)
if not self.shell.is_couchbase_installed():
self.is_membase = True
def common_setUp(self):
ClusterOperationHelper.cleanup_cluster(self.servers)
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
for server in self.servers:
shell = RemoteMachineShellConnection(server)
shell.stop_membase()
shell.stop_couchbase()
shell.start_membase()
shell.start_couchbase()
RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=120)
shell.disconnect()
def tearDown(self):
for server in self.servers:
self.log.info("delete remote folder @ {0}".format(self.remote_tmp_folder))
shell = RemoteMachineShellConnection(server)
shell.remove_directory(self.remote_tmp_folder)
shell.disconnect()
def add_node_and_rebalance(self, master, servers):
ClusterOperationHelper.add_all_nodes_or_assert(master, servers, self.input.membase_settings, self)
rest = RestConnection(master)
nodes = rest.node_statuses()
otpNodeIds = []
for node in nodes:
otpNodeIds.append(node.id)
rebalanceStarted = rest.rebalance(otpNodeIds, [])
self.assertTrue(rebalanceStarted,
"unable to start rebalance on master node {0}".format(master.ip))
self.log.info('started rebalance operation on master node {0}'.format(master.ip))
rebalanceSucceeded = rest.monitorRebalance()
self.assertTrue(rebalanceSucceeded,
"rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.log.info('rebalance operation succeeded for nodes: {0}'.format(otpNodeIds))
        # now remove the nodes
        # make sure it's rebalanced and node statuses are healthy
helper = RestHelper(rest)
        self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
        self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
def add_nodes_and_rebalance(self):
self.add_node_and_rebalance(master=self.servers[0], servers=self.servers)
def _test_backup_add_restore_bucket_body(self,
bucket,
delay_after_data_load,
startup_flag,
single_node):
server = self.master
rest = RestConnection(server)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
if bucket == "default":
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
else:
proxyPort = info.moxi + 500
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
authType="sasl", saslPassword="password")
ready = BucketOperationHelper.wait_for_memcached(server, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
if not single_node:
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=1,
value_size_distribution=distribution,
moxi=True,
write_only=True,
number_of_threads=2)
if not single_node:
rest = RestConnection(self.master)
self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
msg="replication did not complete")
self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
node = RestConnection(self.master).get_nodes_self()
if not startup_flag:
for server in self.servers:
shell = RemoteMachineShellConnection(server)
shell.stop_membase()
shell.stop_couchbase()
shell.disconnect()
output, error = self.shell.execute_command(self.perm_command)
self.shell.log_command_output(output, error)
#now let's back up
BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)
if not startup_flag:
for server in self.servers:
shell = RemoteMachineShellConnection(server)
shell.start_membase()
shell.start_couchbase()
RestHelper(RestConnection(server)).is_ns_server_running()
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
if bucket == "default":
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
else:
proxyPort = info.moxi + 500
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
authType="sasl", saslPassword="password")
BucketOperationHelper.wait_for_memcached(self.master, bucket)
if bucket == "default":
BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
else:
BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='password')
keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
self.assertTrue(keys_exist, msg="unable to verify keys after restore")
def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
bucket = "default"
rest = RestConnection(self.master)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
BucketOperationHelper.wait_for_memcached(self.master, bucket)
client = MemcachedClientHelper.direct_client(self.master, bucket)
expiry = 60
test_uuid = uuid.uuid4()
keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
self.log.info("pushing keys with expiry set to {0}".format(expiry))
for key in keys:
try:
client.set(key, expiry, 0, key)
except mc_bin_client.MemcachedError as error:
msg = "unable to push key : {0} to bucket : {1} error : {2}"
self.log.error(msg.format(key, client.vbucketId, error.status))
self.fail(msg.format(key, client.vbucketId, error.status))
client.close()
self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
node = RestConnection(self.master).get_nodes_self()
output, error = self.shell.execute_command(self.perm_command)
self.shell.log_command_output(output, error)
backupHelper = BackupHelper(self.master, self)
backupHelper.backup(bucket, node, self.remote_tmp_folder)
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
BucketOperationHelper.wait_for_memcached(self.master, bucket)
backupHelper.restore(self.remote_tmp_folder)
time.sleep(60)
client = MemcachedClientHelper.direct_client(self.master, bucket)
self.log.info('verifying that all those keys have expired...')
for key in keys:
try:
client.get(key=key)
msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
self.fail(msg.format(expiry, key, expiry))
except mc_bin_client.MemcachedError as error:
self.assertEqual(error.status, 1,
msg="expected error code {0} but saw error code {1}".format(1, error.status))
client.close()
self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
bucket = "default"
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.add_nodes_and_rebalance()
client = MemcachedClientHelper.direct_client(self.master, "default")
expiry = 2400
test_uuid = uuid.uuid4()
keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
self.log.info("pushing keys with expiry set to {0}".format(expiry))
for key in keys:
try:
client.set(key, expiry, 0, "1")
except mc_bin_client.MemcachedError as error:
msg = "unable to push key : {0} to bucket : {1} error : {2}"
self.log.error(msg.format(key, client.vbucketId, error.status))
self.fail(msg.format(key, client.vbucketId, error.status))
self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
shell.disconnect()
for key in keys:
try:
client.replace(key, expiry, 0, "2")
except mc_bin_client.MemcachedError as error:
msg = "unable to replace key : {0} in bucket : {1} error : {2}"
self.log.error(msg.format(key, client.vbucketId, error.status))
self.fail(msg.format(key, client.vbucketId, error.status))
self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
time.sleep(10)
self.log.info('verifying that all those keys...')
for key in keys:
if overwrite_flag:
self.assertEqual("2", client.get(key=key), key + " should has value = 2")
else:
self.assertNotEqual("2", client.get(key=key), key + " should not has value = 2")
self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
def _test_cluster_topology_change_body(self):
bucket = "default"
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
ram_load_ratio=1,
value_size_distribution=distribution,
moxi=True,
write_only=True,
number_of_threads=2)
self.log.info("Sleep after data load")
ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
#let's create a unique folder in the remote location
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
shell.disconnect()
ClusterOperationHelper.cleanup_cluster(self.servers)
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
servers = []
for i in range(0, len(self.servers) - 1):
servers.append(self.servers[i])
self.add_node_and_rebalance(servers[0], servers)
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder)
time.sleep(10)
BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
def _test_delete_key_and_backup_and_restore_body(self):
bucket = "default"
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
client = MemcachedClientHelper.direct_client(self.master, "default")
expiry = 2400
test_uuid = uuid.uuid4()
keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
self.log.info("pushing keys with expiry set to {0}".format(expiry))
for key in keys:
try:
client.set(key, expiry, 0, "1")
except mc_bin_client.MemcachedError as error:
msg = "unable to push key : {0} to bucket : {1} error : {2}"
self.log.error(msg.format(key, client.vbucketId, error.status))
self.fail(msg.format(key, client.vbucketId, error.status))
self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
client.delete(keys[0])
ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
#let's create a unique folder in the remote location
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
shell.disconnect()
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder)
time.sleep(10)
self.log.info('verifying that all those keys...')
missing_keys = []
verify_keys = []
for key in keys:
vBucketId = crc32.crc32_hash(key) & 1023 # or & 0x3FF
client.vbucketId = vBucketId
if key == keys[0]:
missing_keys.append(key)
else:
verify_keys.append(key)
self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
"Keys are not empty")
self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
"Missing keys")
def _test_backup_and_restore_on_different_port_body(self):
bucket_before_backup = "bucket_before_backup"
bucket_after_backup = "bucket_after_backup"
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket_before_backup,
ram_load_ratio=1,
value_size_distribution=distribution,
write_only=True,
moxi=True,
number_of_threads=2)
self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket_before_backup, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11213,
test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
self.assertTrue(ready, "wait_for_memcached failed")
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11213)
time.sleep(10)
self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11213, debug=False,
bucket=bucket_after_backup), "Missing keys")
def _test_backup_and_restore_from_to_different_buckets(self):
bucket_before_backup = "bucket_before_backup"
bucket_after_backup = "bucket_after_backup"
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket_before_backup,
ram_load_ratio=20,
value_size_distribution=distribution,
write_only=True,
moxi=True,
number_of_threads=2)
self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket_before_backup, bucket_type=self.bucket_type)
self.assertTrue(ready, "not all items persisted. see logs")
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
self.assertTrue(ready, "wait_for_memcached failed")
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
time.sleep(10)
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
bucket=bucket_after_backup), "Missing keys")
def test_backup_upgrade_restore_default(self):
if len(self.servers) < 2:
self.log.error("At least 2 servers required for this test ..")
return
original_set = copy.copy(self.servers)
worker = self.servers[len(self.servers) - 1]
self.servers = self.servers[:len(self.servers) - 1]
shell = RemoteMachineShellConnection(self.master)
o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
fin = o[0]
shell.disconnect()
initial_version = self.input.param("initial_version", fin)
final_version = self.input.param("final_version", fin)
if initial_version == final_version:
self.log.error("Same initial and final versions ..")
return
if not final_version.startswith('2.0'):
self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
return
builds, changes = BuildQuery().get_all_builds(version=final_version)
product = 'couchbase-server-enterprise'
#CASE where the worker isn't a 2.0+
worker_flag = 0
shell = RemoteMachineShellConnection(worker)
o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
temp = o[0]
if not temp.startswith('2.0'):
worker_flag = 1
if worker_flag == 1:
self.log.info("Loading version {0} on worker.. ".format(final_version))
remote = RemoteMachineShellConnection(worker)
info = remote.extract_remote_info()
older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, final_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(older_build)
remote.install_server(older_build)
remote.disconnect()
remote_tmp = "{1}/{0}".format("backup", "/root")
perm_comm = "mkdir -p {0}".format(remote_tmp)
if not initial_version == fin:
for server in self.servers:
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
self.log.info("Loading version .. {0}".format(initial_version))
older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, initial_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(older_build)
remote.install_server(older_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
self.common_setUp()
bucket = "default"
if len(self.servers) > 1:
self.add_nodes_and_rebalance()
rest = RestConnection(self.master)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
rest.create_bucket(bucket, ramQuotaMB=size)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached_failed")
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=0.5,
value_size_distribution=distribution,
moxi=True,
write_only=True,
delete_ratio=0.1,
number_of_threads=2)
if len(self.servers) > 1:
rest = RestConnection(self.master)
self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
msg="replication did not complete")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
node = RestConnection(self.master).get_nodes_self()
shell = RemoteMachineShellConnection(worker)
o, r = shell.execute_command(perm_comm)
shell.log_command_output(o, r)
shell.disconnect()
#Backup
#BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
self.master.ip, self.master.port, remote_tmp))
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
time.sleep(30)
#Upgrade
for server in self.servers:
self.log.info("Upgrading to current version {0}".format(final_version))
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, final_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(new_build)
remote.install_server(new_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
time.sleep(30)
#Restore
rest = RestConnection(self.master)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached_failed")
#BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
self.master.ip, self.master.port, remote_tmp, bucket))
shell.disconnect()
time.sleep(60)
keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
self.assertTrue(keys_exist, msg="unable to verify keys after restore")
time.sleep(30)
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
rest = RestConnection(self.master)
helper = RestHelper(rest)
nodes = rest.node_statuses()
master_id = rest.get_nodes_self().id
if len(self.servers) > 1:
removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in nodes if node.id != master_id],
wait_for_rebalance=True)
shell = RemoteMachineShellConnection(worker)
shell.remove_directory(remote_tmp)
shell.disconnect()
self.servers = copy.copy(original_set)
if initial_version == fin:
builds, changes = BuildQuery().get_all_builds(version=initial_version)
for server in self.servers:
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
self.log.info("Loading version .. {0}".format(initial_version))
older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, initial_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(older_build)
remote.install_server(older_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
def test_backup_add_restore_default_bucket_started_server(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("default", 180, True, True)
def test_non_default_bucket(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body(str(uuid.uuid4()), 60, True, True)
def test_default_bucket(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("default", 60, True, True)
def test_backup_add_restore_non_default_bucket_started_server(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("testb", 180, True, True)
#self._test_backup_add_restore_bucket_body(bucket="test_bucket")
def test_backup_add_restore_default_bucket_non_started_server(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("default", 180, False, True)
def test_backup_add_restore_non_default_bucket_non_started_server(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("testb", 180, False, True)
#self._test_backup_add_restore_bucket_body(bucket="test_bucket", startup_flag = False)
def test_backup_add_restore_when_ide(self):
self.common_setUp()
self._test_backup_add_restore_bucket_body("default", 120, True, True)
def test_expired_keys_1_replica(self):
self.common_setUp()
self._test_backup_add_restore_bucket_with_expiration_key(1)
def test_expired_keys_2_replica(self):
self.common_setUp()
self._test_backup_add_restore_bucket_with_expiration_key(2)
def test_expired_keys_3_replica(self):
self.common_setUp()
self._test_backup_add_restore_bucket_with_expiration_key(3)
def test_backup_and_restore_bucket_without_overwrite(self):
self.common_setUp()
self._test_backup_and_restore_bucket_overwriting_body(False)
def test_backup_and_restore_bucket_with_overwrite(self):
self.common_setUp()
self._test_backup_and_restore_bucket_overwriting_body()
def test_cluster_topology_change(self):
self.common_setUp()
self._test_cluster_topology_change_body()
def test_delete_key_and_backup_and_restore(self):
self.common_setUp()
self._test_delete_key_and_backup_and_restore_body()
def test_backup_and_restore_on_different_port(self):
self.common_setUp()
self._test_backup_and_restore_on_different_port_body()
def test_backup_and_restore_from_to_different_buckets(self):
self.common_setUp()
self._test_backup_and_restore_from_to_different_buckets()
class BackupHelper(object):
def __init__(self, serverInfo, test):
self.server = serverInfo
self.log = logger.Logger.get_logger()
self.test = test
#data_file = default-data/default
def backup(self, bucket, node, backup_location):
mbbackup_path = "{0}/{1}".format("/opt/couchbase/bin", "cbbackup")
if self.test.is_membase:
mbbackup_path = "{0}/{1}".format("/opt/membase/bin", "mbbackup")
data_directory = "{0}/{1}-{2}/{3}".format(node.storage[0].path, bucket, "data", bucket)
command = "{0} {1} {2}".format(mbbackup_path,
data_directory,
backup_location)
output, error = self.test.shell.execute_command(command)
self.test.shell.log_command_output(output, error)
def restore(self, backup_location, moxi_port=None, overwrite_flag=False, username=None, password=None):
command = "{0}/{1}".format("/opt/couchbase/bin", "cbrestore")
if self.test.is_membase:
command = "{0}/{1}".format("/opt/membase/bin", "mbrestore")
if not overwrite_flag:
command += " -a"
if not moxi_port is None:
command += " -p {0}".format(moxi_port)
if username is not None:
command += " -u {0}".format(username)
if password is not None:
command += " -P {0}".format(password)
files = self.test.shell.list_files(backup_location)
for file in files:
command += " " + file['path'] + "/" + file['file']
command = "{0}".format(command)
self.log.info(command)
output, error = self.test.shell.execute_command(command)
self.test.shell.log_command_output(output, error)
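# For reference, with illustrative paths the helpers above assemble commands roughly like:
#
#   /opt/couchbase/bin/cbbackup <data_path>/default-data/default <remote_tmp_folder>
#   /opt/couchbase/bin/cbrestore -a -p <moxi_port> <remote_tmp_folder>/<backup_file> ...
#
# (or the /opt/membase/bin mbbackup/mbrestore equivalents when Membase is installed).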
| 51.436828 | 150 | 0.609998 |
c85f82b7093153bad6ba620351543a2478108a6a | 2,165 | py | Python | openmdao/docs/_exts/embed_options.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | 1 | 2016-05-10T17:01:17.000Z | 2016-05-10T17:01:17.000Z | openmdao/docs/_exts/embed_options.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | 3 | 2016-05-10T16:55:46.000Z | 2018-10-22T23:28:52.000Z | openmdao/docs/_exts/embed_options.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | 2 | 2018-04-05T15:53:54.000Z | 2018-10-22T22:48:00.000Z |
import importlib
from six import iteritems
from docutils import nodes
from docutils.statemachine import ViewList
import sphinx
from docutils.parsers.rst import Directive
from sphinx.util.nodes import nested_parse_with_titles
from openmdao.utils.options_dictionary import OptionsDictionary, _undefined
class EmbedOptionsDirective(Directive):
"""
EmbedOptionsDirective is a custom directive to allow an OptionsDictionary
to be shown in a nice table form. An example usage would look like this:
.. embed-options::
openmdao.solvers.linear.petsc_ksp
PETScKrylov
options
    The 3 arguments are the module path, the class name, and the name of the options dictionary.
What the above will do is replace the directive and its args with a list of options
for the desired class.
"""
required_arguments = 3
optional_arguments = 0
has_content = True
def run(self):
module_path, class_name, attribute_name = self.arguments
mod = importlib.import_module(module_path)
klass = getattr(mod, class_name)
options = getattr(klass(), attribute_name)
if not isinstance(options, OptionsDictionary):
raise TypeError("Object '%s' is not an OptionsDictionary." % attribute_name)
lines = ViewList()
n = 0
for line in options.__rst__():
lines.append(line, "options table", n)
n += 1
        # Note: applicable to System, Solver, and Driver 'options', but not to 'recording_options'.
if attribute_name != 'recording_options':
lines.append("", "options table", n+1) # Blank line required after table.
# Create a node.
node = nodes.section()
node.document = self.state.document
# Parse the rst.
nested_parse_with_titles(self.state, lines, node)
# And return the result.
return node.children
def setup(app):
"""add custom directive into Sphinx so that it is found during document parsing"""
app.add_directive('embed-options', EmbedOptionsDirective)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
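# A minimal sketch of wiring this extension into a Sphinx ``conf.py`` (the path and
# module name below are assumptions about how the _exts directory is made importable):
#
#   import os, sys
#   sys.path.insert(0, os.path.abspath("_exts"))
#   extensions = ["embed_options"]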
| 30.069444 | 96 | 0.68776 |
f8d1622ae92d453b9c720704bbb0a9b319763d24 | 18,476 | py | Python | homeassistant/components/xiaomi_aqara/binary_sensor.py | MaxG88/core | 827711bcd153279ec56527927eaba4815bcde1d4 | ["Apache-2.0"] | null | null | null | homeassistant/components/xiaomi_aqara/binary_sensor.py | MaxG88/core | 827711bcd153279ec56527927eaba4815bcde1d4 | ["Apache-2.0"] | null | null | null | homeassistant/components/xiaomi_aqara/binary_sensor.py | MaxG88/core | 827711bcd153279ec56527927eaba4815bcde1d4 | ["Apache-2.0"] | null | null | null |
"""Support for Xiaomi aqara binary sensors."""
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OPENING,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from . import XiaomiDevice
from .const import DOMAIN, GATEWAYS_KEY
_LOGGER = logging.getLogger(__name__)
NO_CLOSE = "no_close"
ATTR_OPEN_SINCE = "Open since"
MOTION = "motion"
NO_MOTION = "no_motion"
ATTR_LAST_ACTION = "last_action"
ATTR_NO_MOTION_SINCE = "No motion since"
DENSITY = "density"
ATTR_DENSITY = "Density"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Perform the setup for Xiaomi devices."""
entities = []
gateway = hass.data[DOMAIN][GATEWAYS_KEY][config_entry.entry_id]
for entity in gateway.devices["binary_sensor"]:
model = entity["model"]
if model in ["motion", "sensor_motion", "sensor_motion.aq2"]:
entities.append(XiaomiMotionSensor(entity, hass, gateway, config_entry))
elif model in ["magnet", "sensor_magnet", "sensor_magnet.aq2"]:
entities.append(XiaomiDoorSensor(entity, gateway, config_entry))
elif model == "sensor_wleak.aq1":
entities.append(XiaomiWaterLeakSensor(entity, gateway, config_entry))
elif model in ["smoke", "sensor_smoke"]:
entities.append(XiaomiSmokeSensor(entity, gateway, config_entry))
elif model in ["natgas", "sensor_natgas"]:
entities.append(XiaomiNatgasSensor(entity, gateway, config_entry))
elif model in [
"switch",
"sensor_switch",
"sensor_switch.aq2",
"sensor_switch.aq3",
"remote.b1acn01",
]:
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "button_0"
entities.append(
XiaomiButton(entity, "Switch", data_key, hass, gateway, config_entry)
)
elif model in [
"86sw1",
"sensor_86sw1",
"sensor_86sw1.aq1",
"remote.b186acn01",
"remote.b186acn02",
]:
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key = "channel_0"
else:
data_key = "button_0"
entities.append(
XiaomiButton(
entity, "Wall Switch", data_key, hass, gateway, config_entry
)
)
elif model in [
"86sw2",
"sensor_86sw2",
"sensor_86sw2.aq1",
"remote.b286acn01",
"remote.b286acn02",
]:
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key_left = "channel_0"
data_key_right = "channel_1"
else:
data_key_left = "button_0"
data_key_right = "button_1"
entities.append(
XiaomiButton(
entity,
"Wall Switch (Left)",
data_key_left,
hass,
gateway,
config_entry,
)
)
entities.append(
XiaomiButton(
entity,
"Wall Switch (Right)",
data_key_right,
hass,
gateway,
config_entry,
)
)
entities.append(
XiaomiButton(
entity,
"Wall Switch (Both)",
"dual_channel",
hass,
gateway,
config_entry,
)
)
elif model in ["cube", "sensor_cube", "sensor_cube.aqgl01"]:
entities.append(XiaomiCube(entity, hass, gateway, config_entry))
elif model in ["vibration", "vibration.aq1"]:
entities.append(
XiaomiVibration(entity, "Vibration", "status", gateway, config_entry)
)
else:
_LOGGER.warning("Unmapped Device Model %s", model)
async_add_entities(entities)
class XiaomiBinarySensor(XiaomiDevice, BinarySensorEntity):
"""Representation of a base XiaomiBinarySensor."""
def __init__(self, device, name, xiaomi_hub, data_key, device_class, config_entry):
"""Initialize the XiaomiSmokeSensor."""
self._data_key = data_key
self._device_class = device_class
self._should_poll = False
self._density = 0
super().__init__(device, name, xiaomi_hub, config_entry)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return self._should_poll
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of binary sensor."""
return self._device_class
def update(self):
"""Update the sensor state."""
_LOGGER.debug("Updating xiaomi sensor (%s) by polling", self._sid)
self._get_from_hub(self._sid)
class XiaomiNatgasSensor(XiaomiBinarySensor):
"""Representation of a XiaomiNatgasSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiSmokeSensor."""
self._density = None
super().__init__(
device, "Natgas Sensor", xiaomi_hub, "alarm", "gas", config_entry
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ("1", "2"):
if self._state:
return False
self._state = True
return True
if value == "0":
if self._state:
self._state = False
return True
return False
class XiaomiMotionSensor(XiaomiBinarySensor):
"""Representation of a XiaomiMotionSensor."""
def __init__(self, device, hass, xiaomi_hub, config_entry):
"""Initialize the XiaomiMotionSensor."""
self._hass = hass
self._no_motion_since = 0
self._unsub_set_no_motion = None
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "motion_status"
super().__init__(
device, "Motion Sensor", xiaomi_hub, data_key, "motion", config_entry
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_NO_MOTION_SINCE: self._no_motion_since}
attrs.update(super().device_state_attributes)
return attrs
@callback
def _async_set_no_motion(self, now):
"""Set state to False."""
self._unsub_set_no_motion = None
self._state = False
self.async_write_ha_state()
def parse_data(self, data, raw_data):
"""Parse data sent by gateway.
Polling (proto v1, firmware version 1.4.1_159.0143)
>> { "cmd":"read","sid":"158..."}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'read_ack', 'data': '{"voltage":3005}'}
Multicast messages (proto v1, firmware version 1.4.1_159.0143)
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"status":"motion"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"120"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"180"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"300"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'heartbeat', 'data': '{"voltage":3005}'}
"""
if raw_data["cmd"] == "heartbeat":
_LOGGER.debug(
"Skipping heartbeat of the motion sensor. "
"It can introduce an incorrect state because of a firmware "
"bug (https://github.com/home-assistant/home-assistant/pull/"
"11631#issuecomment-357507744)"
)
return
if NO_MOTION in data:
self._no_motion_since = data[NO_MOTION]
self._state = False
return True
value = data.get(self._data_key)
if value is None:
return False
if value == MOTION:
if self._data_key == "motion_status":
if self._unsub_set_no_motion:
self._unsub_set_no_motion()
self._unsub_set_no_motion = async_call_later(
self._hass, 120, self._async_set_no_motion
)
if self.entity_id is not None:
self._hass.bus.fire(
"xiaomi_aqara.motion", {"entity_id": self.entity_id}
)
self._no_motion_since = 0
if self._state:
return False
self._state = True
return True
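# Worked example of the report messages documented in XiaomiMotionSensor.parse_data
# (the sid value is illustrative):
#
#   raw = {"model": "motion", "sid": "158d00...", "cmd": "report",
#          "data": '{"status": "motion"}'}
#   sensor.parse_data({"status": "motion"}, raw)    # -> True, sensor turns on
#   sensor.parse_data({"no_motion": "120"}, raw)    # -> True, sensor turns off again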
class XiaomiDoorSensor(XiaomiBinarySensor):
"""Representation of a XiaomiDoorSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiDoorSensor."""
self._open_since = 0
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "window_status"
super().__init__(
device,
"Door Window Sensor",
xiaomi_hub,
data_key,
DEVICE_CLASS_OPENING,
config_entry,
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_OPEN_SINCE: self._open_since}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_CLOSE in data: # handle push from the hub
self._open_since = data[NO_CLOSE]
return True
value = data.get(self._data_key)
if value is None:
return False
if value == "open":
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == "close":
self._open_since = 0
if self._state:
self._state = False
return True
return False
class XiaomiWaterLeakSensor(XiaomiBinarySensor):
"""Representation of a XiaomiWaterLeakSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiWaterLeakSensor."""
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "wleak_status"
super().__init__(
device,
"Water Leak Sensor",
xiaomi_hub,
data_key,
"moisture",
config_entry,
)
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
value = data.get(self._data_key)
if value is None:
return False
if value == "leak":
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == "no_leak":
if self._state:
self._state = False
return True
return False
class XiaomiSmokeSensor(XiaomiBinarySensor):
"""Representation of a XiaomiSmokeSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiSmokeSensor."""
self._density = 0
super().__init__(
device, "Smoke Sensor", xiaomi_hub, "alarm", "smoke", config_entry
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ("1", "2"):
if self._state:
return False
self._state = True
return True
if value == "0":
if self._state:
self._state = False
return True
return False
class XiaomiVibration(XiaomiBinarySensor):
"""Representation of a Xiaomi Vibration Sensor."""
def __init__(self, device, name, data_key, xiaomi_hub, config_entry):
"""Initialize the XiaomiVibration."""
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None, config_entry)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value not in ("vibrate", "tilt", "free_fall", "actively"):
_LOGGER.warning("Unsupported movement_type detected: %s", value)
return False
self.hass.bus.fire(
"xiaomi_aqara.movement",
{"entity_id": self.entity_id, "movement_type": value},
)
self._last_action = value
return True
class XiaomiButton(XiaomiBinarySensor):
"""Representation of a Xiaomi Button."""
def __init__(self, device, name, data_key, hass, xiaomi_hub, config_entry):
"""Initialize the XiaomiButton."""
self._hass = hass
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None, config_entry)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value == "long_click_press":
self._state = True
click_type = "long_click_press"
elif value == "long_click_release":
self._state = False
click_type = "hold"
elif value == "click":
click_type = "single"
elif value == "double_click":
click_type = "double"
elif value == "both_click":
click_type = "both"
elif value == "double_both_click":
click_type = "double_both"
elif value == "shake":
click_type = "shake"
elif value == "long_click":
click_type = "long"
elif value == "long_both_click":
click_type = "long_both"
else:
_LOGGER.warning("Unsupported click_type detected: %s", value)
return False
self._hass.bus.fire(
"xiaomi_aqara.click",
{"entity_id": self.entity_id, "click_type": click_type},
)
self._last_action = click_type
return True
class XiaomiCube(XiaomiBinarySensor):
"""Representation of a Xiaomi Cube."""
def __init__(self, device, hass, xiaomi_hub, config_entry):
"""Initialize the Xiaomi Cube."""
self._hass = hass
self._last_action = None
self._state = False
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "cube_status"
super().__init__(device, "Cube", xiaomi_hub, data_key, None, config_entry)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if self._data_key in data:
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{"entity_id": self.entity_id, "action_type": data[self._data_key]},
)
self._last_action = data[self._data_key]
if "rotate" in data:
action_value = float(
data["rotate"]
if isinstance(data["rotate"], int)
else data["rotate"].replace(",", ".")
)
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{
"entity_id": self.entity_id,
"action_type": "rotate",
"action_value": action_value,
},
)
self._last_action = "rotate"
if "rotate_degree" in data:
action_value = float(
data["rotate_degree"]
if isinstance(data["rotate_degree"], int)
else data["rotate_degree"].replace(",", ".")
)
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{
"entity_id": self.entity_id,
"action_type": "rotate",
"action_value": action_value,
},
)
self._last_action = "rotate"
return True
| 32.471002
| 87
| 0.549686
|
1582d7e4a5fecdf64ea528629b21a276fe0fe927
| 19,332
|
py
|
Python
|
tests/micro/zephyr/test_zephyr.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 90
|
2021-11-30T11:58:10.000Z
|
2022-03-31T02:24:04.000Z
|
tests/micro/zephyr/test_zephyr.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 64
|
2021-11-22T23:58:23.000Z
|
2022-03-31T03:19:22.000Z
|
tests/micro/zephyr/test_zephyr.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 27
|
2021-12-09T22:39:27.000Z
|
2022-03-24T23:21:48.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import pathlib
import sys
import pytest
import numpy as np
import onnx
from PIL import Image
import tvm
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from tvm.relay.testing import byoc
from tvm.contrib import utils
from tvm.micro.testing import check_tune_log
import test_utils
_LOG = logging.getLogger(__name__)
def _make_sess_from_op(
temp_dir, model, zephyr_board, west_cmd, op_name, sched, arg_bufs, build_config
):
runtime = Runtime("crt", {"system-lib": True})
target = tvm.target.target.micro(model)
target = tvm.target.Target(target=target, host=target)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.build(sched, arg_bufs, target=target, runtime=runtime, name=op_name)
return _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config)
def _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config):
config_main_stack_size = None
if test_utils.qemu_boards(zephyr_board):
config_main_stack_size = 1536
project_options = {
"project_type": "host_driven",
"west_cmd": west_cmd,
"verbose": bool(build_config.get("debug")),
"zephyr_board": zephyr_board,
}
if config_main_stack_size is not None:
project_options["config_main_stack_size"] = config_main_stack_size
project = tvm.micro.generate_project(
str(test_utils.TEMPLATE_PROJECT_DIR),
mod,
temp_dir / "project",
project_options,
)
project.build()
project.flash()
return tvm.micro.Session(project.transport())
def _make_add_sess(temp_dir, model, zephyr_board, west_cmd, build_config, dtype="int8"):
A = tvm.te.placeholder((2,), dtype=dtype)
B = tvm.te.placeholder((1,), dtype=dtype)
C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
sched = tvm.te.create_schedule(C.op)
return _make_sess_from_op(
temp_dir, model, zephyr_board, west_cmd, "add", sched, [A, B, C], build_config
)
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
def test_add_uint(temp_dir, board, west_cmd, tvm_debug):
"""Test compiling the on-device runtime."""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
# NOTE: run test in a nested function so cPython will delete arrays before closing the session.
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
assert (C_data.numpy() == np.array([6, 7])).all()
with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:
test_basic_add(sess)
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
def test_add_float(temp_dir, board, west_cmd, tvm_debug):
"""Test compiling the on-device runtime."""
model = test_utils.ZEPHYR_BOARDS[board]
if not test_utils.has_fpu(board):
pytest.skip(f"FPU not enabled for {board}")
build_config = {"debug": tvm_debug}
# NOTE: run test in a nested function so cPython will delete arrays before closing the session.
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2.5, 3.5], dtype="float32"), device=sess.device)
assert (A_data.numpy() == np.array([2.5, 3.5])).all()
B_data = tvm.nd.array(np.array([4.5], dtype="float32"), device=sess.device)
assert (B_data.numpy() == np.array([4.5])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="float32"), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
system_lib.get_function("add")(A_data, B_data, C_data)
assert (C_data.numpy() == np.array([7, 8])).all()
with _make_add_sess(temp_dir, model, board, west_cmd, build_config, dtype="float32") as sess:
test_basic_add(sess)
@tvm.testing.requires_micro
def test_platform_timer(temp_dir, board, west_cmd, tvm_debug):
"""Test compiling the on-device runtime."""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
# NOTE: run test in a nested function so cPython will delete arrays before closing the session.
def test_basic_add(sess):
A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
assert (A_data.numpy() == np.array([2, 3])).all()
B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
assert (B_data.numpy() == np.array([4])).all()
C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
assert (C_data.numpy() == np.array([0, 0])).all()
system_lib = sess.get_system_lib()
time_eval_f = system_lib.time_evaluator(
"add", sess.device, number=20, repeat=3, min_repeat_ms=40
)
result = time_eval_f(A_data, B_data, C_data)
assert (C_data.numpy() == np.array([6, 7])).all()
assert result.mean > 0
assert len(result.results) == 3
with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:
test_basic_add(sess)
@tvm.testing.requires_micro
def test_relay(temp_dir, board, west_cmd, tvm_debug):
"""Testing a simple relay graph"""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
shape = (10,)
dtype = "int8"
# Construct Relay program.
x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
xx = relay.multiply(x, x)
z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
func = relay.Function([x], z)
ir_mod = tvm.IRModule.from_expr(func)
runtime = Runtime("crt", {"system-lib": True})
target = tvm.target.target.micro(model)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(ir_mod, target=target, runtime=runtime)
with _make_session(temp_dir, board, west_cmd, mod, build_config) as session:
graph_mod = tvm.micro.create_local_graph_executor(
mod.get_graph_json(), session.get_system_lib(), session.device
)
graph_mod.set_input(**mod.get_params())
x_in = np.random.randint(10, size=shape[0], dtype=dtype)
graph_mod.run(x=x_in)
result = graph_mod.get_output(0).numpy()
tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
tvm.testing.assert_allclose(result, x_in * x_in + 1)
@tvm.testing.requires_micro
def test_onnx(temp_dir, board, west_cmd, tvm_debug):
"""Testing a simple ONNX model."""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
this_dir = pathlib.Path(os.path.dirname(__file__))
mnist_testdata = this_dir.parent / "testdata" / "mnist"
digit_2 = Image.open(mnist_testdata / "digit-2.jpg").resize((28, 28))
digit_2 = np.asarray(digit_2).astype("float32")
digit_2 = np.expand_dims(digit_2, axis=0)
digit_9 = Image.open(mnist_testdata / "digit-9.jpg").resize((28, 28))
digit_9 = np.asarray(digit_9).astype("float32")
digit_9 = np.expand_dims(digit_9, axis=0)
# Load ONNX model and convert to Relay.
onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
shape = {"Input3": (1, 1, 28, 28)}
relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)
relay_mod = relay.transform.DynamicToStatic()(relay_mod)
# We add the link-params=True option to ensure the model parameters are compiled in.
# There is currently a bug preventing the host_driven environment from receiving
# the model weights when set using graph_mod.set_input().
# See: https://github.com/apache/tvm/issues/7567
target = tvm.target.target.micro(model)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
executor = Executor("graph", {"link-params": True})
runtime = Runtime("crt", {"system-lib": True})
lowered = relay.build(relay_mod, target, params=params, executor=executor, runtime=runtime)
graph = lowered.get_graph_json()
with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:
graph_mod = tvm.micro.create_local_graph_executor(
graph, session.get_system_lib(), session.device
)
# Send the digit-2 image and confirm that the correct result is returned.
graph_mod.set_input("Input3", tvm.nd.array(digit_2))
graph_mod.run()
result = graph_mod.get_output(0).numpy()
assert np.argmax(result) == 2
# Send the digit-9 image and confirm that the correct result is returned.
graph_mod.set_input("Input3", tvm.nd.array(digit_9))
graph_mod.run()
result = graph_mod.get_output(0).numpy()
assert np.argmax(result) == 9
def check_result(
temp_dir, relay_mod, model, zephyr_board, west_cmd, map_inputs, out_shape, result, build_config
):
"""Helper function to verify results"""
TOL = 1e-5
runtime = Runtime("crt", {"system-lib": True})
target = tvm.target.target.micro(model)
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)
with _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config) as session:
rt_mod = tvm.micro.create_local_graph_executor(
mod.get_graph_json(), session.get_system_lib(), session.device
)
rt_mod.set_input(**mod.get_params())
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**mod.get_params())
rt_mod.run()
out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
results = result if isinstance(result, list) else [result]
for idx, shape in enumerate(out_shapes):
out = tvm.nd.empty(shape, device=session.device)
out = rt_mod.get_output(idx, out)
tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)
@tvm.testing.requires_micro
def test_byoc_microtvm(temp_dir, board, west_cmd, tvm_debug):
"""This is a simple test case to check BYOC capabilities of microTVM"""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
w3 = relay.var("w3", shape=(10, 10))
w4 = relay.var("w4", shape=(10, 10))
w5 = relay.var("w5", shape=(10, 10))
w6 = relay.var("w6", shape=(10, 10))
w7 = relay.var("w7", shape=(10, 10))
# C compiler
z0 = relay.add(x, w0)
p0 = relay.subtract(z0, w1)
q0 = relay.multiply(p0, w2)
z1 = relay.add(x, w3)
p1 = relay.subtract(z1, w4)
q1 = relay.multiply(p1, w5)
# Other parts on TVM
z2 = relay.add(x, w6)
q2 = relay.subtract(z2, w7)
r = relay.concatenate((q0, q1, q2), axis=0)
f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
mod = tvm.IRModule()
ann = byoc.CcompilerAnnotator()
mod["main"] = ann.visit(f)
mod = tvm.relay.transform.PartitionGraph()(mod)
mod = tvm.relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
map_inputs["x"] = x_data
check_result(
temp_dir=temp_dir,
relay_mod=mod,
map_inputs=map_inputs,
out_shape=(30, 10),
result=np.concatenate(
(
((x_data + w_data[0]) - w_data[1]) * w_data[2],
((x_data + w_data[3]) - w_data[4]) * w_data[5],
x_data + w_data[6] - w_data[7],
),
axis=0,
),
model=model,
zephyr_board=board,
west_cmd=west_cmd,
build_config=build_config,
)
def _make_add_sess_with_shape(temp_dir, model, zephyr_board, west_cmd, shape, build_config):
A = tvm.te.placeholder(shape, dtype="int8")
C = tvm.te.compute(A.shape, lambda i: A[i] + A[i], name="C")
sched = tvm.te.create_schedule(C.op)
return _make_sess_from_op(
temp_dir, model, zephyr_board, west_cmd, "add", sched, [A, C], build_config
)
@pytest.mark.parametrize(
"shape,",
[
pytest.param((1 * 1024,), id="(1*1024)"),
pytest.param((4 * 1024,), id="(4*1024)"),
pytest.param((16 * 1024,), id="(16*1024)"),
],
)
@tvm.testing.requires_micro
def test_rpc_large_array(temp_dir, board, west_cmd, tvm_debug, shape):
"""Test large RPC array transfer."""
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
# NOTE: run test in a nested function so cPython will delete arrays before closing the session.
def test_tensors(sess):
a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")
A_data = tvm.nd.array(a_np, device=sess.device)
assert (A_data.numpy() == a_np).all()
C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
assert (C_data.numpy() == np.zeros(shape)).all()
with _make_add_sess_with_shape(temp_dir, model, board, west_cmd, shape, build_config) as sess:
test_tensors(sess)
@pytest.mark.xfail(strict=False, reason="See https://github.com/apache/tvm/issues/10297")
@tvm.testing.requires_micro
def test_autotune_conv2d(temp_dir, board, west_cmd, tvm_debug):
"""Test AutoTune for microTVM Zephyr"""
if board != "qemu_x86":
pytest.xfail(f"Autotune fails on {board}.")
runtime = Runtime("crt", {"system-lib": True})
model = test_utils.ZEPHYR_BOARDS[board]
build_config = {"debug": tvm_debug}
# Create a Relay model
data_shape = (1, 3, 16, 16)
weight_shape = (8, 3, 5, 5)
data = relay.var("data", relay.TensorType(data_shape, "float32"))
weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
kernel_size=(5, 5),
kernel_layout="OIHW",
out_dtype="float32",
)
f = relay.Function([data, weight], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
"float32"
)
weight_sample = np.random.rand(
weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
).astype("float32")
params = {mod["main"].params[1].name_hint: weight_sample}
target = tvm.target.target.micro(model)
pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
with pass_context:
tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
assert len(tasks) > 0
config_main_stack_size = None
if test_utils.qemu_boards(board):
config_main_stack_size = 1536
project_options = {
"zephyr_board": board,
"west_cmd": west_cmd,
"verbose": 1,
"project_type": "host_driven",
}
if config_main_stack_size is not None:
project_options["config_main_stack_size"] = config_main_stack_size
module_loader = tvm.micro.AutoTvmModuleLoader(
template_project_dir=test_utils.TEMPLATE_PROJECT_DIR,
project_options=project_options,
)
timeout = 200
builder = tvm.autotvm.LocalBuilder(
timeout=timeout,
n_parallel=1,
build_kwargs={"build_option": {"tir.disable_vectorize": True}},
do_fork=True,
build_func=tvm.micro.autotvm_build_func,
runtime=runtime,
)
runner = tvm.autotvm.LocalRunner(
number=1, repeat=1, timeout=timeout, module_loader=module_loader
)
measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
log_path = pathlib.Path("zephyr_autotune.log")
if log_path.exists():
log_path.unlink()
n_trial = 10
for task in tasks:
tuner = tvm.autotvm.tuner.GATuner(task)
tuner.tune(
n_trial=n_trial,
measure_option=measure_option,
callbacks=[
tvm.autotvm.callback.log_to_file(str(log_path)),
tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
],
si_prefix="M",
)
assert tuner.best_flops > 0
check_tune_log(log_path)
# Build without tuning
with pass_context:
lowered = tvm.relay.build(mod, target=target, runtime=runtime, params=params)
temp_dir = utils.tempdir()
with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:
graph_mod = tvm.micro.create_local_graph_executor(
lowered.get_graph_json(), session.get_system_lib(), session.device
)
graph_mod.set_input(**lowered.get_params())
graph_mod.run(data=data_sample)
expected_output = graph_mod.get_output(0).numpy()
del graph_mod
# Build using autotune logs
with tvm.autotvm.apply_history_best(str(log_path)):
with pass_context:
lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params)
temp_dir = utils.tempdir()
with _make_session(temp_dir, board, west_cmd, lowered_tuned, build_config) as session:
graph_mod = tvm.micro.create_local_graph_executor(
lowered_tuned.get_graph_json(), session.get_system_lib(), session.device
)
graph_mod.set_input(**lowered_tuned.get_params())
graph_mod.run(data=data_sample)
output = graph_mod.get_output(0).numpy()
del graph_mod
tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
| 38.055118
| 100
| 0.661287
|
8d89ed43b92bb367e8c5815387b57d2c9acb0a75
| 398
|
py
|
Python
|
packages/python/plotly/plotly/validators/histogram2d/ybins/_start.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/ybins/_start.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/ybins/_start.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class StartValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="start", parent_name="histogram2d.ybins", **kwargs):
super(StartValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| 33.166667
| 87
| 0.670854
|
36ea7a64e124a7ec50e2bbb436ee507eb9b14dd6
| 8,149
|
py
|
Python
|
security_scripts/controls/buttons.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | 1
|
2021-12-23T05:02:51.000Z
|
2021-12-23T05:02:51.000Z
|
security_scripts/controls/buttons.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | null | null | null |
security_scripts/controls/buttons.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | 1
|
2021-12-23T05:02:57.000Z
|
2021-12-23T05:02:57.000Z
|
#!/usr/bin/env python
"""
Make requests to deprivilege a role, stop all EC2 instances,
or reprivilege a role.
Options available via <command> --help
"""
from security_scripts.information.lib import shlog
import logging
import boto3
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('nose').setLevel(logging.CRITICAL)
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
def detacher(args, role):
"""
Loop through all policies attached to a role and detach them
:return:
"""
# get attached policies and detach them in a loop
for policy in role.attached_policies.all():
shlog.normal('Detaching policy ' + policy.arn + ' from role ' + args.role)
response = role.detach_policy(PolicyArn=policy.arn)
shlog.debug(response)
def depriv(args):
"""
Make a request to deprivilege the target role to no permissions
:return: None
"""
boto3.setup_default_session(profile_name=args.profile)
iam = boto3.resource('iam')
role = iam.Role(args.role)
detacher(args, role)
# attach read-only
shlog.normal('Attaching ReadOnlyAccess to ' + args.role)
response = role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/ReadOnlyAccess')
shlog.debug(response)
def priv(args):
"""
Make a request to elevate the target role to ProposedPoweruser
:return: None
"""
boto3.setup_default_session(profile_name=args.profile)
iam = boto3.resource('iam')
role = iam.Role(args.role)
detacher(args, role)
    # attach the ProposedPoweruser and RoleManagementWithCondition policies
shlog.normal('Attaching ProposedPoweruser and RoleManagementWithCondition to ' + args.role)
response = role.attach_policy(PolicyArn='arn:aws:iam::' + args.accountid + ':policy/ProposedPoweruser')
shlog.debug(response)
response = role.attach_policy(PolicyArn='arn:aws:iam::' + args.accountid + ':policy/RoleManagementWithCondition')
shlog.debug(response)
def ec2stop(args, dryrun=False):
"""
Make a request to stop all ec2 instances
:return:
"""
from botocore.exceptions import ClientError
from security_scripts.information.lib import aws_utils as au # only works in plugin and IDE
args.session = boto3.Session(profile_name=args.profile)
regions = au.decribe_regions_df(args) # use for deployment
# regions = {'RegionName':['us-east-2']} # test mode
for region in regions['RegionName']:
shlog.normal('Stopping region ' + region)
# init connection to region and get instances there
client = boto3.client('ec2', region_name=region)
response = client.describe_instances()['Reservations']
        # go through instance ids
for inst in response:
# ...and allow termination...
instance = inst['Instances'][0]['InstanceId']
shlog.verbose('Allowing API termination for instance ' + instance + ' in region ' + region)
response = client.modify_instance_attribute(
InstanceId=instance,
DisableApiTermination={'Value': False}
)
shlog.debug(response)
# ...and perform halt
shlog.normal('Stopping instance ' + instance + ' in region ' + region)
try:
response = client.stop_instances(
InstanceIds=[instance],
DryRun=dryrun,
Force=True
)
except ClientError as ce:
if dryrun:
# client error is expected when simulating
shlog.normal('Stop simulation succeeded for instance ' + instance)
shlog.verbose('Success code: ' + str(ce))
else:
# we might actually want to catch real exceptions
raise ce
pass
def control_green_button(args):
"""Reprivilege target role"""
priv(args)
def control_red_button(args):
"""Deprivilege target role and stop all ec2 instances"""
depriv(args)
ec2stop(args, False)
def parser_builder(parent_parser, parser, config, remote=False):
"""Get a parser and return it with additional options
:param parent_parser: top-level parser that will receive a subcommand; can be None if remote=False
:param parser: (sub)parser in need of additional arguments
:param config: ingested config file in config object format
    :param remote: if True, register subcommands on the parent parser; if False, add the options to the local parser
:return: parser with amended options
"""
target_role = config.get("DEFAULT", "role", fallback="scimma_power_user")
accountid = config.get("DOWNLOAD", "accountid", fallback="585193511743")
if remote:
# green button parser
green_parser = parser.add_parser('control_green_button', parents=[parent_parser], description=control_green_button.__doc__)
green_parser.set_defaults(func=control_green_button)
green_parser.add_argument('--role', '-r', default=target_role, help='AWS role to modify (default: %(default)s)')
green_parser.add_argument('--accountid', help='AWS account id (default: %(default)s)', default=accountid)
# red button parser
red_parser = parser.add_parser('control_red_button', parents=[parent_parser], description=control_red_button.__doc__)
red_parser.set_defaults(func=control_red_button)
red_parser.add_argument('--role', '-r', default=target_role, help='AWS role to modify (default: %(default)s)')
red_parser.add_argument('--accountid', help='AWS account id (default: %(default)s)', default=accountid)
else:
        # arguments will be added to the local parser
parser.add_argument('--role', '-r', default=target_role, help='AWS role to modify (default: %(default)s)')
parser.add_argument('--accountid', help='AWS account id (default: %(default)s)', default=accountid)
return parser
if __name__ == "__main__":
import argparse
import configparser
""" get defaults from configuration system"""
from security_scripts.kli import env_control
config = configparser.ConfigParser()
import os
rel_path = "defaults.cfg"
cfg_sources = [rel_path, # built-in config for fallback
os.path.expanduser(env_control()) # env value
]
config.read(cfg_sources)
profile = config.get("DEFAULT", "profile", fallback="scimma-uiuc-aws-admin")
loglevel = config.get("BUTTONS", "loglevel", fallback="NORMAL")
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--profile', '-p', default=profile, help='aws profile to use (default: %(default)s)')
parser.add_argument('--loglevel', '-l', help="Level for reporting e.g. NORMAL, VERBOSE, DEBUG (default: %(default)s)",
default=loglevel,
choices=["NONE", "NORMAL", "DOTS", "WARN", "ERROR", "VERBOSE", "VVERBOSE", "DEBUG"])
# subcommands section
parser.set_defaults(func=None) # if none then there are subfunctions
subparsers = parser.add_subparsers(title="subcommands",
description='valid subcommands',
help='additional help')
# Subcommand to deprivilege
depriv_parser = subparsers.add_parser('depriv', description=depriv.__doc__)
depriv_parser.set_defaults(func=depriv)
# Subcommand to privilege
priv_parser = subparsers.add_parser('priv', description=priv.__doc__)
priv_parser.set_defaults(func=priv)
# Subcommand to stop ec2 instances
ec2_parser = subparsers.add_parser('ec2stop', description=ec2stop.__doc__)
ec2_parser.set_defaults(func=ec2stop)
parser = parser_builder(None, parser, config, False)
args = parser.parse_args()
shlog.basicConfig(level=args.loglevel)
# args.session = boto3.Session(profile_name=args.profile)
if not args.func: # there are no subfunctions
parser.print_help()
exit(1)
args.func(args)
| 40.745
| 131
| 0.665726
|
52e39bd51853358e3831d48264dfe8796688346b
| 4,966
|
py
|
Python
|
mailman2/models.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
mailman2/models.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
mailman2/models.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2022 Tim Hawes <me@timhawes.com>
#
# SPDX-License-Identifier: MIT
import re
from allauth.account.models import EmailAddress
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db import models
def find_user_from_address(address):
try:
emailaddress = EmailAddress.objects.get(email=address, verified=True)
return emailaddress.user
except EmailAddress.DoesNotExist:
return None
class MailingList(models.Model):
NONE = 0
CONFIRM = 1
REQUIRE_APPROVAL = 2
CONFIRM_AND_APPROVE = 3
SUBSCRIBE_POLICY_CHOICES = [
(NONE, "None"),
(CONFIRM, "Confirm"),
(REQUIRE_APPROVAL, "Require approval"),
(CONFIRM_AND_APPROVE, "Confirm and approve"),
]
name = models.CharField(max_length=64, unique=True)
description = models.CharField(max_length=255, blank=True)
info = models.TextField(blank=True)
advertised = models.BooleanField()
subscribe_policy = models.SmallIntegerField(choices=SUBSCRIBE_POLICY_CHOICES)
archive_private = models.BooleanField()
subscribe_auto_approval = models.TextField(blank=True)
auto_unsubscribe = models.BooleanField(
default=False,
help_text="Should non-group members be automatically unsubscribed?",
)
def __str__(self):
return self.name
def check_subscribe_auto_approval(self, address):
for pattern in self.subscribe_auto_approval.split("\n"):
if pattern.startswith("^"):
if re.match(pattern, address):
return True
elif pattern.lower() == address.lower():
return True
return False
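    # Illustrative example (not part of the original model): with
    # subscribe_auto_approval set to
    #   "^.*@example\.org$\nboard@example.com"
    # the first line starts with "^" and is treated as a regex, matching any
    # example.org address, while the second line is compared as a
    # case-insensitive literal.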
def user_can_see(self, user):
if self.advertised:
return True
if self.user_can_subscribe(user):
return True
return False
def user_can_subscribe(self, user):
if self.subscribe_policy in [self.NONE, self.CONFIRM]:
return True
for group in user.groups.all():
if self.group_policies.filter(group=group).exists():
return True
# if self.check_subscribe_auto_approval(user.email):
# return True
return False
def user_recommend(self, user):
for group in user.groups.all():
if self.group_policies.filter(
group=group, policy__gte=GroupPolicy.RECOMMEND
).exists():
return True
def user_prompt(self, user):
for group in user.groups.all():
try:
return self.group_policies.get(
group=group, policy=GroupPolicy.PROMPT
).prompt
except GroupPolicy.DoesNotExist:
pass
def user_subscribe_policy(self, user):
for policy in self.group_policies.order_by("-policy"):
if user.groups.contains(policy.group):
return policy
def address_can_remain(self, address):
if not self.auto_unsubscribe:
return True
if self.check_subscribe_auto_approval(address):
return True
user = find_user_from_address(address)
if user:
if self.user_can_subscribe(user):
return True
return False
class Meta:
permissions = [("audit_list", "Can audit the subscribers of a mailing list")]
class GroupPolicy(models.Model):
ALLOW = 0
RECOMMEND = 1
PROMPT = 2
FORCE = 3
POLICY_CHOICES = [
(ALLOW, "Allow"),
(RECOMMEND, "Recommend"),
(PROMPT, "Prompt"),
(FORCE, "Force"),
]
mailing_list = models.ForeignKey(
MailingList, on_delete=models.CASCADE, related_name="group_policies"
)
group = models.ForeignKey(
Group, on_delete=models.CASCADE, related_name="mailinglist_policies"
)
policy = models.SmallIntegerField(choices=POLICY_CHOICES, default=ALLOW)
prompt = models.TextField(blank=True)
def __str__(self):
return f"{self.mailing_list}:{self.group}:{self.get_policy_display()}"
def clean(self):
if self.policy == self.PROMPT:
if not self.prompt:
raise ValidationError("Must supply a message for a prompt policy.")
class Meta:
verbose_name_plural = "Group policies"
unique_together = ("mailing_list", "group")
class ChangeOfAddress(models.Model):
created = models.DateTimeField(null=False, blank=False, auto_now_add=True)
user = models.ForeignKey(get_user_model(), on_delete=models.PROTECT)
old_email = models.EmailField()
new_email = models.EmailField()
class Meta:
verbose_name_plural = "Changes of address"
class MailmanUser(models.Model):
user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)
advanced_mode = models.BooleanField(default=False)
| 31.630573
| 85
| 0.646597
|
dbc48766a3d16b55567c1c72582b51dbcb86d882
| 1,321
|
py
|
Python
|
connection_checker/connection_checker.py
|
igushev/HomeUtils
|
763f540cfbc22d354ec9497b10652bd83f3f5a5d
|
[
"MIT"
] | 2
|
2019-07-27T19:10:38.000Z
|
2019-08-31T11:32:33.000Z
|
connection_checker/connection_checker.py
|
igushev/home_tools
|
763f540cfbc22d354ec9497b10652bd83f3f5a5d
|
[
"MIT"
] | null | null | null |
connection_checker/connection_checker.py
|
igushev/home_tools
|
763f540cfbc22d354ec9497b10652bd83f3f5a5d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import os
import time
class Status(object):
Success = 0
Fail = 1
hostnames = ["www.google.com", "www.facebook.com", "www.yandex.ru"]
interval = 1
def WriteToLog(message, append_date_time=True, draw_line=True):
if append_date_time:
message += ' '+str(datetime.now())
message += '\n'
if draw_line:
message += '-'*80+'\n'
    with open('log.txt', 'a') as log_file:
        log_file.write(message)
print(message, end='')
def Ping():
    ping_failed = False
    for hostname in hostnames:
        # os.system returns 0 on success; "-n 1" is the Windows ping count
        # flag (the Unix equivalent would be "-c 1").
        ping_failed = ping_failed or os.system("ping -n 1 " + hostname)
    return (Status.Fail if ping_failed else Status.Success)
def ConnectionChecker():
prev_status = Status.Success
WriteToLog('The program started working at')
try:
while True:
status = Ping()
if status != prev_status:
if (status == Status.Success):
WriteToLog('Connection was established at')
else:
WriteToLog('Connection was lost at', draw_line=False)
prev_status = status
time.sleep(interval)
except KeyboardInterrupt:
WriteToLog('The program finished working at')
except Exception:
WriteToLog('The program failed at')
if __name__ == '__main__':
ConnectionChecker()
| 23.175439
| 68
| 0.644209
|
fee13c8950aba978afea4153ebaab8139f09f78c
| 6,093
|
py
|
Python
|
tests/integration_tests/conftest.py
|
7vikpeculiar/superset
|
800ced5e257d5d83d6dbe4ced0e7318ac40d026f
|
[
"Apache-2.0"
] | 2
|
2021-12-21T15:57:16.000Z
|
2022-01-31T02:22:02.000Z
|
tests/integration_tests/conftest.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 19
|
2022-01-29T03:16:22.000Z
|
2022-03-25T23:50:16.000Z
|
tests/integration_tests/conftest.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 2
|
2021-12-21T13:41:18.000Z
|
2021-12-26T22:16:43.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import functools
from typing import Any, Callable, Generator, Optional, TYPE_CHECKING
from unittest.mock import patch
import pytest
from sqlalchemy.engine import Engine
from superset import db
from superset.extensions import feature_flag_manager
from superset.utils.core import json_dumps_w_dates
from superset.utils.database import get_example_database, remove_database
from tests.integration_tests.test_app import app
if TYPE_CHECKING:
from superset.connectors.sqla.models import Database
CTAS_SCHEMA_NAME = "sqllab_test_db"
ADMIN_SCHEMA_NAME = "admin_database"
@pytest.fixture
def app_context():
with app.app_context():
yield
@pytest.fixture(autouse=True, scope="session")
def setup_sample_data() -> Any:
# TODO(john-bodley): Determine a cleaner way of setting up the sample data without
# relying on `tests.integration_tests.test_app.app` leveraging an `app` fixture which is purposely
# scoped to the function level to ensure tests remain idempotent.
with app.app_context():
setup_presto_if_needed()
from superset.cli.test import load_test_users_run
load_test_users_run()
from superset.examples.css_templates import load_css_templates
load_css_templates()
yield
with app.app_context():
engine = get_example_database().get_sqla_engine()
        # drop sqlalchemy tables
db.session.commit()
from sqlalchemy.ext import declarative
sqla_base = declarative.declarative_base()
        # uses sorted_tables to drop in the proper order without violating foreign key constraints
for table in sqla_base.metadata.sorted_tables:
table.__table__.drop()
db.session.commit()
def drop_from_schema(engine: Engine, schema_name: str):
schemas = engine.execute(f"SHOW SCHEMAS").fetchall()
if schema_name not in [s[0] for s in schemas]:
# schema doesn't exist
return
tables_or_views = engine.execute(f"SHOW TABLES in {schema_name}").fetchall()
for tv in tables_or_views:
engine.execute(f"DROP TABLE IF EXISTS {schema_name}.{tv[0]}")
engine.execute(f"DROP VIEW IF EXISTS {schema_name}.{tv[0]}")
@pytest.fixture(scope="session")
def example_db_provider() -> Callable[[], Database]: # type: ignore
class _example_db_provider:
_db: Optional[Database] = None
def __call__(self) -> Database:
with app.app_context():
if self._db is None:
self._db = get_example_database()
self._load_lazy_data_to_decouple_from_session()
return self._db
def _load_lazy_data_to_decouple_from_session(self) -> None:
self._db.get_sqla_engine() # type: ignore
self._db.backend # type: ignore
def remove(self) -> None:
if self._db:
with app.app_context():
remove_database(self._db)
_instance = _example_db_provider()
yield _instance
    # TODO: cannot use this until the referenced objects have been deleted.
# _instance.remove()
def setup_presto_if_needed():
backend = app.config["SQLALCHEMY_EXAMPLES_URI"].split("://")[0]
database = get_example_database()
extra = database.get_extra()
if backend == "presto":
# decrease poll interval for tests
extra = {
**extra,
"engine_params": {
"connect_args": {"poll_interval": app.config["PRESTO_POLL_INTERVAL"]}
},
}
else:
# remove `poll_interval` from databases that do not support it
extra = {**extra, "engine_params": {}}
database.extra = json_dumps_w_dates(extra)
db.session.commit()
if backend in {"presto", "hive"}:
database = get_example_database()
engine = database.get_sqla_engine()
drop_from_schema(engine, CTAS_SCHEMA_NAME)
engine.execute(f"DROP SCHEMA IF EXISTS {CTAS_SCHEMA_NAME}")
engine.execute(f"CREATE SCHEMA {CTAS_SCHEMA_NAME}")
drop_from_schema(engine, ADMIN_SCHEMA_NAME)
engine.execute(f"DROP SCHEMA IF EXISTS {ADMIN_SCHEMA_NAME}")
engine.execute(f"CREATE SCHEMA {ADMIN_SCHEMA_NAME}")
def with_feature_flags(**mock_feature_flags):
"""
Use this decorator to mock feature flags in tests.integration_tests.
Usage:
class TestYourFeature(SupersetTestCase):
@with_feature_flags(YOUR_FEATURE=True)
def test_your_feature_enabled(self):
self.assertEqual(is_feature_enabled("YOUR_FEATURE"), True)
@with_feature_flags(YOUR_FEATURE=False)
def test_your_feature_disabled(self):
self.assertEqual(is_feature_enabled("YOUR_FEATURE"), False)
"""
def mock_get_feature_flags():
feature_flags = feature_flag_manager._feature_flags or {}
return {**feature_flags, **mock_feature_flags}
def decorate(test_fn):
def wrapper(*args, **kwargs):
with patch.object(
feature_flag_manager,
"get_feature_flags",
side_effect=mock_get_feature_flags,
):
test_fn(*args, **kwargs)
return functools.update_wrapper(wrapper, test_fn)
return decorate
| 33.295082
| 103
| 0.682422
|
033c4f748f3d0e865f123157056e5557ec38e79d
| 442
|
py
|
Python
|
client.py
|
Zhiwei5/flask_getting_started
|
67c6f4a24bd12e5716ad2d384b152be77738a1e4
|
[
"MIT"
] | null | null | null |
client.py
|
Zhiwei5/flask_getting_started
|
67c6f4a24bd12e5716ad2d384b152be77738a1e4
|
[
"MIT"
] | null | null | null |
client.py
|
Zhiwei5/flask_getting_started
|
67c6f4a24bd12e5716ad2d384b152be77738a1e4
|
[
"MIT"
] | null | null | null |
import requests
def main():
r = requests.get("http://vcm-3572.vm.duke.edu:5000/name")
name = r.json()
print(name)
r2 = requests.get("http://vcm-3572.vm.duke.edu:5000/hello/zhiwei")
message = r2.json()
print(message)
r3 = requests.post("http://vcm-3572.vm.duke.edu:5000/distance", json = {"a": [2,3], "b": [3,6]})
distance_result = r3.json()
print(distance_result)
if __name__ == "__main__":
main()
| 24.555556
| 100
| 0.613122
|
dfbc43195cb30c64703b155147c98d5f77e81b40
| 1,471
|
py
|
Python
|
nextcloud/views.py
|
Aytuar/librephotos
|
1d888fd9f49fcb82cce43412b87f9f9736ad5f47
|
[
"MIT"
] | 1
|
2021-01-12T16:59:29.000Z
|
2021-01-12T16:59:29.000Z
|
nextcloud/views.py
|
Aytuar/librephotos
|
1d888fd9f49fcb82cce43412b87f9f9736ad5f47
|
[
"MIT"
] | 4
|
2021-09-08T03:28:23.000Z
|
2022-03-12T00:59:15.000Z
|
nextcloud/views.py
|
Aytuar/librephotos
|
1d888fd9f49fcb82cce43412b87f9f9736ad5f47
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser, AllowAny
from api.models import *
import owncloud as nextcloud
from api.api_util import get_current_job
from nextcloud.directory_watcher import scan_photos
from api.util import logger
import uuid
import datetime
class ListDir(APIView):
def get(self, request, format=None):
path = request.query_params['path']
if request.user.nextcloud_server_address is None:
return Response(status=400)
nc = nextcloud.Client(request.user.nextcloud_server_address)
nc.login(request.user.nextcloud_username,
request.user.nextcloud_app_password)
try:
return Response([{
'absolute_path': p.path,
'title': p.path.split('/')[-2],
'children': []
} for p in nc.list(path) if p.is_dir()])
except nextcloud.HTTPResponseError:
return Response(status=400)
# long running jobs
class ScanPhotosView(APIView):
def get(self, request, format=None):
try:
job_id = uuid.uuid4()
scan_photos(request.user, job_id)
return Response({'status': True, 'job_id': job_id})
        except BaseException:
            logger.exception("An error occurred")
return Response({'status': False})
| 31.297872
| 77
| 0.662814
|
611a40302dfcb2501b3e552c06f4e4110b0946ad
| 432
|
py
|
Python
|
sqltemplate/contrib/django/settings.py
|
marcinn/sqltemplate
|
e083f5262ae4439843210e32843b9b644604fdf1
|
[
"BSD-2-Clause"
] | 3
|
2019-12-22T22:44:28.000Z
|
2021-04-26T04:03:26.000Z
|
sqltemplate/contrib/django/settings.py
|
marcinn/sqltemplate
|
e083f5262ae4439843210e32843b9b644604fdf1
|
[
"BSD-2-Clause"
] | null | null | null |
sqltemplate/contrib/django/settings.py
|
marcinn/sqltemplate
|
e083f5262ae4439843210e32843b9b644604fdf1
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.conf import settings
TEMPLATES = getattr(settings, 'SQL_TEMPLATES', [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': False,
'OPTIONS': {
'loaders': [
'django.template.loaders.filesystem.Loader',
'sqltemplate.contrib.django.loaders.app_directories.Loader',
]
},
}])
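# Hedged example (not part of this module; the directory path is hypothetical):
# a project can override the fallback above by defining SQL_TEMPLATES in its
# Django settings, mirroring the shape of Django's TEMPLATES setting:
#
# SQL_TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'DIRS': ['/srv/app/sql'],
#     'APP_DIRS': False,
#     'OPTIONS': {
#         'loaders': ['django.template.loaders.filesystem.Loader'],
#     },
# }]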
| 24
| 72
| 0.625
|
63a4fdab7a7899f0b0d9cfc86fa4fd598fe9c531
| 9,359
|
py
|
Python
|
environments/particle/particle.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
environments/particle/particle.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
environments/particle/particle.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple particle environment."""
import collections
import copy
import os
from typing import Union
import gin
import gym
from gym import spaces
from gym.envs import registration
from ibc.environments.particle import particle_metrics
from ibc.environments.particle import particle_viz
import matplotlib.pyplot as plt
import numpy as np
@gin.configurable
class ParticleEnv(gym.Env):
"""Simple particle environment with gym wrapper.
The env is configurable but the default is:
"go to the green goal, then the blue goal"
A key feature of this environment is that it is N-dimensional, i.e. the
observation space is:
4N:
- position of the agent (N dimensions)
- velocity of the agent (N dimensions)
- position of the first goal (N dimensions)
- position of the second goal (N dimensions)
and action space is:
N:
- position setpoint for the agent (N dimensions)
Also configurable instead to:
- *wait at the first goal for some time (forces memory)
- *randomly go to either the green OR blue goal first (multimodal)
- not observe velocity information (can also force more memory usage,
since the velocity can be used to infer intent)
* = set in the Oracle params, not the env.
Key functions:
- reset() --> state
- step(action) --> state, reward, done, info
"""
def get_metrics(self, num_episodes):
metrics = [
particle_metrics.AverageFirstGoalDistance(
self, buffer_size=num_episodes),
particle_metrics.AverageSecondGoalDistance(
self, buffer_size=num_episodes),
particle_metrics.AverageFinalSecondGoalDistance(
self, buffer_size=num_episodes),
particle_metrics.AverageSuccessMetric(
self, buffer_size=num_episodes)
]
success_metric = metrics[-1]
return metrics, success_metric
@gin.configurable
def __init__(
self,
n_steps = 50,
n_dim = 2,
hide_velocity = False,
seed = None,
dt = 0.005, # 0.005 = 200 Hz
repeat_actions = 10, # 10 makes control 200/10 = 20 Hz
k_p = 10.,
k_v = 5.,
goal_distance = 0.05
):
"""Creates an env instance with options, see options below.
Args:
n_steps: # of steps until done.
n_dim: # of dimensions.
hide_velocity: whether or not to hide velocity info from agent.
seed: random seed.
dt: timestep for internal simulation (not same as control rate)
repeat_actions: repeat the action this many times, each for dt.
k_p: P gain in PD controller. (p for position)
k_v: D gain in PD controller. (v for velocity)
goal_distance: Acceptable distances to goals for success.
"""
self.reset_counter = 0
self.img_save_dir = None
self.n_steps = n_steps
self.goal_distance = goal_distance
self.n_dim = n_dim
self.hide_velocity = hide_velocity
self._rng = np.random.RandomState(seed=seed)
self.dt = dt
self.repeat_actions = repeat_actions
# Make sure is a multiple.
assert int(1/self.dt) % self.repeat_actions == 0
self.k_p = k_p
self.k_v = k_v
self.action_space = spaces.Box(low=0., high=1., shape=(self.n_dim,),
dtype=np.float32)
self.observation_space = self._create_observation_space()
self.reset()
def _create_observation_space(self):
obs_dict = collections.OrderedDict(
pos_agent=spaces.Box(low=0., high=1., shape=(self.n_dim,),
dtype=np.float32),
# TODO(peteflorence): is this the actual max for vel_agent?
vel_agent=spaces.Box(low=-1e2, high=1e2, shape=(self.n_dim,),
dtype=np.float32),
pos_first_goal=spaces.Box(low=0., high=1., shape=(self.n_dim,),
dtype=np.float32),
pos_second_goal=spaces.Box(low=0., high=1., shape=(self.n_dim,),
dtype=np.float32)
)
if self.hide_velocity:
del obs_dict['vel_agent']
return spaces.Dict(obs_dict)
def seed(self, seed=None):
self._rng = np.random.RandomState(seed=seed)
def reset(self):
self.reset_counter += 1
self.steps = 0
# self.obs_log and self.act_log hold internal state,
# will be useful for plotting.
self.obs_log = []
self.act_log = []
self.new_actions = []
obs = dict()
obs['pos_agent'] = self._rng.rand(self.n_dim).astype(np.float32)
obs['vel_agent'] = np.zeros((self.n_dim)).astype(np.float32)
obs['pos_first_goal'] = self._rng.rand(self.n_dim).astype(np.float32)
obs['pos_second_goal'] = self._rng.rand(self.n_dim).astype(np.float32)
self.obs_log.append(obs)
self.min_dist_to_first_goal = np.inf
self.min_dist_to_second_goal = np.inf
return self._get_state()
def _get_state(self):
return copy.deepcopy(self.obs_log[-1])
def _internal_step(self, action, new_action):
if new_action:
self.new_actions.append(len(self.act_log))
self.act_log.append({'pos_setpoint': action})
obs = self.obs_log[-1]
# u = k_p (x_{desired} - x) + k_v (xdot_{desired} - xdot)
# xdot_{desired} is always (0, 0) -->
# u = k_p (x_{desired} - x) - k_v (xdot)
u_agent = self.k_p * (action - obs['pos_agent']) - self.k_v * (
obs['vel_agent'])
new_xy_agent = obs['pos_agent'] + obs['vel_agent'] * self.dt
new_velocity_agent = obs['vel_agent'] + u_agent * self.dt
obs = copy.deepcopy(obs)
obs['pos_agent'] = new_xy_agent
obs['vel_agent'] = new_velocity_agent
self.obs_log.append(obs)
def dist(self, goal):
current_position = self.obs_log[-1]['pos_agent']
return np.linalg.norm(current_position - goal)
def _get_reward(self, done):
"""Reward is 1.0 if agent hits both goals and stays at second."""
# This also statefully updates these values.
self.min_dist_to_first_goal = min(
self.dist(self.obs_log[0]['pos_first_goal']),
self.min_dist_to_first_goal)
self.min_dist_to_second_goal = min(
self.dist(self.obs_log[0]['pos_second_goal']),
self.min_dist_to_second_goal)
def _reward(thresh):
reward_first = True if self.min_dist_to_first_goal < thresh else False
reward_second = True if self.min_dist_to_second_goal < thresh else False
return 1.0 if (reward_first and reward_second and done) else 0.0
reward = _reward(self.goal_distance)
return reward
@property
def succeeded(self):
thresh = self.goal_distance
hit_first = True if self.min_dist_to_first_goal < thresh else False
hit_second = True if self.min_dist_to_second_goal < thresh else False
    # TODO(peteflorence/coreylynch): this doesn't work for the multimodal version.
current_distance_to_second = self.dist(self.obs_log[0]['pos_second_goal'])
still_at_second = True if current_distance_to_second < thresh else False
return hit_first and hit_second and still_at_second
def step(self, action):
self.steps += 1
self._internal_step(action, new_action=True)
for _ in range(self.repeat_actions - 1):
self._internal_step(action, new_action=False)
state = self._get_state()
done = True if self.steps >= self.n_steps else False
reward = self._get_reward(done)
return state, reward, done, {}
def render(self, mode='rgb_array'):
fig = plt.figure()
fig.add_subplot(111)
if self.n_dim == 2:
fig, _ = particle_viz.visualize_2d(self.obs_log, self.act_log)
else:
fig, _ = particle_viz.visualize_nd(self.obs_log, self.act_log)
fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def set_img_save_dir(self, summary_dir):
self.img_save_dir = os.path.join(summary_dir, 'imgs')
os.makedirs(self.img_save_dir, exist_ok=True)
def save_image(self, traj):
if traj.is_last():
assert self.img_save_dir is not None
if self.n_dim == 2:
fig, _ = particle_viz.visualize_2d(self.obs_log, self.act_log)
filename = os.path.join(self.img_save_dir,
str(self.reset_counter).zfill(6)+'_2d.png')
fig.savefig(filename)
plt.close(fig)
fig, _ = particle_viz.visualize_nd(self.obs_log, self.act_log)
filename = os.path.join(self.img_save_dir,
str(self.reset_counter).zfill(6)+'_nd.png')
fig.savefig(filename)
plt.close(fig)
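# A minimal interaction sketch for the environment described in the class
# docstring (illustrative only, not part of the original module; defined here
# but never called automatically):
def _example_random_rollout(n_episodes=1):
  """Rolls out random actions in ParticleEnv and returns the final rewards."""
  env = ParticleEnv()
  rewards = []
  for _ in range(n_episodes):
    env.reset()
    done = False
    reward = 0.0
    while not done:
      # Sample a random position setpoint in [0, 1]^N and step the simulation.
      action = env.action_space.sample()
      _, reward, done, _ = env.step(action)
    rewards.append(reward)
  return rewards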
# Make sure we only register once to allow us to reload the module in colab for
# debugging.
if 'Particle-v0' in registration.registry.env_specs:
del registration.registry.env_specs['Particle-v0']
registration.register(id='Particle-v0', entry_point=ParticleEnv)
| 34.791822
| 79
| 0.674324
|
23613ccb0bd19a27ffed1bc3f335ab978be24796
| 7,659
|
py
|
Python
|
contrib/devtools/update-translations.py
|
boozioRi/stelo
|
d29a2041b1a682f7e60d4fd1aac8027fc486743b
|
[
"MIT"
] | 2
|
2019-11-18T22:06:02.000Z
|
2020-05-16T19:07:49.000Z
|
contrib/devtools/update-translations.py
|
boozioRi/stelo
|
d29a2041b1a682f7e60d4fd1aac8027fc486743b
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
boozioRi/stelo
|
d29a2041b1a682f7e60d4fd1aac8027fc486743b
|
[
"MIT"
] | 2
|
2019-01-31T10:41:04.000Z
|
2020-06-23T21:46:02.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'stelo_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
        except IndexError:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
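# Worked example (illustrative, derived from the two helpers above):
#   find_format_specifiers("Sent %1 of %n transactions via %s") -> ['1', 'n', 's']
#   split_format_specifiers(['1', 'n', 's']) -> ({'1'}, ['n', 's'])
# i.e. Qt-style numeric specifiers are returned as an order-insensitive set,
# while strprintf-style specifiers must keep their original order.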
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
| 37.544118
| 124
| 0.629325
|
1fb70d5625784a3d19acb83a82abce55e9b34a6d
| 2,630
|
py
|
Python
|
aws_dataclasses/cf_event.py
|
hypoport/LaWip
|
30bfe90457a83957d10ec18fe7d61439b5b74280
|
[
"Apache-2.0"
] | 4
|
2018-10-27T05:38:54.000Z
|
2021-04-09T14:49:23.000Z
|
aws_dataclasses/cf_event.py
|
hypoport/LaWip
|
30bfe90457a83957d10ec18fe7d61439b5b74280
|
[
"Apache-2.0"
] | 6
|
2018-10-01T17:11:14.000Z
|
2019-06-04T07:58:29.000Z
|
aws_dataclasses/cf_event.py
|
bweigel/python-aws-dataclasses
|
8e37d39bc8b7b7b48908d78286c0b6dea7fd90d2
|
[
"Apache-2.0"
] | 3
|
2018-08-03T21:20:26.000Z
|
2021-08-12T13:06:15.000Z
|
from collections import namedtuple
from typing import Dict, List, Optional
from dataclasses import InitVar, field, dataclass
from aws_dataclasses.base import GenericDataClass, EventClass
KVPair = namedtuple("KVPair", ['key', 'value'])
def _parse_headers(headers) -> Dict[str, List[KVPair]]:
out = {}
for hdr_name, header_list in headers.items():
out[hdr_name] = [KVPair(header.get("key"), header.get("value")) for header in header_list]
return out
@dataclass
class CloudFrontConfig(GenericDataClass):
distribution_id: str = field(init=False)
request_id: str = field(init=False)
distributionId: InitVar[str] = field(repr=False, default=None)
requestId: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, distributionId: str, requestId: str):
self.request_id = requestId
self.distribution_id = distributionId
@dataclass
class CloudFrontfRequest(GenericDataClass):
querystring: str
uri: str
method: str
headers: Dict[str, List[KVPair]]
origin: str = field(default=None)
client_ip: str = field(init=False)
clientIp: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, clientIp: str):
self.client_ip = clientIp
self.headers = _parse_headers(self.headers)
@dataclass
class CloudFrontResponse(GenericDataClass):
status: str
status_description: str = field(init=False)
headers: Dict[str, List[KVPair]]
statusDescription: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, statusDescription: str):
self.status_description = statusDescription
self.headers = _parse_headers(self.headers)
@dataclass
class CloudFrontRecord(GenericDataClass):
config: CloudFrontConfig
request: Optional[CloudFrontfRequest] = field(default=None)
response: Optional[CloudFrontResponse] = field(default=None)
def __post_init__(self):
self.config = CloudFrontConfig.from_json(self.config)
self.request = CloudFrontfRequest.from_json(self.request) if self.request is not None else self.request
self.response = CloudFrontResponse.from_json(self.response) if self.response is not None else self.response
@dataclass
class CloudFrontEvent(EventClass):
records: List[CloudFrontRecord] = field(init=False)
first_record: CloudFrontRecord = field(init=False)
Records: InitVar[List[Dict]] = field(repr=False, default=[])
def __post_init__(self, Records: List[Dict]):
self.records = [CloudFrontRecord.from_json(record["cf"]) for record in Records]
self.first_record = self.records[0]
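# Hedged usage sketch (not part of the original module): assuming the inherited from_json()
# maps raw dict keys onto the dataclass fields (as the __post_init__ calls above suggest), a
# minimal Lambda@Edge viewer-request payload can be loaded through the Records InitVar. The
# event data below is purely illustrative.
if __name__ == "__main__":
    _sample_records = [{
        "cf": {
            "config": {"distributionId": "EDFDVBD6EXAMPLE", "requestId": "abc-123"},
            "request": {
                "clientIp": "203.0.113.178",
                "method": "GET",
                "querystring": "",
                "uri": "/index.html",
                "headers": {"host": [{"key": "Host", "value": "example.org"}]},
            },
        }
    }]
    _event = CloudFrontEvent(Records=_sample_records)
    print(_event.first_record.request.uri)  # expected: /index.html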
| 33.717949
| 115
| 0.726616
|
b28947d0b6f97f362c21f680a53eb68056959970
| 1,215
|
py
|
Python
|
tests_classla/test_tagger.py
|
IgorTavcar/classla
|
5a1246b62eb352af631d4f4593f467e9ccbd3777
|
[
"Apache-2.0"
] | 12
|
2019-11-25T14:51:21.000Z
|
2021-02-21T16:59:38.000Z
|
tests_classla/test_tagger.py
|
IgorTavcar/classla
|
5a1246b62eb352af631d4f4593f467e9ccbd3777
|
[
"Apache-2.0"
] | 23
|
2021-03-12T13:17:17.000Z
|
2022-02-14T08:56:53.000Z
|
tests_classla/test_tagger.py
|
IgorTavcar/classla
|
5a1246b62eb352af631d4f4593f467e9ccbd3777
|
[
"Apache-2.0"
] | 7
|
2021-04-04T15:04:27.000Z
|
2022-02-20T17:33:39.000Z
|
"""
Basic testing of part of speech tagging
"""
import classla
from tests_classla import *
SL_DOC = "France Prešeren se je rodil v vrbi."
SL_DOC_GOLD = """
<Token id=1;words=[<Word id=1;text=France;upos=PROPN;xpos=Npmsn;feats=Case=Nom|Gender=Masc|Number=Sing>]>
<Token id=2;words=[<Word id=2;text=Prešeren;upos=PROPN;xpos=Npmsn;feats=Case=Nom|Gender=Masc|Number=Sing>]>
<Token id=3;words=[<Word id=3;text=se;upos=PRON;xpos=Px------y;feats=PronType=Prs|Reflex=Yes|Variant=Short>]>
<Token id=4;words=[<Word id=4;text=je;upos=AUX;xpos=Va-r3s-n;feats=Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin>]>
<Token id=5;words=[<Word id=5;text=rodil;upos=VERB;xpos=Vmbp-sm;feats=Gender=Masc|Number=Sing|VerbForm=Part>]>
<Token id=6;words=[<Word id=6;text=v;upos=ADP;xpos=Sl;feats=Case=Loc>]>
<Token id=7;words=[<Word id=7;text=vrbi;upos=NOUN;xpos=Ncfsl;feats=Case=Loc|Gender=Fem|Number=Sing>]>
<Token id=8;words=[<Word id=8;text=.;upos=PUNCT;xpos=Z>]>
""".strip()
def test_part_of_speech():
nlp = classla.Pipeline(**{'processors': 'tokenize,pos', 'dir': TEST_MODELS_DIR, 'lang': 'sl'})
doc = nlp(SL_DOC)
assert SL_DOC_GOLD == '\n\n'.join([sent.tokens_string() for sent in doc.sentences])
| 43.392857
| 136
| 0.714403
|
e3bc99f1cae4ed04beba1ed159ce3a33cd8dbf89
| 83
|
py
|
Python
|
examen/p4/p4.py
|
Ale-Torrico/computacion_para_ingenieria
|
919205e485ddd0a40adcf543a8ef675712354024
|
[
"Apache-2.0"
] | null | null | null |
examen/p4/p4.py
|
Ale-Torrico/computacion_para_ingenieria
|
919205e485ddd0a40adcf543a8ef675712354024
|
[
"Apache-2.0"
] | null | null | null |
examen/p4/p4.py
|
Ale-Torrico/computacion_para_ingenieria
|
919205e485ddd0a40adcf543a8ef675712354024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 9 07:44:47 2022
@author: AMD
"""
| 10.375
| 35
| 0.542169
|
e8071634d1019c0eeec4cf933fc5a5e335fd85d1
| 2,140
|
py
|
Python
|
plotjog.py
|
mostlyuseful/marlin-plotjog
|
f188a9351b4a4188d92a4865e6b5199b4065adf9
|
[
"MIT"
] | null | null | null |
plotjog.py
|
mostlyuseful/marlin-plotjog
|
f188a9351b4a4188d92a4865e6b5199b4065adf9
|
[
"MIT"
] | null | null | null |
plotjog.py
|
mostlyuseful/marlin-plotjog
|
f188a9351b4a4188d92a4865e6b5199b4065adf9
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import sys
import joystick
import serial
import threading
import numpy as np
from rx import Observable
from rx.subjects import Subject
from rx.concurrency import NewThreadScheduler
FEEDRATE = 400 # mm / minute
MAX_REACH = 0.2 # mm / feedrate
MIN_NORM = 0.2
device_fn = sys.argv[1] if len(sys.argv)>1 else joystick.Joystick.available_sticks()[0]
print("Using input device: {0}".format(device_fn))
in_stick = joystick.Joystick(device_fn)
tty = serial.Serial('/dev/ttyUSB0',250000)
def send_gcode(s):
tty.flushInput()
tty.write(s + b'\n')
return tty.readline().strip()
def poll_joystick(_):
poll_result = in_stick.poll()
x, y = None, None
if poll_result.changed_axis is not None:
axis = poll_result.changed_axis
if axis.number == 0:
x = axis.fvalue
elif axis.number == 1:
y = axis.fvalue
return x, y
def update_position(accu, update):
return tuple( (current if current is not None else previous) for current, previous in zip(update, accu) )
joystick_positions = Observable.interval(1).map(poll_joystick).scan(update_position, (0,0))
cv = threading.Condition()
new_position = None
def new_position_available():
global new_position
return new_position is not None
def execute_move():
global cv, new_position
while True:
with cv:
cv.wait_for(new_position_available)
dx, dy = new_position
new_position = None
# Relative positioning
send_gcode(b'G91')
# Rapid move
send_gcode('G1 X{:.3f} Y{:.3f} F{}'.format(dx, -dy, FEEDRATE).encode('ascii'))
consumer_thread = threading.Thread(target=execute_move)
consumer_thread.daemon = True
consumer_thread.start()
def move_printer(delta):
global cv, new_position
if np.linalg.norm(delta) > MIN_NORM:
dx, dy = np.array(delta) * MAX_REACH
print(dx, dy)
with cv:
new_position = dx, dy
cv.notify()
joystick_positions \
.filter(lambda pos: all(val is not None for val in pos)) \
.combine_latest(Observable.interval(20), lambda a,b: a) \
.observe_on(NewThreadScheduler()) \
.subscribe(on_next=move_printer)
input("Press any key to exit\n")
| 24.883721
| 107
| 0.705607
|
fcf82356b52a680b9972850e01df3873afd13ba8
| 12,980
|
py
|
Python
|
zfit/models/physics.py
|
kailiu77/zfit
|
00eed81fb34e0eb2e4bae5ddc9ebf38699e107ca
|
[
"BSD-3-Clause"
] | 1
|
2022-01-15T13:38:12.000Z
|
2022-01-15T13:38:12.000Z
|
zfit/models/physics.py
|
kailiu77/zfit
|
00eed81fb34e0eb2e4bae5ddc9ebf38699e107ca
|
[
"BSD-3-Clause"
] | null | null | null |
zfit/models/physics.py
|
kailiu77/zfit
|
00eed81fb34e0eb2e4bae5ddc9ebf38699e107ca
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019 zfit
from typing import Type, Any
import tensorflow as tf
import tensorflow_probability.python.distributions as tfd
import numpy as np
import zfit
from zfit import ztf
from ..core.basepdf import BasePDF
from ..core.limits import ANY_UPPER, ANY_LOWER, Space
from ..settings import ztypes
from ..util import ztyping
def _powerlaw(x, a, k):
return a * tf.pow(x, k)
def crystalball_func(x, mu, sigma, alpha, n):
t = (x - mu) / sigma * tf.sign(alpha)
abs_alpha = tf.abs(alpha)
a = tf.pow((n / abs_alpha), n) * tf.exp(-0.5 * tf.square(alpha))
b = (n / abs_alpha) - abs_alpha
cond = tf.less(t, -abs_alpha)
# func = tf.where(cond, tf.exp(-0.5 * tf.square(t)), _powerlaw(b - t, a, -n))
func = ztf.safe_where(cond,
lambda t: _powerlaw(b - t, a, -n),
lambda t: tf.exp(-0.5 * tf.square(t)),
values=t, value_safer=lambda t: tf.ones_like(t) * (b - 2))
return func
def double_crystalball_func(x, mu, sigma, alphal, nl, alphar, nr):
cond = tf.less(x, mu)
func = tf.compat.v1.where(cond,
crystalball_func(x, mu, sigma, alphal, nl),
crystalball_func(x, mu, sigma, -alphar, nr))
return func
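# Hedged reference sketch (not used by zfit itself): a plain-NumPy version of the one-sided
# Crystal Ball shape, convenient for spot-checking crystalball_func on a few points. It mirrors
# the piecewise definition above: Gaussian core for t >= -|alpha|, power-law tail otherwise.
def _crystalball_func_np(x, mu, sigma, alpha, n):
    x = np.atleast_1d(np.asarray(x, dtype=float))
    t = (x - mu) / sigma * np.sign(alpha)
    abs_alpha = abs(alpha)
    a = (n / abs_alpha) ** n * np.exp(-0.5 * alpha ** 2)
    b = n / abs_alpha - abs_alpha
    out = np.exp(-0.5 * t ** 2)
    tail = t < -abs_alpha
    out[tail] = a * np.power(b - t[tail], -n)
    return out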
# def _python_crystalball_integral(limits, params): # not working with tf, used for autoconvert
# mu = params['mu']
# sigma = params['sigma']
# alpha = params['alpha']
# n = params['n']
#
# (lower,), (upper,) = limits.limits
#
# sqrt_pi_over_two = np.sqrt(np.pi / 2)
# sqrt2 = np.sqrt(2)
#
# result = 0.0
# use_log = tf.abs(n - 1.0) < 1.0e-05
#
# abs_sigma = tf.abs(sigma)
# abs_alpha = tf.abs(alpha)
#
# tmin = (lower - mu) / abs_sigma
# tmax = (upper - mu) / abs_sigma
#
# if alpha < 0:
# tmin, tmax = -tmax, -tmin
#
# if tmin >= -abs_alpha:
# result += abs_sigma * sqrt_pi_over_two * (tf.erf(tmax / sqrt2)
# - tf.erf(tmin / sqrt2))
#
# elif tmax <= -abs_alpha:
# a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
#
# b = n / abs_alpha - abs_alpha
#
# if use_log:
# result += a * abs_sigma * (tf.log(b - tmin) - tf.log(b - tmax))
# else:
# result += a * abs_sigma / (1.0 - n) * (1.0 / (tf.pow(b - tmin, n - 1.0))
# - 1.0 / (tf.pow(b - tmax, n - 1.0)))
# else:
# a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
# b = n / abs_alpha - abs_alpha
#
# if use_log:
# term1 = a * abs_sigma * (tf.log(b - tmin) - tf.log(n / abs_alpha))
#
# else:
# term1 = a * abs_sigma / (1.0 - n) * (1.0 / (tf.pow(b - tmin, n - 1.0))
# - 1.0 / (tf.pow(n / abs_alpha, n - 1.0)))
#
# term2 = abs_sigma * sqrt_pi_over_two * (tf.erf(tmax / sqrt2)
# - tf.erf(-abs_alpha / sqrt2))
#
# result += term1 + term2
#
# return result
# created with the help of TensorFlow autograph used on python code converted from ShapeCB of RooFit
def crystalball_integral(limits, params, model):
mu = params['mu']
sigma = params['sigma']
alpha = params['alpha']
n = params['n']
(lower,), (upper,) = limits.limits
lower = lower[0] # obs number 0
upper = upper[0]
sqrt_pi_over_two = np.sqrt(np.pi / 2)
sqrt2 = np.sqrt(2)
result = 0.0
use_log = tf.less(tf.abs(n - 1.0), 1e-05)
abs_sigma = tf.abs(sigma)
abs_alpha = tf.abs(alpha)
tmin = (lower - mu) / abs_sigma
tmax = (upper - mu) / abs_sigma
def if_true():
return tf.negative(tmin), tf.negative(tmax)
def if_false():
return tmax, tmin
tmax, tmin = tf.cond(pred=tf.less(alpha, 0), true_fn=if_true, false_fn=if_false)
def if_true_4():
result_5, = result,
result_5 += abs_sigma * sqrt_pi_over_two * (tf.math.erf(tmax / sqrt2) - tf.math.erf(tmin / sqrt2))
return result_5
def if_false_4():
result_6 = result
def if_true_3():
result_3 = result_6
a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
b = n / abs_alpha - abs_alpha
def if_true_1():
result_1, = result_3,
result_1 += a * abs_sigma * (tf.math.log(b - tmin) - tf.math.log(b - tmax))
return result_1
def if_false_1():
result_2, = result_3,
result_2 += a * abs_sigma / (1.0 - n) * (
1.0 / tf.pow(b - tmin, n - 1.0) - 1.0 / tf.pow(b - tmax, n - 1.0))
return result_2
result_3 = tf.cond(pred=use_log, true_fn=if_true_1, false_fn=if_false_1)
return result_3
def if_false_3():
result_4, = result_6,
a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
b = n / abs_alpha - abs_alpha
def if_true_2():
term1 = a * abs_sigma * (tf.math.log(b - tmin) - tf.math.log(n / abs_alpha))
return term1
def if_false_2():
term1 = a * abs_sigma / (1.0 - n) * (
1.0 / tf.pow(b - tmin, n - 1.0) - 1.0 / tf.pow(n / abs_alpha, n - 1.0))
return term1
term1 = tf.cond(pred=use_log, true_fn=if_true_2, false_fn=if_false_2)
term2 = abs_sigma * sqrt_pi_over_two * (
tf.math.erf(tmax / sqrt2) - tf.math.erf(-abs_alpha / sqrt2))
result_4 += term1 + term2
return result_4
result_6 = tf.cond(pred=tf.less_equal(tmax, -abs_alpha), true_fn=if_true_3, false_fn=if_false_3)
return result_6
# if_false_4()
result = tf.cond(pred=tf.greater_equal(tmin, -abs_alpha), true_fn=if_true_4, false_fn=if_false_4)
return result
def double_crystalball_integral(limits, params, model):
mu = params['mu']
sigma = params['sigma']
(lower,), (upper,) = limits.limits
lower = lower[0] # obs number 0
upper = upper[0]
limits_left = Space(limits.obs, (lower, mu))
limits_right = Space(limits.obs, (mu, upper))
params_left = dict(mu=mu, sigma=sigma, alpha=params["alphal"],
n=params["nl"])
params_right = dict(mu=mu, sigma=sigma, alpha=-params["alphar"],
n=params["nr"])
    # true_fn/false_fn must be callables, and crystalball_integral expects the model argument,
    # so wrap both branches in lambdas.
    left = tf.cond(pred=tf.less(mu, lower), true_fn=lambda: ztf.constant(0.),
                   false_fn=lambda: crystalball_integral(limits_left, params_left, model))
    right = tf.cond(pred=tf.greater(mu, upper), true_fn=lambda: ztf.constant(0.),
                    false_fn=lambda: crystalball_integral(limits_right, params_right, model))
return left + right
class CrystalBall(BasePDF):
_N_OBS = 1
def __init__(self, mu: ztyping.ParamTypeInput, sigma: ztyping.ParamTypeInput,
alpha: ztyping.ParamTypeInput, n: ztyping.ParamTypeInput,
obs: ztyping.ObsTypeInput, name: str = "CrystalBall", dtype: Type = ztypes.float):
"""`Crystal Ball shaped PDF`__. A combination of a Gaussian with an powerlaw tail.
The function is defined as follows:
.. math::
f(x;\\mu, \\sigma, \\alpha, n) = \\begin{cases} \\exp(- \\frac{(x - \\mu)^2}{2 \\sigma^2}),
& \\mbox{for}\\frac{x - \\mu}{\\sigma} \\geqslant -\\alpha \\newline
A \\cdot (B - \\frac{x - \\mu}{\\sigma})^{-n}, & \\mbox{for }\\frac{x - \\mu}{\\sigma}
< -\\alpha \\end{cases}
with
.. math::
A = \\left(\\frac{n}{\\left| \\alpha \\right|}\\right)^n \\cdot
\\exp\\left(- \\frac {\\left|\\alpha \\right|^2}{2}\\right)
B = \\frac{n}{\\left| \\alpha \\right|} - \\left| \\alpha \\right|
Args:
mu (`zfit.Parameter`): The mean of the gaussian
sigma (`zfit.Parameter`): Standard deviation of the gaussian
alpha (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail
n (`zfit.Parameter`): Exponent of the powertail
obs (:py:class:`~zfit.Space`):
name (str):
dtype (tf.DType):
.. _CBShape: https://en.wikipedia.org/wiki/Crystal_Ball_function
__CBShape_
"""
params = {'mu': mu,
'sigma': sigma,
'alpha': alpha,
'n': n}
super().__init__(obs=obs, dtype=dtype, name=name, params=params)
def _unnormalized_pdf(self, x):
mu = self.params['mu']
sigma = self.params['sigma']
alpha = self.params['alpha']
n = self.params['n']
x = x.unstack_x()
return crystalball_func(x=x, mu=mu, sigma=sigma, alpha=alpha, n=n)
crystalball_integral_limits = Space.from_axes(axes=(0,), limits=(((ANY_LOWER,),), ((ANY_UPPER,),)))
# TODO uncomment, dependency: bug in TF (31.1.19) # 25339 that breaks gradient of resource var in cond
# CrystalBall.register_analytic_integral(func=crystalball_integral, limits=crystalball_integral_limits)
class DoubleCB(BasePDF):
_N_OBS = 1
def __init__(self, mu: ztyping.ParamTypeInput, sigma: ztyping.ParamTypeInput,
alphal: ztyping.ParamTypeInput, nl: ztyping.ParamTypeInput,
alphar: ztyping.ParamTypeInput, nr: ztyping.ParamTypeInput,
obs: ztyping.ObsTypeInput, name: str = "DoubleCB", dtype: Type = ztypes.float):
"""`Double sided Crystal Ball shaped PDF`__. A combination of two CB using the **mu** (not a frac).
on each side.
The function is defined as follows:
.. math::
f(x;\\mu, \\sigma, \\alpha_{L}, n_{L}, \\alpha_{R}, n_{R}) = \\begin{cases}
A_{L} \\cdot (B_{L} - \\frac{x - \\mu}{\\sigma})^{-n},
& \\mbox{for }\\frac{x - \\mu}{\\sigma} < -\\alpha_{L} \\newline
\\exp(- \\frac{(x - \\mu)^2}{2 \\sigma^2}),
& -\\alpha_{L} \\leqslant \\mbox{for}\\frac{x - \\mu}{\\sigma} \\leqslant \\alpha_{R} \\newline
A_{R} \\cdot (B_{R} - \\frac{x - \\mu}{\\sigma})^{-n},
& \\mbox{for }\\frac{x - \\mu}{\\sigma} > \\alpha_{R}
\\end{cases}
with
.. math::
A_{L/R} = \\left(\\frac{n_{L/R}}{\\left| \\alpha_{L/R} \\right|}\\right)^n_{L/R} \\cdot
\\exp\\left(- \\frac {\\left|\\alpha_{L/R} \\right|^2}{2}\\right)
B_{L/R} = \\frac{n_{L/R}}{\\left| \\alpha_{L/R} \\right|} - \\left| \\alpha_{L/R} \\right|
Args:
mu (`zfit.Parameter`): The mean of the gaussian
sigma (`zfit.Parameter`): Standard deviation of the gaussian
alphal (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail on the left
side
nl (`zfit.Parameter`): Exponent of the powertail on the left side
alphar (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail on the right
side
nr (`zfit.Parameter`): Exponent of the powertail on the right side
obs (:py:class:`~zfit.Space`):
name (str):
dtype (tf.DType):
"""
params = {'mu': mu,
'sigma': sigma,
'alphal': alphal,
'nl': nl,
'alphar': alphar,
'nr': nr}
super().__init__(obs=obs, dtype=dtype, name=name, params=params)
def _unnormalized_pdf(self, x):
mu = self.params['mu']
sigma = self.params['sigma']
alphal = self.params['alphal']
nl = self.params['nl']
alphar = self.params['alphar']
nr = self.params['nr']
x = x.unstack_x()
return double_crystalball_func(x=x, mu=mu, sigma=sigma, alphal=alphal, nl=nl,
alphar=alphar, nr=nr)
# DoubleCB.register_analytic_integral(func=double_crystalball_integral, limits=crystalball_integral_limits)
if __name__ == '__main__':
mu = ztf.constant(0)
sigma = ztf.constant(0.5)
alpha = ztf.constant(3)
n = ztf.constant(1)
# res = crystalball_func(np.random.random(size=100), mu, sigma, alpha, n)
# int1 = crystalball_integral(limits=zfit.Space(obs='obs1', limits=(-3, 5)),
# params={'mu': mu, "sigma": sigma, "alpha": alpha, "n": n})
from tensorflow.contrib import autograph
import matplotlib.pyplot as plt
new_code = autograph.to_code(crystalball_integral)
obs = zfit.Space(obs='obs1', limits=(-3, 1))
cb1 = CrystalBall(mu, sigma, alpha, n, obs=obs)
res = cb1.pdf(np.random.random(size=100))
int1 = cb1.integrate(limits=(-0.01, 2), norm_range=obs)
# tf.add_check_numerics_ops()
x = np.linspace(-5, 1, num=1000)
vals = cb1.pdf(x=x)
y = zfit.run(vals)[0]
plt.plot(x, y)
plt.show()
# print(new_code)
print(zfit.run(res))
print(zfit.run(int1))
| 36.256983
| 118
| 0.545455
|
ca546b8edec8439712e578285a1e1b1750ddc5bc
| 1,071
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/dynamodbstreams/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/dynamodbstreams/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/dynamodbstreams/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from botocore.waiter import Waiter
from typing import Union
from typing import Dict
from botocore.paginate import Paginator
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def describe_stream(self, StreamArn: str, Limit: int = None, ExclusiveStartShardId: str = None) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_records(self, ShardIterator: str, Limit: int = None) -> Dict:
pass
def get_shard_iterator(self, StreamArn: str, ShardId: str, ShardIteratorType: str, SequenceNumber: str = None) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_streams(self, TableName: str = None, Limit: int = None, ExclusiveStartStreamArn: str = None) -> Dict:
pass
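# Hedged usage sketch (not part of the generated stubs): with the real boto3 client the typical
# read loop is describe_stream -> get_shard_iterator -> get_records, e.g.:
#
#   import boto3
#   streams = boto3.client('dynamodbstreams')
#   desc = streams.describe_stream(StreamArn=stream_arn)  # stream_arn supplied by the caller
#   shard_id = desc['StreamDescription']['Shards'][0]['ShardId']
#   it = streams.get_shard_iterator(StreamArn=stream_arn, ShardId=shard_id,
#                                   ShardIteratorType='TRIM_HORIZON')['ShardIterator']
#   records = streams.get_records(ShardIterator=it)['Records']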
| 32.454545
| 131
| 0.694678
|
03d924307bb6a727d3d7b445f200a655a6b3e04d
| 324
|
py
|
Python
|
Model.py
|
tuantvk/flask-mongodb-quiz-app
|
54a0b214332294517cfe759e76e7904e6f2f2e7a
|
[
"MIT"
] | 2
|
2019-10-03T08:03:33.000Z
|
2022-03-27T13:30:26.000Z
|
Model.py
|
tuantvk/flask-mongodb-quiz-app
|
54a0b214332294517cfe759e76e7904e6f2f2e7a
|
[
"MIT"
] | null | null | null |
Model.py
|
tuantvk/flask-mongodb-quiz-app
|
54a0b214332294517cfe759e76e7904e6f2f2e7a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from pymongo import MongoClient, errors
from config import HOST_DB
try:
client = MongoClient(HOST_DB, serverSelectionTimeoutMS=10)
db = client.quizapp
except errors.ServerSelectionTimeoutError as err:
print("DB error", err)
# create db user
user_list = db.user
# create db room
room = db.room
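# Hedged usage sketch (illustrative only): these are ordinary pymongo collections, so callers
# can insert and query documents directly, e.g.:
#
#   user_list.insert_one({'name': 'alice', 'score': 0})
#   alice = user_list.find_one({'name': 'alice'})
#   room.update_one({'code': 'ABCD'}, {'$set': {'open': True}}, upsert=True)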
| 20.25
| 60
| 0.765432
|
c5eaeabcabd4ff96dc4ed7deac7ed457a1d7a351
| 17,223
|
py
|
Python
|
src/poio/ui/Ui_MainAnalyzerQML.py
|
cidles/poio-analyzer
|
64fd7327164d7d67a3615c10b047a95d5f34a2cd
|
[
"Apache-2.0"
] | 3
|
2017-04-07T08:20:27.000Z
|
2021-01-07T20:32:24.000Z
|
src/poio/ui/Ui_MainAnalyzerQML.py
|
cidles/poio-analyzer
|
64fd7327164d7d67a3615c10b047a95d5f34a2cd
|
[
"Apache-2.0"
] | null | null | null |
src/poio/ui/Ui_MainAnalyzerQML.py
|
cidles/poio-analyzer
|
64fd7327164d7d67a3615c10b047a95d5f34a2cd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainAnalyzerQML.ui'
#
# Created: Tue May 28 11:30:41 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralWidget = QtGui.QWidget(MainWindow)
self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_7 = QtGui.QLabel(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
self.label_7.setSizePolicy(sizePolicy)
self.label_7.setSizeIncrement(QtCore.QSize(1, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 12, 1, 1, 2)
self.line = QtGui.QFrame(self.centralWidget)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout.addWidget(self.line, 11, 1, 1, 7)
self.buttonRemoveFiles = QtGui.QPushButton(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonRemoveFiles.sizePolicy().hasHeightForWidth())
self.buttonRemoveFiles.setSizePolicy(sizePolicy)
self.buttonRemoveFiles.setObjectName(_fromUtf8("buttonRemoveFiles"))
self.gridLayout.addWidget(self.buttonRemoveFiles, 14, 2, 1, 1)
self.listFiles = QtGui.QListView(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listFiles.sizePolicy().hasHeightForWidth())
self.listFiles.setSizePolicy(sizePolicy)
self.listFiles.setSizeIncrement(QtCore.QSize(1, 0))
self.listFiles.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.listFiles.setObjectName(_fromUtf8("listFiles"))
self.gridLayout.addWidget(self.listFiles, 13, 1, 1, 2)
self.lineeditQuickSearch = QtGui.QLineEdit(self.centralWidget)
self.lineeditQuickSearch.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineeditQuickSearch.sizePolicy().hasHeightForWidth())
self.lineeditQuickSearch.setSizePolicy(sizePolicy)
self.lineeditQuickSearch.setObjectName(_fromUtf8("lineeditQuickSearch"))
self.gridLayout.addWidget(self.lineeditQuickSearch, 14, 5, 1, 3)
self.buttonSearch = QtGui.QPushButton(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonSearch.sizePolicy().hasHeightForWidth())
self.buttonSearch.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.buttonSearch.setFont(font)
self.buttonSearch.setObjectName(_fromUtf8("buttonSearch"))
self.gridLayout.addWidget(self.buttonSearch, 3, 5, 1, 3)
self.tabWidget = QtGui.QTabWidget(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.tabWidget.setTabsClosable(False)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tabNewSearch = QtGui.QWidget()
self.tabNewSearch.setObjectName(_fromUtf8("tabNewSearch"))
self.tabWidget.addTab(self.tabNewSearch, _fromUtf8(""))
self.gridLayout.addWidget(self.tabWidget, 2, 1, 1, 7)
self.buttonAlignCenter = QtGui.QToolButton(self.centralWidget)
self.buttonAlignCenter.setEnabled(False)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/aligncenter.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonAlignCenter.setIcon(icon)
self.buttonAlignCenter.setIconSize(QtCore.QSize(24, 24))
self.buttonAlignCenter.setCheckable(True)
self.buttonAlignCenter.setAutoRaise(True)
self.buttonAlignCenter.setObjectName(_fromUtf8("buttonAlignCenter"))
self.gridLayout.addWidget(self.buttonAlignCenter, 12, 7, 1, 1)
self.buttonAlignLeft = QtGui.QToolButton(self.centralWidget)
self.buttonAlignLeft.setEnabled(False)
self.buttonAlignLeft.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/alignleft.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.buttonAlignLeft.setIcon(icon1)
self.buttonAlignLeft.setIconSize(QtCore.QSize(24, 24))
self.buttonAlignLeft.setCheckable(True)
self.buttonAlignLeft.setChecked(True)
self.buttonAlignLeft.setAutoRaise(True)
self.buttonAlignLeft.setObjectName(_fromUtf8("buttonAlignLeft"))
self.gridLayout.addWidget(self.buttonAlignLeft, 12, 6, 1, 1)
self.buttonClearThisSearch = QtGui.QPushButton(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonClearThisSearch.sizePolicy().hasHeightForWidth())
self.buttonClearThisSearch.setSizePolicy(sizePolicy)
self.buttonClearThisSearch.setObjectName(_fromUtf8("buttonClearThisSearch"))
self.gridLayout.addWidget(self.buttonClearThisSearch, 3, 1, 1, 1)
self.declarativeviewResult = QtDeclarative.QDeclarativeView(self.centralWidget)
self.declarativeviewResult.setFrameShape(QtGui.QFrame.StyledPanel)
self.declarativeviewResult.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.declarativeviewResult.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.declarativeviewResult.setObjectName(_fromUtf8("declarativeviewResult"))
self.gridLayout.addWidget(self.declarativeviewResult, 13, 3, 1, 5)
self.buttonCloseThisSearch = QtGui.QPushButton(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonCloseThisSearch.sizePolicy().hasHeightForWidth())
self.buttonCloseThisSearch.setSizePolicy(sizePolicy)
self.buttonCloseThisSearch.setObjectName(_fromUtf8("buttonCloseThisSearch"))
self.gridLayout.addWidget(self.buttonCloseThisSearch, 3, 2, 1, 1)
self.buttonSaveSearches = QtGui.QPushButton(self.centralWidget)
self.buttonSaveSearches.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonSaveSearches.sizePolicy().hasHeightForWidth())
self.buttonSaveSearches.setSizePolicy(sizePolicy)
self.buttonSaveSearches.setObjectName(_fromUtf8("buttonSaveSearches"))
self.gridLayout.addWidget(self.buttonSaveSearches, 3, 3, 1, 1)
self.label_8 = QtGui.QLabel(self.centralWidget)
self.label_8.setSizeIncrement(QtCore.QSize(2, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 12, 3, 1, 1)
self.label_9 = QtGui.QLabel(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy)
self.label_9.setLayoutDirection(QtCore.Qt.RightToLeft)
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 14, 3, 1, 1)
self.buttonAddFiles = QtGui.QPushButton(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonAddFiles.sizePolicy().hasHeightForWidth())
self.buttonAddFiles.setSizePolicy(sizePolicy)
self.buttonAddFiles.setObjectName(_fromUtf8("buttonAddFiles"))
self.gridLayout.addWidget(self.buttonAddFiles, 14, 1, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
MainWindow.setCentralWidget(self.centralWidget)
self.statusBar = QtGui.QStatusBar(MainWindow)
self.statusBar.setObjectName(_fromUtf8("statusBar"))
MainWindow.setStatusBar(self.statusBar)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
self.menuFiel = QtGui.QMenu(self.menuBar)
self.menuFiel.setObjectName(_fromUtf8("menuFiel"))
self.menuEdit = QtGui.QMenu(self.menuBar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
self.menuAbout = QtGui.QMenu(self.menuBar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menuBar)
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionNewProject = QtGui.QAction(MainWindow)
self.actionNewProject.setEnabled(False)
self.actionNewProject.setObjectName(_fromUtf8("actionNewProject"))
self.actionSaveProject = QtGui.QAction(MainWindow)
self.actionSaveProject.setEnabled(False)
self.actionSaveProject.setObjectName(_fromUtf8("actionSaveProject"))
self.actionSaveProjectAs = QtGui.QAction(MainWindow)
self.actionSaveProjectAs.setEnabled(False)
self.actionSaveProjectAs.setObjectName(_fromUtf8("actionSaveProjectAs"))
self.actionEditWordClasses = QtGui.QAction(MainWindow)
self.actionEditWordClasses.setEnabled(False)
self.actionEditWordClasses.setObjectName(_fromUtf8("actionEditWordClasses"))
self.actionSearches = QtGui.QAction(MainWindow)
self.actionSearches.setEnabled(False)
self.actionSearches.setObjectName(_fromUtf8("actionSearches"))
self.actionAboutPoioAnalyzer = QtGui.QAction(MainWindow)
self.actionAboutPoioAnalyzer.setObjectName(_fromUtf8("actionAboutPoioAnalyzer"))
self.actionQuickSearch = QtGui.QAction(MainWindow)
self.actionQuickSearch.setEnabled(False)
self.actionQuickSearch.setObjectName(_fromUtf8("actionQuickSearch"))
self.actionExportSearchResult = QtGui.QAction(MainWindow)
self.actionExportSearchResult.setObjectName(_fromUtf8("actionExportSearchResult"))
self.menuFiel.addAction(self.actionNewProject)
self.menuFiel.addAction(self.actionSaveProject)
self.menuFiel.addSeparator()
self.menuFiel.addAction(self.actionExportSearchResult)
self.menuFiel.addSeparator()
self.menuFiel.addAction(self.actionSaveProjectAs)
self.menuFiel.addAction(self.actionQuit)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionEditWordClasses)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionSearches)
self.menuEdit.addAction(self.actionQuickSearch)
self.menuAbout.addAction(self.actionAboutPoioAnalyzer)
self.menuBar.addAction(self.menuFiel.menuAction())
self.menuBar.addAction(self.menuEdit.menuAction())
self.menuBar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.tabWidget, self.buttonClearThisSearch)
MainWindow.setTabOrder(self.buttonClearThisSearch, self.buttonSearch)
MainWindow.setTabOrder(self.buttonSearch, self.listFiles)
MainWindow.setTabOrder(self.listFiles, self.buttonAlignLeft)
MainWindow.setTabOrder(self.buttonAlignLeft, self.buttonAlignCenter)
MainWindow.setTabOrder(self.buttonAlignCenter, self.buttonAddFiles)
MainWindow.setTabOrder(self.buttonAddFiles, self.buttonRemoveFiles)
MainWindow.setTabOrder(self.buttonRemoveFiles, self.lineeditQuickSearch)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "PoioAnalyzer", None))
self.label_7.setText(_translate("MainWindow", "Files:", None))
self.buttonRemoveFiles.setText(_translate("MainWindow", "Remove files", None))
self.buttonSearch.setText(_translate("MainWindow", "Search", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabNewSearch), _translate("MainWindow", "New Search...", None))
self.buttonAlignCenter.setText(_translate("MainWindow", "...", None))
self.buttonAlignLeft.setText(_translate("MainWindow", "...", None))
self.buttonClearThisSearch.setText(_translate("MainWindow", "Clear This Search", None))
self.buttonCloseThisSearch.setText(_translate("MainWindow", "Close This Search", None))
self.buttonSaveSearches.setText(_translate("MainWindow", "Save Searches...", None))
self.label_8.setText(_translate("MainWindow", "Result:", None))
self.label_9.setText(_translate("MainWindow", "Quick Search:", None))
self.buttonAddFiles.setText(_translate("MainWindow", "Add files...", None))
self.menuFiel.setTitle(_translate("MainWindow", "File", None))
self.menuEdit.setTitle(_translate("MainWindow", "Edit", None))
self.menuAbout.setTitle(_translate("MainWindow", "About", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionNewProject.setText(_translate("MainWindow", "New Project", None))
self.actionNewProject.setShortcut(_translate("MainWindow", "Ctrl+N", None))
self.actionSaveProject.setText(_translate("MainWindow", "Save Project", None))
self.actionSaveProject.setShortcut(_translate("MainWindow", "Ctrl+S", None))
self.actionSaveProjectAs.setText(_translate("MainWindow", "Save Project As...", None))
self.actionEditWordClasses.setText(_translate("MainWindow", "Edit Word Classes...", None))
self.actionSearches.setText(_translate("MainWindow", "Saved Searches...", None))
self.actionAboutPoioAnalyzer.setText(_translate("MainWindow", "About PoioAnalyzer...", None))
self.actionQuickSearch.setText(_translate("MainWindow", "Quick Search", None))
self.actionQuickSearch.setShortcut(_translate("MainWindow", "Ctrl+F", None))
self.actionExportSearchResult.setText(_translate("MainWindow", "Export Search Result...", None))
self.actionExportSearchResult.setShortcut(_translate("MainWindow", "Ctrl+E", None))
from PyQt4 import QtDeclarative
import poioanalyzer_rc
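# Hedged usage sketch (not part of the pyuic4 output): generated Ui classes like this are
# normally composed with a QMainWindow subclass, e.g.:
#
#   class MainWindow(QtGui.QMainWindow):
#       def __init__(self, parent=None):
#           super(MainWindow, self).__init__(parent)
#           self.ui = Ui_MainWindow()
#           self.ui.setupUi(self)
#
#   app = QtGui.QApplication([])
#   window = MainWindow()
#   window.show()
#   app.exec_()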
| 60.010453
| 125
| 0.733438
|
94f53b225ade52d44373bd510ab5dc0c66ca6cd4
| 4,250
|
py
|
Python
|
newassess.py
|
Juzhang18/assessmentV1.1
|
13862450a31cbc34db960c76d5040b8517d90c78
|
[
"MIT"
] | 1
|
2019-08-15T17:52:33.000Z
|
2019-08-15T17:52:33.000Z
|
newassess.py
|
Juzhang18/assessmentV1.1
|
13862450a31cbc34db960c76d5040b8517d90c78
|
[
"MIT"
] | null | null | null |
newassess.py
|
Juzhang18/assessmentV1.1
|
13862450a31cbc34db960c76d5040b8517d90c78
|
[
"MIT"
] | null | null | null |
import csv
import smtplib
import flask
from flask import Flask, render_template,request, jsonify
#-------------CSV functions ported over--------
def csver(myDict):
writefile = 'masterDatabase.csv'
header = ['name', 'email', 'company', 'emailing', 'messaging', 'calendar', 'video', 'voice', 'secure', 'collab', 'mobile', 'access', 'remotely', 'fit', 'check', 'personalized', 'outside', 'onboarding', 'training', 'self-service', 'hardware', 'resources', 'security']
with open(writefile, 'a', newline='\n') as csvFile:
w = csv.writer(csvFile)
w.writerow(myDict.values())
def intoDict(filename): #compiles a dictionary with the values in the csv
with open(filename, mode='r', newline='') as csv_file:
csv_reader = csv.reader(csv_file)
counter=0
dictionary={}
for row in csv_reader:
dictionary[counter] = row
counter +=1
return dictionary
def locator(dic,term):
index=0
for i in dic[0]: #looks through the header for the term
if i == term:
print(term + " has been found at " + str(index))
break
else:
index += 1
return index
def sort(dic,location): #sorts the items in the dic into a new one by location
counter=0
sortedDict = {}
for j in dic:
if dic[counter][location] not in sortedDict:
sortedDict[dic[counter][location]] = [counter] #if list does not exist yet, create new list
else:
sortedDict[dic[counter][location]].append(counter) #else it appends on an existing one
counter+=1
return sortedDict
def ultraFind(filename,headerTerm):
dictionary=intoDict(filename)
location=locator(dictionary,headerTerm)
print('---------this is the dictionary we are working with---------- \n')
print(dictionary)
sortedDict=sort(dictionary,location)
del sortedDict[headerTerm] #deletes the header element in the dictionary
print('\n ----------this is the dictionary sorted by the header term, ' + headerTerm + ':---------- \n')
print(sortedDict)
return sortedDict
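# Illustrative sketch (hypothetical data): for a CSV whose rows are
#   question,category
#   q1,Communication
#   q2,Communication
#   q3,Collaboration
# intoDict() yields {0: ['question', 'category'], 1: ['q1', 'Communication'], ...},
# locator() returns 1 for 'category', and ultraFind('file.csv', 'category') returns
# {'Communication': [1, 2], 'Collaboration': [3]} (row indices grouped by category).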
#-----------FLASK APPLICATION---------------
#THIS IS THE APPLICATION OBJECT, ALLOWING USE OF APP
app = Flask(__name__)
app.config["DEBUG"] = True #DEBUGGER
#DECORATORS: THEY LINK FUNCTION TO A URL
@app.route('/')
def home():
return 'Hello world! Perhaps you were looking for index' #returns hello world
@app.route('/index', methods = ['GET', 'POST'])
def index():
return render_template('index.HTML')
@app.route('/results', methods = ['GET', 'POST'])
def results():
if request.method == 'POST':
result = request.form
categories = ultraFind('assessment.csv','category')
resultValues=list(result.values())[4:]
index=0
counter=0
for res in resultValues:
resultValues[counter] = int(res)
counter+=1
for category in categories:
catLength=len(categories[category])
percentify=100/(5*catLength)
categories[category] = int(sum(resultValues[index:index+catLength])*percentify)
index += catLength
#categories['Communication'] = (int(result['emailing']) + int(result['messaging']) + int(result['calendar']) + int(result['video']) + int(result['voice'])) *4 #adds all the results up for COM and change it into percent
#categories['Collaboration'] = (int(result['secure']) + int(result['collab']) + int(result['mobile']) + int(result['access']) + int(result['remotely'])) *4 #adds all the results up for COM
#categories['Employment'] = (int(result['fit']) + int(result['check']) + int(result['personalized']) + int(result['outside']) + int(result['onboarding'])) *4 #adds all the results up for COM
#categories['Investment'] = (int(result['training']) + int(result['self-service']) + int(result['hardware']) + int(result['resources']) + int(result['security'])) *4 #adds all the results up for COM
csver(result)
return render_template('results.HTML', categories = categories, result=result)#returns the results for HTML manipulation
app.run()
| 40.865385
| 268
| 0.619294
|
c1e9b65af41b6d227a7602dd1d81fd2b9b5bbe79
| 19,226
|
py
|
Python
|
chrome/common/extensions/docs/server2/api_data_source.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-02-03T05:19:48.000Z
|
2021-11-15T15:07:21.000Z
|
chrome/common/extensions/docs/server2/api_data_source.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/common/extensions/docs/server2/api_data_source.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
from collections import defaultdict, Mapping
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
def _RemoveNoDocs(item):
if json_parse.IsDict(item):
if item.get('nodoc', False):
return True
for key, value in item.items():
if _RemoveNoDocs(value):
del item[key]
elif type(item) == list:
to_remove = []
for i in item:
if _RemoveNoDocs(i):
to_remove.append(i)
for i in to_remove:
item.remove(i)
return False
def _DetectInlineableTypes(schema):
"""Look for documents that are only referenced once and mark them as inline.
Actual inlining is done by _InlineDocs.
"""
if not schema.get('types'):
return
banned = frozenset(('value', 'choices', 'items', 'returns'))
refcounts = defaultdict(int)
# Use an explicit stack instead of recursion.
stack = [schema]
while stack:
node = stack.pop()
if isinstance(node, list):
stack.extend(node)
elif isinstance(node, Mapping):
if '$ref' in node:
refcounts[node['$ref']] += 1
stack.extend(v for k, v in node.iteritems() if k not in banned)
for type_ in schema['types']:
if not 'enum' in type_:
if refcounts[type_['id']] == 1:
type_['inline_doc'] = True
def _InlineDocs(schema):
"""Replace '$ref's that refer to inline_docs with the json for those docs.
"""
types = schema.get('types')
if types is None:
return
inline_docs = {}
types_without_inline_doc = []
# Gather the types with inline_doc.
for type_ in types:
if type_.get('inline_doc'):
inline_docs[type_['id']] = type_
for k in ('description', 'id', 'inline_doc'):
type_.pop(k, None)
else:
types_without_inline_doc.append(type_)
schema['types'] = types_without_inline_doc
def apply_inline(node):
if isinstance(node, list):
for i in node:
apply_inline(i)
elif isinstance(node, Mapping):
ref = node.get('$ref')
if ref and ref in inline_docs:
node.update(inline_docs[ref])
del node['$ref']
for k, v in node.iteritems():
apply_inline(v)
apply_inline(schema)
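# Illustrative sketch (hypothetical schema): given
#   {'types': [{'id': 'Foo', 'inline_doc': True, 'properties': {...}}],
#    'functions': [{'parameters': [{'name': 'foo', '$ref': 'Foo'}]}]}
# _InlineDocs copies Foo's remaining keys (minus 'id', 'description', 'inline_doc') into the
# {'$ref': 'Foo'} node, deletes the '$ref' key, and drops Foo from schema['types'].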
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
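# For example (sketch): _FormatValue(1234567) -> '1,234,567' and _FormatValue(42) -> '42'.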
class _JSCModel(object):
"""Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
"""
def __init__(self, json, ref_resolver, disable_refs, idl=False):
self._ref_resolver = ref_resolver
self._disable_refs = disable_refs
clean_json = copy.deepcopy(json)
if _RemoveNoDocs(clean_json):
self._namespace = None
else:
if idl:
_DetectInlineableTypes(clean_json)
_InlineDocs(clean_json)
self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
return {
'name': self._namespace.name,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties)
}
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
if (function.parent is not None and
not isinstance(function.parent, model.Namespace)):
function_dict['parent_name'] = function.parent.simple_name
if function.returns:
function_dict['returns'] = self._GenerateType(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function.callback is not None:
# Show the callback as an extra parameter.
function_dict['parameters'].append(
self._GenerateCallbackProperty(function.callback))
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'parameters': [self._GenerateProperty(p) for p in event.params],
'callback': self._GenerateCallback(event.callback),
'filters': [self._GenerateProperty(f) for f in event.filters],
'conditions': [self._GetLink(condition)
for condition in event.conditions],
'actions': [self._GetLink(action) for action in event.actions],
'supportsRules': event.supports_rules,
'id': _CreateId(event, 'event')
}
if (event.parent is not None and
not isinstance(event.parent, model.Namespace)):
event_dict['parent_name'] = event.parent.simple_name
if event.callback is not None:
# Show the callback as an extra parameter.
event_dict['parameters'].append(
self._GenerateCallbackProperty(event.callback))
if len(event_dict['parameters']) > 0:
event_dict['parameters'][-1]['last'] = True
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()]
def _GenerateProperty(self, property_):
if not hasattr(property_, 'type_'):
for d in dir(property_):
if not d.startswith('_'):
print ('%s -> %s' % (d, getattr(property_, d)))
type_ = property_.type_
# Make sure we generate property info for arrays, too.
# TODO(kalman): what about choices?
if type_.property_type == model.PropertyType.ARRAY:
properties = type_.item_type.properties
else:
properties = type_.properties
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'parameters': [],
'returns': None,
'id': _CreateId(property_, 'property')
}
if type_.property_type == model.PropertyType.FUNCTION:
function = type_.function
for param in function.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if function.returns:
property_dict['returns'] = self._GenerateType(function.returns)
if (property_.parent is not None and
not isinstance(property_.parent, model.Namespace)):
property_dict['parent_name'] = property_.parent.simple_name
value = property_.value
if value is not None:
if isinstance(value, int):
property_dict['value'] = _FormatValue(value)
else:
property_dict['value'] = value
else:
self._RenderTypeInformation(type_, property_dict)
return property_dict
def _GenerateCallbackProperty(self, callback):
property_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'optional': callback.optional,
'id': _CreateId(callback, 'property'),
'simple_type': 'function',
}
if (callback.parent is not None and
not isinstance(callback.parent, model.Namespace)):
property_dict['parent_name'] = callback.parent.simple_name
return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
if type_.property_type == model.PropertyType.CHOICES:
dst_dict['choices'] = self._GenerateTypes(type_.choices)
# We keep track of which == last for knowing when to add "or" between
# choices in templates.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif type_.property_type == model.PropertyType.REF:
dst_dict['link'] = self._GetLink(type_.ref_type)
elif type_.property_type == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateType(type_.item_type)
elif type_.property_type == model.PropertyType.ENUM:
dst_dict['enum_values'] = []
for enum_value in type_.enum_values:
dst_dict['enum_values'].append({'name': enum_value})
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif type_.instance_of is not None:
dst_dict['simple_type'] = type_.instance_of.lower()
else:
dst_dict['simple_type'] = type_.property_type.name.lower()
class _LazySamplesGetter(object):
"""This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
"""
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, base_path):
def create_compiled_fs(fn, category):
return compiled_fs_factory.Create(fn, APIDataSource, category=category)
self._permissions_cache = create_compiled_fs(self._LoadPermissions,
'permissions')
self._json_cache = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, False),
'json')
self._idl_cache = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, False),
'idl')
# These caches are used if an APIDataSource does not want to resolve the
# $refs in an API. This is needed to prevent infinite recursion in
# ReferenceResolver.
self._json_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, True),
'json-no-refs')
self._idl_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, True),
'idl-no-refs')
self._idl_names_cache = create_compiled_fs(self._GetIDLNames, 'idl-names')
self._names_cache = create_compiled_fs(self._GetAllNames, 'names')
self._base_path = base_path
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
def Create(self, request, disable_refs=False):
"""Create an APIDataSource. |disable_refs| specifies whether $ref's in
APIs being processed by the |ToDict| method of _JSCModel follows $ref's
in the API. This prevents endless recursion in ReferenceResolver.
"""
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
if not disable_refs and self._ref_resolver_factory is None:
logging.error('ReferenceResolver.Factory was never set in '
'APIDataSource.Factory.')
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._json_cache_no_refs,
self._idl_cache_no_refs,
self._names_cache,
self._idl_names_cache,
self._base_path,
samples,
disable_refs)
def _LoadPermissions(self, file_name, json_str):
return json_parse.Parse(json_str)
def _LoadJsonAPI(self, api, disable_refs):
return _JSCModel(
json_parse.Parse(api)[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _LoadIdlAPI(self, api, disable_refs):
idl = idl_parser.IDLParser().ParseData(api)
return _JSCModel(
idl_schema.IDLSchema(idl).process()[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs,
idl=True).ToDict()
def _GetIDLNames(self, base_dir, apis):
return self._GetExtNames(apis, ['idl'])
def _GetAllNames(self, base_dir, apis):
return self._GetExtNames(apis, ['json', 'idl'])
def _GetExtNames(self, apis, exts):
return [model.UnixName(os.path.splitext(api)[0]) for api in apis
if os.path.splitext(api)[1][1:] in exts]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
json_cache_no_refs,
idl_cache_no_refs,
names_cache,
idl_names_cache,
base_path,
samples,
disable_refs):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._json_cache_no_refs = json_cache_no_refs
self._idl_cache_no_refs = idl_cache_no_refs
self._names_cache = names_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
self._disable_refs = disable_refs
def _GetFeatureFile(self, filename):
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
def _GetFeatureData(self, path):
# Remove 'experimental_' from path name to match the keys in
# _permissions_features.json.
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
feature_data = self._GetFeatureFile(filename).get(path, None)
if feature_data is not None:
break
# There are specific cases in which the feature is actually a list of
# features where only one needs to match; but currently these are only
# used to whitelist features for specific extension IDs. Filter those out.
if isinstance(feature_data, list):
feature_list = feature_data
feature_data = None
for single_feature in feature_list:
if 'whitelist' in single_feature:
continue
if feature_data is not None:
# Note: if you are seeing the exception below, add more heuristics as
# required to form a single feature.
raise ValueError('Multiple potential features match %s. I can\'t '
'decide which one to use. Please help!' % path)
feature_data = single_feature
if feature_data and feature_data['channel'] in ('trunk', 'dev', 'beta'):
feature_data[feature_data['channel']] = True
return feature_data
def _GenerateHandlebarContext(self, handlebar_dict, path):
handlebar_dict['permissions'] = self._GetFeatureData(path)
handlebar_dict['samples'] = _LazySamplesGetter(path, self._samples)
return handlebar_dict
def _GetAsSubdirectory(self, name):
if name.startswith('experimental_'):
parts = name[len('experimental_'):].split('_', 1)
parts[1] = 'experimental_%s' % parts[1]
return '/'.join(parts)
return name.replace('_', '/', 1)
def get(self, key):
if key.endswith('.html') or key.endswith('.json') or key.endswith('.idl'):
path, ext = os.path.splitext(key)
else:
path = key
unix_name = model.UnixName(path)
idl_names = self._idl_names_cache.GetFromFileListing(self._base_path)
names = self._names_cache.GetFromFileListing(self._base_path)
if unix_name not in names and self._GetAsSubdirectory(unix_name) in names:
unix_name = self._GetAsSubdirectory(unix_name)
if self._disable_refs:
cache, ext = (
(self._idl_cache_no_refs, '.idl') if (unix_name in idl_names) else
(self._json_cache_no_refs, '.json'))
else:
cache, ext = ((self._idl_cache, '.idl') if (unix_name in idl_names) else
(self._json_cache, '.json'))
return self._GenerateHandlebarContext(
cache.GetFromFile('%s/%s%s' % (self._base_path, unix_name, ext)),
path)
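# Illustrative sketch (added for clarity, not part of the original file):
# _GetAsSubdirectory() maps a flat API name onto the subdirectory layout used
# on disk, keeping the 'experimental_' prefix on the leaf name. The API names
# below are invented purely to show the mapping; the helper is a standalone
# re-implementation so it can be read (or run) on its own.
def _example_get_as_subdirectory():
  def as_subdirectory(name):
    if name.startswith('experimental_'):
      parts = name[len('experimental_'):].split('_', 1)
      parts[1] = 'experimental_%s' % parts[1]
      return '/'.join(parts)
    return name.replace('_', '/', 1)
  assert as_subdirectory('experimental_devtools_console') == (
      'devtools/experimental_console')
  assert as_subdirectory('app_window') == 'app/window'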
| 37.624266
| 80
| 0.66899
|
af0b3fe32ac45a9ad0685fd1434b62d385fd260d
| 34
|
py
|
Python
|
Rooms/tiger.py
|
ventin75/house
|
99c9e34f4f6c03593eb7a95f70877fdbf13b07e7
|
[
"MIT"
] | null | null | null |
Rooms/tiger.py
|
ventin75/house
|
99c9e34f4f6c03593eb7a95f70877fdbf13b07e7
|
[
"MIT"
] | null | null | null |
Rooms/tiger.py
|
ventin75/house
|
99c9e34f4f6c03593eb7a95f70877fdbf13b07e7
|
[
"MIT"
] | null | null | null |
print("Have a roar-some Year!!!")
| 17
| 33
| 0.647059
|
1a3e7ee1ee16727dae7eb217f1e43bfa80670912
| 710
|
py
|
Python
|
dataspace/transform/dataframe.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 3
|
2021-06-28T09:45:51.000Z
|
2022-01-10T15:38:07.000Z
|
dataspace/transform/dataframe.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | null | null | null |
dataspace/transform/dataframe.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 1
|
2021-07-01T08:50:32.000Z
|
2021-07-01T08:50:32.000Z
|
import pandas as pd
from ..utils.messages import msg_warning, msg_info
def _drop(df: pd.DataFrame, *cols) -> pd.DataFrame:
try:
index = df.columns.values
for col in cols:
if col not in index:
msg_warning("Column", col, "not found. Aborting")
return
df = df.drop(col, axis=1)
except Exception as e:
raise ("Can not drop column", e)
return df
def _rename(df: pd.DataFrame, source_col: str, dest_col: str) -> pd.DataFrame:
try:
df = df.rename(columns={source_col: dest_col})
except Exception as e:
raise ("Can not rename column", e)
msg_info("Column", source_col, "renamed")
return df
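# Minimal usage sketch (added for clarity, not part of the original module).
# It relies only on pandas plus the helpers defined above; the column names
# are invented. Running it assumes the package-relative msg_* imports resolve.
def _example_transforms() -> pd.DataFrame:
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
    df = _drop(df, "c")             # remove column "c"
    df = _rename(df, "a", "alpha")  # rename column "a" to "alpha"
    assert list(df.columns) == ["alpha", "b"]
    return df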
| 28.4
| 78
| 0.601408
|
5cbdfc7e9e9a67f1053114b3e73f8205d99024be
| 2,286
|
py
|
Python
|
api/api/tests.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | null | null | null |
api/api/tests.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | 12
|
2020-02-11T23:12:23.000Z
|
2022-02-26T13:08:40.000Z
|
api/api/tests.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | 2
|
2018-08-10T08:14:31.000Z
|
2018-10-01T16:46:37.000Z
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from api.models import Software, FileUploadEvent, File
class TestRun(TestCase):
def setUp(self):
user = User.objects.create(username='testuser')
user.set_password('testpw')
user.save()
self.client = Client()
self.client.login(username='testuser', password='testpw')
def testSoftware(self):
#post software
response_post = self.client.post('/api/v1/software/', {"name":"test software", "run_command":"python test.py"})
self.assertEqual(response_post.status_code, 201)
#get software
response_get = self.client.get('/api/v1/software/')
self.assertEqual(response_get.status_code, 200)
#check creation of software
self.assertEqual(Software.objects.all().count(), 1)
def testUpload(self):
#post software
software_post = self.client.post('/api/v1/software/', {"name":"test software", "run_command":"python test.py"})
self.assertEqual(software_post.status_code, 201)
#get software
software_get = self.client.get('/api/v1/software/')
self.assertEqual(software_get.status_code, 200)
#check creation of software
self.assertEqual(Software.objects.all().count(), 1)
software_id = software_get.data[0]['id']
python_file = SimpleUploadedFile('test.py', b'file_content')
#post upload event
upload_post = self.client.post('/api/v1/file_upload_event/', {"file":python_file, "relative_dir":"./",\
"file_type":"static", "software":software_id})
self.assertEqual(upload_post.status_code, 201)
#get upload event
upload_get = self.client.get('/api/v1/file_upload_event/')
self.assertEqual(upload_get.status_code, 200)
#check creation of upload event
self.assertEqual(FileUploadEvent.objects.all().count(), 1)
#get file
file_get = self.client.get('/api/v1/files/')
self.assertEqual(file_get.status_code, 200)
#check creation of file
self.assertEqual(File.objects.all().count(), 1)
| 44.823529
| 119
| 0.646544
|
bbb45d883d7489e4cf66ef3a6a962c35e12e0a0e
| 151
|
py
|
Python
|
jp.atcoder/abc136/abc136_a/11409946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc136/abc136_a/11409946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc136/abc136_a/11409946.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
a, b, c = map(int, sys.stdin.readline().split())
def main():
print(max(0, b + c - a))
if __name__ == '__main__':
main()
| 15.1
| 49
| 0.529801
|
06159a4d350b46ecbbaee7c309b5b8261486c15d
| 9,300
|
py
|
Python
|
scripts/python/findTADs.py
|
sergpolly/cworld-dekker
|
7557bbe873e623e9059482722922faca4e784ad0
|
[
"Apache-2.0"
] | 43
|
2016-10-28T06:24:47.000Z
|
2022-03-20T09:39:23.000Z
|
scripts/python/findTADs.py
|
sergpolly/cworld-dekker
|
7557bbe873e623e9059482722922faca4e784ad0
|
[
"Apache-2.0"
] | 13
|
2017-12-02T20:00:11.000Z
|
2022-03-18T16:28:34.000Z
|
scripts/python/findTADs.py
|
sergpolly/cworld-dekker
|
7557bbe873e623e9059482722922faca4e784ad0
|
[
"Apache-2.0"
] | 24
|
2016-12-05T06:03:08.000Z
|
2021-11-24T12:43:03.000Z
|
#!/usr/local/bin/python
"""
***********************************************
- PROGRAM: findTADs.py
- CONTACT: Bryan lajoie (bryan.lajoie@umassmed.edu)
***********************************************
"""
from __future__ import print_function
# Built in modules
import argparse
import os.path
import sys
import gzip
import re
import itertools
import time
import gzip
# deprecated from scipy and unused in the script:
# from scipy.stats.stats import nanmean
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage,dendrogram
from scipy.cluster.vq import kmeans, vq, whiten
import scipy.spatial.distance as dist
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
# For eigenvectors and eigenvalues
from scipy import linalg as la
from math import cos,log,sin,sqrt
# deprecated from scipy and unused in the script:
# from scipy import weave
def main():
print("")
# Get input options
args = check_options()
# Store the variables
inputMatrix = args.inputMatrix
nclust = args.nclust
if not os.path.isfile(inputMatrix):
        sys.exit('invalid input file! (non-existent)')
print("inputMatrix",inputMatrix)
inputMatrixName=os.path.basename(inputMatrix)
inputMatrixName=re.sub(".gz", "", inputMatrixName)
inputMatrixName=re.sub(".matrix", "", inputMatrixName)
print("inputMatrixName",inputMatrixName)
print("")
print("loading matrix ... ",end="")
if inputMatrix.endswith('.gz'):
infh=gzip.open(inputMatrix,'r')
else:
infh=open(inputMatrix,'r')
matrix,header_rows,header_cols = load_matrix((l for l in infh if not l.startswith('#')), hrows=1, hcols=1) # since this returns data, header_rows and header_cols
infh.close()
print("done")
nrows=len(header_rows)
ncols=len(header_cols)
print("")
    # enforce symmetrical matrices only
enforceSymmetrical(matrix)
# get assembly from all headers
assembly=getMatrixAssembly(header_rows+header_cols)
# find nan rows
print("finding nan rows ... ",end="\n")
nan_rows=np.sum(np.isnan(matrix),axis=0)==matrix.shape[0]
nan_cols=np.sum(np.isnan(matrix),axis=1)==matrix.shape[1]
nan_rowcols=nan_rows | nan_cols
nans=(np.isnan(matrix))
matrix[nans]=np.nan
print("converting all 2D nan to 0 ... ",end="\n")
matrix = np.nan_to_num(matrix)
#z = linkage(matrix)
    codebook, _ = kmeans(matrix, nclust, iter=1)
    cluster_indices, cluster_distances = vq(matrix, codebook)
for i in xrange(nrows):
tmp_header=header_rows[i]
header_name,header_assembly,header_chr,header_start,header_end=splitHeader(tmp_header)
tmp_cluster=cluster_indices[i]
tmp_cluster_dist=cluster_distances[i]
print(header_chr+"\t"+header_start+"\t"+header_end+"\t"+str(tmp_cluster))
#d = dendrogram(z)
#matplotlib.pyplot.gcf()
#matplotlib.pyplot.savefig("tmp.pdf")
print("")
def writeMatrix(header_rows,header_cols,matrix,matrixFile,precision=4):
"""
write a np matrix with row/col headers - my5C file format - txt formatted gzipped file
"""
nrows=len(header_rows)
ncols=len(header_cols)
# interaction matrix output
out_fh=gzip.open(matrixFile,"wb")
# write matrix col headers
header=[str(i) for i in header_cols]
print(str(nrows)+"x"+str(ncols)+"\t"+"\t".join(header),file=out_fh)
format_func=("{:0."+str(precision)+"f}").format
k=0
for i in xrange(nrows):
print(header_rows[i]+"\t"+"\t".join(map(format_func,matrix[i,:])),file=out_fh)
out_fh.close()
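# Illustrative sketch (added for clarity, not part of the original script):
# writeMatrix() emits a gzipped my5C-style layout, i.e. a first line of
# 'NxM<TAB>column headers...' followed by one row header plus M values per
# line. The headers and the output file name below are invented.
def _example_write_matrix(out_file='example_matrix.gz'):
    headers = ['bin0|hg19|chr1:1-10', 'bin1|hg19|chr1:10-20']
    writeMatrix(headers, headers, np.array([[1.0, 2.0], [2.0, 1.0]]), out_file)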
def enforceSymmetrical(matrix):
nmatrix_rows=matrix.shape[0]
nmatrix_cols=matrix.shape[1]
if(nmatrix_rows != nmatrix_cols):
sys.exit('non-symmetrical matrix!')
all((matrix[y,:].T==matrix[y,:]).all() for y in range(matrix.shape[0]))
all((matrix[:,x].T==matrix[:,x]).all() for x in range(matrix.shape[1]))
def splitHeader(header):
m=re.search(r'(\S+)\|(\S+)\|(\S+):(\d+)-(\d+)',header)
if m==None:
sys.exit('error: incorrect input format!')
header_name,header_assembly,header_chr,header_start,header_end=m.groups()
return(header_name,header_assembly,header_chr,header_start,header_end)
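# Illustrative sketch (added for clarity, not part of the original script):
# splitHeader() expects my5C-style headers of the form
# NAME|ASSEMBLY|CHR:START-END; every field is returned as a string. The
# header value below is invented for illustration.
def _example_split_header():
    fields = splitHeader('bin0|hg19|chr1:1-40000')
    assert fields == ('bin0', 'hg19', 'chr1', '1', '40000')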
def getMatrixAssembly(headers):
assembly=None
for i,header in enumerate(headers):
m=re.search(r'(\S+)\|(\S+)\|(\S+):(\d+)-(\d+)',header)
if m==None:
sys.exit('error: incorrect input format!')
header_name,header_assembly,header_chr,header_start,header_end=m.groups()
if assembly==None:
assembly=header_assembly
else:
if assembly!=header_assembly:
sys.exit('assembly/header_assembly is not constant!')
assembly=header_assembly
print("matrix assembly:",assembly)
return(assembly)
def load_matrix(fh,hrows=0,hcols=0,np_dtype='float32',row_block_size=1000,numpy_mode=True,max_rows=None,verbose=False,return_all=False,pad=None):
"""
    load a np.array or a list of lists from a text file handle or filename
    (works with any iterator); more memory efficient than numpy.genfromtxt().
    Headers are returned as lists of strings.
"""
fh_from_filename=False
if type(fh)==str:
if (fh=='-'):
fh=sys.stdin
else:
fh=open(fh,'r')
fh_from_filename=True
original_fh=fh
# init
firstline=fh.next()
fh=itertools.chain([firstline],fh)
cols=len(firstline.rstrip("\n").split("\t"))
rows=row_block_size
if (max_rows!=None and max_rows<row_block_size):
rows=max_rows
if(hcols):
cols-=hcols
if numpy_mode:
data=np.zeros((rows,cols),dtype=np_dtype)
else:
data=[]
header_rows=[[] for i in range(hrows)]
for i in range(hrows):
header_rows[i]=fh.next().rstrip("\n").split("\t")[hcols:]
header_cols=[[] for i in range(hcols)]
# fill one line at a time
prev_cols=-1
r=0
if (max_rows==None or r<max_rows):
for i in fh:
line=i.rstrip("\n").split("\t")
cols=len(line)-hcols
# if(cols==0):
# sys.exit('no valid columns in input line '+str(r))
if(prev_cols>-1 and cols!=prev_cols):
if(pad and cols<prev_cols):
line=line+['']*(prev_cols-cols)
cols=len(line)-hcols
else:
sys.exit('inconsistent number of columns in input line '+str(r))
prev_cols=cols
if numpy_mode:
not_allowed = ['','NA']
                try: # if np_dtype does not accept '' or 'NA' as a value
np.dtype(np_dtype).type(not_allowed)
except ValueError:
try:
np.dtype(np_dtype).type('nan')
line=[('nan' if i in not_allowed else i) for i in line] # '' or 'NA' are replaced with 'nan'
except ValueError:
pass
for j in range(hcols):
header_cols[j].append(line[j])
if numpy_mode:
data[r,:]=line[hcols:]
# enlarge data if needed
if(r==(data.shape[0]-1)):
data=np.resize(data,(data.shape[0]+row_block_size,cols))
rows=data.shape[0]
else:
data.append(line[hcols:])
r+=1
if (max_rows!=None and r>=max_rows):
break
rows=r
if numpy_mode:
data=np.resize(data,(rows,cols))
if (fh_from_filename):
original_fh.close()
if (hcols==1):
header_cols=header_cols[0]
if (hrows==1):
header_rows=header_rows[0]
if(verbose):
sys.stderr.write("loaded matrix with dimensions ("+str(len(data))+","+str(cols)+")\n")
nrows=len(header_rows)
ncols=len(header_cols)
ndata_rows=data.shape[0]
ndata_cols=data.shape[1]
if(nrows != ndata_rows):
sys.exit('header/data mismatch!')
if(ncols != ndata_cols):
sys.exit('header/data mismatch!')
if (return_all or (hrows and hcols)):
return data,header_rows,header_cols
if(hrows):
return data,header_rows
if(hcols):
return data,header_cols
return data
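# Illustrative sketch (added for clarity, not part of the original script):
# load_matrix() accepts any iterator of tab-separated lines, so a tiny
# in-memory matrix is enough to show the header row/column handling. The
# header names are invented for illustration.
def _example_load_matrix():
    lines = iter(['2x2\tc1\tc2\n',
                  'r1\t1.0\t2.0\n',
                  'r2\t3.0\t4.0\n'])
    data, header_rows, header_cols = load_matrix(lines, hrows=1, hcols=1)
    assert header_rows == ['c1', 'c2']   # column headers from the first line
    assert header_cols == ['r1', 'r2']   # row headers from the first column
    assert data.shape == (2, 2)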
def check_options():
''' Checks the options to the program '''
# Create parser object
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add arguments
parser.add_argument('-i' , metavar='--inputMatrix' , help="*Input matrix file", dest="inputMatrix", type=str, default="")
parser.add_argument('-n' , metavar='--nclust' , help="number of clusters", dest="nclust", type=int, default=1)
# Parse command line with parse_args and store it in an object
args = parser.parse_args()
return args
if __name__=="__main__":
main()
| 28.012048
| 193
| 0.600968
|
0f21d20edf219f360834c8955d1b46514bc3a892
| 5,857
|
py
|
Python
|
gaphor/RAAML/fta/ftatoolbox.py
|
mrmonkington/gaphor
|
f0fcd4deb90d24b14723840a689fac901f645a43
|
[
"Apache-2.0"
] | 867
|
2018-01-09T00:19:09.000Z
|
2022-03-31T02:49:23.000Z
|
gaphor/RAAML/fta/ftatoolbox.py
|
mrmonkington/gaphor
|
f0fcd4deb90d24b14723840a689fac901f645a43
|
[
"Apache-2.0"
] | 790
|
2018-01-13T23:47:07.000Z
|
2022-03-31T16:04:27.000Z
|
gaphor/RAAML/fta/ftatoolbox.py
|
mrmonkington/gaphor
|
f0fcd4deb90d24b14723840a689fac901f645a43
|
[
"Apache-2.0"
] | 117
|
2018-01-09T02:24:49.000Z
|
2022-03-23T08:07:42.000Z
|
"""The definition for the FTA section of the RAAML toolbox."""
from gaphor.diagram.diagramtoolbox import ToolDef, ToolSection, namespace_config
from gaphor.diagram.diagramtools import new_item_factory
from gaphor.i18n import gettext
from gaphor.RAAML import diagramitems, raaml
from gaphor.UML import diagramitems as uml_items
fta = ToolSection(
gettext("Fault Tree Analysis"),
(
ToolDef(
"dependency",
gettext("Dependency"),
"gaphor-dependency-symbolic",
"<Shift>D",
new_item_factory(uml_items.DependencyItem),
),
ToolDef(
"and",
gettext("AND Gate"),
"gaphor-and-symbolic",
"a",
new_item_factory(
diagramitems.ANDItem, raaml.AND, config_func=namespace_config
),
),
ToolDef(
"or",
gettext("OR Gate"),
"gaphor-or-symbolic",
"o",
new_item_factory(
diagramitems.ORItem, raaml.OR, config_func=namespace_config
),
),
ToolDef(
"not",
gettext("NOT Gate"),
"gaphor-not-symbolic",
"n",
new_item_factory(
diagramitems.NOTItem, raaml.NOT, config_func=namespace_config
),
),
ToolDef(
"seq",
gettext("Sequence Enforcing (SEQ) Gate"),
"gaphor-seq-symbolic",
"<Shift>S",
new_item_factory(
diagramitems.SEQItem, raaml.SEQ, config_func=namespace_config
),
),
ToolDef(
"xor",
gettext("Exclusive OR Gate"),
"gaphor-xor-symbolic",
"x",
new_item_factory(
diagramitems.XORItem, raaml.XOR, config_func=namespace_config
),
),
ToolDef(
"majority_vote",
gettext("Majority Vote Gate"),
"gaphor-majority_vote-symbolic",
"m",
new_item_factory(
diagramitems.MajorityVoteItem,
raaml.MAJORITY_VOTE,
config_func=namespace_config,
),
),
ToolDef(
"inhibit",
gettext("Inhibit Gate"),
"gaphor-inhibit-symbolic",
"i",
new_item_factory(
diagramitems.InhibitItem, raaml.INHIBIT, config_func=namespace_config
),
),
ToolDef(
"transfer-in",
gettext("Transfer In"),
"gaphor-transfer-in-symbolic",
"t",
new_item_factory(
diagramitems.TransferInItem,
raaml.TransferIn,
config_func=namespace_config,
),
),
ToolDef(
"transfer-out",
gettext("Transfer Out"),
"gaphor-transfer-out-symbolic",
"<Shift>T",
new_item_factory(
diagramitems.TransferOutItem,
raaml.TransferOut,
config_func=namespace_config,
),
),
ToolDef(
"basic-event",
gettext("Basic Event"),
"gaphor-basic-event-symbolic",
"<Shift>B",
new_item_factory(
diagramitems.BasicEventItem,
raaml.BasicEvent,
config_func=namespace_config,
),
),
ToolDef(
"conditional-event",
gettext("Conditional Event"),
"gaphor-conditional-event-symbolic",
"c",
new_item_factory(
diagramitems.ConditionalEventItem,
raaml.ConditionalEvent,
config_func=namespace_config,
),
),
ToolDef(
"undeveloped-event",
gettext("Undeveloped Event"),
"gaphor-undeveloped-event-symbolic",
"<Shift>U",
new_item_factory(
diagramitems.UndevelopedEventItem,
raaml.Undeveloped,
config_func=namespace_config,
),
),
ToolDef(
"dormant-event",
gettext("Dormant Event"),
"gaphor-dormant-event-symbolic",
"d",
new_item_factory(
diagramitems.DormantEventItem,
raaml.DormantEvent,
config_func=namespace_config,
),
),
ToolDef(
"house-event",
gettext("House Event"),
"gaphor-house-event-symbolic",
"h",
new_item_factory(
diagramitems.HouseEventItem,
raaml.HouseEvent,
config_func=namespace_config,
),
),
ToolDef(
"zero-event",
gettext("Zero Event"),
"gaphor-zero-event-symbolic",
"z",
new_item_factory(
diagramitems.ZeroEventItem,
raaml.ZeroEvent,
config_func=namespace_config,
),
),
ToolDef(
"top-event",
gettext("Top Event"),
"gaphor-top-event-symbolic",
"p",
new_item_factory(
diagramitems.TopEventItem,
raaml.TopEvent,
config_func=namespace_config,
),
),
ToolDef(
"intermediate-event",
gettext("Intermediate Event"),
"gaphor-intermediate-event-symbolic",
"<Shift>I",
new_item_factory(
diagramitems.IntermediateEventItem,
raaml.IntermediateEvent,
config_func=namespace_config,
),
),
),
)
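# Illustrative sketch (added for clarity, not part of the original module):
# every entry above follows the same pattern, so adding a further FTA tool
# amounts to constructing one more ToolDef. The "example-gate" id, label and
# shortcut below are invented, and the AND item/icon are reused for the sketch.
def _example_tool_def() -> ToolDef:
    return ToolDef(
        "example-gate",
        gettext("Example Gate"),
        "gaphor-and-symbolic",
        "<Shift>E",
        new_item_factory(
            diagramitems.ANDItem, raaml.AND, config_func=namespace_config
        ),
    )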
| 29.882653
| 85
| 0.483695
|
8a9a21f081f1b210d1c7ce0e3e41eb1ab0a27065
| 20,162
|
py
|
Python
|
tests/sc/test_organizations.py
|
39biradar/pyTenable
|
a055140bc864bb950fd1053ab598ff2da12cf408
|
[
"MIT"
] | null | null | null |
tests/sc/test_organizations.py
|
39biradar/pyTenable
|
a055140bc864bb950fd1053ab598ff2da12cf408
|
[
"MIT"
] | null | null | null |
tests/sc/test_organizations.py
|
39biradar/pyTenable
|
a055140bc864bb950fd1053ab598ff2da12cf408
|
[
"MIT"
] | null | null | null |
import pytest
from ..checker import check
from tenable.errors import APIError, UnexpectedValueError
from tests.pytenable_log_handler import log_exception
def test_organizations_constructor_name_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(name=1)
def test_organizations_constructor_description_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(description=1)
def test_organizations_constructor_address_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(address=1)
def test_organizations_constructor_city_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(city=1)
def test_organizations_constructor_state_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(state=1)
def test_organizations_constructor_country_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(country=1)
def test_organizations_constructor_phone_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(phone=1)
def test_organizations_constructor_lce_ids_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(lce_ids=1)
def test_organizations_constructor_lce_ids_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(lce_ids=['one', ])
def test_organizations_constructor_zone_selection_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(zone_selection=1)
def test_organizations_constructor_zone_selection_unexpectedvalueerror(sc):
with pytest.raises(UnexpectedValueError):
sc.organizations._constructor(zone_selection='something')
def test_organizations_constructor_restricted_ips_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(restricted_ips=1)
def test_organizations_constructor_restricted_ips_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(restricted_ips=[1])
def test_organizations_constructor_repos_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(repos=1)
def test_organizations_constructor_repos_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(repos=['one'])
def test_organizations_constructor_pub_sites_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(pub_sites=1)
def test_organizations_constructor_pub_sites_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(pub_sites=['one'])
def test_organizations_constructor_ldap_ids_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(ldap_ids=1)
def test_organizations_constructor_ldap_ids_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(ldap_ids=['one'])
def test_organizations_constructor_nessus_managers_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(nessus_managers=1)
def test_organizations_constructor_nessus_managers_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(nessus_managers=['one'])
def test_organizations_constructor_info_links_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(info_links=1)
def test_organizations_constructor_info_links_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(info_links=[1])
def test_organizations_constructor_info_links_item_name_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(info_links=[(1, 'http://site.com/%IP%')])
def test_organizations_constructor_info_links_item_link_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(info_links=[('name', 1)])
def test_organizations_constructor_vuln_score_low_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(vuln_score_low='one')
def test_organizations_constructor_vuln_score_medium_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(vuln_score_medium='one')
def test_organizations_constructor_vuln_score_high_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(vuln_score_high='one')
def test_organizations_constructor_vuln_score_critical_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations._constructor(vuln_score_critical='one')
def test_organizations_constructor_success(sc):
organization = sc.organizations._constructor(
name='name',
description='description',
address='123 main street',
city='anytown',
state='IL',
country='USA',
phone='999.888.7766',
lce_ids=[1, 2, 3],
zone_selection='auto_only',
restricted_ips=['127.0.0.1', '127.0.0.0/8'],
repos=[1, 2, 3],
pub_sites=[1, 2, 3],
ldap_ids=[1, 2, 3],
nessus_managers=[1, 2, 3],
info_links=[('link', 'http://url/%IP%')],
vuln_score_low=1,
vuln_score_medium=5,
vuln_score_high=10,
vuln_score_critical=40,
)
assert organization == {
'name': 'name',
'description': 'description',
'address': '123 main street',
'city': 'anytown',
'state': 'IL',
'country': 'USA',
'phone': '999.888.7766',
'lces': [{'id': 1}, {'id': 2}, {'id': 3}],
'zoneSelection': 'auto_only',
'restrictedIPs': '127.0.0.1,127.0.0.0/8',
'repositories': [{'id': 1}, {'id': 2}, {'id': 3}],
'pubSites': [{'id': 1}, {'id': 2}, {'id': 3}],
'ldaps': [{'id': 1}, {'id': 2}, {'id': 3}],
'nessusManagers': [{'id': 1}, {'id': 2}, {'id': 3}],
'ipInfoLinks': [{'name': 'link', 'link': 'http://url/%IP%'}],
'vulnScoreLow': 1,
'vulnScoreMedium': 5,
'vulnScoreHigh': 10,
'vulnScoreCritical': 40
}
@pytest.fixture
def org(request, admin, vcr):
with vcr.use_cassette('test_organizations_create_success'):
organization = admin.organizations.create('New Org')
def teardown():
try:
with vcr.use_cassette('test_organizations_delete_success'):
admin.organizations.delete(int(organization['id']))
except APIError as error:
log_exception(error)
request.addfinalizer(teardown)
return organization
@pytest.mark.vcr()
def test_organizations_create_success(admin, org):
assert isinstance(org, dict)
check(org, 'id', str)
check(org, 'name', str)
check(org, 'description', str)
check(org, 'email', str)
check(org, 'address', str)
check(org, 'city', str)
check(org, 'state', str)
check(org, 'country', str)
check(org, 'phone', str)
check(org, 'fax', str)
check(org, 'ipInfoLinks', list)
for i in org['ipInfoLinks']:
check(i, 'name', str)
check(i, 'link', str)
check(org, 'zoneSelection', str)
check(org, 'restrictedIPs', str)
check(org, 'vulnScoreLow', str)
check(org, 'vulnScoreMedium', str)
check(org, 'vulnScoreHigh', str)
check(org, 'vulnScoreCritical', str)
check(org, 'createdTime', str)
check(org, 'modifiedTime', str)
check(org, 'userCount', str)
check(org, 'lces', list)
for lces in org['lces']:
check(lces, 'id', str)
check(lces, 'name', str)
check(lces, 'description', str)
check(org, 'repositories', list)
for repository in org['repositories']:
check(repository, 'id', str)
check(repository, 'name', str)
check(repository, 'description', str)
check(repository, 'type', str)
check(repository, 'dataFormat', str)
check(repository, 'groupAssign', str)
check(org, 'zones', list)
for zone in org['zones']:
check(zone, 'id', str)
check(zone, 'name', str)
check(zone, 'description', str)
check(org, 'ldaps', list)
for ldaps in org['ldaps']:
check(ldaps, 'id', str)
check(ldaps, 'name', str)
check(ldaps, 'description', str)
check(org, 'nessusManagers', list)
for manager in org['nessusManagers']:
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
check(org, 'pubSites', list)
for pub_sites in org['pubSites']:
check(pub_sites, 'id', str)
check(pub_sites, 'name', str)
check(pub_sites, 'description', str)
@pytest.mark.vcr()
def test_organizations_delete_success(admin, org):
admin.organizations.delete(int(org['id']))
@pytest.mark.vcr()
def test_organizations_list_success(admin):
organizations = admin.organizations.list(fields=['id', 'name', 'description'])
assert isinstance(organizations, list)
for organization in organizations:
check(organization, 'id', str)
check(organization, 'name', str)
check(organization, 'description', str)
@pytest.mark.vcr()
def test_organizations_managers_list_success(admin):
managers = admin.organizations.managers_list(org_id=1, fields=['id', 'name', 'description'])
assert isinstance(managers, list)
for manager in managers:
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
@pytest.mark.vcr()
def test_organizations_manager_details_success(admin):
manager = admin.organizations.manager_details(org_id=1, user_id=1, fields=['id', 'name', 'description'])
assert isinstance(manager, dict)
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
@pytest.mark.vcr()
def test_organizations_manager_create_edit_delete_success(admin):
manager = admin.organizations.manager_create(org_id=1,
username='username',
password='password',
role=1)
assert isinstance(manager, dict)
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
manager = admin.organizations.manager_edit(user_id=int(manager['id']),
org_id=1,
name='new mgr name')
assert isinstance(manager, dict)
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
admin.organizations.manager_delete(org_id=1, user_id=1, migrate_to=1)
@pytest.mark.vcr()
def test_organizations_details_success(admin, org):
organization = admin.organizations.details(int(org['id']))
assert isinstance(organization, dict)
check(organization, 'id', str)
check(organization, 'name', str)
check(organization, 'description', str)
check(organization, 'email', str)
check(organization, 'address', str)
check(organization, 'city', str)
check(organization, 'state', str)
check(organization, 'country', str)
check(organization, 'phone', str)
check(organization, 'fax', str)
check(organization, 'ipInfoLinks', list)
for ip_info in organization['ipInfoLinks']:
check(ip_info, 'name', str)
check(ip_info, 'link', str)
check(organization, 'zoneSelection', str)
check(organization, 'restrictedIPs', str)
check(organization, 'vulnScoreLow', str)
check(organization, 'vulnScoreMedium', str)
check(organization, 'vulnScoreHigh', str)
check(organization, 'vulnScoreCritical', str)
check(organization, 'createdTime', str)
check(organization, 'modifiedTime', str)
check(organization, 'userCount', str)
check(organization, 'lces', list)
for lces in organization['lces']:
check(lces, 'id', str)
check(lces, 'name', str)
check(lces, 'description', str)
check(organization, 'repositories', list)
for repository in organization['repositories']:
check(repository, 'id', str)
check(repository, 'name', str)
check(repository, 'description', str)
check(repository, 'type', str)
check(repository, 'dataFormat', str)
check(repository, 'groupAssign', str)
check(organization, 'zones', list)
for zone in organization['zones']:
check(zone, 'id', str)
check(zone, 'name', str)
check(zone, 'description', str)
check(organization, 'ldaps', list)
for ldap in organization['ldaps']:
check(ldap, 'id', str)
check(ldap, 'name', str)
check(ldap, 'description', str)
check(organization, 'nessusManagers', list)
for manager in organization['nessusManagers']:
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
check(organization, 'pubSites', list)
for pub_site in organization['pubSites']:
check(pub_site, 'id', str)
check(pub_site, 'name', str)
check(pub_site, 'description', str)
@pytest.mark.vcr()
def test_organizations_details_success_for_fields(admin, org):
organization = admin.organizations.details(int(org['id']), fields=['id', 'name', 'description'])
assert isinstance(organization, dict)
check(organization, 'id', str)
check(organization, 'name', str)
check(organization, 'description', str)
@pytest.mark.vcr()
def test_organizations_edit_success(admin, org):
organization = admin.organizations.edit(int(org['id']), name='new org name')
assert isinstance(organization, dict)
check(organization, 'id', str)
check(organization, 'name', str)
check(organization, 'description', str)
check(organization, 'email', str)
check(organization, 'address', str)
check(organization, 'city', str)
check(organization, 'state', str)
check(organization, 'country', str)
check(organization, 'phone', str)
check(organization, 'fax', str)
check(organization, 'ipInfoLinks', list)
for ip_info in organization['ipInfoLinks']:
check(ip_info, 'name', str)
check(ip_info, 'link', str)
check(organization, 'zoneSelection', str)
check(organization, 'restrictedIPs', str)
check(organization, 'vulnScoreLow', str)
check(organization, 'vulnScoreMedium', str)
check(organization, 'vulnScoreHigh', str)
check(organization, 'vulnScoreCritical', str)
check(organization, 'createdTime', str)
check(organization, 'modifiedTime', str)
check(organization, 'userCount', str)
check(organization, 'lces', list)
for lces in organization['lces']:
check(lces, 'id', str)
check(lces, 'name', str)
check(lces, 'description', str)
check(organization, 'repositories', list)
for repository in organization['repositories']:
check(repository, 'id', str)
check(repository, 'name', str)
check(repository, 'description', str)
check(repository, 'type', str)
check(repository, 'dataFormat', str)
check(repository, 'groupAssign', str)
check(organization, 'zones', list)
for zone in organization['zones']:
check(zone, 'id', str)
check(zone, 'name', str)
check(zone, 'description', str)
check(organization, 'ldaps', list)
for ldap in organization['ldaps']:
check(ldap, 'id', str)
check(ldap, 'name', str)
check(ldap, 'description', str)
check(organization, 'nessusManagers', list)
for manager in organization['nessusManagers']:
check(manager, 'id', str)
check(manager, 'name', str)
check(manager, 'description', str)
check(organization, 'pubSites', list)
for pub_site in organization['pubSites']:
check(pub_site, 'id', str)
check(pub_site, 'name', str)
check(pub_site, 'description', str)
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_id_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.accept_risk_rules('one')
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_repos_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.accept_risk_rules(1, repos=1)
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_repos_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.accept_risk_rules(1, repos=['one'])
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_plugin_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.accept_risk_rules(1, plugin='one')
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_port_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.accept_risk_rules(1, port='one')
@pytest.mark.vcr()
def test_organizations_accept_risk_rules_success(admin, org):
rules = admin.organizations.accept_risk_rules(int(org['id']))
assert isinstance(rules, list)
for rule in rules:
check(rule, 'id', str)
check(rule, 'hostType', str)
check(rule, 'hostValue', str)
check(rule, 'port', str)
check(rule, 'protocol', str)
check(rule, 'expires', str)
check(rule, 'status', str)
check(rule, 'repository', dict)
check(rule['repository'], 'id', str)
check(rule['repository'], 'name', str)
check(rule['repository'], 'description', str)
check(rule, 'organization', dict)
check(rule['organization'], 'id', str)
check(rule['organization'], 'name', str)
check(rule['organization'], 'description', str)
check(rule, 'user', dict)
check(rule['user'], 'id', str)
check(rule['user'], 'username', str)
check(rule['user'], 'firstname', str)
check(rule['user'], 'lastname', str)
check(rule, 'plugin', dict)
check(rule['plugin'], 'id', str)
check(rule['plugin'], 'name', str)
check(rule['plugin'], 'description', str)
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_id_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.recast_risk_rules('one')
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_repos_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.recast_risk_rules(1, repos=1)
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_repos_item_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.recast_risk_rules(1, repos=['one'])
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_plugin_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.recast_risk_rules(1, plugin='one')
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_port_typeerror(sc):
with pytest.raises(TypeError):
sc.organizations.recast_risk_rules(1, port='one')
@pytest.mark.vcr()
def test_organizations_recast_risk_rules_success(admin, org):
rules = admin.organizations.recast_risk_rules(int(org['id']))
assert isinstance(rules, list)
for rule in rules:
check(rule, 'id', str)
check(rule, 'hostType', str)
check(rule, 'hostValue', str)
check(rule, 'port', str)
check(rule, 'protocol', str)
check(rule, 'status', str)
check(rule, 'repository', dict)
check(rule['repository'], 'id', str)
check(rule['repository'], 'name', str)
check(rule['repository'], 'description', str)
check(rule, 'organization', dict)
check(rule['organization'], 'id', str)
check(rule['organization'], 'name', str)
check(rule['organization'], 'description', str)
check(rule, 'user', dict)
check(rule['user'], 'id', str)
check(rule['user'], 'username', str)
check(rule['user'], 'firstname', str)
check(rule['user'], 'lastname', str)
check(rule, 'plugin', dict)
check(rule['plugin'], 'id', str)
check(rule['plugin'], 'name', str)
check(rule['plugin'], 'description', str)
| 34.58319
| 108
| 0.664865
|
ef931fe95d2677068e6c6de9e34da199d71dee70
| 3,606
|
py
|
Python
|
mturk/models.py
|
Kyeongan/crowdsource-platform
|
af34363158ff30ebfdade4a543648bf26a3c9698
|
[
"MIT"
] | 138
|
2015-04-17T20:07:12.000Z
|
2017-05-03T17:58:47.000Z
|
mturk/models.py
|
cescgie/crowdi
|
c16ab625f27915919e21f7eec93c45af551d9022
|
[
"MIT"
] | 657
|
2015-04-19T04:54:51.000Z
|
2017-06-26T18:07:42.000Z
|
mturk/models.py
|
cescgie/crowdi
|
c16ab625f27915919e21f7eec93c45af551d9022
|
[
"MIT"
] | 311
|
2015-04-16T19:20:55.000Z
|
2017-06-13T05:32:01.000Z
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField, ArrayField
from django.db import models
from crowdsourcing.models import Task, TaskWorker
class Timed(models.Model):
created_at = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
abstract = True
class MTurkHIT(Timed):
STATUS_IN_PROGRESS = 1
STATUS_COMPLETED = 2
STATUS_EXPIRED = 3
STATUS_DELETED = 4
STATUS = (
(STATUS_IN_PROGRESS, 'In Progress'),
(STATUS_COMPLETED, 'Completed'),
(STATUS_EXPIRED, 'Expired'),
(STATUS_DELETED, 'Deleted')
)
hit_id = models.TextField(max_length=256)
hit_type = models.ForeignKey('MTurkHITType')
hit_group_id = models.TextField(max_length=128, default='')
num_assignments = models.IntegerField(default=1)
task = models.OneToOneField(Task, related_name='mturk_hit', on_delete=models.CASCADE)
status = models.IntegerField(default=STATUS_IN_PROGRESS, choices=STATUS)
class MTurkAssignment(Timed):
hit = models.ForeignKey(MTurkHIT, related_name='mturk_assignments')
assignment_id = models.TextField(max_length=128)
status = models.IntegerField(choices=TaskWorker.STATUS, default=TaskWorker.STATUS_IN_PROGRESS)
task_worker = models.ForeignKey(TaskWorker, related_name='mturk_assignments', on_delete=models.CASCADE, null=True)
class MTurkNotification(Timed):
data = JSONField(null=True)
class MTurkAccount(Timed):
user = models.OneToOneField(User, related_name='mturk_account')
client_id = models.CharField(max_length=64, null=True, blank=True)
client_secret = models.CharField(max_length=128, null=True, blank=True)
description = models.CharField(max_length=128, null=True, blank=True)
is_valid = models.BooleanField(default=True)
class MTurkHITType(Timed):
string_id = models.CharField(max_length=64, null=True)
name = models.CharField(max_length=128)
description = models.CharField(max_length=512, blank=True, null=True)
price = models.DecimalField(decimal_places=2, max_digits=8)
keywords = ArrayField(models.CharField(max_length=128), null=True, default=[])
duration = models.DurationField(null=True)
qualifications_mask = models.IntegerField(default=0)
boomerang_qualification = models.ForeignKey('MTurkQualification', null=True)
boomerang_threshold = models.IntegerField()
owner = models.ForeignKey(User, related_name='mturk_hit_types')
class MTurkQualification(Timed):
name = models.CharField(max_length=64)
description = models.CharField(max_length=512)
status = models.CharField(max_length=16, default='Active')
keywords = ArrayField(models.CharField(max_length=128), null=True, default=[])
auto_granted = models.BooleanField(default=False)
auto_granted_value = models.IntegerField(default=1, null=True)
type_id = models.CharField(max_length=128)
flag = models.IntegerField()
owner = models.ForeignKey(User, related_name='mturk_qualifications')
lower_bound = models.IntegerField(default=100)
upper_bound = models.IntegerField(default=300)
is_blacklist = models.BooleanField(default=False)
class Meta:
unique_together = ('owner', 'flag', 'name')
class MTurkWorkerQualification(Timed):
qualification = models.ForeignKey(MTurkQualification)
worker = models.CharField(max_length=32)
score = models.IntegerField(default=1)
overwritten = models.BooleanField(default=False)
| 38.361702
| 118
| 0.751248
|
a9e8d5bdf36e36a3ba9fd8138e7168125ba932d9
| 23,052
|
py
|
Python
|
bin/cpu_monitor.py
|
MarbleInc/ros-system-monitor
|
39d8ca3368c34483e71eb69f38452050d58b865c
|
[
"BSD-3-Clause"
] | null | null | null |
bin/cpu_monitor.py
|
MarbleInc/ros-system-monitor
|
39d8ca3368c34483e71eb69f38452050d58b865c
|
[
"BSD-3-Clause"
] | 2
|
2019-01-16T19:03:34.000Z
|
2019-09-30T18:57:39.000Z
|
bin/cpu_monitor.py
|
MarbleInc/ros-system-monitor
|
39d8ca3368c34483e71eb69f38452050d58b865c
|
[
"BSD-3-Clause"
] | 1
|
2019-01-11T21:49:27.000Z
|
2019-01-11T21:49:27.000Z
|
#!/usr/bin/env python
############################################################################
# Copyright (C) 2009, Willow Garage, Inc. #
# Copyright (C) 2013 by Ralf Kaestner #
# ralf.kaestner@gmail.com #
# Copyright (C) 2013 by Jerome Maye #
# jerome.maye@mavt.ethz.ch #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# 3. The name of the copyright holders may be used to endorse or #
# promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
from __future__ import with_statement
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from marble_structs.diagnostics import Status
from mbot_diagnostics import DiagnosticUpdater, GenericDiagnostic
cpu_load_warn = 0.9
cpu_load_error = 1.1
cpu_load1_warn = 0.9
cpu_load5_warn = 0.8
cpu_temp_warn = 85.0
cpu_temp_error = 90.0
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
def update_status_stale(stat, last_update_time):
time_since_update = rospy.get_time() - last_update_time
stale_status = 'OK'
if time_since_update > 20 and time_since_update <= 35:
stale_status = 'Lagging'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.WARN)
if time_since_update > 35:
stale_status = 'Stale'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.ERROR)
stat.values.pop(0)
stat.values.pop(0)
stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(time_since_update)))
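# Illustrative sketch (added for clarity, not part of the original node): the
# helper above only rewrites the first two KeyValues and escalates the level,
# so it can be exercised with a hand-built status. It assumes a ROS clock is
# available via rospy.get_time(), e.g. inside an initialized node.
def _example_update_status_stale():
    stat = DiagnosticStatus(name='Example CPU Stat', level=DiagnosticStatus.OK,
                            message='OK')
    stat.values = [KeyValue(key='Update Status', value='OK'),
                   KeyValue(key='Time Since Update', value=str(0))]
    # Pretend the last update happened 40s ago: >35s is reported as 'Stale'.
    update_status_stale(stat, rospy.get_time() - 40)
    assert stat.level == DiagnosticStatus.ERROR
    assert stat.values[0].value == 'Stale'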
class CPUMonitor():
def __init__(self, hostname, namespace, diag_hostname):
self._diag_updater = DiagnosticUpdater(
name=namespace + 'cpu',
display_name=diag_hostname + ' CPU',
)
self._namespace = namespace
self._mutex = threading.Lock()
self._check_core_temps = rospy.get_param('~check_core_temps', True)
self._cpu_load_warn = rospy.get_param('~cpu_load_warn', cpu_load_warn)
self._cpu_load_error = rospy.get_param('~cpu_load_error', cpu_load_error)
self._cpu_load1_warn = rospy.get_param('~cpu_load1_warn', cpu_load1_warn)
self._cpu_load5_warn = rospy.get_param('~cpu_load5_warn', cpu_load5_warn)
self._cpu_temp_warn = rospy.get_param('~cpu_temp_warn', cpu_temp_warn)
self._cpu_temp_error = rospy.get_param('~cpu_temp_error', cpu_temp_error)
self._num_cores = rospy.get_param('~num_cores', 0)
self._temps_timer = None
self._usage_timer = None
# Get temp_input files
self._temp_vals = self.get_core_temp_names()
# CPU stats
self._temp_stat = DiagnosticStatus()
self._temp_stat.name = 'CPU Temperature'
self._temp_stat.level = 1
self._temp_stat.hardware_id = hostname
self._temp_stat.message = 'No Data'
self._temp_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
self._temp_diagnostic = GenericDiagnostic('/temp')
self._temp_diagnostic.add_to_updater(self._diag_updater)
self._usage_stat = DiagnosticStatus()
self._usage_stat.name = 'CPU Usage'
self._usage_stat.level = 1
self._usage_stat.hardware_id = hostname
self._usage_stat.message = 'No Data'
self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
self._usage_diagnostic = GenericDiagnostic('/usage')
self._usage_diagnostic.add_to_updater(self._diag_updater)
self._last_temp_time = 0
self._last_usage_time = 0
self._usage_old = 0
self._has_warned_mpstat = False
self._has_error_core_count = False
# Start checking everything
self.check_temps()
self.check_usage()
# Restart temperature checking
def _restart_temp_check(self):
rospy.logerr('Restarting temperature check thread in cpu_monitor. This should not happen')
try:
with self._mutex:
if self._temps_timer:
self._temps_timer.cancel()
self.check_temps()
except Exception, e:
rospy.logerr('Unable to restart temp thread. Error: %s' % traceback.format_exc())
## Must have the lock to cancel everything
def cancel_timers(self):
if self._temps_timer:
self._temps_timer.cancel()
if self._usage_timer:
self._usage_timer.cancel()
##\brief Check CPU core temps
##
## Use 'find /sys -name temp1_input' to find cores
## Read from every core, divide by 1000
def check_core_temps(self, sys_temp_strings):
diag_vals = []
diag_level = 0
diag_msgs = []
for index, temp_str in enumerate(sys_temp_strings):
if len(temp_str) < 5:
continue
cmd = 'cat %s' % temp_str
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
diag_level = DiagnosticStatus.ERROR
diag_msg = [ 'Core Temperature Error' ]
diag_vals = [ KeyValue(key = 'Core Temperature Error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return diag_vals, diag_msgs, diag_level
tmp = stdout.strip()
tmp_label = 'Core %d Temperature' % index
cmd = 'cat %s' % temp_str.replace('_input', '_label')
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode == 0 and stdout:
tmp_label = stdout.strip() + ' Temperature'
else:
continue
if unicode(tmp).isnumeric():
temp = float(tmp) / 1000
diag_vals.append(KeyValue(key=tmp_label, value=str(temp)+"DegC"))
                if temp >= self._cpu_temp_error:
                    diag_level = max(diag_level, DiagnosticStatus.ERROR)
                    diag_msgs.append('Hot')
                elif temp >= self._cpu_temp_warn:
                    diag_level = max(diag_level, DiagnosticStatus.WARN)
                    diag_msgs.append('Warm')
else:
diag_level = max(diag_level, DiagnosticStatus.ERROR) # Error if not numeric value
diag_vals.append(KeyValue(key = 'Core %s Temperature' % index, value = tmp))
return diag_vals, diag_msgs, diag_level
## Checks clock speed from reading from CPU info
def check_clock_speed(self):
vals = []
msgs = []
lvl = DiagnosticStatus.OK
try:
p = subprocess.Popen('cat /proc/cpuinfo | grep MHz',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
lvl = DiagnosticStatus.ERROR
msgs = [ 'Clock speed error' ]
vals = [ KeyValue(key = 'Clock speed error', value = stderr),
KeyValue(key = 'Output', value = stdout) ]
return (vals, msgs, lvl)
for index, ln in enumerate(stdout.split('\n')):
words = ln.split(':')
if len(words) < 2:
continue
speed = words[1].strip().split('.')[0] # Conversion to float doesn't work with decimal
vals.append(KeyValue(key = 'Core %d Clock Speed' % index, value = speed+"MHz"))
except Exception, e:
rospy.logerr(traceback.format_exc())
lvl = DiagnosticStatus.ERROR
msgs.append('Exception')
vals.append(KeyValue(key = 'Exception', value = traceback.format_exc()))
return vals, msgs, lvl
# Add msgs output, too
##\brief Uses 'uptime' to see load average
def check_uptime(self):
level = DiagnosticStatus.OK
vals = []
load_dict = { 0: 'OK', 1: 'High load', 2: 'Very high load' }
try:
p = subprocess.Popen('uptime', stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
vals.append(KeyValue(key = 'uptime Failed', value = stderr))
return DiagnosticStatus.ERROR, vals
upvals = stdout.split()
load1 = float(upvals[-3].rstrip(','))/self._num_cores
load5 = float(upvals[-2].rstrip(','))/self._num_cores
load15 = float(upvals[-1])/self._num_cores
# Give warning if we go over load limit
if load1 > self._cpu_load1_warn or load5 > self._cpu_load5_warn:
level = DiagnosticStatus.WARN
vals.append(KeyValue(key = 'Load Average Status', value = load_dict[level]))
vals.append(KeyValue(key = 'Load Average (1min)', value = str(load1*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (5min)', value = str(load5*1e2)+"%"))
vals.append(KeyValue(key = 'Load Average (15min)', value = str(load15*1e2)+"%"))
except Exception, e:
rospy.logerr(traceback.format_exc())
level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Load Average Status', value = traceback.format_exc()))
diag_msg = load_dict[level]
return level, diag_msg, vals
##\brief Use mpstat to find CPU usage
##
def check_mpstat(self):
vals = []
mp_level = DiagnosticStatus.OK
load_dict = { 0: 'OK', 1: 'High load', 2: 'Error' }
try:
p = subprocess.Popen('mpstat -P ALL 1 1',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
if not self._has_warned_mpstat:
rospy.logerr("mpstat failed to run for cpu_monitor. Return code %d.", retcode)
self._has_warned_mpstat = True
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = '\"mpstat\" Call Error', value = str(retcode)))
return mp_level, 'Unable to Check CPU Usage', vals
# Check which column '%idle' is, #4539
# mpstat output changed between 8.06 and 8.1
rows = stdout.split('\n')
col_names = rows[2].split()
idle_col = -1 if (len(col_names) > 2 and col_names[-1] == '%idle') else -2
num_cores = 0
cores_loaded = 0
for index, row in enumerate(stdout.split('\n')):
if index < 3:
continue
# Skip row containing 'all' data
if row.find('all') > -1:
continue
lst = row.split()
if len(lst) < 8:
continue
## Ignore 'Average: ...' data
if lst[0].startswith('Average'):
continue
cpu_name = '%d' % (num_cores)
idle = lst[idle_col]
user = lst[3]
nice = lst[4]
system = lst[5]
core_level = 0
usage = (float(user)+float(nice))*1e-2
if usage > 10.0: # wrong reading, use old reading instead
rospy.logwarn('Read CPU usage of %f percent. Reverting to previous reading of %f percent'%(usage, self._usage_old))
usage = self._usage_old
self._usage_old = usage
                    if usage >= self._cpu_load_error:
                        cores_loaded += 1
                        core_level = DiagnosticStatus.ERROR
                    elif usage >= self._cpu_load_warn:
                        cores_loaded += 1
                        core_level = DiagnosticStatus.WARN
vals.append(KeyValue(key = 'Core %s Status' % cpu_name, value = load_dict[core_level]))
vals.append(KeyValue(key = 'Core %s User' % cpu_name, value = user+"%"))
vals.append(KeyValue(key = 'Core %s Nice' % cpu_name, value = nice+"%"))
vals.append(KeyValue(key = 'Core %s System' % cpu_name, value = system+"%"))
vals.append(KeyValue(key = 'Core %s Idle' % cpu_name, value = idle+"%"))
num_cores += 1
# Warn for high load only if we have <= 2 cores that aren't loaded
if num_cores - cores_loaded <= 2 and num_cores > 2:
mp_level = DiagnosticStatus.WARN
if not self._num_cores:
self._num_cores = num_cores
# Check the number of cores if self._num_cores > 0, #4850
if self._num_cores != num_cores:
mp_level = DiagnosticStatus.ERROR
if not self._has_error_core_count:
rospy.logerr('Error checking number of cores. Expected %d, got %d. Computer may have not booted properly.',
self._num_cores, num_cores)
self._has_error_core_count = True
return DiagnosticStatus.ERROR, 'Incorrect number of CPU cores', vals
except Exception, e:
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'mpstat Exception', value = str(e)))
diag_msg = load_dict[mp_level]
return mp_level, diag_msg, vals
## Returns names for core temperature files
## Returns list of names, each name can be read like file
def get_core_temp_names(self):
temp_vals = []
try:
p = subprocess.Popen('find /sys/devices -name \'temp*_input\'',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
rospy.logerr('Error find core temp locations: %s' % stderr)
return []
for ln in stdout.split('\n'):
temp_vals.append(ln.strip())
return temp_vals
except:
rospy.logerr('Exception finding temp vals: %s' % traceback.format_exc())
return []
## Call every 10sec at minimum
def check_temps(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = str(0) ) ]
diag_msgs = []
diag_level = 0
if self._check_core_temps:
core_vals, core_msgs, core_level = self.check_core_temps(self._temp_vals)
diag_vals.extend(core_vals)
diag_msgs.extend(core_msgs)
diag_level = max(diag_level, core_level)
diag_log = set(diag_msgs)
if len(diag_log) > 0:
message = ', '.join(diag_log)
else:
message = stat_dict[diag_level]
with self._mutex:
self._last_temp_time = rospy.get_time()
self._temp_stat.level = diag_level
self._temp_stat.message = message
self._temp_stat.values = diag_vals
if not rospy.is_shutdown():
self._temps_timer = threading.Timer(5.0, self.check_temps)
self._temps_timer.start()
else:
self.cancel_timers()
def check_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_level = 0
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
                      KeyValue(key = 'Time Since Last Update', value = str(0) )]
diag_msgs = []
# Check clock speed
clock_vals, clock_msgs, clock_level = self.check_clock_speed()
diag_vals.extend(clock_vals)
diag_msgs.extend(clock_msgs)
diag_level = max(diag_level, clock_level)
# Check mpstat
mp_level, mp_msg, mp_vals = self.check_mpstat()
diag_vals.extend(mp_vals)
if mp_level > 0:
diag_msgs.append(mp_msg)
diag_level = max(diag_level, mp_level)
# Check uptime
uptime_level, up_msg, up_vals = self.check_uptime()
diag_vals.extend(up_vals)
if uptime_level > 0:
diag_msgs.append(up_msg)
diag_level = max(diag_level, uptime_level)
if diag_msgs and diag_level > 0:
usage_msg = ', '.join(set(diag_msgs))
else:
usage_msg = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.level = diag_level
self._usage_stat.values = diag_vals
self._usage_stat.message = usage_msg
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_usage)
self._usage_timer.start()
else:
self.cancel_timers()
def publish_stats(self):
with self._mutex:
# Update everything with last update times
update_status_stale(self._temp_stat, self._last_temp_time)
update_status_stale(self._usage_stat, self._last_usage_time)
# Convert from ROS diagnostics to mbot_diagnostics for publishing.
self._temp_diagnostic.set_status(
Status(self._temp_stat.level),
self._temp_stat.message,
)
for diag_val in self._temp_stat.values:
self._temp_diagnostic.set_metric(diag_val.key, diag_val.value)
self._usage_diagnostic.set_status(
Status(self._usage_stat.level),
self._usage_stat.message,
)
for diag_val in self._usage_stat.values:
self._usage_diagnostic.set_metric(diag_val.key, diag_val.value)
# Restart temperature checking if it goes stale, #4171
# Need to run this without mutex
if rospy.get_time() - self._last_temp_time > 90:
self._restart_temp_check()
if __name__ == '__main__':
hostname = socket.gethostname()
hostname = hostname.replace('-', '_')
import optparse
parser = optparse.OptionParser(usage="usage: cpu_monitor.py --diag-hostname=com-X")
parser.add_option("--diag-hostname", dest="diag_hostname",
help="Computer name in diagnostics output (ex: 'com-1')",
metavar="DIAG_HOSTNAME",
action="store", default = hostname)
options, args = parser.parse_args(rospy.myargv())
try:
rospy.init_node('cpu_monitor_%s' % hostname)
except rospy.exceptions.ROSInitException:
print >> sys.stderr, 'CPU monitor is unable to initialize node. Master may not be running.'
sys.exit(0)
namespace = rospy.get_namespace() or hostname
cpu_node = CPUMonitor(hostname, namespace, options.diag_hostname)
rate = rospy.Rate(1.0)
try:
while not rospy.is_shutdown():
rate.sleep()
cpu_node.publish_stats()
except KeyboardInterrupt:
pass
except Exception, e:
traceback.print_exc()
rospy.logerr(traceback.format_exc())
cpu_node.cancel_timers()
sys.exit(0)
| 39.744828
| 135
| 0.556394
|
8a0207c5553c2fa89c0006390261e3844cc2c2a8
| 4,964
|
py
|
Python
|
tf_agents/policies/q_policy.py
|
arryyao/agents
|
011351f234f0e73eb2dbcbc6d6c5796875ab7b31
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/q_policy.py
|
arryyao/agents
|
011351f234f0e73eb2dbcbc6d6c5796875ab7b31
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/q_policy.py
|
arryyao/agents
|
011351f234f0e73eb2dbcbc6d6c5796875ab7b31
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Policy for DQN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
import tensorflow as tf
from tf_agents.distributions import shifted_categorical
from tf_agents.policies import tf_policy
from tf_agents.trajectories import policy_step
@gin.configurable
class QPolicy(tf_policy.Base):
"""Class to build Q-Policies."""
def __init__(self,
time_step_spec,
action_spec,
q_network,
emit_log_probability=False,
name=None):
"""Builds a Q-Policy given a q_network.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
q_network: An instance of a `tf_agents.network.Network`,
callable via `network(observation, step_type) -> (output, final_state)`.
emit_log_probability: Whether to emit log-probs in info of `PolicyStep`.
name: The name of this policy. All variables in this module will fall
under that name. Defaults to the class name.
Raises:
ValueError: If `q_network.action_spec` exists and is not compatible with
`action_spec`.
NotImplementedError: If `action_spec` contains more than one
`BoundedTensorSpec`.
"""
network_action_spec = getattr(q_network, 'action_spec', None)
if network_action_spec is not None:
if not action_spec.is_compatible_with(network_action_spec):
raise ValueError(
'action_spec must be compatible with q_network.action_spec; '
'instead got action_spec=%s, q_network.action_spec=%s' % (
action_spec, network_action_spec))
flat_action_spec = tf.nest.flatten(action_spec)
if len(flat_action_spec) > 1:
raise NotImplementedError(
'action_spec can only contain a single BoundedTensorSpec.')
# We need to maintain the flat action spec for dtype, shape and range.
self._flat_action_spec = flat_action_spec[0]
self._q_network = q_network
super(QPolicy, self).__init__(
time_step_spec,
action_spec,
policy_state_spec=q_network.state_spec,
clip=False,
emit_log_probability=emit_log_probability,
name=name)
def _variables(self):
return self._q_network.variables
def _distribution(self, time_step, policy_state):
# In DQN, we always either take a uniformly random action, or the action
# with the highest Q-value. However, to support more complicated policies,
# we expose all Q-values as a categorical distribution with Q-values as
# logits, and apply the GreedyPolicy wrapper in dqn_agent.py to select the
# action with the highest Q-value.
q_values, policy_state = self._q_network(
time_step.observation, time_step.step_type, policy_state)
# TODO(b/122314058): Validate and enforce that sampling distributions
# created with the q_network logits generate the right action shapes. This
    # is currently patching the problem.
# If the action spec says each action should be shaped (1,), add another
# dimension so the final shape is (B, 1, A), where A is the number of
# actions. This will make Categorical emit events shaped (B, 1) rather than
# (B,). Using axis -2 to allow for (B, T, 1, A) shaped q_values.
if self._flat_action_spec.shape.ndims == 1:
q_values = tf.expand_dims(q_values, -2)
logits = q_values
mask_split_fn = self._q_network.mask_split_fn
if mask_split_fn:
_, mask = mask_split_fn(time_step.observation)
# Expand the mask as needed in the same way as q_values above.
if self._flat_action_spec.shape.ndims == 1:
mask = tf.expand_dims(mask, -2)
# Overwrite the logits for invalid actions to -inf.
neg_inf = tf.constant(-np.inf, dtype=logits.dtype)
logits = tf.compat.v2.where(tf.cast(mask, tf.bool), logits, neg_inf)
# TODO(kbanoop): Handle distributions over nests.
distribution = shifted_categorical.ShiftedCategorical(
logits=logits,
dtype=self._flat_action_spec.dtype,
shift=self._flat_action_spec.minimum)
distribution = tf.nest.pack_sequence_as(self._action_spec, [distribution])
return policy_step.PolicyStep(distribution, policy_state)
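# ---------------------------------------------------------------------------
# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original q_policy.py. It assumes the standard
# tf_agents QNetwork, spec, and GreedyPolicy helpers; constructor signatures
# may differ slightly between tf_agents releases.
def _example_q_policy_usage():
  """Builds a QPolicy and wraps it in GreedyPolicy to take argmax actions."""
  from tf_agents.networks import q_network
  from tf_agents.policies import greedy_policy
  from tf_agents.specs import tensor_spec
  from tf_agents.trajectories import time_step as ts
  observation_spec = tensor_spec.TensorSpec([4], tf.float32)
  action_spec = tensor_spec.BoundedTensorSpec([], tf.int32, minimum=0, maximum=1)
  q_net = q_network.QNetwork(observation_spec, action_spec)
  # QPolicy exposes the Q-values as a categorical distribution (see
  # _distribution above); GreedyPolicy then picks the highest-valued action.
  policy = QPolicy(ts.time_step_spec(observation_spec), action_spec,
                   q_network=q_net)
  greedy = greedy_policy.GreedyPolicy(policy)
  return greedy.action(ts.restart(tf.zeros([1, 4]), batch_size=1))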
| 39.396825
| 80
| 0.713135
|
fb7d5311487dbb4b64505992562e185aff671deb
| 2,804
|
py
|
Python
|
kornia/feature/__init__.py
|
lferraz/kornia
|
c30ef6149bd92054d482339a2b0cd18f8272f5f5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/feature/__init__.py
|
lferraz/kornia
|
c30ef6149bd92054d482339a2b0cd18f8272f5f5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/feature/__init__.py
|
lferraz/kornia
|
c30ef6149bd92054d482339a2b0cd18f8272f5f5
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-15T03:22:24.000Z
|
2021-05-15T03:22:24.000Z
|
from .responses import (CornerHarris,
CornerGFTT,
BlobHessian,
BlobDoG,
harris_response,
gftt_response,
hessian_response,
dog_response)
from .nms import (NonMaximaSuppression2d,
nms2d,
NonMaximaSuppression3d,
nms3d)
# Backward compatibility
from .nms import nms2d as non_maxima_suppression2d
from .nms import nms3d as non_maxima_suppression3d
from .laf import (extract_patches_from_pyramid,
extract_patches_simple,
normalize_laf,
denormalize_laf,
laf_to_boundary_points,
ellipse_to_laf,
make_upright,
scale_laf,
get_laf_scale,
get_laf_center,
get_laf_orientation,
set_laf_orientation,
raise_error_if_laf_is_not_valid,
laf_from_center_scale_ori,
laf_is_inside_image,
laf_to_three_points,
laf_from_three_points)
from .siftdesc import SIFTDescriptor
from .mkd import MKDDescriptor
from .hardnet import HardNet
from .tfeat import TFeat
from .sosnet import SOSNet
from .scale_space_detector import ScaleSpaceDetector, PassLAF
from .affine_shape import LAFAffineShapeEstimator, PatchAffineShapeEstimator, LAFAffNetShapeEstimator
from .orientation import LAFOrienter, PatchDominantGradientOrientation, OriNet
from .matching import match_nn, match_mnn, match_snn, match_smnn
__all__ = [
"nms2d",
"nms3d",
"non_maxima_suppression2d",
"non_maxima_suppression3d",
"harris_response",
"gftt_response",
"hessian_response",
"dog_response",
"NonMaximaSuppression2d",
"NonMaximaSuppression3d",
"CornerHarris",
"CornerGFTT",
"BlobHessian",
"BlobDoG",
"extract_patches_from_pyramid",
"extract_patches_simple",
"normalize_laf",
"denormalize_laf",
"laf_to_boundary_points",
"ellipse_to_laf",
"make_upright",
"get_laf_scale",
"get_laf_center",
"get_laf_orientation",
"set_laf_orientation",
"scale_laf",
"SIFTDescriptor",
"MKDDescriptor",
"HardNet",
"TFeat",
"OriNet",
"LAFAffNetShapeEstimator",
"PassLAF",
"ScaleSpaceDetector",
"LAFAffineShapeEstimator",
"PatchAffineShapeEstimator",
"LAFOrienter",
"PatchDominantGradientOrientation",
"raise_error_if_laf_is_not_valid",
"laf_is_inside_image",
"laf_from_center_scale_ori",
"laf_to_three_points",
"laf_from_three_points",
"match_nn",
"match_mnn",
"match_snn",
"match_smnn",
]
| 29.515789
| 101
| 0.625892
|
adbeb796d099684013032a39baa56849ac54a35c
| 8,354
|
py
|
Python
|
lib/jnpr/healthbot/swagger/models/rule_schema_field.py
|
minefuto/healthbot-py-client
|
bb81452c974456af44299aebf32a73abeda8a943
|
[
"Apache-2.0"
] | null | null | null |
lib/jnpr/healthbot/swagger/models/rule_schema_field.py
|
minefuto/healthbot-py-client
|
bb81452c974456af44299aebf32a73abeda8a943
|
[
"Apache-2.0"
] | null | null | null |
lib/jnpr/healthbot/swagger/models/rule_schema_field.py
|
minefuto/healthbot-py-client
|
bb81452c974456af44299aebf32a73abeda8a943
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaField(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'constant': 'RuleSchemaConstant',
'description': 'str',
'field_name': 'str',
'formula': 'RuleSchemaFormula',
'reference': 'RuleSchemaReference',
'sensor': 'list[RuleSchemaSensor]',
'type': 'str'
}
attribute_map = {
'constant': 'constant',
'description': 'description',
'field_name': 'field-name',
'formula': 'formula',
'reference': 'reference',
'sensor': 'sensor',
'type': 'type'
}
def __init__(self, constant=None, description=None, field_name=None, formula=None, reference=None, sensor=None, type=None): # noqa: E501
"""RuleSchemaField - a model defined in Swagger""" # noqa: E501
self._constant = None
self._description = None
self._field_name = None
self._formula = None
self._reference = None
self._sensor = None
self._type = None
self.discriminator = None
if constant is not None:
self.constant = constant
if description is not None:
self.description = description
self.field_name = field_name
if formula is not None:
self.formula = formula
if reference is not None:
self.reference = reference
if sensor is not None:
self.sensor = sensor
if type is not None:
self.type = type
@property
def constant(self):
"""Gets the constant of this RuleSchemaField. # noqa: E501
:return: The constant of this RuleSchemaField. # noqa: E501
:rtype: RuleSchemaConstant
"""
return self._constant
@constant.setter
def constant(self, constant):
"""Sets the constant of this RuleSchemaField.
:param constant: The constant of this RuleSchemaField. # noqa: E501
:type: RuleSchemaConstant
"""
self._constant = constant
@property
def description(self):
"""Gets the description of this RuleSchemaField. # noqa: E501
Description about this field # noqa: E501
:return: The description of this RuleSchemaField. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this RuleSchemaField.
Description about this field # noqa: E501
:param description: The description of this RuleSchemaField. # noqa: E501
:type: str
"""
self._description = description
@property
def field_name(self):
"""Gets the field_name of this RuleSchemaField. # noqa: E501
Name of the field. Should be of pattern [a-z][a-zA-Z0-9_-]* # noqa: E501
:return: The field_name of this RuleSchemaField. # noqa: E501
:rtype: str
"""
return self._field_name
@field_name.setter
def field_name(self, field_name):
"""Sets the field_name of this RuleSchemaField.
Name of the field. Should be of pattern [a-z][a-zA-Z0-9_-]* # noqa: E501
:param field_name: The field_name of this RuleSchemaField. # noqa: E501
:type: str
"""
if field_name is None:
raise ValueError("Invalid value for `field_name`, must not be `None`") # noqa: E501
if field_name is not None and len(field_name) > 64:
raise ValueError("Invalid value for `field_name`, length must be less than or equal to `64`") # noqa: E501
if field_name is not None and not re.search(r'^[a-z][a-zA-Z0-9_-]*$', field_name): # noqa: E501
raise ValueError(r"Invalid value for `field_name`, must be a follow pattern or equal to `/^[a-z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._field_name = field_name
@property
def formula(self):
"""Gets the formula of this RuleSchemaField. # noqa: E501
:return: The formula of this RuleSchemaField. # noqa: E501
:rtype: RuleSchemaFormula
"""
return self._formula
@formula.setter
def formula(self, formula):
"""Sets the formula of this RuleSchemaField.
:param formula: The formula of this RuleSchemaField. # noqa: E501
:type: RuleSchemaFormula
"""
self._formula = formula
@property
def reference(self):
"""Gets the reference of this RuleSchemaField. # noqa: E501
:return: The reference of this RuleSchemaField. # noqa: E501
:rtype: RuleSchemaReference
"""
return self._reference
@reference.setter
def reference(self, reference):
"""Sets the reference of this RuleSchemaField.
:param reference: The reference of this RuleSchemaField. # noqa: E501
:type: RuleSchemaReference
"""
self._reference = reference
@property
def sensor(self):
"""Gets the sensor of this RuleSchemaField. # noqa: E501
:return: The sensor of this RuleSchemaField. # noqa: E501
:rtype: list[RuleSchemaSensor]
"""
return self._sensor
@sensor.setter
def sensor(self, sensor):
"""Sets the sensor of this RuleSchemaField.
:param sensor: The sensor of this RuleSchemaField. # noqa: E501
:type: list[RuleSchemaSensor]
"""
self._sensor = sensor
@property
def type(self):
"""Gets the type of this RuleSchemaField. # noqa: E501
:return: The type of this RuleSchemaField. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RuleSchemaField.
:param type: The type of this RuleSchemaField. # noqa: E501
:type: str
"""
allowed_values = ["string", "integer", "float"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleSchemaField, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaField):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
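# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the generated model. The
# values used below are hypothetical; the point is the validation performed by
# the setters above.
def _example_rule_schema_field():
    field = RuleSchemaField(field_name="cpu-utilization", type="float",
                            description="Five-minute CPU utilization")
    # field_name must match ^[a-z][a-zA-Z0-9_-]*$ and be at most 64 characters,
    # and type must be one of "string", "integer" or "float"; otherwise the
    # corresponding setter raises ValueError.
    return field.to_dict()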
| 29.108014
| 141
| 0.582715
|
6ec2bc2205e6365abca64285e92e1d386825e17a
| 834
|
py
|
Python
|
tests/programs/multiprocessing_using/foo/__main__.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | null | null | null |
tests/programs/multiprocessing_using/foo/__main__.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-01T11:33:40.000Z
|
2019-03-01T11:33:40.000Z
|
tests/programs/multiprocessing_using/foo/__main__.py
|
hclivess/Nuitka
|
9c7ec9696e69a3901b25d5bce720c921d45c931b
|
[
"Apache-2.0"
] | 1
|
2019-03-26T16:56:21.000Z
|
2019-03-26T16:56:21.000Z
|
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from foo import entry
if __name__ == '__main__':
entry.main()
| 36.26087
| 79
| 0.714628
|
e8305b129c5799a86d6b6457b918a7b877794cd8
| 568
|
py
|
Python
|
services/card/cards/platform.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/card/cards/platform.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/card/cards/platform.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# Platform card
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.inv.models.platform import Platform
from .base import BaseCard
class PlatformCard(BaseCard):
name = "platform"
default_template_name = "platform"
model = Platform
def get_data(self):
return {"object": self.object}
| 28.4
| 72
| 0.43838
|
d76fb73f8daabd3ab785706f13baf94bff025069
| 9,463
|
py
|
Python
|
baselines/gail/run_mujoco.py
|
syllogismos/wandb_baselines
|
c0b0e5d5d9b2db3797e3fddfa07c3299df7523d5
|
[
"MIT"
] | 1
|
2020-03-06T13:37:05.000Z
|
2020-03-06T13:37:05.000Z
|
baselines/gail/run_mujoco.py
|
cvphelps/wandb_baselines
|
140011d40570f6f213650ea720cfe9374191786f
|
[
"MIT"
] | null | null | null |
baselines/gail/run_mujoco.py
|
cvphelps/wandb_baselines
|
140011d40570f6f213650ea720cfe9374191786f
|
[
"MIT"
] | 1
|
2018-07-15T03:36:16.000Z
|
2018-07-15T03:36:16.000Z
|
'''
Disclaimer: this code is heavily based on trpo_mpi at @openai/baselines and @openai/imitation
'''
import wandb
wandb.init()
import argparse
import os.path as osp
import logging
from mpi4py import MPI
from tqdm import tqdm
import numpy as np
import gym, roboschool
from baselines.gail import mlp_policy
from baselines.common import set_global_seeds, tf_util as U
from baselines.common.misc_util import boolean_flag
from baselines import bench
from baselines import logger
from baselines.gail.dataset.mujoco_dset import Mujoco_Dset
from baselines.gail.adversary import TransitionClassifier
def argsparser():
parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
parser.add_argument('--env_id', help='environment ID', default='Hopper-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--expert_path', type=str, default='data/deterministic.trpo.Hopper.0.00.npz')
parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')
parser.add_argument('--log_dir', help='the directory to save log file', default='log')
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
# Task
parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')
    # for evaluation
boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')
# Mujoco Dataset Configuration
parser.add_argument('--traj_limitation', type=int, default=-1)
# Optimization Configuration
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
# Network Configuration (Using MLP Policy)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
# Algorithms Configuration
parser.add_argument('--algo', type=str, choices=['trpo', 'ppo'], default='trpo')
parser.add_argument('--max_kl', type=float, default=0.01)
    parser.add_argument('--policy_entcoeff', help='entropy coefficient of policy', type=float, default=0)
    parser.add_argument('--adversary_entcoeff', help='entropy coefficient of discriminator', type=float, default=1e-3)
    # Training Configuration
parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=100)
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=5e6)
# Behavior Cloning
boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=1e4)
return parser.parse_args()
def get_task_name(args):
task_name = args.algo + "_gail."
if args.pretrained:
task_name += "with_pretrained."
if args.traj_limitation != np.inf:
task_name += "transition_limitation_%d." % args.traj_limitation
task_name += args.env_id.split("-")[0]
task_name = task_name + ".g_step_" + str(args.g_step) + ".d_step_" + str(args.d_step) + \
".policy_entcoeff_" + str(args.policy_entcoeff) + ".adversary_entcoeff_" + str(args.adversary_entcoeff)
task_name += ".seed_" + str(args.seed)
return task_name
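# Editor's note (illustrative, not part of the original script): with the
# default arguments above, get_task_name() evaluates to
#   trpo_gail.transition_limitation_-1.Hopper.g_step_3.d_step_1.policy_entcoeff_0.adversary_entcoeff_0.001.seed_0
# and main() joins it onto checkpoint_dir and log_dir to namespace the run.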
def main(args):
U.make_session(num_cpu=1).__enter__()
set_global_seeds(args.seed)
env = gym.make(args.env_id)
def policy_fn(name, ob_space, ac_space, reuse=False):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
reuse=reuse, hid_size=args.policy_hidden_size, num_hid_layers=2)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), "monitor.json"))
env.seed(args.seed)
gym.logger.setLevel(logging.WARN)
task_name = get_task_name(args)
args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name)
args.log_dir = osp.join(args.log_dir, task_name)
if args.task == 'train':
dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation)
reward_giver = TransitionClassifier(env, args.adversary_hidden_size, entcoeff=args.adversary_entcoeff)
train(env,
args.seed,
policy_fn,
reward_giver,
dataset,
args.algo,
args.g_step,
args.d_step,
args.policy_entcoeff,
args.num_timesteps,
args.save_per_iter,
args.checkpoint_dir,
args.log_dir,
args.pretrained,
args.BC_max_iter,
task_name
)
elif args.task == 'evaluate':
runner(env,
policy_fn,
args.load_model_path,
timesteps_per_batch=1024,
number_trajs=10,
stochastic_policy=args.stochastic_policy,
save=args.save_sample
)
else:
raise NotImplementedError
env.close()
def train(env, seed, policy_fn, reward_giver, dataset, algo,
g_step, d_step, policy_entcoeff, num_timesteps, save_per_iter,
checkpoint_dir, log_dir, pretrained, BC_max_iter, task_name=None):
pretrained_weight = None
if pretrained and (BC_max_iter > 0):
# Pretrain with behavior cloning
from baselines.gail import behavior_clone
pretrained_weight = behavior_clone.learn(env, policy_fn, dataset,
max_iters=BC_max_iter)
if algo == 'trpo':
from baselines.gail import trpo_mpi
# Set up for MPI seed
rank = MPI.COMM_WORLD.Get_rank()
if rank != 0:
logger.set_level(logger.DISABLED)
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
set_global_seeds(workerseed)
env.seed(workerseed)
trpo_mpi.learn(env, policy_fn, reward_giver, dataset, rank,
pretrained=pretrained, pretrained_weight=pretrained_weight,
g_step=g_step, d_step=d_step,
entcoeff=policy_entcoeff,
max_timesteps=num_timesteps,
ckpt_dir=checkpoint_dir, log_dir=log_dir,
save_per_iter=save_per_iter,
timesteps_per_batch=1024,
max_kl=0.01, cg_iters=10, cg_damping=0.1,
gamma=0.995, lam=0.97,
vf_iters=5, vf_stepsize=1e-3,
task_name=task_name)
else:
raise NotImplementedError
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
stochastic_policy, save=False, reuse=False):
# Setup network
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
U.initialize()
# Prepare for rollouts
# ----------------------------------------
U.load_state(load_model_path)
obs_list = []
acs_list = []
len_list = []
ret_list = []
for _ in tqdm(range(number_trajs)):
traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
obs_list.append(obs)
acs_list.append(acs)
len_list.append(ep_len)
ret_list.append(ep_ret)
if stochastic_policy:
print('stochastic policy:')
else:
print('deterministic policy:')
if save:
filename = load_model_path.split('/')[-1] + '.' + env.spec.id
np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
lens=np.array(len_list), rets=np.array(ret_list))
avg_len = sum(len_list)/len(len_list)
avg_ret = sum(ret_list)/len(ret_list)
print("Average length:", avg_len)
print("Average return:", avg_ret)
return avg_len, avg_ret
# Sample one trajectory (until trajectory end)
def traj_1_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
# Initialize history arrays
obs = []
rews = []
news = []
acs = []
while True:
ac, vpred = pi.act(stochastic, ob)
obs.append(ob)
news.append(new)
acs.append(ac)
ob, rew, new, _ = env.step(ac)
rews.append(rew)
cur_ep_ret += rew
cur_ep_len += 1
if new or t >= horizon:
break
t += 1
obs = np.array(obs)
rews = np.array(rews)
news = np.array(news)
acs = np.array(acs)
traj = {"ob": obs, "rew": rews, "new": news, "ac": acs,
"ep_ret": cur_ep_ret, "ep_len": cur_ep_len}
return traj
if __name__ == '__main__':
args = argsparser()
wandb.config.update(args)
# wandb.config.algo = 'gail'
main(args)
| 38.782787
| 119
| 0.640283
|
50db3de9d89a041b272d3a28d84cbffc9cbb298f
| 2,268
|
py
|
Python
|
examples/paging-predicate/paging_predicate_example.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 98
|
2015-12-08T14:26:27.000Z
|
2022-03-23T17:44:11.000Z
|
examples/paging-predicate/paging_predicate_example.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 396
|
2016-02-23T11:07:55.000Z
|
2022-03-31T14:26:34.000Z
|
examples/paging-predicate/paging_predicate_example.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 62
|
2015-12-09T11:20:53.000Z
|
2022-01-28T01:30:54.000Z
|
import random
import hazelcast
from hazelcast import predicate
from hazelcast.core import HazelcastJsonValue
from hazelcast.serialization.api import IdentifiedDataSerializable
class AgeComparator(IdentifiedDataSerializable):
"""
This is just a marker class that identifies which comparator
to use on the member.
It also carries a boolean flag to determine the order of
sorting done on the member side.
It must implement the counterpart of the interface
implemented by the Java code and have the same class and
factory id.
"""
def __init__(self, reverse=False):
self._reverse = reverse
def write_data(self, object_data_output):
object_data_output.write_boolean(self._reverse)
def read_data(self, object_data_input):
self._reverse = object_data_input.read_boolean()
def get_factory_id(self):
return 1
def get_class_id(self):
return 1
client = hazelcast.HazelcastClient()
students = client.get_map("students").blocking()
# Populate the map with some random data.
for i in range(42):
students.set(
i,
HazelcastJsonValue(
{
"student_id": i,
"age": random.randrange(8, 24),
}
),
)
# Use the paging predicate with true predicate
# to get all students with the page size of 10.
# It also uses the custom comparator we have
# written and sorts the values in ascending
# order of age.
paging_predicate = predicate.paging(
predicate=predicate.true(),
page_size=10,
comparator=AgeComparator(),
)
print(students.values(paging_predicate))
# Set up the next page and fetch it.
paging_predicate.next_page()
print(students.values(paging_predicate))
# This time, we will fetch students with the
# student_id between 10 to 40 with the page size
# of 5. We will also make use of the custom comparator
# and sort the results in descending order of age.
paging_predicate = predicate.paging(
predicate=predicate.between("student_id", 10, 40),
page_size=5,
comparator=AgeComparator(reverse=True),
)
print(students.values(paging_predicate))
# Set up the next page and fetch it.
paging_predicate.next_page()
print(students.values(paging_predicate))
client.shutdown()
| 26.372093
| 66
| 0.716931
|
d8c9910bad542251562209f0596856735d80ea78
| 2,601
|
py
|
Python
|
python/sdk/client/models/endpoint_status.py
|
ashwinath/merlin
|
087a7fa6fb21e4c771d64418bd58873175226ca1
|
[
"Apache-2.0"
] | null | null | null |
python/sdk/client/models/endpoint_status.py
|
ashwinath/merlin
|
087a7fa6fb21e4c771d64418bd58873175226ca1
|
[
"Apache-2.0"
] | null | null | null |
python/sdk/client/models/endpoint_status.py
|
ashwinath/merlin
|
087a7fa6fb21e4c771d64418bd58873175226ca1
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Merlin
API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501
OpenAPI spec version: 0.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EndpointStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
PENDING = "pending"
RUNNING = "running"
SERVING = "serving"
FAILED = "failed"
TERMINATED = "terminated"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""EndpointStatus - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EndpointStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EndpointStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.814433
| 108
| 0.55248
|
5da3cbdde39618ebc9fb57f1847583f4603bb71d
| 383
|
py
|
Python
|
src/hataraku/migrations/0004_auto_20190614_1323.py
|
HNJ755329/Hatarako
|
253d107db7a15caaf63794d9b8a4acc99168fba0
|
[
"MIT"
] | null | null | null |
src/hataraku/migrations/0004_auto_20190614_1323.py
|
HNJ755329/Hatarako
|
253d107db7a15caaf63794d9b8a4acc99168fba0
|
[
"MIT"
] | null | null | null |
src/hataraku/migrations/0004_auto_20190614_1323.py
|
HNJ755329/Hatarako
|
253d107db7a15caaf63794d9b8a4acc99168fba0
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2019-06-14 04:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hataraku', '0003_auto_20190614_1319'),
]
operations = [
migrations.AlterField(
model_name='post',
name='color',
field=models.CharField(max_length=7),
),
]
| 20.157895
| 49
| 0.5953
|
37df33e7c099cf9f88b7ee4261db32a420f885b9
| 1,934
|
py
|
Python
|
assault/cli.py
|
joseph-mcdaniel/load-testing
|
159c083cc9c0981837ff9f21b9d9ddaa4df4fe6f
|
[
"MIT"
] | null | null | null |
assault/cli.py
|
joseph-mcdaniel/load-testing
|
159c083cc9c0981837ff9f21b9d9ddaa4df4fe6f
|
[
"MIT"
] | 3
|
2020-03-24T18:26:32.000Z
|
2021-02-02T22:38:13.000Z
|
assault/cli.py
|
joseph-mcdaniel/load-testing
|
159c083cc9c0981837ff9f21b9d9ddaa4df4fe6f
|
[
"MIT"
] | null | null | null |
import click
import sys
import json
from typing import TextIO
from .http import assault
from .stats import Results
@click.command()
@click.option("--requests", "-r", default=500, help="Number of requests")
@click.option("--concurrency", "-c", default=1, help="Number of concurrent requests")
@click.option("--json-file", "-j", default=None, help="Path to output JSON file")
@click.argument("url")
def cli(requests, concurrency, json_file, url):
output_file = None
if json_file:
try:
output_file = open(json_file, "w")
except:
print(f"Unable to open file {json_file}")
sys.exit(1)
total_time, requests_dicts = assault(url, requests, concurrency)
results = Results(total_time, requests_dicts)
display(results, output_file)
def display(results: Results, json_file: TextIO):
if json_file:
# Write to the file
json.dump(
{
"successful_requests": results.successful_requests(),
"slowest": results.slowest(),
"fastest": results.fastest(),
"total_time": results.total_time,
"requests_per_minute": results.requests_per_minute(),
"requests_per_second": results.requests_per_second(),
},
json_file,
)
json_file.close()
print(".... Done!")
else:
# Print to screen
print(".... Done!")
print("--- Results ---")
print(f"Successful requests\t{results.successful_requests()}")
print(f"Slowest \t{results.slowest()}")
print(f"Fastest \t{results.fastest()}")
print(f"Average \t{results.average_time()}")
print(f"Total time \t{results.total_time}")
print(f"Requests per minute\t{results.requests_per_minute()}")
print(f"Requests per second\t{results.requests_per_second()}")
| 34.535714
| 85
| 0.599793
|
c26ac1a91dabdb0034c28b5241ea7cfad78d438f
| 3,375
|
py
|
Python
|
jscatter/jscatter_test.py
|
flekschas/jupyter-scatter
|
550eceb2311b0394caad83dbb399ed2f29e55af6
|
[
"Apache-2.0"
] | 23
|
2021-02-03T02:05:47.000Z
|
2022-03-17T14:53:39.000Z
|
jscatter/jscatter_test.py
|
manzt/jupyter-scatter
|
c38f94abfb655e03f407e7fcec80a883439796b5
|
[
"Apache-2.0"
] | 5
|
2021-02-04T22:19:35.000Z
|
2022-03-07T04:49:31.000Z
|
jscatter/jscatter_test.py
|
manzt/jupyter-scatter
|
c38f94abfb655e03f407e7fcec80a883439796b5
|
[
"Apache-2.0"
] | 1
|
2021-06-15T14:14:47.000Z
|
2021-06-15T14:14:47.000Z
|
import numpy as np
import pandas as pd
from .jscatter import Scatter, component_idx_to_name
from .utils import minmax_scale
def test_component_idx_to_name():
assert 'valueA' == component_idx_to_name(2)
assert 'valueB' == component_idx_to_name(3)
assert None == component_idx_to_name(4)
assert None == component_idx_to_name(1)
assert None == component_idx_to_name(None)
def test_scatter_numpy():
x = np.random.rand(500)
y = np.random.rand(500)
scatter = Scatter(x, y)
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert (500, 4) == widget_data.shape
assert np.allclose(minmax_scale(x, (-1,1)), widget_data[:,0])
assert np.allclose(minmax_scale(y, (-1,1)), widget_data[:,1])
assert np.sum(widget_data[:,2:]) == 0
def get_df():
num_groups = 8
data = np.random.rand(500, 7)
data[:,2] *= 100
data[:,3] *= 100
data[:,3] = data[:,3].astype(int)
data[:,4] = np.round(data[:,4] * (num_groups - 1)).astype(int)
data[:,5] = np.repeat(np.arange(100), 5).astype(int)
data[:,6] = np.resize(np.arange(5), 500).astype(int)
df = pd.DataFrame(
data,
columns=['a', 'b', 'c', 'd', 'group', 'connect', 'connect_order']
)
df['group'] = df['group'].astype('int').astype('category').map(lambda c: chr(65 + c), na_action=None)
df['connect'] = df['connect'].astype('int')
df['connect_order'] = df['connect_order'].astype('int')
return df
def test_scatter_pandas():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert (500, 4) == np.asarray(widget.points).shape
assert np.allclose(minmax_scale(df['a'].values, (-1,1)), widget_data[:,0])
assert np.allclose(minmax_scale(df['b'].values, (-1,1)), widget_data[:,1])
def test_scatter_point_encoding_updates():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert len(scatter._encodings.data) == 0
assert np.sum(widget_data[:,2:]) == 0
scatter.color(by='group')
widget_data = np.asarray(widget.points)
assert 'color' in scatter._encodings.visual
assert 'group' in scatter._encodings.data
assert np.sum(widget_data[:,2]) > 0
assert np.sum(widget_data[:,3]) == 0
scatter.opacity(by='c')
widget_data = np.asarray(widget.points)
assert 'opacity' in scatter._encodings.visual
assert 'c' in scatter._encodings.data
assert np.sum(widget_data[:,3]) > 0
scatter.size(by='c')
widget_data = np.asarray(widget.points)
assert 'size' in scatter._encodings.visual
assert 'c' in scatter._encodings.data
assert np.sum(widget_data[:,3]) > 0
def test_scatter_connection_encoding_updates():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
scatter.connect(by='connect')
widget_data = np.asarray(widget.points)
assert widget_data.shape == (500, 5)
assert np.all(
df['connect'].values == widget_data[:,4].astype(df['connect'].dtype)
)
scatter.connect(order='connect_order')
widget_data = np.asarray(widget.points)
assert widget_data.shape == (500, 6)
assert np.all(
df['connect_order'].values == widget_data[:,5].astype(df['connect_order'].dtype)
)
| 31.25
| 105
| 0.647704
|
2af8a1e22dfbc99a5cf33569f3a1a48ef476b490
| 730
|
py
|
Python
|
weather-station/bme280_sensor.py
|
jeremyhamm/pi-weather-dashboard
|
ada4441f72251f7238c1d86f1e5b69af089c7baa
|
[
"MIT"
] | null | null | null |
weather-station/bme280_sensor.py
|
jeremyhamm/pi-weather-dashboard
|
ada4441f72251f7238c1d86f1e5b69af089c7baa
|
[
"MIT"
] | null | null | null |
weather-station/bme280_sensor.py
|
jeremyhamm/pi-weather-dashboard
|
ada4441f72251f7238c1d86f1e5b69af089c7baa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import bme280
import smbus2
# RaspberryPi Logic
port = 1
address = 0x76 # Adafruit BME280 address. Other BME280s may be different
bus = smbus2.SMBus(port)
bme280.load_calibration_params(bus,address)
def read_all():
bme280_data = bme280.sample(bus,address)
humidity = bme280_data.humidity
pressure = bme280_data.pressure * 0.029530
celsius_temperature = bme280_data.temperature
fahrenheit_temp = ((celsius_temperature * 1.8000) + 32.00)
# Print for debug
degree = u"\u00b0"
# print( str(round(fahrenheit_temp)) + degree + "F" )
# print( str(round(humidity)) + "% humidity" )
# print( str(round(pressure, 2)) + " in" )
return fahrenheit_temp, humidity, pressure
# read_all()
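# Editor's note: illustrative usage sketch, not part of the original module.
# The sensor may also answer at I2C address 0x77 depending on wiring; adjust
# `address` above if detection fails.
if __name__ == "__main__":
    temp_f, humidity_pct, pressure_inhg = read_all()
    print("%.1f F  %.0f%% humidity  %.2f inHg" % (temp_f, humidity_pct, pressure_inhg))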
| 26.071429
| 72
| 0.716438
|
809521775abac2ec5b2d1b84ebaa91bd00dc6a93
| 7,843
|
py
|
Python
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/operations/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/operations/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/operations/__init__.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_gateways_operations import ApplicationGatewaysOperations
from .application_security_groups_operations import ApplicationSecurityGroupsOperations
from .available_delegations_operations import AvailableDelegationsOperations
from .available_resource_group_delegations_operations import AvailableResourceGroupDelegationsOperations
from .azure_firewalls_operations import AzureFirewallsOperations
from .azure_firewall_fqdn_tags_operations import AzureFirewallFqdnTagsOperations
from .ddos_protection_plans_operations import DdosProtectionPlansOperations
from .available_endpoint_services_operations import AvailableEndpointServicesOperations
from .express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .express_route_circuit_connections_operations import ExpressRouteCircuitConnectionsOperations
from .express_route_circuits_operations import ExpressRouteCircuitsOperations
from .express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .express_route_cross_connections_operations import ExpressRouteCrossConnectionsOperations
from .express_route_cross_connection_peerings_operations import ExpressRouteCrossConnectionPeeringsOperations
from .express_route_gateways_operations import ExpressRouteGatewaysOperations
from .express_route_connections_operations import ExpressRouteConnectionsOperations
from .interface_endpoints_operations import InterfaceEndpointsOperations
from .load_balancers_operations import LoadBalancersOperations
from .load_balancer_backend_address_pools_operations import LoadBalancerBackendAddressPoolsOperations
from .load_balancer_frontend_ip_configurations_operations import LoadBalancerFrontendIPConfigurationsOperations
from .inbound_nat_rules_operations import InboundNatRulesOperations
from .load_balancer_load_balancing_rules_operations import LoadBalancerLoadBalancingRulesOperations
from .load_balancer_network_interfaces_operations import LoadBalancerNetworkInterfacesOperations
from .load_balancer_probes_operations import LoadBalancerProbesOperations
from .network_interfaces_operations import NetworkInterfacesOperations
from .network_interface_ip_configurations_operations import NetworkInterfaceIPConfigurationsOperations
from .network_interface_load_balancers_operations import NetworkInterfaceLoadBalancersOperations
from .network_interface_tap_configurations_operations import NetworkInterfaceTapConfigurationsOperations
from .network_profiles_operations import NetworkProfilesOperations
from .network_security_groups_operations import NetworkSecurityGroupsOperations
from .security_rules_operations import SecurityRulesOperations
from .default_security_rules_operations import DefaultSecurityRulesOperations
from .network_watchers_operations import NetworkWatchersOperations
from .packet_captures_operations import PacketCapturesOperations
from .connection_monitors_operations import ConnectionMonitorsOperations
from .operations import Operations
from .public_ip_addresses_operations import PublicIPAddressesOperations
from .public_ip_prefixes_operations import PublicIPPrefixesOperations
from .route_filters_operations import RouteFiltersOperations
from .route_filter_rules_operations import RouteFilterRulesOperations
from .route_tables_operations import RouteTablesOperations
from .routes_operations import RoutesOperations
from .bgp_service_communities_operations import BgpServiceCommunitiesOperations
from .service_endpoint_policies_operations import ServiceEndpointPoliciesOperations
from .service_endpoint_policy_definitions_operations import ServiceEndpointPolicyDefinitionsOperations
from .usages_operations import UsagesOperations
from .virtual_networks_operations import VirtualNetworksOperations
from .subnets_operations import SubnetsOperations
from .virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from .virtual_network_taps_operations import VirtualNetworkTapsOperations
from .virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .local_network_gateways_operations import LocalNetworkGatewaysOperations
from .virtual_wans_operations import VirtualWansOperations
from .vpn_sites_operations import VpnSitesOperations
from .vpn_sites_configuration_operations import VpnSitesConfigurationOperations
from .virtual_hubs_operations import VirtualHubsOperations
from .hub_virtual_network_connections_operations import HubVirtualNetworkConnectionsOperations
from .vpn_gateways_operations import VpnGatewaysOperations
from .vpn_connections_operations import VpnConnectionsOperations
from .p2s_vpn_server_configurations_operations import P2sVpnServerConfigurationsOperations
from .p2s_vpn_gateways_operations import P2sVpnGatewaysOperations
__all__ = [
'ApplicationGatewaysOperations',
'ApplicationSecurityGroupsOperations',
'AvailableDelegationsOperations',
'AvailableResourceGroupDelegationsOperations',
'AzureFirewallsOperations',
'AzureFirewallFqdnTagsOperations',
'DdosProtectionPlansOperations',
'AvailableEndpointServicesOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitConnectionsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'ExpressRouteCrossConnectionsOperations',
'ExpressRouteCrossConnectionPeeringsOperations',
'ExpressRouteGatewaysOperations',
'ExpressRouteConnectionsOperations',
'InterfaceEndpointsOperations',
'LoadBalancersOperations',
'LoadBalancerBackendAddressPoolsOperations',
'LoadBalancerFrontendIPConfigurationsOperations',
'InboundNatRulesOperations',
'LoadBalancerLoadBalancingRulesOperations',
'LoadBalancerNetworkInterfacesOperations',
'LoadBalancerProbesOperations',
'NetworkInterfacesOperations',
'NetworkInterfaceIPConfigurationsOperations',
'NetworkInterfaceLoadBalancersOperations',
'NetworkInterfaceTapConfigurationsOperations',
'NetworkProfilesOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'DefaultSecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'ConnectionMonitorsOperations',
'Operations',
'PublicIPAddressesOperations',
'PublicIPPrefixesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'ServiceEndpointPoliciesOperations',
'ServiceEndpointPolicyDefinitionsOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkTapsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
'VirtualWansOperations',
'VpnSitesOperations',
'VpnSitesConfigurationOperations',
'VirtualHubsOperations',
'HubVirtualNetworkConnectionsOperations',
'VpnGatewaysOperations',
'VpnConnectionsOperations',
'P2sVpnServerConfigurationsOperations',
'P2sVpnGatewaysOperations',
]
| 55.624113
| 111
| 0.865485
|
5a4a7d0b916bf074e121c3e0ff1cc12975b2bf82
| 641
|
py
|
Python
|
export_example.py
|
WorldException/jira2gitlab
|
bae0d2a5912f1e7e4569a534f1b1f4177eda9449
|
[
"MIT"
] | null | null | null |
export_example.py
|
WorldException/jira2gitlab
|
bae0d2a5912f1e7e4569a534f1b1f4177eda9449
|
[
"MIT"
] | null | null | null |
export_example.py
|
WorldException/jira2gitlab
|
bae0d2a5912f1e7e4569a534f1b1f4177eda9449
|
[
"MIT"
] | null | null | null |
from jira2gitlab import jira
# Example: export a project to local files (issues database and attachments)
# name from url https://{name}.atlassian.net/
# token from https://id.atlassian.com/manage-profile/security/api-tokens
jira.configure('name', 'you_mail@gmail.com', 'jira_token')
# export all projects
jira.export_projects()
# export specific projects
jira.export_projects(['AC', 'AB'])
# To download export-users.csv:
# 1. go to https://{site}.atlassian.net/jira/people/search and click "Manage users"
# 2. click "Export users"
jira.save_users(jira.export_users('export-users.csv'))
# grab users from tasks
jira.save_users(jira.grab_users())
| 32.05
| 80
| 0.75663
|
5d64c2b2bd29c80c79ed1a24f90b2b2e45a6bb8b
| 910
|
py
|
Python
|
src/som/primitives/object_primitives.py
|
SOM-st/RPySOM
|
2dcfc71786a3bd5be5a842c649645f71d6c35f89
|
[
"MIT"
] | 12
|
2016-01-07T14:20:57.000Z
|
2019-10-13T06:56:20.000Z
|
src/som/primitives/object_primitives.py
|
SOM-st/RPySOM
|
2dcfc71786a3bd5be5a842c649645f71d6c35f89
|
[
"MIT"
] | 2
|
2016-05-26T06:53:33.000Z
|
2020-09-02T15:58:28.000Z
|
src/som/primitives/object_primitives.py
|
SOM-st/RPySOM
|
2dcfc71786a3bd5be5a842c649645f71d6c35f89
|
[
"MIT"
] | 2
|
2016-05-25T06:07:52.000Z
|
2019-10-02T16:52:25.000Z
|
from rlib.objectmodel import compute_identity_hash
from som.primitives.primitives import Primitives
from som.vm.globals import trueObject, falseObject
from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive
def _equals(op1, op2):
if op1 is op2:
return trueObject
else:
return falseObject
def _hashcode(rcvr):
from som.vmobjects.integer import Integer
return Integer(compute_identity_hash(rcvr))
def _inst_var_at(rcvr, idx):
return rcvr.get_field(idx.get_embedded_integer() - 1)
class ObjectPrimitivesBase(Primitives):
def install_primitives(self):
self._install_instance_primitive(BinaryPrimitive("==", self._universe, _equals))
self._install_instance_primitive(UnaryPrimitive("hashcode", self._universe, _hashcode))
self._install_instance_primitive(
BinaryPrimitive("instVarAt:", self._universe, _inst_var_at))
| 29.354839
| 95
| 0.759341
|
c19297d0ca95b632a8b470de953f10811a111927
| 1,738
|
py
|
Python
|
land_planning_and_allocation/api.py
|
the-bantoo/Land-Planning-And-Allocation
|
67a6b57c7c4ddc40a51691f94cacb5e28b1c5cd5
|
[
"MIT"
] | null | null | null |
land_planning_and_allocation/api.py
|
the-bantoo/Land-Planning-And-Allocation
|
67a6b57c7c4ddc40a51691f94cacb5e28b1c5cd5
|
[
"MIT"
] | 1
|
2020-10-05T15:50:33.000Z
|
2020-10-07T10:28:31.000Z
|
land_planning_and_allocation/api.py
|
the-bantoo/Land-Planning-And-Allocation
|
67a6b57c7c4ddc40a51691f94cacb5e28b1c5cd5
|
[
"MIT"
] | 2
|
2020-10-29T17:38:08.000Z
|
2021-09-07T10:59:27.000Z
|
import frappe
from frappe import _
def create_item(plot, method):
settings = frappe.get_doc('Land Settings')
plot_item = frappe.get_doc({
"doctype": "Item",
"item_group": settings.land_for_sale_group,
"default_warehouse": settings.sales_land_warehouse,
"item_code": "Plot " + str(plot.plot_id),
"land": 1,
"is_stock_item": 1,
"stock_uom": "Nos",
"opening_stock": 1,
"standard_rate": plot.plot_price,
"is_purchase_item": 0,
"is_sales_item": 1,
"valuation_rate": plot.plot_price,
"include_item_in_manufacturing": 0,
"description": "Project: " + str(plot.project) + "<br \>" + "Subdivision: " + str(plot.subdivision) + "<br \>" + "Plot ID: " + str(plot.plot_id) + "<br \>" + "Dimensions: " + str(plot.dimensions) + "<br \>" + "Area: " + str(plot.area) + "sqm",
})
plot_item.flags.ignore_permission = True
plot_item.insert()
plot.plot_item = "Plot " + str(plot.plot_id)
plot.save()
def calculate_plot_details(plot, method):
if not plot.area or int(plot.area) <= 0:
plot.area = int(plot.width) * int(plot.length)
plot.dimensions = str(plot.width) + " x " + str(plot.length) + "m"
def project_item(project, method):
settings = frappe.get_doc('Land Settings')
project_item = frappe.get_doc({
"doctype": "Item",
"item_name": project.project_name,
"item_group": settings.land_in_bulk_group,
"land": 1,
"item_code": project.project_name,
"is_stock_item": 1,
"stock_uom": "Square Meter",
"include_item_in_manufacturing": 0,
})
project_item.flags.ignore_permission = True
project_item.insert()
| 36.208333
| 251
| 0.611047
|
5ab348c162bd9c449f42f0a2e999c7c6876ec2d5
| 415
|
py
|
Python
|
thinkpython_allen_downey/exercise_5_3.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
thinkpython_allen_downey/exercise_5_3.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
thinkpython_allen_downey/exercise_5_3.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
def check_fermat(a, b, c, n):
if a**n + b**n == c**n:
print('Holy smokes, Fermat was wrong!')
else:
print("No, that doesn't work")
def get_input():
a = int(input('Please enter a value for a:'))
b = int(input('Please enter a value for b:'))
c = int(input('Please enter a value for c:'))
n = int(input('Please enter a value for n:'))
check_fermat(a, b, c, n)
get_input()
| 27.666667
| 49
| 0.573494
|
ff650ed250f94b07d5e1891a08f9a2fd7c37d4c2
| 929
|
py
|
Python
|
imagegallery/migrations/0002_post.py
|
Vincent-Juma/Django-Portfolio
|
53dd6a811f04b66c99d4985c3cb97d2bc5a86cc5
|
[
"MIT"
] | 1
|
2021-05-28T14:16:59.000Z
|
2021-05-28T14:16:59.000Z
|
imagegallery/migrations/0002_post.py
|
Vincent-Juma/Django-Portfolio
|
53dd6a811f04b66c99d4985c3cb97d2bc5a86cc5
|
[
"MIT"
] | null | null | null |
imagegallery/migrations/0002_post.py
|
Vincent-Juma/Django-Portfolio
|
53dd6a811f04b66c99d4985c3cb97d2bc5a86cc5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-05 12:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('imagegallery', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 33.178571
| 120
| 0.636168
|