| hexsha (string, length 40) | size (int64, 4 - 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 4 - 209) | max_stars_repo_name (string, length 5 - 121) | max_stars_repo_head_hexsha (string, length 40) | max_stars_repo_licenses (list, length 1 - 10) | max_stars_count (int64, 1 - 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 4 - 209) | max_issues_repo_name (string, length 5 - 121) | max_issues_repo_head_hexsha (string, length 40) | max_issues_repo_licenses (list, length 1 - 10) | max_issues_count (int64, 1 - 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 4 - 209) | max_forks_repo_name (string, length 5 - 121) | max_forks_repo_head_hexsha (string, length 40) | max_forks_repo_licenses (list, length 1 - 10) | max_forks_count (int64, 1 - 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 4 - 1.02M) | avg_line_length (float64, 1.07 - 66.1k) | max_line_length (int64, 4 - 266k) | alphanum_fraction (float64, 0.01 - 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 043a5251d2cf80a18a96e2246c9287875ab82f96 | 15,050 | py | Python | examples/Deeplab/experiments/resnet_model_deeplabv3.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | ["Apache-2.0"] | 5 | 2018-05-04T02:04:15.000Z | 2020-04-02T05:38:48.000Z | examples/Deeplab/experiments/resnet_model_deeplabv3.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | ["Apache-2.0"] | null | null | null | examples/Deeplab/experiments/resnet_model_deeplabv3.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | ["Apache-2.0"] | 2 | 2018-04-23T13:43:10.000Z | 2019-10-30T09:56:54.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: resnet_model.py
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
import numpy as np
slim = tf.contrib.slim
# NOTE: bottleneck_hdc() below uses resnet_utils.subsample()/conv2d_same();
# assuming TF 1.x, these are taken from TF-Slim's bundled nets package.
from tensorflow.contrib.slim.nets import resnet_utils
from tensorpack.tfutils.argscope import argscope, get_arg_scope
from tensorpack.models import (
    Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm, BNReLU, FullyConnected,
    LinearWrap)
from tensorpack.models.common import layer_register, VariableHolder, rename_get_variable
from tensorpack.utils.argtools import shape2d, shape4d
@layer_register(log_shape=True)
def AtrousConv2D(x, out_channel, kernel_shape,
padding='SAME', rate=1,
W_init=None, b_init=None,
nl=tf.identity, use_bias=False,
data_format='NHWC'):
"""
2D AtrousConvolution on 4D inputs.
Args:
x (tf.Tensor): a 4D tensor.
Must have known number of channels, but can have other unknown dimensions.
out_channel (int): number of output channel.
        kernel_shape: (h, w) tuple or an int.
        rate: a positive int32; in the literature the same parameter is
            sometimes called the input stride or dilation rate.
        padding (str): 'valid' or 'same'. Case insensitive.
W_init: initializer for W. Defaults to `variance_scaling_initializer`.
b_init: initializer for b. Defaults to zero.
nl: a nonlinearity function.
use_bias (bool): whether to use bias.
Returns:
tf.Tensor named ``output`` with attribute `variables`.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
in_shape = x.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[AtrousConv2D] Input cannot have unknown channel!"
kernel_shape = shape2d(kernel_shape)
padding = padding.upper()
filter_shape = kernel_shape + [in_channel, out_channel]
if W_init is None:
W_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
W = tf.get_variable('W', filter_shape, initializer=W_init)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
conv = tf.nn.atrous_conv2d(x, W, rate, padding)
ret = nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
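# Hedged usage sketch (not part of the original file): AtrousConv2D above wraps
# tf.nn.atrous_conv2d. A minimal standalone call of that primitive, assuming
# TensorFlow 1.x and NHWC input, is shown below; x_demo/W_demo are illustrative
# names only and this function is never called by the module.
def _atrous_conv_sketch():
    x_demo = tf.placeholder(tf.float32, [None, 64, 64, 3])
    W_demo = tf.get_variable('W_demo', [3, 3, 3, 16],
                             initializer=variance_scaling_initializer())
    # rate=2 inserts one hole between filter taps: the receptive field grows
    # without extra parameters and without reducing spatial resolution.
    return tf.nn.atrous_conv2d(x_demo, W_demo, rate=2, padding='SAME')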
@layer_register(log_shape=True)
def LayerConv2D(x, out_channel, kernel_shape,
padding='SAME', stride=1,
W_init=None, b_init=None,
nl=tf.identity, split=1, use_bias=True,
data_format='NHWC'):
"""
2D convolution on 4D inputs.
Args:
x (tf.Tensor): a 4D tensor.
Must have known number of channels, but can have other unknown dimensions.
out_channel (int): number of output channel.
        kernel_shape: (h, w) tuple or an int.
        stride: (h, w) tuple or an int.
padding (str): 'valid' or 'same'. Case insensitive.
split (int): Split channels as used in Alexnet. Defaults to 1 (no split).
W_init: initializer for W. Defaults to `variance_scaling_initializer`.
b_init: initializer for b. Defaults to zero.
nl: a nonlinearity function.
use_bias (bool): whether to use bias.
Returns:
tf.Tensor named ``output`` with attribute `variables`.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
in_shape = x.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert out_channel % split == 0
kernel_shape = shape2d(kernel_shape)
padding = padding.upper()
    filter_shape = kernel_shape + [in_channel // split, out_channel]  # integer division for grouped conv
stride = shape4d(stride, data_format=data_format)
if W_init is None:
W_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
W = tf.get_variable('W', filter_shape, initializer=W_init)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
        # use the low-level op: W here is a weight tensor, not a filter count
        conv = tf.nn.conv2d(x, W, stride, padding, data_format=data_format)
else:
inputs = tf.split(x, split, channel_axis)
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding, data_format=data_format)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
def resnet_shortcut(l, n_out, stride, nl=tf.identity):
data_format = get_arg_scope()['Conv2D']['data_format']
n_in = l.get_shape().as_list()[1 if data_format == 'NCHW' else 3]
if n_in != n_out: # change dimension when channel is not the same
return Conv2D('convshortcut', l, n_out, 1, stride=stride, nl=nl)
else:
return l
def apply_preactivation(l, preact):
if preact == 'bnrelu':
shortcut = l # preserve identity mapping
l = BNReLU('preact', l)
else:
shortcut = l
return l, shortcut
def get_bn(zero_init=False):
"""
Zero init gamma is good for resnet. See https://arxiv.org/abs/1706.02677.
"""
if zero_init:
return lambda x, name: BatchNorm('bn', x, gamma_init=tf.zeros_initializer())
else:
return lambda x, name: BatchNorm('bn', x)
def preresnet_basicblock(l, ch_out, stride, preact):
l, shortcut = apply_preactivation(l, preact)
l = Conv2D('conv1', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3)
return l + resnet_shortcut(shortcut, ch_out, stride)
def preresnet_bottleneck(l, ch_out, stride, preact):
# stride is applied on the second conv, following fb.resnet.torch
l, shortcut = apply_preactivation(l, preact)
l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1)
return l + resnet_shortcut(shortcut, ch_out * 4, stride)
def preresnet_group(l, name, block_func, features, count, stride):
with tf.variable_scope(name):
for i in range(0, count):
with tf.variable_scope('block{}'.format(i)):
# first block doesn't need activation
l = block_func(l, features,
stride if i == 0 else 1,
'no_preact' if i == 0 else 'bnrelu')
        # the end of each group needs an extra activation
l = BNReLU('bnlast', l)
return l
def resnet_basicblock(l, ch_out, stride):
shortcut = l
l = Conv2D('conv1', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3, nl=get_bn(zero_init=True))
return l + resnet_shortcut(shortcut, ch_out, stride, nl=get_bn(zero_init=False))
def resnet_bottleneck_deeplab(l, ch_out, stride, dilation, stride_first=False):
"""
    stride_first: original ResNet puts the stride on the first conv; fb.resnet.torch puts it on the second conv.
"""
shortcut = l
l = Conv2D('conv1', l, ch_out, 1, stride=stride if stride_first else 1, nl=BNReLU)
if dilation == 1:
l = Conv2D('conv2', l, ch_out, 3, stride=1 if stride_first else stride, nl=BNReLU)
else:
l = AtrousConv2D('conv2', l, ch_out, kernel_shape=3, rate=dilation, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))
return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
def resnet_bottleneck(l, ch_out, stride, stride_first=False):
"""
    stride_first: original ResNet puts the stride on the first conv; fb.resnet.torch puts it on the second conv.
"""
shortcut = l
l = Conv2D('conv1', l, ch_out, 1, stride=stride if stride_first else 1, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3, stride=1 if stride_first else stride, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))
return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
def se_resnet_bottleneck(l, ch_out, stride):
shortcut = l
l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))
squeeze = GlobalAvgPooling('gap', l)
squeeze = FullyConnected('fc1', squeeze, ch_out // 4, nl=tf.nn.relu)
squeeze = FullyConnected('fc2', squeeze, ch_out * 4, nl=tf.nn.sigmoid)
l = l * tf.reshape(squeeze, [-1, ch_out * 4, 1, 1])
return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
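# Hedged note (not part of the original file): the squeeze-and-excitation gate
# above pools the ch_out*4 bottleneck channels, reduces them to ch_out//4 in
# 'fc1', then expands back to ch_out*4 with a sigmoid in 'fc2'. A tiny helper
# just to make the channel reduction ratio explicit:
def _se_reduction_ratio(ch_out=64):
    return (ch_out * 4) // (ch_out // 4)   # e.g. 256 // 16 -> 16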
def resnet_bottleneck_hdc(l, ch_out, stride, dilation, stride_first=False):
"""
    stride_first: original ResNet puts the stride on the first conv; fb.resnet.torch puts it on the second conv.
"""
shortcut = l
l = Conv2D('conv1', l, ch_out, 1, stride=stride if stride_first else 1, nl=BNReLU)
l = AtrousConv2D('conv2', l, ch_out, kernel_shape=3, rate=dilation, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))
return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
def bottleneck_hdc(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
multi_grid=(1,2,4),
outputs_collections=None,
scope=None,
use_bounded_activations=False):
"""Hybrid Dilated Convolution Bottleneck.
Multi_Grid = (1,2,4)
See Understanding Convolution for Semantic Segmentation.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
    multi_grid: the multi-grid structure (per-conv dilation rate multipliers).
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
use_bounded_activations: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=tf.nn.relu6 if use_bounded_activations else None,
scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
rate=rate*multi_grid[0], scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate*multi_grid[1], scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
rate=rate*multi_grid[2], activation_fn=None, scope='conv3')
if use_bounded_activations:
# Use clip_by_value to simulate bandpass activation.
residual = tf.clip_by_value(residual, -6.0, 6.0)
output = tf.nn.relu6(shortcut + residual)
else:
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
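# Hedged illustration (not part of the original file): with the default
# multi_grid=(1, 2, 4), the three convolutions in bottleneck_hdc use dilation
# rates of rate*1, rate*2 and rate*4, which is the hybrid-dilated-convolution
# pattern the docstring refers to.
def _hdc_rates(rate=2, multi_grid=(1, 2, 4)):
    return [rate * g for g in multi_grid]   # rate=2 -> [2, 4, 8]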
def resnet_group_deeplab(l, name, block_func, features, count, stride, dilation, stride_first):
with tf.variable_scope(name):
for i in range(0, count):
with tf.variable_scope('block{}'.format(i)):
l = block_func(l, features, stride if i == 0 else 1, dilation, stride_first)
                # the end of each block needs an activation
l = tf.nn.relu(l)
return l
def resnet_backbone_deeplab(image, num_blocks, group_func, block_func, class_num, ASPP = False, SPK = False):
with argscope(Conv2D, nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_OUT')):
resnet_head = (LinearWrap(image)
.Conv2D('conv0', 64, 7, stride=2, nl=BNReLU)
.MaxPooling('pool0', shape=3, stride=2, padding='SAME')
.apply(group_func, 'group0', block_func, 64, num_blocks[0], 1, dilation=1, stride_first=False)
.apply(group_func, 'group1', block_func, 128, num_blocks[1], 2, dilation=1, stride_first=True)
.apply(group_func, 'group2', block_func, 256, num_blocks[2], 2, dilation=2, stride_first=True)
.apply(group_func, 'group3', block_func, 512, num_blocks[3], 1, dilation=4, stride_first=False)())
def aspp_branch(input, rate):
input = AtrousConv2D('aspp{}_conv'.format(rate), input, class_num, kernel_shape=3, rate=rate)
return input
if ASPP:
output = aspp_branch(resnet_head , 6) +aspp_branch(resnet_head, 12) +aspp_branch(resnet_head, 18)+aspp_branch(resnet_head, 24)
else:
output = aspp_branch(resnet_head, 6)
if SPK:
output = edge_conv(output,"spk",class_num)
output = tf.image.resize_bilinear(output, image.shape[1:3])
return output
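# Hedged note (not part of the original file): conv0 and pool0 each stride by 2
# and the four groups stride by 1, 2, 2 and 1, so the backbone output stride is
# 16; group2 and group3 trade further striding for dilation (rates 2 and 4),
# the DeepLabv3-style recipe this backbone follows.
def _backbone_output_stride(strides=(2, 2, 1, 2, 2, 1)):
    total = 1
    for s in strides:
        total *= s
    return total   # -> 16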
def resnet_group(l, name, block_func, features, count, stride):
with tf.variable_scope(name):
for i in range(0, count):
with tf.variable_scope('block{}'.format(i)):
l = block_func(l, features, stride if i == 0 else 1)
                # the end of each block needs an activation
l = tf.nn.relu(l)
return l
def resnet_backbone(image, num_blocks, group_func, block_func, class_num):
with argscope(Conv2D, nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_OUT')):
logits = (LinearWrap(image)
.Conv2D('conv0', 64, 7, stride=2, nl=BNReLU)
.MaxPooling('pool0', shape=3, stride=2, padding='SAME')
.apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
.apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
.apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
.apply(group_func, 'group3', block_func, 512, num_blocks[3], 2)())
output = resnet_basicblock(logits,class_num,stride=1)
output = tf.image.resize_bilinear(output, image.shape[1:3])
return output
| 39.605263 | 134 | 0.647641 |
| 6a0eea1eb027672b5bf45938904b4ffdd8cc3449 | 14,093 | py | Python | tools/mountsnoop.py | Zheaoli/bcc | b768015a594e6a715a6d016682ac4ee4873d2bdc | ["Apache-2.0"] | 1 | 2021-10-08T03:24:45.000Z | 2021-10-08T03:24:45.000Z | tools/mountsnoop.py | Zheaoli/bcc | b768015a594e6a715a6d016682ac4ee4873d2bdc | ["Apache-2.0"] | null | null | null | tools/mountsnoop.py | Zheaoli/bcc | b768015a594e6a715a6d016682ac4ee4873d2bdc | ["Apache-2.0"] | 1 | 2021-09-28T08:41:22.000Z | 2021-09-28T08:41:22.000Z |
#!/usr/bin/python
#
# mountsnoop    Trace mount() and umount() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: mountsnoop [-h]
#
# Copyright (c) 2016 Facebook, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 14-Oct-2016 Omar Sandoval Created this.
from __future__ import print_function
import argparse
import bcc
import ctypes
import errno
import functools
import sys
bpf_text = r"""
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
/*
* XXX: struct mnt_namespace is defined in fs/mount.h, which is private to the
* VFS and not installed in any kernel-devel packages. So, let's duplicate the
* important part of the definition. There are actually more members in the
* real struct, but we don't need them, and they're more likely to change.
*/
struct mnt_namespace {
// This field was removed in https://github.com/torvalds/linux/commit/1a7b8969e664d6af328f00fe6eb7aabd61a71d13
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)
atomic_t count;
#endif
struct ns_common ns;
};
/*
* XXX: this could really use first-class string support in BPF. target is a
* NUL-terminated path up to PATH_MAX in length. source and type are
* NUL-terminated strings up to PAGE_SIZE in length. data is a weird case: it's
* almost always a NUL-terminated string, but for some filesystems (e.g., older
* NFS variants), it's a binary structure with plenty of NUL bytes, so the
* kernel always copies up to PAGE_SIZE bytes, stopping when it hits a fault.
*
* The best we can do with the existing BPF helpers is to copy as much of each
* argument as we can. Our stack space is limited, and we need to leave some
* headroom for the rest of the function, so this should be a decent value.
*/
#define MAX_STR_LEN 412
enum event_type {
EVENT_MOUNT,
EVENT_MOUNT_SOURCE,
EVENT_MOUNT_TARGET,
EVENT_MOUNT_TYPE,
EVENT_MOUNT_DATA,
EVENT_MOUNT_RET,
EVENT_UMOUNT,
EVENT_UMOUNT_TARGET,
EVENT_UMOUNT_RET,
};
struct data_t {
enum event_type type;
pid_t pid, tgid;
union {
/* EVENT_MOUNT, EVENT_UMOUNT */
struct {
/* current->nsproxy->mnt_ns->ns.inum */
unsigned int mnt_ns;
char comm[TASK_COMM_LEN];
char pcomm[TASK_COMM_LEN];
pid_t ppid;
unsigned long flags;
} enter;
/*
* EVENT_MOUNT_SOURCE, EVENT_MOUNT_TARGET, EVENT_MOUNT_TYPE,
* EVENT_MOUNT_DATA, EVENT_UMOUNT_TARGET
*/
char str[MAX_STR_LEN];
/* EVENT_MOUNT_RET, EVENT_UMOUNT_RET */
int retval;
};
};
BPF_PERF_OUTPUT(events);
int syscall__mount(struct pt_regs *ctx, char __user *source,
char __user *target, char __user *type,
unsigned long flags, char __user *data)
{
struct data_t event = {};
struct task_struct *task;
struct nsproxy *nsproxy;
struct mnt_namespace *mnt_ns;
event.pid = bpf_get_current_pid_tgid() & 0xffffffff;
event.tgid = bpf_get_current_pid_tgid() >> 32;
event.type = EVENT_MOUNT;
bpf_get_current_comm(event.enter.comm, sizeof(event.enter.comm));
event.enter.flags = flags;
task = (struct task_struct *)bpf_get_current_task();
event.enter.ppid = task->real_parent->tgid;
bpf_probe_read_kernel_str(&event.enter.pcomm, TASK_COMM_LEN, task->real_parent->comm);
nsproxy = task->nsproxy;
mnt_ns = nsproxy->mnt_ns;
event.enter.mnt_ns = mnt_ns->ns.inum;
events.perf_submit(ctx, &event, sizeof(event));
event.type = EVENT_MOUNT_SOURCE;
__builtin_memset(event.str, 0, sizeof(event.str));
bpf_probe_read_user(event.str, sizeof(event.str), source);
events.perf_submit(ctx, &event, sizeof(event));
event.type = EVENT_MOUNT_TARGET;
__builtin_memset(event.str, 0, sizeof(event.str));
bpf_probe_read_user(event.str, sizeof(event.str), target);
events.perf_submit(ctx, &event, sizeof(event));
event.type = EVENT_MOUNT_TYPE;
__builtin_memset(event.str, 0, sizeof(event.str));
bpf_probe_read_user(event.str, sizeof(event.str), type);
events.perf_submit(ctx, &event, sizeof(event));
event.type = EVENT_MOUNT_DATA;
__builtin_memset(event.str, 0, sizeof(event.str));
bpf_probe_read_user(event.str, sizeof(event.str), data);
events.perf_submit(ctx, &event, sizeof(event));
return 0;
}
int do_ret_sys_mount(struct pt_regs *ctx)
{
struct data_t event = {};
event.type = EVENT_MOUNT_RET;
event.pid = bpf_get_current_pid_tgid() & 0xffffffff;
event.tgid = bpf_get_current_pid_tgid() >> 32;
event.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &event, sizeof(event));
return 0;
}
int syscall__umount(struct pt_regs *ctx, char __user *target, int flags)
{
struct data_t event = {};
struct task_struct *task;
struct nsproxy *nsproxy;
struct mnt_namespace *mnt_ns;
event.pid = bpf_get_current_pid_tgid() & 0xffffffff;
event.tgid = bpf_get_current_pid_tgid() >> 32;
event.type = EVENT_UMOUNT;
bpf_get_current_comm(event.enter.comm, sizeof(event.enter.comm));
event.enter.flags = flags;
task = (struct task_struct *)bpf_get_current_task();
event.enter.ppid = task->real_parent->tgid;
bpf_probe_read_kernel_str(&event.enter.pcomm, TASK_COMM_LEN, task->real_parent->comm);
nsproxy = task->nsproxy;
mnt_ns = nsproxy->mnt_ns;
event.enter.mnt_ns = mnt_ns->ns.inum;
events.perf_submit(ctx, &event, sizeof(event));
event.type = EVENT_UMOUNT_TARGET;
__builtin_memset(event.str, 0, sizeof(event.str));
bpf_probe_read_user(event.str, sizeof(event.str), target);
events.perf_submit(ctx, &event, sizeof(event));
return 0;
}
int do_ret_sys_umount(struct pt_regs *ctx)
{
struct data_t event = {};
event.type = EVENT_UMOUNT_RET;
event.pid = bpf_get_current_pid_tgid() & 0xffffffff;
event.tgid = bpf_get_current_pid_tgid() >> 32;
event.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &event, sizeof(event));
return 0;
}
"""
# sys/mount.h
MS_MGC_VAL = 0xc0ed0000
MS_MGC_MSK = 0xffff0000
MOUNT_FLAGS = [
('MS_RDONLY', 1),
('MS_NOSUID', 2),
('MS_NODEV', 4),
('MS_NOEXEC', 8),
('MS_SYNCHRONOUS', 16),
('MS_REMOUNT', 32),
('MS_MANDLOCK', 64),
('MS_DIRSYNC', 128),
('MS_NOATIME', 1024),
('MS_NODIRATIME', 2048),
('MS_BIND', 4096),
('MS_MOVE', 8192),
('MS_REC', 16384),
('MS_SILENT', 32768),
('MS_POSIXACL', 1 << 16),
('MS_UNBINDABLE', 1 << 17),
('MS_PRIVATE', 1 << 18),
('MS_SLAVE', 1 << 19),
('MS_SHARED', 1 << 20),
('MS_RELATIME', 1 << 21),
('MS_KERNMOUNT', 1 << 22),
('MS_I_VERSION', 1 << 23),
('MS_STRICTATIME', 1 << 24),
('MS_LAZYTIME', 1 << 25),
('MS_ACTIVE', 1 << 30),
('MS_NOUSER', 1 << 31),
]
UMOUNT_FLAGS = [
('MNT_FORCE', 1),
('MNT_DETACH', 2),
('MNT_EXPIRE', 4),
('UMOUNT_NOFOLLOW', 8),
]
TASK_COMM_LEN = 16 # linux/sched.h
MAX_STR_LEN = 412
class EventType(object):
EVENT_MOUNT = 0
EVENT_MOUNT_SOURCE = 1
EVENT_MOUNT_TARGET = 2
EVENT_MOUNT_TYPE = 3
EVENT_MOUNT_DATA = 4
EVENT_MOUNT_RET = 5
EVENT_UMOUNT = 6
EVENT_UMOUNT_TARGET = 7
EVENT_UMOUNT_RET = 8
class EnterData(ctypes.Structure):
_fields_ = [
('mnt_ns', ctypes.c_uint),
('comm', ctypes.c_char * TASK_COMM_LEN),
('pcomm', ctypes.c_char * TASK_COMM_LEN),
('ppid', ctypes.c_uint),
('flags', ctypes.c_ulong),
]
class DataUnion(ctypes.Union):
_fields_ = [
('enter', EnterData),
('str', ctypes.c_char * MAX_STR_LEN),
('retval', ctypes.c_int),
]
class Event(ctypes.Structure):
_fields_ = [
('type', ctypes.c_uint),
('pid', ctypes.c_uint),
('tgid', ctypes.c_uint),
('union', DataUnion),
]
def _decode_flags(flags, flag_list):
str_flags = []
for flag, bit in flag_list:
if flags & bit:
str_flags.append(flag)
flags &= ~bit
if flags or not str_flags:
str_flags.append('0x{:x}'.format(flags))
return str_flags
def decode_flags(flags, flag_list):
return '|'.join(_decode_flags(flags, flag_list))
def decode_mount_flags(flags):
str_flags = []
if flags & MS_MGC_MSK == MS_MGC_VAL:
flags &= ~MS_MGC_MSK
str_flags.append('MS_MGC_VAL')
str_flags.extend(_decode_flags(flags, MOUNT_FLAGS))
return '|'.join(str_flags)
def decode_umount_flags(flags):
return decode_flags(flags, UMOUNT_FLAGS)
def decode_errno(retval):
try:
return '-' + errno.errorcode[-retval]
except KeyError:
return str(retval)
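# Hedged example (not part of the original tool): round-tripping typical flag
# words and an errno through the decoders above. Never called automatically.
def _decode_demo():
    # MS_MGC_VAL plus MS_RDONLY (1) and MS_BIND (4096) decodes back to its names.
    assert decode_mount_flags(MS_MGC_VAL | 1 | 4096) == 'MS_MGC_VAL|MS_RDONLY|MS_BIND'
    assert decode_umount_flags(2) == 'MNT_DETACH'
    assert decode_errno(-2) == '-ENOENT'   # errno 2 is ENOENT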
_escape_chars = {
ord('\a'): '\\a',
ord('\b'): '\\b',
ord('\t'): '\\t',
ord('\n'): '\\n',
ord('\v'): '\\v',
ord('\f'): '\\f',
ord('\r'): '\\r',
ord('"'): '\\"',
ord('\\'): '\\\\',
}
def escape_character(c):
try:
return _escape_chars[c]
except KeyError:
if 0x20 <= c <= 0x7e:
return chr(c)
else:
return '\\x{:02x}'.format(c)
if sys.version_info.major < 3:
def decode_mount_string(s):
return '"{}"'.format(''.join(escape_character(ord(c)) for c in s))
else:
def decode_mount_string(s):
return '"{}"'.format(''.join(escape_character(c) for c in s))
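# Hedged example (not part of the original tool): the buffers captured by the
# BPF program arrive as bytes on Python 3, and non-printable characters are
# escaped C-style before printing. Never called automatically.
def _mount_string_demo():
    assert decode_mount_string(b'/tmp\tdir') == '"/tmp\\tdir"'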
def print_event(mounts, umounts, parent, cpu, data, size):
event = ctypes.cast(data, ctypes.POINTER(Event)).contents
try:
if event.type == EventType.EVENT_MOUNT:
mounts[event.pid] = {
'pid': event.pid,
'tgid': event.tgid,
'mnt_ns': event.union.enter.mnt_ns,
'comm': event.union.enter.comm,
'flags': event.union.enter.flags,
'ppid': event.union.enter.ppid,
'pcomm': event.union.enter.pcomm,
}
elif event.type == EventType.EVENT_MOUNT_SOURCE:
mounts[event.pid]['source'] = event.union.str
elif event.type == EventType.EVENT_MOUNT_TARGET:
mounts[event.pid]['target'] = event.union.str
elif event.type == EventType.EVENT_MOUNT_TYPE:
mounts[event.pid]['type'] = event.union.str
elif event.type == EventType.EVENT_MOUNT_DATA:
# XXX: data is not always a NUL-terminated string
mounts[event.pid]['data'] = event.union.str
elif event.type == EventType.EVENT_UMOUNT:
umounts[event.pid] = {
'pid': event.pid,
'tgid': event.tgid,
'mnt_ns': event.union.enter.mnt_ns,
'comm': event.union.enter.comm,
'flags': event.union.enter.flags,
'ppid': event.union.enter.ppid,
'pcomm': event.union.enter.pcomm,
}
elif event.type == EventType.EVENT_UMOUNT_TARGET:
umounts[event.pid]['target'] = event.union.str
elif (event.type == EventType.EVENT_MOUNT_RET or
event.type == EventType.EVENT_UMOUNT_RET):
if event.type == EventType.EVENT_MOUNT_RET:
syscall = mounts.pop(event.pid)
call = ('mount({source}, {target}, {type}, {flags}, {data}) ' +
'= {retval}').format(
source=decode_mount_string(syscall['source']),
target=decode_mount_string(syscall['target']),
type=decode_mount_string(syscall['type']),
flags=decode_mount_flags(syscall['flags']),
data=decode_mount_string(syscall['data']),
retval=decode_errno(event.union.retval))
else:
syscall = umounts.pop(event.pid)
call = 'umount({target}, {flags}) = {retval}'.format(
target=decode_mount_string(syscall['target']),
flags=decode_umount_flags(syscall['flags']),
retval=decode_errno(event.union.retval))
if parent:
print('{:16} {:<7} {:<7} {:16} {:<7} {:<11} {}'.format(
syscall['comm'].decode('utf-8', 'replace'), syscall['tgid'],
syscall['pid'], syscall['pcomm'].decode('utf-8', 'replace'),
syscall['ppid'], syscall['mnt_ns'], call))
else:
print('{:16} {:<7} {:<7} {:<11} {}'.format(
syscall['comm'].decode('utf-8', 'replace'), syscall['tgid'],
syscall['pid'], syscall['mnt_ns'], call))
except KeyError:
# This might happen if we lost an event.
pass
def main():
parser = argparse.ArgumentParser(
description='trace mount() and umount() syscalls'
)
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-P", "--parent_process", action="store_true",
help="also snoop the parent process")
args = parser.parse_args()
mounts = {}
umounts = {}
if args.ebpf:
print(bpf_text)
exit()
b = bcc.BPF(text=bpf_text)
mount_fnname = b.get_syscall_fnname("mount")
b.attach_kprobe(event=mount_fnname, fn_name="syscall__mount")
b.attach_kretprobe(event=mount_fnname, fn_name="do_ret_sys_mount")
umount_fnname = b.get_syscall_fnname("umount")
b.attach_kprobe(event=umount_fnname, fn_name="syscall__umount")
b.attach_kretprobe(event=umount_fnname, fn_name="do_ret_sys_umount")
b['events'].open_perf_buffer(
functools.partial(print_event, mounts, umounts, args.parent_process))
if args.parent_process:
print('{:16} {:<7} {:<7} {:16} {:<7} {:<11} {}'.format(
'COMM', 'PID', 'TID', 'PCOMM', 'PPID', 'MNT_NS', 'CALL'))
else:
print('{:16} {:<7} {:<7} {:<11} {}'.format(
'COMM', 'PID', 'TID', 'MNT_NS', 'CALL'))
while True:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
if __name__ == '__main__':
main()
| 31.110375 | 114 | 0.612574 |
| 9d61a3acc16b4f0294ed2ed03dc8158792924c76 | 1,033 | py | Python | test_delete.py | ArtemVavilov88/test2-Se-Python-14 | 522791b4fcde1d26b8e56244b10ebaf2e7146e57 | ["Apache-2.0"] | null | null | null | test_delete.py | ArtemVavilov88/test2-Se-Python-14 | 522791b4fcde1d26b8e56244b10ebaf2e7146e57 | ["Apache-2.0"] | null | null | null | test_delete.py | ArtemVavilov88/test2-Se-Python-14 | 522791b4fcde1d26b8e56244b10ebaf2e7146e57 | ["Apache-2.0"] | null | null | null |
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class php4dvd_delete_film(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(20)
def test_delete(self):
driver = self.driver
driver.get("http://localhost/php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
#just find the first element with class="nocover"
driver.find_element_by_class_name("nocover").click()
driver.find_element_by_link_text("Remove").click()
#accept the "Are you sure you want to remove this?" dialog
driver.switch_to_alert().accept()
def tearDown(self):
self.driver.quit()
if __name__ == "__main__":
unittest.main()
| 33.322581 | 66 | 0.689255 |
| 4f9f2758c672a44d3dba717ce32a64ebcbea6d24 | 256 | py | Python | old/iq-test.py | zerexei/codewars-solutions | 0e1e9c3ce28bb8926561d5dd504f650aca47ca96 | ["MIT"] | null | null | null | old/iq-test.py | zerexei/codewars-solutions | 0e1e9c3ce28bb8926561d5dd504f650aca47ca96 | ["MIT"] | null | null | null | old/iq-test.py | zerexei/codewars-solutions | 0e1e9c3ce28bb8926561d5dd504f650aca47ca96 | ["MIT"] | null | null | null |
def iq_test(numbers):
# convert values to boolean
e = [int(i) % 2 == 0 for i in numbers.split()]
    # exactly one even number -> return its 1-based position;
    # otherwise the outlier is the single odd number, so return that position
return e.index(True) + 1 if e.count(True) == 1 else e.index(False) + 1
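# Hedged example (not part of the kata solution): "2 4 7 8 10" contains a
# single odd number at 1-based position 3, so iq_test returns 3.
if __name__ == "__main__":
    assert iq_test("2 4 7 8 10") == 3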
| 51.2 | 76 | 0.644531 |
| 66f1185a3fa49e1ae57933f66abe5749fb646f91 | 11,318 | py | Python | rest_framework/filters.py | akx/django-rest-framework | c7bf99330e9fb8e44e082e86938155272c0d41e8 | ["BSD-2-Clause"] | null | null | null | rest_framework/filters.py | akx/django-rest-framework | c7bf99330e9fb8e44e082e86938155272c0d41e8 | ["BSD-2-Clause"] | null | null | null | rest_framework/filters.py | akx/django-rest-framework | c7bf99330e9fb8e44e082e86938155272c0d41e8 | ["BSD-2-Clause"] | 2 | 2016-09-06T16:12:47.000Z | 2018-10-10T15:33:58.000Z |
"""
Provides generic filtering backends that can be used to filter the results
returned by list views.
"""
from __future__ import unicode_literals
import operator
from functools import reduce
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.template import loader
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import (
crispy_forms, distinct, django_filters, guardian, template_render
)
from rest_framework.settings import api_settings
if 'crispy_forms' in settings.INSTALLED_APPS and crispy_forms and django_filters:
# If django-crispy-forms is installed, use it to get a bootstrap3 rendering
# of the DjangoFilterBackend controls when displayed as HTML.
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
class FilterSet(django_filters.FilterSet):
def __init__(self, *args, **kwargs):
super(FilterSet, self).__init__(*args, **kwargs)
for field in self.form.fields.values():
field.help_text = None
layout_components = list(self.form.fields.keys()) + [
Submit('', _('Submit'), css_class='btn-default'),
]
helper = FormHelper()
helper.form_method = 'GET'
helper.template_pack = 'bootstrap3'
helper.layout = Layout(*layout_components)
self.form.helper = helper
filter_template = 'rest_framework/filters/django_filter_crispyforms.html'
elif django_filters:
# If django-crispy-forms is not installed, use the standard
# 'form.as_p' rendering when DjangoFilterBackend is displayed as HTML.
class FilterSet(django_filters.FilterSet):
def __init__(self, *args, **kwargs):
super(FilterSet, self).__init__(*args, **kwargs)
for field in self.form.fields.values():
field.help_text = None
filter_template = 'rest_framework/filters/django_filter.html'
else:
FilterSet = None
filter_template = None
class BaseFilterBackend(object):
"""
A base class from which all filter backend classes should inherit.
"""
def filter_queryset(self, request, queryset, view):
"""
Return a filtered queryset.
"""
raise NotImplementedError(".filter_queryset() must be overridden.")
class DjangoFilterBackend(BaseFilterBackend):
"""
A filter backend that uses django-filter.
"""
default_filter_set = FilterSet
template = filter_template
def __init__(self):
assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'
def get_filter_class(self, view, queryset=None):
"""
Return the django-filters `FilterSet` used to filter the queryset.
"""
filter_class = getattr(view, 'filter_class', None)
filter_fields = getattr(view, 'filter_fields', None)
if filter_class:
filter_model = filter_class.Meta.model
assert issubclass(queryset.model, filter_model), \
'FilterSet model %s does not match queryset model %s' % \
(filter_model, queryset.model)
return filter_class
if filter_fields:
class AutoFilterSet(FilterSet):
class Meta:
model = queryset.model
fields = filter_fields
return AutoFilterSet
return None
def filter_queryset(self, request, queryset, view):
filter_class = self.get_filter_class(view, queryset)
if filter_class:
return filter_class(request.query_params, queryset=queryset).qs
return queryset
def to_html(self, request, queryset, view):
filter_class = self.get_filter_class(view, queryset)
if filter_class:
filter_instance = filter_class(request.query_params, queryset=queryset)
else:
filter_instance = None
context = {
'filter': filter_instance
}
template = loader.get_template(self.template)
return template_render(template, context)
class SearchFilter(BaseFilterBackend):
# The URL query parameter used for the search.
search_param = api_settings.SEARCH_PARAM
template = 'rest_framework/filters/search.html'
def get_search_terms(self, request):
"""
Search terms are set by a ?search=... query parameter,
and may be comma and/or whitespace delimited.
"""
params = request.query_params.get(self.search_param, '')
return params.replace(',', ' ').split()
def construct_search(self, field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
if field_name.startswith('$'):
return "%s__iregex" % field_name[1:]
else:
return "%s__icontains" % field_name
def filter_queryset(self, request, queryset, view):
search_fields = getattr(view, 'search_fields', None)
search_terms = self.get_search_terms(request)
if not search_fields or not search_terms:
return queryset
orm_lookups = [
self.construct_search(six.text_type(search_field))
for search_field in search_fields
]
base = queryset
for search_term in search_terms:
queries = [
models.Q(**{orm_lookup: search_term})
for orm_lookup in orm_lookups
]
queryset = queryset.filter(reduce(operator.or_, queries))
# Filtering against a many-to-many field requires us to
# call queryset.distinct() in order to avoid duplicate items
# in the resulting queryset.
return distinct(queryset, base)
def to_html(self, request, queryset, view):
if not getattr(view, 'search_fields', None):
return ''
term = self.get_search_terms(request)
term = term[0] if term else ''
context = {
'param': self.search_param,
'term': term
}
template = loader.get_template(self.template)
return template_render(template, context)
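# Hedged illustration (not part of this module): how the '^', '=', '@' and '$'
# prefixes handled by construct_search() translate into ORM lookups; the field
# names below are made up for the example. Never called automatically.
def _search_lookup_examples():
    sf = SearchFilter()
    return [sf.construct_search(f)
            for f in ('^email', '=username', '@body', '$slug', 'title')]
    # -> ['email__istartswith', 'username__iexact', 'body__search',
    #     'slug__iregex', 'title__icontains']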
class OrderingFilter(BaseFilterBackend):
# The URL query parameter used for the ordering.
ordering_param = api_settings.ORDERING_PARAM
ordering_fields = None
template = 'rest_framework/filters/ordering.html'
def get_ordering(self, request, queryset, view):
"""
Ordering is set by a comma delimited ?ordering=... query parameter.
The `ordering` query parameter can be overridden by setting
the `ordering_param` value on the OrderingFilter or by
specifying an `ORDERING_PARAM` value in the API settings.
"""
params = request.query_params.get(self.ordering_param)
if params:
fields = [param.strip() for param in params.split(',')]
ordering = self.remove_invalid_fields(queryset, fields, view)
if ordering:
return ordering
# No ordering was included, or all the ordering fields were invalid
return self.get_default_ordering(view)
def get_default_ordering(self, view):
ordering = getattr(view, 'ordering', None)
if isinstance(ordering, six.string_types):
return (ordering,)
return ordering
def get_valid_fields(self, queryset, view):
valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)
if valid_fields is None:
# Default to allowing filtering on serializer fields
            serializer_class = getattr(view, 'serializer_class', None)
if serializer_class is None:
msg = ("Cannot use %s on a view which does not have either a "
"'serializer_class' or 'ordering_fields' attribute.")
raise ImproperlyConfigured(msg % self.__class__.__name__)
valid_fields = [
(field.source or field_name, field.label)
for field_name, field in serializer_class().fields.items()
if not getattr(field, 'write_only', False) and not field.source == '*'
]
elif valid_fields == '__all__':
# View explicitly allows filtering on any model field
valid_fields = [
(field.name, getattr(field, 'label', field.name.title()))
for field in queryset.model._meta.fields
]
valid_fields += [
(key, key.title().split('__'))
for key in queryset.query.aggregates.keys()
]
else:
valid_fields = [
(item, item) if isinstance(item, six.string_types) else item
for item in valid_fields
]
return valid_fields
def remove_invalid_fields(self, queryset, fields, view):
valid_fields = [item[0] for item in self.get_valid_fields(queryset, view)]
return [term for term in fields if term.lstrip('-') in valid_fields]
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if ordering:
return queryset.order_by(*ordering)
return queryset
def get_template_context(self, request, queryset, view):
current = self.get_ordering(request, queryset, view)
current = None if current is None else current[0]
options = []
for key, label in self.get_valid_fields(queryset, view):
options.append((key, '%s - ascending' % label))
options.append(('-' + key, '%s - descending' % label))
return {
'request': request,
'current': current,
'param': self.ordering_param,
'options': options,
}
def to_html(self, request, queryset, view):
template = loader.get_template(self.template)
context = self.get_template_context(request, queryset, view)
return template_render(template, context)
class DjangoObjectPermissionsFilter(BaseFilterBackend):
"""
A filter backend that limits results to those where the requesting user
has read object level permissions.
"""
def __init__(self):
assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'
perm_format = '%(app_label)s.view_%(model_name)s'
def filter_queryset(self, request, queryset, view):
extra = {}
user = request.user
model_cls = queryset.model
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': model_cls._meta.model_name
}
permission = self.perm_format % kwargs
if guardian.VERSION >= (1, 3):
# Maintain behavior compatibility with versions prior to 1.3
extra = {'accept_global_perms': False}
else:
extra = {}
return guardian.shortcuts.get_objects_for_user(user, permission, queryset, **extra)
| 35.816456 | 100 | 0.63536 |
| 643ac33a9a3865655030c09a303accde0a104450 | 543 | py | Python | test/units/temperature/__init__.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | ["BSD-3-Clause"] | 4 | 2019-07-27T22:19:26.000Z | 2022-01-19T16:36:10.000Z | test/units/temperature/__init__.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | ["BSD-3-Clause"] | 3 | 2021-05-28T01:18:35.000Z | 2021-12-22T01:03:39.000Z | test/units/temperature/__init__.py | gnmerritt/pyqudt | 2579e824b2e002d2e27a40eae84f1b1449006487 | ["BSD-3-Clause"] | 1 | 2020-06-03T20:17:41.000Z | 2020-06-03T20:17:41.000Z |
################################################################################
#
# Copyright (C) 2019 Garrett Brown
# This file is part of pyqudt - https://github.com/eigendude/pyqudt
#
# pyqudt is derived from jQUDT
# Copyright (C) 2012-2013 Egon Willighagen <egonw@users.sf.net>
#
# SPDX-License-Identifier: BSD-3-Clause
# See the file LICENSE for more information.
#
################################################################################
from .celsius_test import CelsiusTest
from .fahrenheit_test import FahrenheitTest
| 33.9375 | 80 | 0.528545 |
| afdf26c5d4450424a2b1b77caae58e1cf2c8febd | 6,929 | py | Python | spikeextractors/extractors/klustaextractors/klustaextractors.py | TRuikes/spikeextractors | c3cbdaa18629aeb5ecb52f648e69b503a4f091d2 | ["MIT"] | null | null | null | spikeextractors/extractors/klustaextractors/klustaextractors.py | TRuikes/spikeextractors | c3cbdaa18629aeb5ecb52f648e69b503a4f091d2 | ["MIT"] | null | null | null | spikeextractors/extractors/klustaextractors/klustaextractors.py | TRuikes/spikeextractors | c3cbdaa18629aeb5ecb52f648e69b503a4f091d2 | ["MIT"] | null | null | null |
"""
kwik structure based on:
https://github.com/kwikteam/phy-doc/blob/master/docs/kwik-format.md
cluster_group defaults based on:
https://github.com/kwikteam/phy-doc/blob/master/docs/kwik-model.md
04/08/20
"""
from spikeextractors import SortingExtractor
from spikeextractors.extractors.bindatrecordingextractor import BinDatRecordingExtractor
from spikeextractors.extraction_tools import read_python, check_valid_unit_id
import numpy as np
from pathlib import Path
try:
import h5py
HAVE_KLSX = True
except ImportError:
HAVE_KLSX = False
# noinspection SpellCheckingInspection
class KlustaRecordingExtractor(BinDatRecordingExtractor):
extractor_name = 'KlustaRecordingExtractor'
has_default_locations = False
installed = HAVE_KLSX # check at class level if installed or not
is_writable = True
mode = 'folder'
installation_mesg = "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed
def __init__(self, folder_path):
assert HAVE_KLSX, "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n"
klustafolder = Path(folder_path).absolute()
config_file = [f for f in klustafolder.iterdir() if f.suffix == '.prm'][0]
dat_file = [f for f in klustafolder.iterdir() if f.suffix == '.dat'][0]
assert config_file.is_file() and dat_file.is_file(), "Not a valid klusta folder"
config = read_python(str(config_file))
sampling_frequency = config['traces']['sample_rate']
n_channels = config['traces']['n_channels']
dtype = config['traces']['dtype']
BinDatRecordingExtractor.__init__(self, file_path=dat_file, sampling_frequency=sampling_frequency, numchan=n_channels,
dtype=dtype)
self._kwargs = {'folder_path': str(Path(folder_path).absolute())}
# noinspection SpellCheckingInspection
class KlustaSortingExtractor(SortingExtractor):
extractor_name = 'KlustaSortingExtractor'
installed = HAVE_KLSX # check at class level if installed or not
installation_mesg = "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n" # error message when not installed
is_writable = True
mode = 'file_or_folder'
default_cluster_groups = {0: 'Noise', 1: 'MUA', 2: 'Good', 3: 'Unsorted'}
def __init__(self, file_or_folder_path, exclude_cluster_groups=None):
assert HAVE_KLSX, "To use the KlustaSortingExtractor install h5py: \n\n pip install h5py\n\n"
SortingExtractor.__init__(self)
kwik_file_or_folder = Path(file_or_folder_path)
kwikfile = None
klustafolder = None
if kwik_file_or_folder.is_file():
assert kwik_file_or_folder.suffix == '.kwik', "Not a '.kwik' file"
kwikfile = Path(kwik_file_or_folder).absolute()
klustafolder = kwikfile.parent
elif kwik_file_or_folder.is_dir():
klustafolder = kwik_file_or_folder
kwikfiles = [f for f in kwik_file_or_folder.iterdir() if f.suffix == '.kwik']
if len(kwikfiles) == 1:
kwikfile = kwikfiles[0]
assert kwikfile is not None, "Could not load '.kwik' file"
try:
config_file = [f for f in klustafolder.iterdir() if f.suffix == '.prm'][0]
config = read_python(str(config_file))
sampling_frequency = config['traces']['sample_rate']
self._sampling_frequency = sampling_frequency
except Exception as e:
print("Could not load sampling frequency info")
kf_reader = h5py.File(kwikfile, 'r')
self._spiketrains = []
self._unit_ids = []
unique_units = []
klusta_units = []
cluster_groups_name = []
groups = []
unit = 0
cs_to_exclude = []
valid_group_names = [i[1].lower() for i in self.default_cluster_groups.items()]
if exclude_cluster_groups is not None:
assert isinstance(exclude_cluster_groups, list), 'exclude_cluster_groups should be a list'
for ec in exclude_cluster_groups:
assert ec in valid_group_names, f'select exclude names out of: {valid_group_names}'
cs_to_exclude.append(ec.lower())
for channel_group in kf_reader.get('/channel_groups'):
chan_cluster_id_arr = kf_reader.get(f'/channel_groups/{channel_group}/spikes/clusters/main')[()]
chan_cluster_times_arr = kf_reader.get(f'/channel_groups/{channel_group}/spikes/time_samples')[()]
            chan_cluster_ids = np.unique(chan_cluster_id_arr)
            # If clusters were merged in the GUI, the original ids are still in
            # the kwik tree, but only the merged ids appear in this array.
for cluster_id in chan_cluster_ids:
cluster_frame_idx = np.nonzero(chan_cluster_id_arr == cluster_id) # the [()] is a h5py thing
st = chan_cluster_times_arr[cluster_frame_idx]
assert st.shape[0] > 0, 'no spikes in cluster'
cluster_group = kf_reader.get(f'/channel_groups/{channel_group}/clusters/main/{cluster_id}').attrs['cluster_group']
                assert cluster_group in self.default_cluster_groups.keys(), \
                    f'cluster_group {cluster_group} not in default_cluster_groups'
cluster_group_name = self.default_cluster_groups[cluster_group]
if cluster_group_name.lower() in cs_to_exclude:
continue
self._spiketrains.append(st)
klusta_units.append(int(cluster_id))
unique_units.append(unit)
unit += 1
groups.append(int(channel_group))
cluster_groups_name.append(cluster_group_name)
if len(np.unique(klusta_units)) == len(np.unique(unique_units)):
self._unit_ids = klusta_units
else:
print('Klusta units are not unique! Using unique unit ids')
self._unit_ids = unique_units
for i, u in enumerate(self._unit_ids):
self.set_unit_property(u, 'group', groups[i])
self.set_unit_property(u, 'quality', cluster_groups_name[i].lower())
self._kwargs = {'file_or_folder_path': str(Path(file_or_folder_path).absolute())}
def get_unit_ids(self):
return list(self._unit_ids)
@check_valid_unit_id
def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = np.Inf
times = self._spiketrains[self.get_unit_ids().index(unit_id)]
inds = np.where((start_frame <= times) & (times < end_frame))
return times[inds]
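# Hedged usage sketch (not part of the original module): reading a klusta
# output folder and pulling one spike train per unit. The folder path is a
# placeholder and must contain the .prm, .dat and .kwik files described above.
def _klusta_usage_sketch(folder='./klusta_output'):
    sorting = KlustaSortingExtractor(folder, exclude_cluster_groups=['noise'])
    return {uid: sorting.get_unit_spike_train(uid, start_frame=0, end_frame=30000)
            for uid in sorting.get_unit_ids()}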
| 44.993506 | 135 | 0.654784 |
| 8ffda39be987e3d956d6e216ca33cd6075fd1076 | 2,246 | py | Python | src/sst/elements/memHierarchy/tests/goblinCustomCmd-2.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | ["BSD-3-Clause"] | null | null | null | src/sst/elements/memHierarchy/tests/goblinCustomCmd-2.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | ["BSD-3-Clause"] | null | null | null | src/sst/elements/memHierarchy/tests/goblinCustomCmd-2.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | ["BSD-3-Clause"] | null | null | null |
import sst
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")
# Define the simulation components
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
"verbose" : 0,
"generator" : "miranda.STREAMBenchGeneratorCustomCmd",
"generatorParams.verbose" : 0,
"generatorParams.startat" : 3,
"generatorParams.count" : 500000,
"generatorParams.max_address" : 512000,
"generatorParams.read_cmd" : 20,
"printStats" : 1,
})
# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
"access_latency_cycles" : "2",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MESI",
"associativity" : "4",
"cache_line_size" : "64",
"prefetcher" : "cassini.StridePrefetcher",
"debug" : "1",
"L1" : "1",
"cache_size" : "2KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_memory = sst.Component("memory", "memHierarchy.CoherentMemController")
comp_memory.addParams({
"coherence_protocol" : "MESI",
"backend.access_time" : "1000 ns",
"backend.mem_size" : "512MiB",
"clock" : "1GHz",
"customCmdHandler" : "memHierarchy.defCustomCmdHandler",
"backendConvertor" : "memHierarchy.extMemBackendConvertor",
"backend" : "memHierarchy.goblinHMCSim",
"backend.verbose" : "0",
"backend.trace_banks" : "1",
"backend.trace_queue" : "1",
"backend.trace_cmds" : "1",
"backend.trace_latency" : "1",
"backend.trace_stalls" : "1",
"backend.cmd_map" : "[CUSTOM:20:64:RD64]"
})
# Define the simulation links
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
| 32.085714 | 109 | 0.689671 |
| b9dd38ec3603db18300d2677f7770463bd6aa745 | 13,455 | py | Python | samples/snippets/subscriber_test.py | cclauss/python-pubsub | 4080b0ad65320965b7e6460566d39595d00674cb | ["Apache-2.0"] | null | null | null | samples/snippets/subscriber_test.py | cclauss/python-pubsub | 4080b0ad65320965b7e6460566d39595d00674cb | ["Apache-2.0"] | null | null | null | samples/snippets/subscriber_test.py | cclauss/python-pubsub | 4080b0ad65320965b7e6460566d39595d00674cb | ["Apache-2.0"] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import uuid
import backoff
from flaky import flaky
from google.api_core.exceptions import NotFound
from google.api_core.exceptions import Unknown
from google.cloud import pubsub_v1
import pytest
import subscriber
UUID = uuid.uuid4().hex
PY_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
TOPIC = f"subscription-test-topic-{PY_VERSION}-{UUID}"
DEAD_LETTER_TOPIC = f"subscription-test-dead-letter-topic-{PY_VERSION}-{UUID}"
SUBSCRIPTION_ADMIN = f"subscription-test-subscription-admin-{PY_VERSION}-{UUID}"
SUBSCRIPTION_ASYNC = f"subscription-test-subscription-async-{PY_VERSION}-{UUID}"
SUBSCRIPTION_SYNC = f"subscription-test-subscription-sync-{PY_VERSION}-{UUID}"
SUBSCRIPTION_DLQ = f"subscription-test-subscription-dlq-{PY_VERSION}-{UUID}"
ENDPOINT = f"https://{PROJECT_ID}.appspot.com/push"
NEW_ENDPOINT = f"https://{PROJECT_ID}.appspot.com/push2"
DEFAULT_MAX_DELIVERY_ATTEMPTS = 5
UPDATED_MAX_DELIVERY_ATTEMPTS = 20
@pytest.fixture(scope="module")
def publisher_client():
yield pubsub_v1.PublisherClient()
@pytest.fixture(scope="module")
def topic(publisher_client):
topic_path = publisher_client.topic_path(PROJECT_ID, TOPIC)
try:
topic = publisher_client.get_topic(request={"topic": topic_path})
except: # noqa
topic = publisher_client.create_topic(request={"name": topic_path})
yield topic.name
publisher_client.delete_topic(request={"topic": topic.name})
@pytest.fixture(scope="module")
def dead_letter_topic(publisher_client):
topic_path = publisher_client.topic_path(PROJECT_ID, DEAD_LETTER_TOPIC)
try:
dead_letter_topic = publisher_client.get_topic(request={"topic": topic_path})
except NotFound:
dead_letter_topic = publisher_client.create_topic(request={"name": topic_path})
yield dead_letter_topic.name
publisher_client.delete_topic(request={"topic": dead_letter_topic.name})
@pytest.fixture(scope="module")
def subscriber_client():
subscriber_client = pubsub_v1.SubscriberClient()
yield subscriber_client
subscriber_client.close()
@pytest.fixture(scope="module")
def subscription_admin(subscriber_client, topic):
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_ADMIN
)
try:
subscription = subscriber_client.get_subscription(
request={"subscription": subscription_path}
)
except NotFound:
subscription = subscriber_client.create_subscription(
request={"name": subscription_path, "topic": topic}
)
yield subscription.name
@pytest.fixture(scope="module")
def subscription_sync(subscriber_client, topic):
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_SYNC
)
try:
subscription = subscriber_client.get_subscription(
request={"subscription": subscription_path}
)
except NotFound:
subscription = subscriber_client.create_subscription(
request={"name": subscription_path, "topic": topic}
)
yield subscription.name
@backoff.on_exception(backoff.expo, Unknown, max_time=300)
def delete_subscription():
try:
subscriber_client.delete_subscription(request={"subscription": subscription.name})
except NotFound:
print("When Unknown error happens, the server might have"
" successfully deleted the subscription under the cover, so"
" we ignore NotFound")
delete_subscription()
@pytest.fixture(scope="module")
def subscription_async(subscriber_client, topic):
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_ASYNC
)
try:
subscription = subscriber_client.get_subscription(
request={"subscription": subscription_path}
)
except NotFound:
subscription = subscriber_client.create_subscription(
request={"name": subscription_path, "topic": topic}
)
yield subscription.name
subscriber_client.delete_subscription(request={"subscription": subscription.name})
@pytest.fixture(scope="module")
def subscription_dlq(subscriber_client, topic, dead_letter_topic):
from google.cloud.pubsub_v1.types import DeadLetterPolicy
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_DLQ
)
try:
subscription = subscriber_client.get_subscription(
request={"subscription": subscription_path}
)
except NotFound:
request = {
"name": subscription_path,
"topic": topic,
"dead_letter_policy": DeadLetterPolicy(
dead_letter_topic=dead_letter_topic, max_delivery_attempts=10
),
}
subscription = subscriber_client.create_subscription(request)
yield subscription.name
subscriber_client.delete_subscription(request={"subscription": subscription.name})
def _publish_messages(publisher_client, topic, message_num=5, **attrs):
for n in range(message_num):
data = f"message {n}".encode("utf-8")
publish_future = publisher_client.publish(topic, data, **attrs)
publish_future.result()
def test_list_in_topic(subscription_admin, capsys):
@backoff.on_exception(backoff.expo, AssertionError, max_time=60)
def eventually_consistent_test():
subscriber.list_subscriptions_in_topic(PROJECT_ID, TOPIC)
out, _ = capsys.readouterr()
assert subscription_admin in out
eventually_consistent_test()
def test_list_in_project(subscription_admin, capsys):
@backoff.on_exception(backoff.expo, AssertionError, max_time=60)
def eventually_consistent_test():
subscriber.list_subscriptions_in_project(PROJECT_ID)
out, _ = capsys.readouterr()
assert subscription_admin in out
eventually_consistent_test()
def test_create(subscriber_client, subscription_admin, capsys):
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_ADMIN
)
try:
subscriber_client.delete_subscription(
request={"subscription": subscription_path}
)
except NotFound:
pass
subscriber.create_subscription(PROJECT_ID, TOPIC, SUBSCRIPTION_ADMIN)
out, _ = capsys.readouterr()
assert f"{subscription_admin}" in out
def test_create_subscription_with_dead_letter_policy(
subscriber_client, subscription_dlq, dead_letter_topic, capsys
):
try:
subscriber_client.delete_subscription(
request={"subscription": subscription_dlq}
)
except NotFound:
pass
subscriber.create_subscription_with_dead_letter_topic(
PROJECT_ID, TOPIC, SUBSCRIPTION_DLQ, DEAD_LETTER_TOPIC
)
out, _ = capsys.readouterr()
assert f"Subscription created: {subscription_dlq}" in out
assert f"It will forward dead letter messages to: {dead_letter_topic}" in out
assert f"After {DEFAULT_MAX_DELIVERY_ATTEMPTS} delivery attempts." in out
def test_receive_with_delivery_attempts(
publisher_client, topic, dead_letter_topic, subscription_dlq, capsys
):
# The dlq subscription raises 404 before it's ready.
# We keep retrying up to 10 minutes for mitigating the flakiness.
@backoff.on_exception(backoff.expo, (Unknown, NotFound), max_time=600)
def run_sample():
_publish_messages(publisher_client, topic)
subscriber.receive_messages_with_delivery_attempts(PROJECT_ID, SUBSCRIPTION_DLQ, 90)
run_sample()
out, _ = capsys.readouterr()
assert f"Listening for messages on {subscription_dlq}.." in out
assert "With delivery attempts: " in out
def test_update_dead_letter_policy(subscription_dlq, dead_letter_topic, capsys):
_ = subscriber.update_subscription_with_dead_letter_policy(
PROJECT_ID,
TOPIC,
SUBSCRIPTION_DLQ,
DEAD_LETTER_TOPIC,
UPDATED_MAX_DELIVERY_ATTEMPTS,
)
out, _ = capsys.readouterr()
assert dead_letter_topic in out
assert subscription_dlq in out
assert f"max_delivery_attempts: {UPDATED_MAX_DELIVERY_ATTEMPTS}" in out
@flaky(max_runs=3, min_passes=1)
def test_remove_dead_letter_policy(subscription_dlq, capsys):
subscription_after_update = subscriber.remove_dead_letter_policy(
PROJECT_ID, TOPIC, SUBSCRIPTION_DLQ
)
out, _ = capsys.readouterr()
assert subscription_dlq in out
assert subscription_after_update.dead_letter_policy.dead_letter_topic == ""
def test_create_subscription_with_ordering(
subscriber_client, subscription_admin, capsys
):
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_ADMIN
)
try:
subscriber_client.delete_subscription(
request={"subscription": subscription_path}
)
except NotFound:
pass
subscriber.create_subscription_with_ordering(PROJECT_ID, TOPIC, SUBSCRIPTION_ADMIN)
out, _ = capsys.readouterr()
assert "Created subscription with ordering" in out
assert f"{subscription_admin}" in out
assert "enable_message_ordering: true" in out
def test_create_push(subscriber_client, subscription_admin, capsys):
# The scope of `subscription_path` is limited to this function.
subscription_path = subscriber_client.subscription_path(
PROJECT_ID, SUBSCRIPTION_ADMIN
)
try:
subscriber_client.delete_subscription(
request={"subscription": subscription_path}
)
except NotFound:
pass
subscriber.create_push_subscription(PROJECT_ID, TOPIC, SUBSCRIPTION_ADMIN, ENDPOINT)
out, _ = capsys.readouterr()
assert f"{subscription_admin}" in out
def test_update(subscription_admin, capsys):
subscriber.update_push_subscription(
PROJECT_ID, TOPIC, SUBSCRIPTION_ADMIN, NEW_ENDPOINT
)
out, _ = capsys.readouterr()
assert "Subscription updated" in out
assert f"{subscription_admin}" in out
def test_delete(subscriber_client, subscription_admin):
subscriber.delete_subscription(PROJECT_ID, SUBSCRIPTION_ADMIN)
@backoff.on_exception(backoff.expo, AssertionError, max_time=60)
def eventually_consistent_test():
with pytest.raises(Exception):
subscriber_client.get_subscription(
request={"subscription": subscription_admin}
)
eventually_consistent_test()
def test_receive(publisher_client, topic, subscription_async, capsys):
_publish_messages(publisher_client, topic)
subscriber.receive_messages(PROJECT_ID, SUBSCRIPTION_ASYNC, 5)
out, _ = capsys.readouterr()
assert "Listening" in out
assert subscription_async in out
assert "message" in out
def test_receive_with_custom_attributes(
publisher_client, topic, subscription_async, capsys
):
_publish_messages(publisher_client, topic, origin="python-sample")
subscriber.receive_messages_with_custom_attributes(
PROJECT_ID, SUBSCRIPTION_ASYNC, 5
)
out, _ = capsys.readouterr()
assert subscription_async in out
assert "message" in out
assert "origin" in out
assert "python-sample" in out
def test_receive_with_flow_control(publisher_client, topic, subscription_async, capsys):
_publish_messages(publisher_client, topic)
subscriber.receive_messages_with_flow_control(PROJECT_ID, SUBSCRIPTION_ASYNC, 5)
out, _ = capsys.readouterr()
assert "Listening" in out
assert subscription_async in out
assert "message" in out
def test_listen_for_errors(publisher_client, topic, subscription_async, capsys):
_publish_messages(publisher_client, topic)
subscriber.listen_for_errors(PROJECT_ID, SUBSCRIPTION_ASYNC, 5)
out, _ = capsys.readouterr()
assert subscription_async in out
assert "threw an exception" in out
def test_receive_synchronously(publisher_client, topic, subscription_sync, capsys):
_publish_messages(publisher_client, topic)
subscriber.synchronous_pull(PROJECT_ID, SUBSCRIPTION_SYNC)
out, _ = capsys.readouterr()
assert "Received" in out
assert f"{subscription_sync}" in out
def test_receive_synchronously_with_lease(
publisher_client, topic, subscription_sync, capsys
):
@backoff.on_exception(backoff.expo, Unknown, max_time=300)
def run_sample():
_publish_messages(publisher_client, topic, message_num=3)
subscriber.synchronous_pull_with_lease_management(PROJECT_ID, SUBSCRIPTION_SYNC)
run_sample()
out, _ = capsys.readouterr()
    # Sometimes the subscriber only gets 1 or 2 messages and the test fails;
    # we consider those cases acceptable and let the test pass.
assert "Received and acknowledged" in out
assert f"messages from {subscription_sync}." in out
| 31.584507
| 94
| 0.734077
|
4c61fe2f7645955683efee1b77ae9b5ab3f768b3
| 24,085
|
py
|
Python
|
corehq/messaging/scheduling/scheduling_partitioned/models.py
|
akshita-sh/commcare-hq
|
236b25ff9cd1a634fc5586e0f9e29473c8ac71b8
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/messaging/scheduling/scheduling_partitioned/models.py
|
akshita-sh/commcare-hq
|
236b25ff9cd1a634fc5586e0f9e29473c8ac71b8
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/messaging/scheduling/scheduling_partitioned/models.py
|
akshita-sh/commcare-hq
|
236b25ff9cd1a634fc5586e0f9e29473c8ac71b8
|
[
"BSD-3-Clause"
] | null | null | null |
import pytz
import sys
import uuid
from corehq.apps.casegroups.models import CommCareCaseGroup
from corehq.apps.groups.models import Group
from corehq.apps.locations.dbaccessors import get_all_users_by_location
from corehq.apps.locations.models import SQLLocation
from corehq.apps.sms.models import MessagingEvent
from corehq.apps.users.cases import get_owner_id, get_wrapped_owner
from corehq.apps.users.models import CommCareUser, WebUser, CouchUser
from corehq.form_processor.abstract_models import DEFAULT_PARENT_IDENTIFIER
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling import util
from corehq.messaging.scheduling.exceptions import UnknownRecipientType
from corehq.messaging.scheduling.models import AlertSchedule, TimedSchedule, IVRSurveyContent, SMSCallbackContent
from corehq.sql_db.models import PartitionedModel
from corehq.util.timezones.conversions import ServerTime, UserTime
from corehq.util.timezones.utils import get_timezone_for_domain, coerce_timezone_value
from couchdbkit.exceptions import ResourceNotFound
from datetime import timedelta, date, datetime, time
from memoized import memoized
from dimagi.utils.couch import get_redis_lock
from dimagi.utils.modules import to_function
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
# The number of minutes after which a schedule instance is considered stale.
# Stale instances are just fast-forwarded according to their schedule and
# no content is sent.
STALE_SCHEDULE_INSTANCE_INTERVAL = 2 * 24 * 60
class ScheduleInstance(PartitionedModel):
schedule_instance_id = models.UUIDField(primary_key=True, default=uuid.uuid4)
domain = models.CharField(max_length=126)
recipient_type = models.CharField(max_length=126)
recipient_id = models.CharField(max_length=126, null=True)
current_event_num = models.IntegerField()
schedule_iteration_num = models.IntegerField()
next_event_due = models.DateTimeField()
active = models.BooleanField()
RECIPIENT_TYPE_CASE = 'CommCareCase'
RECIPIENT_TYPE_MOBILE_WORKER = 'CommCareUser'
RECIPIENT_TYPE_WEB_USER = 'WebUser'
RECIPIENT_TYPE_CASE_GROUP = 'CommCareCaseGroup'
RECIPIENT_TYPE_USER_GROUP = 'Group'
RECIPIENT_TYPE_LOCATION = 'Location'
class Meta(object):
abstract = True
index_together = (
# index for equality comparisons on the leading columns
('active', 'next_event_due'),
('domain', 'active', 'next_event_due'),
)
def get_today_for_recipient(self, schedule):
return ServerTime(util.utcnow()).user_time(self.get_timezone(schedule)).done().date()
@property
@memoized
def recipient(self):
if self.recipient_type == self.RECIPIENT_TYPE_CASE:
try:
case = CaseAccessors(self.domain).get_case(self.recipient_id)
except CaseNotFound:
return None
if case.domain != self.domain:
return None
return case
elif self.recipient_type == self.RECIPIENT_TYPE_MOBILE_WORKER:
user = CouchUser.get_by_user_id(self.recipient_id, domain=self.domain)
if not isinstance(user, CommCareUser):
return None
return user
elif self.recipient_type == self.RECIPIENT_TYPE_WEB_USER:
user = CouchUser.get_by_user_id(self.recipient_id, domain=self.domain)
if not isinstance(user, WebUser):
return None
return user
elif self.recipient_type == self.RECIPIENT_TYPE_CASE_GROUP:
try:
group = CommCareCaseGroup.get(self.recipient_id)
except ResourceNotFound:
return None
if group.domain != self.domain:
return None
return group
elif self.recipient_type == self.RECIPIENT_TYPE_USER_GROUP:
try:
group = Group.get(self.recipient_id)
except ResourceNotFound:
return None
if group.domain != self.domain:
return None
return group
elif self.recipient_type == self.RECIPIENT_TYPE_LOCATION:
location = SQLLocation.by_location_id(self.recipient_id)
if location is None:
return None
if location.domain != self.domain:
return None
return location
else:
raise UnknownRecipientType(self.recipient_type)
@staticmethod
def recipient_is_an_individual_contact(recipient):
return (
isinstance(recipient, (CommCareUser, WebUser)) or
is_commcarecase(recipient)
)
@property
@memoized
def domain_timezone(self):
try:
return get_timezone_for_domain(self.domain)
except ValidationError:
return pytz.UTC
def get_timezone(self, schedule):
if self.recipient_is_an_individual_contact(self.recipient):
try:
timezone_str = self.recipient.get_time_zone()
if timezone_str:
return coerce_timezone_value(timezone_str)
except ValidationError:
pass
if schedule.use_utc_as_default_timezone:
# See note on Schedule.use_utc_as_default_timezone.
# When use_utc_as_default_timezone is enabled and the contact has
# no time zone configured, use UTC.
return pytz.UTC
else:
return self.domain_timezone
@classmethod
def create_for_recipient(cls, schedule, recipient_type, recipient_id, start_date=None,
move_to_next_event_not_in_the_past=True, **additional_fields):
obj = cls(
domain=schedule.domain,
recipient_type=recipient_type,
recipient_id=recipient_id,
current_event_num=0,
schedule_iteration_num=1,
active=True,
**additional_fields
)
obj.schedule = schedule
schedule.set_first_event_due_timestamp(obj, start_date)
if move_to_next_event_not_in_the_past:
schedule.move_to_next_event_not_in_the_past(obj)
return obj
@staticmethod
def expand_group(group):
if not isinstance(group, Group):
raise TypeError("Expected Group")
for user in group.get_users(is_active=True, only_commcare=False):
yield user
@staticmethod
def expand_location_ids(domain, location_ids):
user_ids = set()
for location_id in location_ids:
for user in get_all_users_by_location(domain, location_id):
if user.is_active and user.get_id not in user_ids:
user_ids.add(user.get_id)
yield user
def _expand_recipient(self, recipient):
if recipient is None:
return
elif self.recipient_is_an_individual_contact(recipient):
yield recipient
elif isinstance(recipient, CommCareCaseGroup):
case_group = recipient
for case in case_group.get_cases():
yield case
elif isinstance(recipient, Group):
for user in self.expand_group(recipient):
yield user
elif isinstance(recipient, SQLLocation):
location = recipient
if (
self.recipient_type == self.RECIPIENT_TYPE_LOCATION and
self.memoized_schedule.include_descendant_locations
):
# Only include descendant locations when the recipient_type
# is RECIPIENT_TYPE_LOCATION. This is because we only do this
# for locations the user selected in the UI, and not for
# locations that happen to get here because they are a case
# owner, for example.
qs = location.get_descendants(include_self=True).filter(is_archived=False)
# We also only apply the location_type_filter when the recipient_type
# is RECIPIENT_TYPE_LOCATION for the same reason.
if self.memoized_schedule.location_type_filter:
qs = qs.filter(location_type_id__in=self.memoized_schedule.location_type_filter)
location_ids = qs.location_ids()
else:
location_ids = [location.location_id]
for user in self.expand_location_ids(self.domain, location_ids):
yield user
else:
raise UnknownRecipientType(recipient.__class__.__name__)
def convert_to_set(self, value):
if isinstance(value, (list, tuple)):
return set(value)
return set([value])
def passes_user_data_filter(self, contact):
if not isinstance(contact, CouchUser):
return True
if not self.memoized_schedule.user_data_filter:
return True
for key, value in self.memoized_schedule.user_data_filter.items():
if key not in contact.user_data:
return False
allowed_values_set = self.convert_to_set(value)
actual_values_set = self.convert_to_set(contact.user_data[key])
if actual_values_set.isdisjoint(allowed_values_set):
return False
return True
def expand_recipients(self):
"""
Can be used as a generator to iterate over all individual contacts who
are the recipients of this ScheduleInstance.
"""
recipient_list = self.recipient
if not isinstance(recipient_list, list):
recipient_list = [recipient_list]
for member in recipient_list:
for contact in self._expand_recipient(member):
if self.passes_user_data_filter(contact):
yield contact
def get_content_send_lock(self, recipient):
if is_commcarecase(recipient):
doc_type = 'CommCareCase'
doc_id = recipient.case_id
else:
doc_type = recipient.doc_type
doc_id = recipient.get_id
key = "send-content-for-%s-%s-%s-%s-%s" % (
self.__class__.__name__,
self.schedule_instance_id.hex,
self.next_event_due.strftime('%Y-%m-%d %H:%M:%S'),
doc_type,
doc_id,
)
return get_redis_lock(
key,
timeout=STALE_SCHEDULE_INSTANCE_INTERVAL * 60,
name="send_content_for_%s" % type(self).__name__,
track_unreleased=False,
)
def send_current_event_content_to_recipients(self):
content = self.memoized_schedule.get_current_event_content(self)
if isinstance(content, (IVRSurveyContent, SMSCallbackContent)):
raise TypeError(
"IVR and Callback use cases are no longer supported. "
"How did this schedule instance end up as active?"
)
if isinstance(self, CaseScheduleInstanceMixin):
content.set_context(case=self.case, schedule_instance=self)
else:
content.set_context(schedule_instance=self)
logged_event = MessagingEvent.create_from_schedule_instance(self, content)
recipient_count = 0
for recipient in self.expand_recipients():
recipient_count += 1
# The framework will retry sending a non-processed schedule instance
# once every hour.
# If we are processing a long list of recipients here and an error
# occurs half-way through, we don't want to reprocess the entire list
# of recipients again when the framework retries it an hour later.
# So we use a non-blocking lock tied to the event due time and recipient
# to make sure that we don't try resending the same content to the same
# recipient more than once in the event of a retry.
# If we succeed in sending the content, we don't release the lock so
# that it won't retry later. If we fail in sending the content, we release
# the lock so that it will retry later.
lock = self.get_content_send_lock(recipient)
if lock.acquire(blocking=False):
try:
content.send(recipient, logged_event)
except:
error = sys.exc_info()[1]
# Release the lock if an error happened so that we can try sending
# to this recipient again later.
lock.release()
logged_event.error(
MessagingEvent.ERROR_INTERNAL_SERVER_ERROR,
additional_error_text=str(error),
)
raise
# Update the MessagingEvent for reporting
if recipient_count == 0:
logged_event.error(MessagingEvent.ERROR_NO_RECIPIENT)
else:
logged_event.completed()
@property
def is_stale(self):
return (util.utcnow() - self.next_event_due) > timedelta(minutes=STALE_SCHEDULE_INSTANCE_INTERVAL)
def handle_current_event(self):
if not self.is_stale:
self.send_current_event_content_to_recipients()
# As a precaution, always explicitly move to the next event after processing the current
# event to prevent ever getting stuck on the current event.
self.memoized_schedule.move_to_next_event(self)
self.memoized_schedule.move_to_next_event_not_in_the_past(self)
@property
def schedule(self):
raise NotImplementedError()
@schedule.setter
def schedule(self, value):
raise NotImplementedError()
@property
@memoized
def memoized_schedule(self):
"""
This is named with a memoized_ prefix to be clear that it should only be used
when the schedule is not changing.
"""
return self.schedule
def additional_deactivation_condition_reached(self):
"""
Subclasses can override this to provide additional checks under
which a ScheduleInstance should be deactivated, which will be checked
when the ScheduleInstances are being refreshed as well as right before
and after processing them, through check_active_flag_against_schedule().
"""
return False
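    # A concrete override lives in CaseScheduleInstanceMixin below, which
    # deactivates the instance once a stop date stored on the case has passed.
    # A minimal hypothetical override would look like:
    #
    #     def additional_deactivation_condition_reached(self):
    #         return some_domain_specific_check(self.domain)
    #
    # where `some_domain_specific_check` is illustrative, not part of this module.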
def should_be_active(self):
return self.memoized_schedule.active and not self.additional_deactivation_condition_reached()
def check_active_flag_against_schedule(self):
"""
Returns True if the active flag was changed and the schedule instance should be saved.
Returns False if nothing changed.
"""
should_be_active = self.should_be_active()
if self.active and not should_be_active:
self.active = False
return True
if not self.active and should_be_active:
if self.memoized_schedule.total_iterations_complete(self):
return False
self.active = True
self.memoized_schedule.move_to_next_event_not_in_the_past(self)
return True
return False
class AbstractAlertScheduleInstance(ScheduleInstance):
alert_schedule_id = models.UUIDField()
class Meta(ScheduleInstance.Meta):
abstract = True
@property
def schedule(self):
return AlertSchedule.objects.get(schedule_id=self.alert_schedule_id)
@schedule.setter
def schedule(self, value):
if not isinstance(value, AlertSchedule):
raise ValueError("Expected an instance of AlertSchedule")
self.alert_schedule_id = value.schedule_id
@staticmethod
def copy_for_recipient(instance, recipient_type, recipient_id):
"""
We can copy alert schedule instances for any recipient because the
recipient's time zone doesn't factor into the calculation of the
next event due timestamp as it does for timed schedule instances.
"""
if not isinstance(instance, AbstractAlertScheduleInstance):
raise TypeError("Expected an alert schedule instance")
new_instance = type(instance)()
for field in instance._meta.fields:
if field.name not in ['schedule_instance_id', 'recipient_type', 'recipient_id']:
setattr(new_instance, field.name, getattr(instance, field.name))
new_instance.recipient_type = recipient_type
new_instance.recipient_id = recipient_id
return new_instance
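    # Illustrative use (hypothetical variables): fan an existing instance out
    # to another contact without touching any of the timing fields:
    #
    #     copy = AbstractAlertScheduleInstance.copy_for_recipient(
    #         instance, ScheduleInstance.RECIPIENT_TYPE_MOBILE_WORKER, user_id)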
def reset_schedule(self, schedule=None):
"""
Resets this alert schedule instance and puts it into a state which
is the same as if it had just spawned now.
"""
schedule = schedule or self.memoized_schedule
self.current_event_num = 0
self.schedule_iteration_num = 1
self.active = True
schedule.set_first_event_due_timestamp(self)
class AbstractTimedScheduleInstance(ScheduleInstance):
timed_schedule_id = models.UUIDField()
start_date = models.DateField()
schedule_revision = models.CharField(max_length=126, null=True)
class Meta(ScheduleInstance.Meta):
abstract = True
@property
def schedule(self):
return TimedSchedule.objects.get(schedule_id=self.timed_schedule_id)
@schedule.setter
def schedule(self, value):
if not isinstance(value, TimedSchedule):
raise ValueError("Expected an instance of TimedSchedule")
self.timed_schedule_id = value.schedule_id
def recalculate_schedule(self, schedule=None, new_start_date=None):
"""
        Resets the start_date and recalculates the next_event_due timestamp for
this AbstractTimedScheduleInstance.
:param schedule: The TimedSchedule to use to avoid a lookup; if None,
self.memoized_schedule is used
:param new_start_date: The start date to use when recalculating the schedule;
If None, the current date is used
"""
schedule = schedule or self.memoized_schedule
self.current_event_num = 0
self.schedule_iteration_num = 1
self.active = True
schedule.set_first_event_due_timestamp(self, start_date=new_start_date)
schedule.move_to_next_event_not_in_the_past(self)
self.schedule_revision = schedule.get_schedule_revision(case=schedule.get_case_or_none(self))
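    # Illustrative call (hypothetical variables): restart an instance from a new
    # start date, e.g. ``instance.recalculate_schedule(new_start_date=date(2020, 1, 1))``.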
class AlertScheduleInstance(AbstractAlertScheduleInstance):
partition_attr = 'schedule_instance_id'
class Meta(AbstractAlertScheduleInstance.Meta):
db_table = 'scheduling_alertscheduleinstance'
unique_together = (
('alert_schedule_id', 'recipient_type', 'recipient_id'),
)
class TimedScheduleInstance(AbstractTimedScheduleInstance):
partition_attr = 'schedule_instance_id'
class Meta(AbstractTimedScheduleInstance.Meta):
db_table = 'scheduling_timedscheduleinstance'
unique_together = (
('timed_schedule_id', 'recipient_type', 'recipient_id'),
)
class CaseScheduleInstanceMixin(object):
RECIPIENT_TYPE_SELF = 'Self'
RECIPIENT_TYPE_CASE_OWNER = 'Owner'
RECIPIENT_TYPE_LAST_SUBMITTING_USER = 'LastSubmittingUser'
RECIPIENT_TYPE_PARENT_CASE = 'ParentCase'
RECIPIENT_TYPE_ALL_CHILD_CASES = 'AllChildCases'
RECIPIENT_TYPE_CUSTOM = 'CustomRecipient'
@property
@memoized
def case(self):
try:
return CaseAccessors(self.domain).get_case(self.case_id)
except CaseNotFound:
return None
@property
@memoized
def case_owner(self):
if self.case:
return get_wrapped_owner(get_owner_id(self.case))
return None
def additional_deactivation_condition_reached(self):
from corehq.apps.data_interfaces.models import _try_date_conversion
if self.memoized_schedule.stop_date_case_property_name and self.case:
values = self.case.resolve_case_property(self.memoized_schedule.stop_date_case_property_name)
values = [element.value for element in values]
timezone = pytz.UTC if self.memoized_schedule.use_utc_as_default_timezone else self.domain_timezone
for stop_date in values:
if isinstance(stop_date, datetime):
pass
elif isinstance(stop_date, date):
stop_date = datetime.combine(stop_date, time(0, 0))
else:
stop_date = _try_date_conversion(stop_date)
if not isinstance(stop_date, datetime):
continue
if stop_date.tzinfo:
stop_date = stop_date.astimezone(pytz.UTC).replace(tzinfo=None)
else:
stop_date = UserTime(stop_date, timezone).server_time().done()
if self.next_event_due >= stop_date:
return True
return False
@property
@memoized
def recipient(self):
if self.recipient_type == self.RECIPIENT_TYPE_SELF:
return self.case
elif self.recipient_type == self.RECIPIENT_TYPE_CASE_OWNER:
return self.case_owner
elif self.recipient_type == self.RECIPIENT_TYPE_LAST_SUBMITTING_USER:
if self.case and self.case.modified_by:
return CouchUser.get_by_user_id(self.case.modified_by, domain=self.domain)
return None
elif self.recipient_type == self.RECIPIENT_TYPE_PARENT_CASE:
if self.case:
return self.case.parent
return None
elif self.recipient_type == self.RECIPIENT_TYPE_ALL_CHILD_CASES:
if self.case:
return list(self.case.get_subcases(index_identifier=DEFAULT_PARENT_IDENTIFIER))
return None
elif self.recipient_type == self.RECIPIENT_TYPE_CUSTOM:
custom_function = to_function(
settings.AVAILABLE_CUSTOM_SCHEDULING_RECIPIENTS[self.recipient_id][0]
)
return custom_function(self)
else:
return super(CaseScheduleInstanceMixin, self).recipient
class CaseAlertScheduleInstance(CaseScheduleInstanceMixin, AbstractAlertScheduleInstance):
# Points to the CommCareCase/SQL that spawned this schedule instance
partition_attr = 'case_id'
case_id = models.CharField(max_length=255)
# Points to the AutomaticUpdateRule that spawned this schedule instance
rule_id = models.IntegerField()
# See corehq.apps.data_interfaces.models.CreateScheduleInstanceActionDefinition.reset_case_property_name
last_reset_case_property_value = models.TextField(null=True)
class Meta(AbstractAlertScheduleInstance.Meta):
db_table = 'scheduling_casealertscheduleinstance'
index_together = AbstractAlertScheduleInstance.Meta.index_together
unique_together = (
('case_id', 'alert_schedule_id', 'recipient_type', 'recipient_id'),
)
class CaseTimedScheduleInstance(CaseScheduleInstanceMixin, AbstractTimedScheduleInstance):
# Points to the CommCareCase/SQL that spawned this schedule instance
partition_attr = 'case_id'
case_id = models.CharField(max_length=255)
# Points to the AutomaticUpdateRule that spawned this schedule instance
rule_id = models.IntegerField()
# See corehq.apps.data_interfaces.models.CreateScheduleInstanceActionDefinition.reset_case_property_name
last_reset_case_property_value = models.TextField(null=True)
class Meta(AbstractTimedScheduleInstance.Meta):
db_table = 'scheduling_casetimedscheduleinstance'
index_together = AbstractTimedScheduleInstance.Meta.index_together
unique_together = (
('case_id', 'timed_schedule_id', 'recipient_type', 'recipient_id'),
)
| 37.283282
| 113
| 0.665975
|
4ef1b0b714d72de49af243020513c7d0e0214711
| 3,847
|
py
|
Python
|
soc/hps_proto2_platform.py
|
alanvgreen/CFU-Playground
|
1faba8de355e2e4d3928e648ac1b008a5ee71195
|
[
"Apache-2.0"
] | 1
|
2022-02-08T01:39:29.000Z
|
2022-02-08T01:39:29.000Z
|
soc/hps_proto2_platform.py
|
JosephBushagour/CFU-Playground
|
d4f660ad81f8807558120bbd311371eff1a0ce3b
|
[
"Apache-2.0"
] | null | null | null |
soc/hps_proto2_platform.py
|
JosephBushagour/CFU-Playground
|
d4f660ad81f8807558120bbd311371eff1a0ce3b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from migen import Module, ClockDomain, Signal, If, log2_int
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.generic_platform import Pins, Subsignal, IOStandard
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import LatticeProgrammer
from litex.soc.cores.ram import NXLRAM
from litex.soc.cores.clock import NXOSCA
hps_io = [
("done", 0, Pins("A5"), IOStandard("LVCMOS18H")),
("programn", 0, Pins("A4"), IOStandard("LVCMOS18H")),
    # JTAG: not usually programmatically accessible
("jtag", 0,
Subsignal("en", Pins("C2")),
Subsignal("tck", Pins("D2")),
Subsignal("tdi", Pins("C3")),
Subsignal("tdo", Pins("D3")),
Subsignal("tms", Pins("B1")),
IOStandard("LVCMOS18H")
),
# SPI flash, defined two ways
("spiflash", 0,
Subsignal("cs_n", Pins("A3")),
Subsignal("clk", Pins("B4")),
Subsignal("mosi", Pins("B5")),
Subsignal("miso", Pins("C4")),
Subsignal("wp", Pins("B3")),
Subsignal("hold", Pins("B2")),
IOStandard("LVCMOS18")
),
("spiflash4x", 0,
Subsignal("cs_n", Pins("A3")),
Subsignal("clk", Pins("B4")),
Subsignal("dq", Pins("B5 C4 B3 B2")),
IOStandard("LVCMOS18")
),
]
# Debug IO that is specific to the HPS hardware. These should have equivalents
# defined in simulation.py if they are referenced from C code.
hps_nx17_debug_io = [
# Debug UART
("serial", 0,
Subsignal("rx", Pins("E2"), IOStandard("LVCMOS18")),
Subsignal("tx", Pins("G1"), IOStandard("LVCMOS18H")),
),
]
# Debug IO that is common to both simulation and hardware.
hps_debug_common = [
("user_led", 0, Pins("G3"), IOStandard("LVCMOS18H")),
]
class _CRG(Module):
"""Clock Reset Generator"""
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_por = ClockDomain()
# Clock from HFOSC
self.submodules.sys_clk = sys_osc = NXOSCA()
sys_osc.create_hf_clk(self.cd_sys, sys_clk_freq)
        # We make the period constraint roughly 10% tighter than the nominal
        # system clock period, because the CrossLink-NX internal oscillator
        # runs at ±10% of its nominal frequency.
platform.add_period_constraint(self.cd_sys.clk,
1e9 / (sys_clk_freq * 1.1))
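        # For the default sys_clk_freq of 450 MHz / 7 ~= 64.29 MHz this gives a
        # constraint of about 14.1 ns instead of the nominal ~15.6 ns period.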
# Power On Reset
por_cycles = 4096
por_counter = Signal(log2_int(por_cycles), reset=por_cycles-1)
self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
self.sync.por += If(por_counter != 0, por_counter.eq(por_counter - 1))
self.specials += AsyncResetSynchronizer(
self.cd_sys, (por_counter != 0))
class Platform(LatticePlatform):
# The NX-17 has a 450 MHz clock. Our system clock should be a divisor of that.
clk_divisor = 7
sys_clk_freq = int(450e6 / clk_divisor)
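    # With clk_divisor = 7 this evaluates to int(450e6 / 7) = 64285714 Hz (~64.3 MHz).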
def __init__(self, toolchain="radiant"):
LatticePlatform.__init__(self,
# The HPS actually has the LIFCL-17-7UWG72C, but that doesn't
# seem to be available in Radiant 2.2, at least on Linux.
device="LIFCL-17-8UWG72C",
io=hps_io + hps_nx17_debug_io + hps_debug_common,
connectors=[],
toolchain=toolchain)
def create_crg(self):
return _CRG(self, self.sys_clk_freq)
def create_ram(self, width, size):
return NXLRAM(width, size)
# TODO: add create_programmer function
| 36.638095
| 94
| 0.613205
|
babfc708c15958fe7bd152a27e1914e6803726bc
| 4,687
|
py
|
Python
|
kNN/main.py
|
unbrokenguy/machine_learning
|
b33e41dd775eecd0c79cd287fe4f8cb0b5164b74
|
[
"MIT"
] | null | null | null |
kNN/main.py
|
unbrokenguy/machine_learning
|
b33e41dd775eecd0c79cd287fe4f8cb0b5164b74
|
[
"MIT"
] | null | null | null |
kNN/main.py
|
unbrokenguy/machine_learning
|
b33e41dd775eecd0c79cd287fe4f8cb0b5164b74
|
[
"MIT"
] | null | null | null |
from collections import Counter
from dataclasses import dataclass
from enum import Enum
from typing import Tuple
import numpy as np
import pygame
import random
from scipy.stats import mode
N = 3
R = 4
FPS = 5
POINTS_NUMBER = 10
MIN_NEIGHBOURS = 5
MAX_NEIGHBOURS = 15
OPTIMAL_NEIGHBOURS_COUNT = [0 for _ in range(MAX_NEIGHBOURS + 1)]
class Color(Enum):
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
def generate_colors(n: int):
colors = []
for _ in range(n):
hex_color = "%06x" % random.randint(0, 0xFFFFFF)
colors.append(tuple(int(hex_color[i: i + 2], 16) for i in (0, 2, 4)))
return colors
def generate_points(clusters, points_number, colors):
points = []
for cluster in range(clusters):
center_x, center_y = random.randint(50, 550), random.randint(50, 350)
for element in range(points_number):
points.append(Point(x=int(random.gauss(center_x, 20)), y=int(random.gauss(center_y, 20)),
cluster=cluster, color=colors[cluster]))
return points
@dataclass
class Point:
x: int
y: int
cluster: int
color: Color = Color.BLACK
def dist(self, other):
        if not isinstance(other, Point):
            raise TypeError("dist() expects another Point")
return np.sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)
class KNN:
def __init__(self, points, colors):
self.points = points
self.cluster_colors = colors
def find_neighbours(self, point, k):
return sorted(self.points, key=lambda p: point.dist(p))[:k]
    def add_point(self, point, cluster):
        point.cluster = cluster
        point.color = self.cluster_colors[cluster]
        for k in range(MIN_NEIGHBOURS, MAX_NEIGHBOURS):
            neighbours = self.find_neighbours(point, k)
            clusters = [p.cluster for p in neighbours]
            # Count k as "good" when the majority vote of the k nearest
            # neighbours matches the cluster the user assigned.
            if Counter(clusters).most_common(1)[0][0] == cluster:
                OPTIMAL_NEIGHBOURS_COUNT[k] += 1
        self.points.append(point)
def predict(self, point):
optimal_cluster_number = 1 if max(OPTIMAL_NEIGHBOURS_COUNT) == 0 else OPTIMAL_NEIGHBOURS_COUNT.index(max(OPTIMAL_NEIGHBOURS_COUNT))
neighbours = self.find_neighbours(point, optimal_cluster_number)
count = Counter(list(map(lambda p: p.color, neighbours)))
max_color = max(count.values())
return list(count.keys())[list(count.values()).index(max_color)]
def main():
colors = [Color.RED, Color.GREEN, Color.BLUE]
points = generate_points(N, POINTS_NUMBER, colors)
knn = KNN(points=points, colors=colors)
pygame.init()
screen = pygame.display.set_mode((600, 400), pygame.RESIZABLE)
screen.fill("WHITE")
pygame.display.update()
clock = pygame.time.Clock()
play = True
point = None
while play:
screen.fill("WHITE")
for event in pygame.event.get():
if event.type == pygame.QUIT:
play = False
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
x, y = pygame.mouse.get_pos()
point = Point(x, y, 0, Color.BLACK)
if event.button == 3:
x, y = pygame.mouse.get_pos()
knn.points.append(Point(x, y, 0, knn.predict(Point(x, y, 0))))
point = None
            if event.type == pygame.KEYDOWN and point is not None:
                # Keys 1-3 assign the pending point to clusters 0-2; the
                # cluster index must be a valid index into `colors`.
                cluster = None
                if event.key == pygame.K_1:
                    cluster = 0
                if event.key == pygame.K_2:
                    cluster = 1
                if event.key == pygame.K_3:
                    cluster = 2
                if cluster is not None:
                    knn.add_point(point, cluster)
                    point = None
if point:
pygame.draw.circle(
screen,
point.color.value if isinstance(point.color, Color) else point.color,
(point.x, point.y),
R,
)
for p in knn.points:
pygame.draw.circle(
screen,
p.color.value if isinstance(p.color, Color) else p.color,
(p.x, p.y),
R,
)
pygame.display.update()
clock.tick(FPS)
if __name__ == "__main__":
main()
| 32.548611
| 142
| 0.551312
|
f741d96a7be5cad77ac86ffe06f37fceec063d14
| 7,616
|
py
|
Python
|
tests/sensors/test_http_sensor.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 1
|
2019-10-10T23:53:01.000Z
|
2019-10-10T23:53:01.000Z
|
tests/sensors/test_http_sensor.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 3
|
2020-07-07T20:39:24.000Z
|
2021-09-29T17:34:46.000Z
|
tests/sensors/test_http_sensor.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 1
|
2020-11-04T03:17:51.000Z
|
2020-11-04T03:17:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
import requests
from airflow import DAG
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import TaskInstance
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.sensors.http_sensor import HttpSensor
from airflow.utils.timezone import datetime
from tests.compat import mock
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
TEST_DAG_ID = 'unit_test_dag'
class TestHttpSensor(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_poke_exception(self, mock_session_send):
"""
        An exception raised in the poke function should not be ignored.
"""
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(resp):
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1)
with self.assertRaisesRegex(AirflowException, 'AirflowException raised here!'):
task.execute(context={})
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_head_method(self, mock_session_send):
def resp_check(_):
return True
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1)
task.execute(context={})
args, kwargs = mock_session_send.call_args
received_request = args[0]
prep_request = requests.Request(
'HEAD',
'https://www.google.com',
{}).prepare()
self.assertEqual(prep_request.url, received_request.url)
        self.assertEqual(prep_request.method, received_request.method)
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_poke_context(self, mock_session_send):
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(resp, execution_date):
if execution_date == DEFAULT_DATE:
return True
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1,
dag=self.dag)
task_instance = TaskInstance(task=task, execution_date=DEFAULT_DATE)
task.execute(task_instance.get_template_context())
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_logging_head_error_request(
self,
mock_session_send
):
def resp_check(_):
return True
response = requests.Response()
response.status_code = 404
response.reason = 'Not Found'
mock_session_send.return_value = response
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1
)
with mock.patch.object(task.hook.log, 'error') as mock_errors:
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
self.assertTrue(mock_errors.called)
calls = [
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
]
mock_errors.assert_has_calls(calls)
class FakeSession:
def __init__(self):
self.response = requests.Response()
self.response.status_code = 200
self.response._content = 'apache/airflow'.encode('ascii', 'ignore')
def send(self, *args, **kwargs):
return self.response
def prepare_request(self, request):
if 'date' in request.params:
self.response._content += (
'/' + request.params['date']
).encode('ascii', 'ignore')
return self.response
class TestHttpOpSensor(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
dag = DAG(TEST_DAG_ID, default_args=args)
self.dag = dag
@mock.patch('requests.Session', FakeSession)
def test_get(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_get_response_check(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("apache/airflow" in response.text),
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_sensor(self):
sensor = HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='/search',
request_params={"client": "ubuntu", "q": "airflow", 'date': '{{ds}}'},
headers={},
response_check=lambda response: (
"apache/airflow/" + DEFAULT_DATE.strftime('%Y-%m-%d')
in response.text),
poke_interval=5,
timeout=15,
dag=self.dag)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
| 34.152466
| 88
| 0.616859
|
f9e1faaca74675111460d25b0da975abe58feee2
| 3,802
|
py
|
Python
|
hedgehog/structure.py
|
h8hw8943/hedgehog
|
8d2927604c0e9f4f0ed9dcc3a12ce7ae6638cc51
|
[
"MIT"
] | 121
|
2020-05-14T14:46:35.000Z
|
2022-03-21T22:34:12.000Z
|
hedgehog/structure.py
|
h8hw8943/hedgehog
|
8d2927604c0e9f4f0ed9dcc3a12ce7ae6638cc51
|
[
"MIT"
] | 14
|
2020-05-14T20:13:31.000Z
|
2022-01-03T09:04:28.000Z
|
hedgehog/structure.py
|
h8hw8943/hedgehog
|
8d2927604c0e9f4f0ed9dcc3a12ce7ae6638cc51
|
[
"MIT"
] | 24
|
2020-05-15T01:37:20.000Z
|
2022-03-21T22:34:14.000Z
|
import collections
import itertools
import numpy as np
__all__ = ['chow_liu']
def chow_liu(X, root=None):
"""Return a Chow-Liu tree.
A Chow-Liu tree takes three steps to build:
1. Compute the mutual information between each pair of variables. The values are organised in
a fully connected graph.
2. Extract the maximum spanning tree from the graph.
3. Orient the edges of the tree by picking a root.
TODO: the current implementation uses Kruskal's algorithm to extract the MST. According to
Wikipedia, faster algorithms exist for fully connected graphs.
References
----------
1. Chow, C. and Liu, C., 1968. Approximating discrete probability distributions with
dependence trees. IEEE transactions on Information Theory, 14(3), pp.462-467.
2. https://www.wikiwand.com/en/Chow-Liu_tree
"""
# Compute the mutual information between each pair of variables
marginals = {v: X[v].value_counts(normalize=True) for v in X.columns}
edge = collections.namedtuple('edge', ['u', 'v', 'mi'])
mis = (
edge(
u, v, mutual_info(
puv=X.groupby([u, v]).size() / len(X),
pu=marginals[u],
pv=marginals[v]
))
for u, v in itertools.combinations(sorted(X.columns), 2)
)
edges = ((e.u, e.v) for e in sorted(mis, key=lambda e: e.mi, reverse=True))
# Extract the maximum spanning tree
neighbors = kruskal(vertices=X.columns, edges=edges)
if root is None:
root = X.columns[0]
return list(orient_tree(neighbors, root, visited=set()))
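# Illustrative usage sketch (not part of the original module). `chow_liu`
# expects a pandas DataFrame of discrete variables, as implied by the calls to
# `.value_counts()` and `.groupby()` above; the column names below are made up.
def _chow_liu_example():
    import pandas as pd
    X = pd.DataFrame({
        'a': [0, 0, 1, 1, 0, 1],
        'b': [0, 0, 1, 1, 1, 1],
        'c': [1, 0, 1, 0, 1, 0],
    })
    # Returns a list of directed (parent, child) edges, oriented away from 'a'.
    return chow_liu(X, root='a')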
def mutual_info(puv, pu, pv):
"""Return the mutual information between variables u and v."""
# We first align pu and pv with puv so that we can vectorise the MI computation
# TODO: maybe there's a faster way to align pu and pv with respect to puv
pu = pu.reindex(puv.index.get_level_values(pu.name)).values
pv = pv.reindex(puv.index.get_level_values(pv.name)).values
return (puv * np.log(puv / (pv * pu))).sum()
class DisjointSet:
"""Disjoint-set data structure.
References
----------
1. Tarjan, R.E. and Van Leeuwen, J., 1984. Worst-case analysis of set union algorithms.
Journal of the ACM (JACM), 31(2), pp.245-281.
2. https://www.wikiwand.com/en/Disjoint-set_data_structure
"""
def __init__(self, *values):
self.parents = {x: x for x in values}
self.sizes = {x: 1 for x in values}
def find(self, x):
while self.parents[x] != x:
x, self.parents[x] = self.parents[x], self.parents[self.parents[x]]
return x
def union(self, x, y):
if self.sizes[x] < self.sizes[y]:
x, y = y, x
self.parents[y] = x
self.sizes[x] += self.sizes[y]
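# Illustrative DisjointSet usage (not part of the original module):
#
#     ds = DisjointSet('a', 'b', 'c')
#     ds.union(ds.find('a'), ds.find('b'))
#     ds.find('a') == ds.find('b')   # True: 'a' and 'b' now share one component
#     ds.find('c') == ds.find('a')   # False: 'c' is still on its own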
def kruskal(vertices, edges):
"""Find the Maximum Spanning Tree of a dense graph using Kruskal's algorithm.
The provided edges are assumed to be sorted in descending order.
References
----------
1. Kruskal, J.B., 1956. On the shortest spanning subtree of a graph and the traveling
salesman problem. Proceedings of the American Mathematical society, 7(1), pp.48-50.
"""
ds = DisjointSet(*vertices)
neighbors = collections.defaultdict(set)
for u, v in edges:
if ds.find(u) != ds.find(v):
neighbors[u].add(v)
neighbors[v].add(u)
ds.union(ds.find(u), ds.find(v))
if len(neighbors) == len(vertices):
break
return neighbors
def orient_tree(neighbors, root, visited):
"""Return tree edges that originate from the given root.
"""
for neighbor in neighbors[root] - visited:
yield root, neighbor
yield from orient_tree(neighbors, root=neighbor, visited={root})
| 29.022901
| 97
| 0.631247
|
0c7ef5151a0ac3fa82271ff21fc48486be15c89e
| 170
|
py
|
Python
|
dasbit/irc/message/numeric.py
|
DASPRiD/DASBiT
|
5ce105786c3528b8611f005ac1749685be2a9ff5
|
[
"BSD-3-Clause"
] | 2
|
2015-06-11T14:57:31.000Z
|
2016-08-09T21:25:11.000Z
|
dasbit/irc/message/numeric.py
|
DASPRiD/DASBiT
|
5ce105786c3528b8611f005ac1749685be2a9ff5
|
[
"BSD-3-Clause"
] | 1
|
2015-02-12T09:29:57.000Z
|
2015-02-12T10:47:13.000Z
|
dasbit/irc/message/numeric.py
|
DASPRiD/DASBiT
|
5ce105786c3528b8611f005ac1749685be2a9ff5
|
[
"BSD-3-Clause"
] | null | null | null |
from dasbit.irc.message import Generic
class Numeric(Generic):
def __init__(self, prefix, command, args):
Generic.__init__(self, prefix, int(command), args)
| 28.333333
| 58
| 0.723529
|
6e9c127e88f9c5ec43f57b9dcc1cb4ae1f5ba725
| 9,998
|
py
|
Python
|
tests/test_action_value.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 18
|
2018-08-07T07:27:41.000Z
|
2018-08-20T01:51:21.000Z
|
tests/test_action_value.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | null | null | null |
tests/test_action_value.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 2
|
2018-08-16T06:47:26.000Z
|
2018-08-20T01:51:22.000Z
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import unittest
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
from chainerrl import action_value
class TestDiscreteActionValue(unittest.TestCase):
def setUp(self):
self.batch_size = 30
self.action_size = 3
self.q_values = np.random.normal(
size=(self.batch_size, self.action_size)).astype(np.float32)
self.qout = action_value.DiscreteActionValue(
chainer.Variable(self.q_values))
def test_max(self):
self.assertIsInstance(self.qout.max, chainer.Variable)
np.testing.assert_almost_equal(self.qout.max.data,
self.q_values.max(axis=1))
def test_greedy_actions(self):
self.assertIsInstance(self.qout.greedy_actions, chainer.Variable)
np.testing.assert_equal(self.qout.greedy_actions.data,
self.q_values.argmax(axis=1))
def test_evaluate_actions(self):
sample_actions = np.random.randint(self.action_size,
size=self.batch_size)
ret = self.qout.evaluate_actions(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
for b in range(self.batch_size):
self.assertAlmostEqual(ret.data[b],
self.q_values[b, sample_actions[b]])
def test_compute_advantage(self):
sample_actions = np.random.randint(self.action_size,
size=self.batch_size)
greedy_actions = self.q_values.argmax(axis=1)
ret = self.qout.compute_advantage(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
for b in range(self.batch_size):
if sample_actions[b] == greedy_actions[b]:
self.assertAlmostEqual(ret.data[b], 0)
else:
                # An advantage relative to the optimal policy must always be negative
self.assertLess(ret.data[b], 0)
q = self.q_values[b, sample_actions[b]]
v = self.q_values[b, greedy_actions[b]]
adv = q - v
self.assertAlmostEqual(ret.data[b], adv)
def test_params(self):
self.assertEqual(len(self.qout.params), 1)
self.assertEqual(id(self.qout.params[0]), id(self.qout.q_values))
class TestDistributionalDiscreteActionValue(unittest.TestCase):
def setUp(self):
self.batch_size = 30
self.action_size = 3
self.n_atoms = 51
self.atom_probs = np.random.dirichlet(
alpha=np.ones(self.n_atoms),
size=(self.batch_size, self.action_size)).astype(np.float32)
self.z_values = np.linspace(
-10, 10, num=self.n_atoms, dtype=np.float32)
self.qout = action_value.DistributionalDiscreteActionValue(
chainer.Variable(self.atom_probs), self.z_values)
self.q_values = (self.atom_probs * self.z_values).sum(axis=2)
def test_max(self):
self.assertIsInstance(self.qout.max, chainer.Variable)
np.testing.assert_almost_equal(self.qout.max.data,
self.q_values.max(axis=1))
def test_max_as_distribution(self):
self.assertIsInstance(
self.qout.max_as_distribution, chainer.Variable)
for b in range(self.batch_size):
np.testing.assert_almost_equal(
self.qout.max_as_distribution.data[b],
self.atom_probs[b, self.qout.greedy_actions.data[b]])
def test_greedy_actions(self):
self.assertIsInstance(self.qout.greedy_actions, chainer.Variable)
np.testing.assert_equal(self.qout.greedy_actions.data,
self.q_values.argmax(axis=1))
def test_evaluate_actions(self):
sample_actions = np.random.randint(self.action_size,
size=self.batch_size)
ret = self.qout.evaluate_actions(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
for b in range(self.batch_size):
self.assertAlmostEqual(ret.data[b],
self.q_values[b, sample_actions[b]])
def test_evaluate_actions_as_distribution(self):
sample_actions = np.random.randint(self.action_size,
size=self.batch_size)
ret = self.qout.evaluate_actions_as_distribution(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
for b in range(self.batch_size):
np.testing.assert_almost_equal(
ret.data[b],
self.atom_probs[b, sample_actions[b]])
def test_compute_advantage(self):
sample_actions = np.random.randint(self.action_size,
size=self.batch_size)
greedy_actions = self.q_values.argmax(axis=1)
ret = self.qout.compute_advantage(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
for b in range(self.batch_size):
if sample_actions[b] == greedy_actions[b]:
self.assertAlmostEqual(ret.data[b], 0)
else:
                # An advantage relative to the optimal policy must always be negative
self.assertLess(ret.data[b], 0)
q = self.q_values[b, sample_actions[b]]
v = self.q_values[b, greedy_actions[b]]
adv = q - v
self.assertAlmostEqual(ret.data[b], adv)
def test_params(self):
self.assertEqual(len(self.qout.params), 1)
self.assertIs(self.qout.params[0], self.qout.q_dist)
class TestQuadraticActionValue(unittest.TestCase):
def test_max_unbounded(self):
n_batch = 7
ndim_action = 3
mu = np.random.randn(n_batch, ndim_action).astype(np.float32)
mat = np.broadcast_to(
np.eye(ndim_action, dtype=np.float32)[None],
(n_batch, ndim_action, ndim_action))
v = np.random.randn(n_batch).astype(np.float32)
q_out = action_value.QuadraticActionValue(
chainer.Variable(mu),
chainer.Variable(mat),
chainer.Variable(v))
v_out = q_out.max
self.assertIsInstance(v_out, chainer.Variable)
v_out = v_out.data
np.testing.assert_almost_equal(v_out, v)
def test_max_bounded(self):
n_batch = 20
ndim_action = 3
mu = np.random.randn(n_batch, ndim_action).astype(np.float32)
mat = np.broadcast_to(
np.eye(ndim_action, dtype=np.float32)[None],
(n_batch, ndim_action, ndim_action))
v = np.random.randn(n_batch).astype(np.float32)
min_action, max_action = -1.3, 1.3
q_out = action_value.QuadraticActionValue(
chainer.Variable(mu),
chainer.Variable(mat),
chainer.Variable(v),
min_action, max_action)
v_out = q_out.max
self.assertIsInstance(v_out, chainer.Variable)
v_out = v_out.data
        # If mu[i] is a valid action, v_out[i] should be v[i]
mu_is_allowed = np.all(
(min_action < mu) * (mu < max_action),
axis=1)
np.testing.assert_almost_equal(v_out[mu_is_allowed], v[mu_is_allowed])
# Otherwise, v_out[i] should be less than v[i]
mu_is_not_allowed = ~np.all(
(min_action - 1e-2 < mu) * (mu < max_action + 1e-2),
axis=1)
np.testing.assert_array_less(
v_out[mu_is_not_allowed],
v[mu_is_not_allowed])
@testing.parameterize(*testing.product({
'batch_size': [1, 3],
'action_size': [1, 2],
'has_maximizer': [True, False],
}))
class TestSingleActionValue(unittest.TestCase):
def setUp(self):
def evaluator(actions):
# negative square norm of actions
return -F.sum(actions ** 2, axis=1)
self.evaluator = evaluator
if self.has_maximizer:
def maximizer():
return chainer.Variable(np.zeros(
(self.batch_size, self.action_size), dtype=np.float32))
else:
maximizer = None
self.maximizer = maximizer
self.av = action_value.SingleActionValue(
evaluator=evaluator, maximizer=maximizer)
def test_max(self):
if not self.has_maximizer:
return
self.assertIsInstance(self.av.max, chainer.Variable)
np.testing.assert_almost_equal(
self.av.max.data,
self.evaluator(self.maximizer()).data)
def test_greedy_actions(self):
if not self.has_maximizer:
return
self.assertIsInstance(self.av.greedy_actions, chainer.Variable)
np.testing.assert_equal(self.av.greedy_actions.data,
self.maximizer().data)
def test_evaluate_actions(self):
sample_actions = np.random.randn(
self.batch_size, self.action_size).astype(np.float32)
ret = self.av.evaluate_actions(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
np.testing.assert_equal(ret.data, self.evaluator(sample_actions).data)
def test_compute_advantage(self):
if not self.has_maximizer:
return
sample_actions = np.random.randn(
self.batch_size, self.action_size).astype(np.float32)
ret = self.av.compute_advantage(sample_actions)
self.assertIsInstance(ret, chainer.Variable)
np.testing.assert_equal(
ret.data,
(self.evaluator(sample_actions).data
- self.evaluator(self.maximizer()).data))
def test_params(self):
# no params
self.assertEqual(len(self.av.params), 0)
| 38.453846
| 78
| 0.617824
|
5760974f74d5e5a0bc70d6da77262ea7ebb6fdfe
| 7,235
|
py
|
Python
|
backpack/extensions/secondorder/hbp/__init__.py
|
paulkogni/backpack
|
3122de062d5bbcdcba8f8e02d24adb1bd2cdada6
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/__init__.py
|
paulkogni/backpack
|
3122de062d5bbcdcba8f8e02d24adb1bd2cdada6
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/__init__.py
|
paulkogni/backpack
|
3122de062d5bbcdcba8f8e02d24adb1bd2cdada6
|
[
"MIT"
] | null | null | null |
from torch.nn import (
AvgPool2d,
Conv2d,
CrossEntropyLoss,
Dropout,
Flatten,
Linear,
MaxPool2d,
MSELoss,
ReLU,
Sigmoid,
Tanh,
ZeroPad2d,
)
from backpack.extensions.backprop_extension import BackpropExtension
from backpack.extensions.curvature import Curvature
from backpack.extensions.secondorder.hbp.hbp_options import (
BackpropStrategy,
ExpectationApproximation,
LossHessianStrategy,
)
from . import activations, conv2d, dropout, flatten, linear, losses, padding, pooling
class HBP(BackpropExtension):
def __init__(
self,
curv_type,
loss_hessian_strategy,
backprop_strategy,
ea_strategy,
savefield="hbp",
):
self.curv_type = curv_type
self.loss_hessian_strategy = loss_hessian_strategy
self.backprop_strategy = backprop_strategy
self.ea_strategy = ea_strategy
super().__init__(
savefield=savefield,
fail_mode="ERROR",
module_exts={
MSELoss: losses.HBPMSELoss(),
CrossEntropyLoss: losses.HBPCrossEntropyLoss(),
Linear: linear.HBPLinear(),
MaxPool2d: pooling.HBPMaxpool2d(),
AvgPool2d: pooling.HBPAvgPool2d(),
ZeroPad2d: padding.HBPZeroPad2d(),
Conv2d: conv2d.HBPConv2d(),
Dropout: dropout.HBPDropout(),
Flatten: flatten.HBPFlatten(),
ReLU: activations.HBPReLU(),
Sigmoid: activations.HBPSigmoid(),
Tanh: activations.HBPTanh(),
},
)
def get_curv_type(self):
return self.curv_type
def get_loss_hessian_strategy(self):
return self.loss_hessian_strategy
def get_backprop_strategy(self):
return self.backprop_strategy
def get_ea_strategy(self):
return self.ea_strategy
class KFAC(HBP):
"""
Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
using Monte-Carlo sampling.
Stores the output in :code:`kfac` as a list of Kronecker factors.
- If there is only one element, the item represents the GGN/Fisher
approximation itself.
- If there are multiple elements, they are arranged in the order such
that their Kronecker product represents the Generalized Gauss-Newton/Fisher
approximation.
- The dimension of the factors depends on the layer, but the product
of all row dimensions (or column dimensions) yields the dimension of the
layer parameter.
.. note::
The literature uses column-stacking as vectorization convention.
This is in contrast to the default row-major storing scheme of tensors
in :code:`torch`. Therefore, the order of factors differs from the
presentation in the literature.
Implements the procedures described by
- `Optimizing Neural Networks with Kronecker-factored Approximate Curvature
<http://proceedings.mlr.press/v37/martens15.html>`_
by James Martens and Roger Grosse, 2015.
- `A Kronecker-factored approximate Fisher matrix for convolution layers
<http://proceedings.mlr.press/v48/grosse16.html>`_
by Roger Grosse and James Martens, 2016
"""
def __init__(self, mc_samples=1):
self._mc_samples = mc_samples
super().__init__(
curv_type=Curvature.GGN,
loss_hessian_strategy=LossHessianStrategy.SAMPLING,
backprop_strategy=BackpropStrategy.SQRT,
ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
savefield="kfac",
)
def get_num_mc_samples(self):
return self._mc_samples
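# Hedged usage sketch (not part of the original file), following the usual
# BackPACK workflow of extending the model and loss function and activating the
# extension in a ``with backpack(...)`` block; the variable names are illustrative:
#
#     from backpack import backpack, extend
#
#     model = extend(model)
#     lossfunc = extend(lossfunc)
#     loss = lossfunc(model(X), y)
#     with backpack(KFAC(mc_samples=1)):
#         loss.backward()
#     for p in model.parameters():
#         print([factor.shape for factor in p.kfac])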
class KFRA(HBP):
"""
Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
using the full Hessian of the loss function w.r.t. the model output
and averaging after every backpropagation step.
Stores the output in :code:`kfra` as a list of Kronecker factors.
- If there is only one element, the item represents the GGN/Fisher
approximation itself.
- If there are multiple elements, they are arranged in the order such
that their Kronecker product represents the Generalized Gauss-Newton/Fisher
approximation.
- The dimension of the factors depends on the layer, but the product
of all row dimensions (or column dimensions) yields the dimension of the
layer parameter.
.. note::
The literature uses column-stacking as vectorization convention.
This is in contrast to the default row-major storing scheme of tensors
in :code:`torch`. Therefore, the order of factors differs from the
presentation in the literature.
    Implements the procedures described by
    - `Practical Gauss-Newton Optimisation for Deep Learning
<http://proceedings.mlr.press/v70/botev17a.html>`_
by Aleksandar Botev, Hippolyt Ritter and David Barber, 2017.
Extended for convolutions following
- `A Kronecker-factored approximate Fisher matrix for convolution layers
<http://proceedings.mlr.press/v48/grosse16.html>`_
by Roger Grosse and James Martens, 2016
"""
def __init__(self):
super().__init__(
curv_type=Curvature.GGN,
loss_hessian_strategy=LossHessianStrategy.SUM,
backprop_strategy=BackpropStrategy.BATCH_AVERAGE,
ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
savefield="kfra",
)
class KFLR(HBP):
"""
Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
using the full Hessian of the loss function w.r.t. the model output.
Stores the output in :code:`kflr` as a list of Kronecker factors.
- If there is only one element, the item represents the GGN/Fisher
approximation itself.
- If there are multiple elements, they are arranged in the order such
that their Kronecker product represents the Generalized Gauss-Newton/Fisher
approximation.
- The dimension of the factors depends on the layer, but the product
of all row dimensions (or column dimensions) yields the dimension of the
layer parameter.
.. note::
The literature uses column-stacking as vectorization convention.
This is in contrast to the default row-major storing scheme of tensors
in :code:`torch`. Therefore, the order of factors differs from the
presentation in the literature.
Implements the procedures described by
- `Practical Gauss-Newton Optimisation for Deep Learning
<http://proceedings.mlr.press/v70/botev17a.html>`_
by Aleksandar Botev, Hippolyt Ritter and David Barber, 2017.
Extended for convolutions following
- `A Kronecker-factored approximate Fisher matrix for convolution layers
<http://proceedings.mlr.press/v48/grosse16.html>`_
by Roger Grosse and James Martens, 2016
"""
def __init__(self):
super().__init__(
curv_type=Curvature.GGN,
loss_hessian_strategy=LossHessianStrategy.EXACT,
backprop_strategy=BackpropStrategy.SQRT,
ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
savefield="kflr",
)
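# --- Added usage sketch (hedged, not part of the original file) ----------------
# A minimal illustration of how these curvature extensions are typically applied.
# It assumes the surrounding library exposes the usual BackPACK entry points
# `extend()` and the `backpack()` context manager; every name below that does not
# appear in this file is an assumption for illustration only.
#
#   import torch
#   from backpack import backpack, extend
#
#   model = extend(torch.nn.Sequential(torch.nn.Linear(10, 2)))
#   lossfunc = extend(torch.nn.CrossEntropyLoss())
#   loss = lossfunc(model(X), y)                      # X, y: a data batch (assumed)
#   with backpack(KFAC(mc_samples=1)):                # KFRA() / KFLR() work the same way
#       loss.backward()
#   for p in model.parameters():
#       print([factor.shape for factor in p.kfac])    # savefields: kfac / kfra / kflr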
| 34.452381
| 85
| 0.683483
|
96f32a3c82523dd0aec0e0fa53cbc2dc69c64fb7
| 719
|
py
|
Python
|
duendecat/dir.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | 3
|
2019-03-18T18:34:34.000Z
|
2021-09-09T07:47:59.000Z
|
duendecat/dir.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | null | null | null |
duendecat/dir.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | null | null | null |
import os, sys
import inspect
PROJECT_NAME = 'duendecat'
filename = os.path.dirname(inspect.getframeinfo(inspect.currentframe()).filename)
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.environ.get("_MEIPASS2", os.path.dirname(os.path.abspath(filename)))
return os.path.join(base_path, PROJECT_NAME, relative_path)
def database_path(database):
return resource_path(os.path.join('database', database))
LOG_FILE = resource_path('log.txt')
CONFIG_FILE = resource_path('config.json')
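# Hedged usage sketch (added example): resolving bundled resources both from a
# source checkout and from a PyInstaller bundle. The database file name below is
# an assumption for illustration only.
#
#   db = database_path('sentences.db')    # -> <base>/duendecat/database/sentences.db
#   with open(CONFIG_FILE) as f:
#       raw_config = f.read()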
| 28.76
| 91
| 0.734353
|
c673044bcc290180fdf452b0853301fdfcae935e
| 6,163
|
py
|
Python
|
env/lib/python3.9/site-packages/ansible/modules/cloud/hcloud/_hcloud_image_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
env/lib/python3.9/site-packages/ansible/modules/cloud/hcloud/_hcloud_image_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
env/lib/python3.9/site-packages/ansible/modules/cloud/hcloud/_hcloud_image_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_image_info
short_description: Gather infos about your Hetzner Cloud images.
version_added: "2.8"
description:
- Gather infos about your Hetzner Cloud images.
- This module was called C(hcloud_image_facts) before Ansible 2.9, returning C(ansible_facts) and C(hcloud_image_facts).
Note that the M(hcloud_image_info) module no longer returns C(ansible_facts) and the value was renamed to C(hcloud_image_info)!
author:
- Lukas Kaemmerling (@LKaemmerling)
options:
id:
description:
- The ID of the image you want to get.
type: int
name:
description:
- The name of the image you want to get.
type: str
label_selector:
description:
- The label selector for the images you want to get.
type: str
type:
description:
- The type of the image you want to get.
default: system
choices: [ system, snapshot, backup ]
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Gather hcloud image infos
hcloud_image_info:
register: output
- name: Print the gathered infos
debug:
var: output
"""
RETURN = """
hcloud_image_info:
description: The image infos as list
returned: always
type: complex
contains:
id:
description: Numeric identifier of the image
returned: always
type: int
sample: 1937415
type:
description: Type of the image
returned: always
type: str
sample: system
status:
description: Status of the image
returned: always
type: str
sample: available
name:
description: Name of the image
returned: always
type: str
sample: ubuntu-18.04
description:
description: Detail description of the image
returned: always
type: str
sample: Ubuntu 18.04 Standard 64 bit
os_flavor:
description: OS flavor of the image
returned: always
type: str
sample: ubuntu
os_version:
description: OS version of the image
returned: always
type: str
sample: 18.04
labels:
description: User-defined labels (key-value pairs)
returned: always
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud import APIException
except ImportError:
pass
class AnsibleHcloudImageInfo(Hcloud):
def __init__(self, module):
Hcloud.__init__(self, module, "hcloud_image_info")
self.hcloud_image_info = None
def _prepare_result(self):
tmp = []
for image in self.hcloud_image_info:
if image is not None:
tmp.append({
"id": to_native(image.id),
"status": to_native(image.status),
"type": to_native(image.type),
"name": to_native(image.name),
"description": to_native(image.description),
"os_flavor": to_native(image.os_flavor),
"os_version": to_native(image.os_version),
"labels": image.labels,
})
return tmp
def get_images(self):
try:
if self.module.params.get("id") is not None:
self.hcloud_image_info = [self.client.images.get_by_id(
self.module.params.get("id")
)]
elif self.module.params.get("name") is not None:
self.hcloud_image_info = [self.client.images.get_by_name(
self.module.params.get("name")
)]
else:
params = {}
label_selector = self.module.params.get("label_selector")
if label_selector:
params["label_selector"] = label_selector
image_type = self.module.params.get("type")
if image_type:
params["type"] = image_type
self.hcloud_image_info = self.client.images.get_all(**params)
except APIException as e:
self.module.fail_json(msg=e.message)
@staticmethod
def define_module():
return AnsibleModule(
argument_spec=dict(
id={"type": "int"},
name={"type": "str"},
label_selector={"type": "str"},
type={"choices": ["system", "snapshot", "backup"], "default": "system", "type": "str"},
**Hcloud.base_module_arguments()
),
supports_check_mode=True,
)
def main():
module = AnsibleHcloudImageInfo.define_module()
is_old_facts = module._name == 'hcloud_image_facts'
if is_old_facts:
module.deprecate("The 'hcloud_image_facts' module has been renamed to 'hcloud_image_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
hcloud = AnsibleHcloudImageInfo(module)
hcloud.get_images()
result = hcloud.get_result()
if is_old_facts:
ansible_info = {
'hcloud_image_facts': result['hcloud_image_info']
}
module.exit_json(ansible_facts=ansible_info)
else:
ansible_info = {
'hcloud_image_info': result['hcloud_image_info']
}
module.exit_json(**ansible_info)
if __name__ == "__main__":
main()
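# Hedged playbook sketch (added example, not part of the module): narrowing the
# gathered images by type and label selector, using only options documented above.
#
#   - name: Gather labelled snapshot infos
#     hcloud_image_info:
#       type: snapshot
#       label_selector: "env=prod"
#     register: output
#   - name: Print the gathered infos
#     debug:
#       var: output.hcloud_image_info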
| 29.488038
| 133
| 0.58121
|
4746727abc915d409cb606faadbf06853814d322
| 1,040
|
py
|
Python
|
src/ui/main_frame.py
|
MAE-M/PotentiallyInactiveCpeAnalysisTool
|
58f897fb45437ff72a6db4d490f364061d779c50
|
[
"Apache-2.0"
] | null | null | null |
src/ui/main_frame.py
|
MAE-M/PotentiallyInactiveCpeAnalysisTool
|
58f897fb45437ff72a6db4d490f364061d779c50
|
[
"Apache-2.0"
] | null | null | null |
src/ui/main_frame.py
|
MAE-M/PotentiallyInactiveCpeAnalysisTool
|
58f897fb45437ff72a6db4d490f364061d779c50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# foss@huawei.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.ui.scrollable_frame import ScrollableFrame
class MainFrame:
def __init__(self, root):
# Scrollbar support
# Construction logic: place main_frame inside a canvas
main_frame_scrollable_frame = ScrollableFrame(root)
main_frame_scrollable_frame.pack(fill='y', expand=1)
self.main_frame = main_frame_scrollable_frame.scrollable_frame
def get_main_frame_object(self):
return self.main_frame
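# Added usage sketch (hedged, illustrative only): building the scrollable main
# frame inside a bare Tk root window. Assumes ScrollableFrame wraps a Canvas plus
# a Scrollbar internally, as the comments above describe.
if __name__ == '__main__':
    import tkinter as tk
    demo_root = tk.Tk()
    demo_frame = MainFrame(demo_root).get_main_frame_object()
    tk.Label(demo_frame, text='demo content').pack()
    demo_root.mainloop()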
| 34.666667
| 75
| 0.736538
|
b0f6c31a7d1f60c142074dab7f762305b169bf81
| 1,670
|
py
|
Python
|
GenomicData/utils/visualize.py
|
Zaoyee/deep-geometric-pathway
|
9c1ac8a586b1e1debaedee7eff1e193396d564e4
|
[
"MIT"
] | null | null | null |
GenomicData/utils/visualize.py
|
Zaoyee/deep-geometric-pathway
|
9c1ac8a586b1e1debaedee7eff1e193396d564e4
|
[
"MIT"
] | null | null | null |
GenomicData/utils/visualize.py
|
Zaoyee/deep-geometric-pathway
|
9c1ac8a586b1e1debaedee7eff1e193396d564e4
|
[
"MIT"
] | 2
|
2020-03-24T17:10:59.000Z
|
2021-02-22T23:57:28.000Z
|
import networkx as nx
import matplotlib.pyplot as plt
def show_pth(pathway_obj, *argv):
"""
A simple function that plots the pathway dataset.
The different node types are visualized by different colors.
**Inputs:** `pathway_obj`: the pathway `object` created by calling `cancer_data()`
`batch`: the `batch` of the returned hops_sampler object
Example: hops_samples_obj = hops_sampler(pathway = data,
batch_size = 1,
num_hops = 2)
batch = hops_samples_obj.samples[0]
"""
if len(argv) == 1:
batch = argv[0]
elif len(argv) == 2:
dataflow = argv[0]
batch = dataflow[argv[1]]
else:
raise TypeError("The input number is incorrect.")
g = nx.DiGraph()
# these inputs could be moved into global variables
pathway_info_namelist = pathway_obj.pthway_NameList
for block in batch.dataflow:
temp_edge_index = block.edge_index_ori
temp_edge = pathway_info_namelist.iloc[temp_edge_index.reshape(-1),:]['GenomeName'].values.reshape(2,-1).T
temp_edge = ([x,y] for x, y in zip(list(temp_edge[:,0]), list(temp_edge[:,1])))
g.add_edges_from(temp_edge)
node_color = [pathway_obj.node_class[pathway_obj.pthway_NameList[pathway_obj.pthway_NameList['GenomeName'] == name].index] for name in g.nodes()]
plt.figure(figsize=(30,30))
nx.draw_planar(g, with_labels = True, node_size=1500, node_color=node_color)
plt.show()
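# Hedged call sketch (added, illustrative only), following the docstring example above:
#   show_pth(data, hops_samples_obj.samples[0])    # pathway object + a single batch
#   show_pth(data, hops_samples_obj.samples, 0)    # pathway object + batch list + index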
| 43.947368
| 153
| 0.596407
|
df276f7b4af6c28a6144450c89918d533def4837
| 1,490
|
py
|
Python
|
pipeline/tasks/requirements/vgen_requirements.py
|
enterstudio/artman
|
b9b2e6c0d42a0698b6ee59f4e755e7f2e603f8aa
|
[
"Apache-2.0"
] | 2
|
2019-11-30T23:42:09.000Z
|
2021-08-30T19:54:48.000Z
|
pipeline/tasks/requirements/vgen_requirements.py
|
enterstudio/artman
|
b9b2e6c0d42a0698b6ee59f4e755e7f2e603f8aa
|
[
"Apache-2.0"
] | null | null | null |
pipeline/tasks/requirements/vgen_requirements.py
|
enterstudio/artman
|
b9b2e6c0d42a0698b6ee59f4e755e7f2e603f8aa
|
[
"Apache-2.0"
] | 1
|
2017-03-30T00:28:15.000Z
|
2017-03-30T00:28:15.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Requirements to run VGen"""
from pipeline.tasks.requirements import task_requirement_base
class VGenRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['java']
@classmethod
def install(cls):
# TODO(jgeiger): Do we really want to auto-install Java?
raise Exception('Java not installed')
class MergeRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['kdiff3']
@classmethod
def install(cls):
# TODO(shinfan): Install kdiff3
raise Exception('Kdiff3 not installed')
class ConfigGenRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
# Intentionally do nothing
return []
@classmethod
def install(cls):
# Intentionally do nothing
pass
| 27.090909
| 74
| 0.71745
|
ee9885c0ba2a83cbfc2b0466a1adac416c83abdd
| 484
|
py
|
Python
|
main/migrations/0008_auto_20210120_1947.py
|
opustm/mvp-backend
|
3c25d575fbd299c84df48e8448ec63d11cc08a8e
|
[
"MIT"
] | null | null | null |
main/migrations/0008_auto_20210120_1947.py
|
opustm/mvp-backend
|
3c25d575fbd299c84df48e8448ec63d11cc08a8e
|
[
"MIT"
] | null | null | null |
main/migrations/0008_auto_20210120_1947.py
|
opustm/mvp-backend
|
3c25d575fbd299c84df48e8448ec63d11cc08a8e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-01-21 01:47
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20210120_1944'),
]
operations = [
migrations.AlterField(
model_name='user',
name='id',
field=models.UUIDField(default=uuid.UUID('febdfe64-5c05-4a4a-ac59-25ef6a8681df'), editable=False, primary_key=True, serialize=False),
),
]
| 24.2
| 145
| 0.63843
|
b646db6b076130ad9fbead28c47027addfc90c02
| 61
|
py
|
Python
|
gym_invaders/ai_invader/__init__.py
|
Jh123x/Orbital
|
6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9
|
[
"MIT"
] | 4
|
2020-05-15T11:17:09.000Z
|
2020-06-30T01:11:41.000Z
|
gym_invaders/ai_invader/__init__.py
|
Jh123x/Orbital
|
6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9
|
[
"MIT"
] | 10
|
2020-05-16T10:45:32.000Z
|
2020-07-27T07:17:02.000Z
|
gym_invaders/ai_invader/__init__.py
|
Jh123x/Orbital
|
6f8f2da4fd26ef1d77c0c6183230c3a5e6bf0bb9
|
[
"MIT"
] | null | null | null |
from .util import *
from .model import *
from .agent import *
| 20.333333
| 20
| 0.721311
|
724b3ca40462d94e33d95a5dd4016fa34f61daa0
| 7,941
|
py
|
Python
|
methods/binary_classifier.py
|
aber-wgr/OD-test
|
1f0836dd7a1c5ede34caca1a3492e9e7c3023538
|
[
"MIT"
] | 61
|
2018-09-14T02:48:01.000Z
|
2022-02-14T09:13:45.000Z
|
methods/binary_classifier.py
|
ashafaei/OD-test
|
8252aace84e2ae1ab95067876985f62a1060aad6
|
[
"MIT"
] | 3
|
2019-07-31T09:59:46.000Z
|
2020-04-16T21:55:16.000Z
|
methods/binary_classifier.py
|
aber-wgr/OD-test
|
1f0836dd7a1c5ede34caca1a3492e9e7c3023538
|
[
"MIT"
] | 12
|
2018-09-25T10:36:39.000Z
|
2022-03-28T18:09:00.000Z
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from utils.iterative_trainer import IterativeTrainerConfig, IterativeTrainer
from utils.logger import Logger
import os
from os import path
from termcolor import colored
from methods.base_threshold import ProbabilityThreshold
from datasets import MirroredDataset
class BinaryModelWrapper(nn.Module):
""" The wrapper class for H.
We add a layer at the end of any classifier. This module takes the |y| dimensional output
and maps it to a one-dimensional prediction.
"""
def __init__(self, base_model):
super(BinaryModelWrapper, self).__init__()
self.base_model = base_model
output_size = base_model.output_size()[1].item()
self.H = nn.Sequential(
nn.BatchNorm1d(output_size),
nn.Linear(output_size, 1),
)
def forward(self, x):
base_output = self.base_model.forward(x, softmax=False)
output = self.H(base_output)
return output
def preferred_name(self):
return self.base_model.__class__.__name__
def classify(self, x):
return (x>0).long()
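# Hedged usage sketch (added, illustrative only): wrapping an arbitrary base
# classifier whose output_size() reports (N, |y|). The wrapper emits one logit per
# sample and classify() thresholds it at zero; base_model/device/x are assumptions.
#   wrapped = BinaryModelWrapper(base_model).to(device)
#   logits = wrapped(x)                   # shape (N, 1)
#   labels = wrapped.classify(logits)     # 1 where logit > 0, else 0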
class BinaryClassifier(ProbabilityThreshold):
def method_identifier(self):
output = "BinaryClassifier"
if len(self.add_identifier) > 0:
output = output + "/" + self.add_identifier
return output
def get_H_config(self, dataset, will_train=True):
print("Preparing training D1+D2 (H)")
print("Mixture size: %s"%colored('%d'%len(dataset), 'green'))
import global_vars as Global
# 80%, 20% for local train+test
train_ds, valid_ds = dataset.split_dataset(0.8)
if self.args.D1 in Global.mirror_augment:
print(colored("Mirror augmenting %s"%self.args.D1, 'green'))
new_train_ds = train_ds + MirroredDataset(train_ds)
train_ds = new_train_ds
# Initialize the multi-threaded loaders.
train_loader = DataLoader(train_ds, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.workers, pin_memory=True, drop_last=True)
valid_loader = DataLoader(valid_ds, batch_size=self.args.batch_size, num_workers=self.args.workers, pin_memory=True)
all_loader = DataLoader(dataset, batch_size=self.args.batch_size, num_workers=self.args.workers, pin_memory=True)
# Set up the criterion
criterion = nn.BCEWithLogitsLoss().cuda()
# Set up the model
model = Global.get_ref_classifier(self.args.D1)[self.default_model]().to(self.args.device)
self.add_identifier = model.__class__.__name__
if hasattr(model, 'preferred_name'):
self.add_identifier = model.preferred_name()
model = BinaryModelWrapper(model).to(self.args.device)
# Set up the config
config = IterativeTrainerConfig()
base_model_name = model.__class__.__name__
if hasattr(model, 'preferred_name'):
base_model_name = model.preferred_name()
config.name = '_%s[%s](%s->%s)'%(self.__class__.__name__, base_model_name, self.args.D1, self.args.D2)
config.train_loader = train_loader
config.valid_loader = valid_loader
config.phases = {
'train': {'dataset' : train_loader, 'backward': True},
'test': {'dataset' : valid_loader, 'backward': False},
'testU': {'dataset' : all_loader, 'backward': False},
}
config.criterion = criterion
config.classification = True
config.cast_float_label = True
config.stochastic_gradient = True
config.visualize = not self.args.no_visualize
config.model = model
config.logger = Logger()
config.optim = optim.Adam(model.parameters(), lr=1e-3)
config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(config.optim, patience=5, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config.max_epoch = 30
if hasattr(model, 'train_config'):
model_train_config = model.train_config()
for key, value in model_train_config.items():
print('Overriding config.%s'%key)
config.__setattr__(key, value)
return config
def propose_H(self, dataset):
raise NotImplementedError("You know, you're not supposed to treat me like this!")
def train_H(self, dataset):
# Wrap the (mixture) dataset in SubDataset so we can easily
# split it later. God knows how many wrappers we have by this point.
from datasets import SubDataset
dataset = SubDataset('%s-%s'%(self.args.D1, self.args.D2), dataset, torch.arange(len(dataset)).int())
h_path = path.join(self.args.experiment_path, '%s'%(self.__class__.__name__),
'%d'%(self.default_model),
'%s->%s.pth'%(self.args.D1, self.args.D2))
h_parent = path.dirname(h_path)
if not path.isdir(h_parent):
os.makedirs(h_parent)
done_path = h_path + '.done'
will_train = self.args.force_train_h or not path.isfile(done_path)
h_config = self.get_H_config(dataset)
trainer = IterativeTrainer(h_config, self.args)
if will_train:
print(colored('Training from scratch', 'green'))
best_accuracy = -1
trainer.run_epoch(0, phase='test')
for epoch in range(1, h_config.max_epoch):
trainer.run_epoch(epoch, phase='train')
trainer.run_epoch(epoch, phase='test')
train_loss = h_config.logger.get_measure('train_loss').mean_epoch()
h_config.scheduler.step(train_loss)
# Track the learning rates and threshold.
lrs = [float(param_group['lr']) for param_group in h_config.optim.param_groups]
h_config.logger.log('LRs', lrs, epoch)
h_config.logger.get_measure('LRs').legend = ['LR%d'%i for i in range(len(lrs))]
if h_config.visualize:
# Show the average losses for all the phases in one figure.
h_config.logger.visualize_average_keys('.*_loss', 'Average Loss', trainer.visdom)
h_config.logger.visualize_average_keys('.*_accuracy', 'Average Accuracy', trainer.visdom)
h_config.logger.visualize_average('LRs', trainer.visdom)
test_average_acc = h_config.logger.get_measure('test_accuracy').mean_epoch()
# Save the logger for future reference.
torch.save(h_config.logger.measures, path.join(h_parent, 'logger.%s->%s.pth'%(self.args.D1, self.args.D2)))
if best_accuracy < test_average_acc:
print('Updating the on file model with %s'%(colored('%.4f'%test_average_acc, 'red')))
best_accuracy = test_average_acc
torch.save(h_config.model.state_dict(), h_path)
if test_average_acc > 1-1e-4:
break
torch.save({'finished':True}, done_path)
if h_config.visualize:
trainer.visdom.save([trainer.visdom.env])
# Load the best model.
print(colored('Loading H model from %s'%h_path, 'red'))
h_config.model.load_state_dict(torch.load(h_path))
trainer.run_epoch(0, phase='testU')
test_average_acc = h_config.logger.get_measure('testU_accuracy').mean_epoch(epoch=0)
print("Valid/Test average accuracy %s"%colored('%.4f%%'%(test_average_acc*100), 'red'))
self.H_class = h_config.model
self.H_class.eval()
return test_average_acc
| 43.157609
| 154
| 0.622718
|
8a3fa70bdde79acb6c99fe38f6f52389465d42fa
| 4,584
|
py
|
Python
|
tests/basepage.py
|
Skhsouravhalder/newbot
|
b3ce01dafdbdc5f93dec551449fc720572ea253d
|
[
"MIT"
] | null | null | null |
tests/basepage.py
|
Skhsouravhalder/newbot
|
b3ce01dafdbdc5f93dec551449fc720572ea253d
|
[
"MIT"
] | null | null | null |
tests/basepage.py
|
Skhsouravhalder/newbot
|
b3ce01dafdbdc5f93dec551449fc720572ea253d
|
[
"MIT"
] | null | null | null |
"""BasePage tests subclasses."""
#
# (C) Pywikibot team, 2015-2022
#
# Distributed under the terms of the MIT license.
#
from pywikibot.page import BasePage
from tests.aspects import TestCase
class BasePageTestBase(TestCase):
"""Base of BasePage test classes."""
_page = None
def setUp(self):
"""Set up test."""
super().setUp()
assert self._page, 'setUp() must create an empty BasePage in _page'
assert isinstance(self._page, BasePage)
class BasePageLoadRevisionsCachingTestBase(BasePageTestBase):
"""
Test site.loadrevisions() caching.
This test class monkey patches site.loadrevisions, which will cause
the pickling tests in site_tests and page_tests to fail, if it
is done on the same site as those tests use (the default site).
"""
cached = False
custom_text = 'foobar'
def setUp(self):
"""Set up test."""
super().setUp()
assert self.cached is False, 'Tests do not support caching'
def _test_page_text(self, get_text=True):
"""Test site.loadrevisions() with .text."""
page = self._page
self.assertFalse(hasattr(page, '_revid'))
self.assertFalse(hasattr(page, '_text'))
self.assertTrue(hasattr(page, '_revisions'))
self.assertFalse(page._revisions)
# verify that initializing the page content
# does not discard the custom text
custom_text = self.custom_text
page.text = custom_text
self.site.loadrevisions(page, total=1)
self.assertTrue(hasattr(page, '_revid'))
self.assertTrue(hasattr(page, '_revisions'))
self.assertLength(page._revisions, 1)
self.assertIn(page._revid, page._revisions)
self.assertEqual(page._text, custom_text)
self.assertEqual(page.text, page._text)
del page.text
self.assertFalse(hasattr(page, '_text'))
self.assertIsNone(page._revisions[page._revid].text)
self.assertFalse(hasattr(page, '_text'))
self.assertIsNone(page._latest_cached_revision())
page.text = custom_text
self.site.loadrevisions(page, total=1, content=True)
self.assertIsNotNone(page._latest_cached_revision())
self.assertEqual(page._text, custom_text)
self.assertEqual(page.text, page._text)
del page.text
self.assertFalse(hasattr(page, '_text'))
# Verify that calling .text doesn't call loadrevisions again
loadrevisions = self.site.loadrevisions
try:
self.site.loadrevisions = None
if get_text:
loaded_text = page.text
else: # T107537
with self.assertRaises(NotImplementedError):
page.text
loaded_text = ''
self.assertIsNotNone(loaded_text)
self.assertFalse(hasattr(page, '_text'))
page.text = custom_text
if get_text:
self.assertEqual(page.get(), loaded_text)
self.assertEqual(page._text, custom_text)
self.assertEqual(page.text, page._text)
del page.text
self.assertFalse(hasattr(page, '_text'))
if get_text:
self.assertEqual(page.text, loaded_text)
finally:
self.site.loadrevisions = loadrevisions
class BasePageMethodsTestBase(BasePageTestBase):
"""Test base methods."""
def _test_invoke(self):
"""Basic invocation of some base methods and properties."""
self.assertTrue(self._page.exists())
self.assertIsNotNone(self._page.latest_revision)
self.assertIsInstance(self._page.latest_revision_id, int)
self.assertGreaterEqual(self._page.latest_revision_id, 1)
self.assertIsInstance(self._page.latest_revision.parentid, int)
self.assertGreaterEqual(self._page.latest_revision.parentid, 0)
self._page.botMayEdit()
def _test_return_datatypes(self):
"""Test the base methods have correct datatypes only."""
self.assertIsInstance(self._page.langlinks(), list)
self.assertIsInstance(self._page.templates(), list)
self.assertIsInstance(self._page.isCategoryRedirect(), int)
def _test_no_wikitext(self):
"""Test the base methods responses simulate no wikitext."""
self._test_return_datatypes()
self.assertEqual(self._page.langlinks(), [])
self.assertEqual(self._page.templates(), [])
self.assertFalse(self._page.isCategoryRedirect())
self.assertTrue(self._page.botMayEdit())
| 33.459854
| 75
| 0.650087
|
9e990c6b496048275dcdae9fefd8df021d093624
| 39,202
|
py
|
Python
|
var/spack/repos/builtin/packages/trilinos/package.py
|
VladimirUspenskii/spack
|
18b83c3833c0e138a7153bbb14e68b1147b3f3d1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/trilinos/package.py
|
VladimirUspenskii/spack
|
18b83c3833c0e138a7153bbb14e68b1147b3f3d1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2022-03-01T04:35:54.000Z
|
2022-03-02T04:44:21.000Z
|
var/spack/repos/builtin/packages/trilinos/package.py
|
VladimirUspenskii/spack
|
18b83c3833c0e138a7153bbb14e68b1147b3f3d1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
from spack.build_environment import dso_suffix
from spack.error import NoHeadersError
from spack.operating_systems.mac_os import macos_version
from spack.pkg.builtin.kokkos import Kokkos
# Trilinos is complicated to build, as an inspiration a couple of links to
# other repositories which build it:
# https://github.com/hpcugent/easybuild-easyblocks/blob/master/easybuild/easyblocks/t/trilinos.py#L111
# https://github.com/koecher/candi/blob/master/deal.II-toolchain/packages/trilinos.package
# https://gitlab.com/configurations/cluster-config/blob/master/trilinos.sh
# https://github.com/Homebrew/homebrew-science/blob/master/trilinos.rb and some
# relevant documentation/examples:
# https://github.com/trilinos/Trilinos/issues/175
class Trilinos(CMakePackage, CudaPackage, ROCmPackage):
"""The Trilinos Project is an effort to develop algorithms and enabling
technologies within an object-oriented software framework for the solution
of large-scale, complex multi-physics engineering and scientific problems.
A unique design feature of Trilinos is its focus on packages.
"""
homepage = "https://trilinos.org/"
url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-12-12-1.tar.gz"
git = "https://github.com/trilinos/Trilinos.git"
maintainers = ['keitat', 'sethrj', 'kuberry']
tags = ['e4s']
# ###################### Versions ##########################
version('master', branch='master')
version('develop', branch='develop')
version('13.2.0', commit='4a5f7906a6420ee2f9450367e9cc95b28c00d744') # tag trilinos-release-13-2-0
version('13.0.1', commit='4796b92fb0644ba8c531dd9953e7a4878b05c62d', preferred=True) # tag trilinos-release-13-0-1
version('13.0.0', commit='9fec35276d846a667bc668ff4cbdfd8be0dfea08') # tag trilinos-release-13-0-0
version('12.18.1', commit='55a75997332636a28afc9db1aee4ae46fe8d93e7') # tag trilinos-release-12-18-1
version('12.14.1', sha256='52a4406cca2241f5eea8e166c2950471dd9478ad6741cbb2a7fc8225814616f0')
version('12.12.1', sha256='5474c5329c6309224a7e1726cf6f0d855025b2042959e4e2be2748bd6bb49e18')
version('12.10.1', sha256='ab81d917196ffbc21c4927d42df079dd94c83c1a08bda43fef2dd34d0c1a5512')
version('12.8.1', sha256='d20fe60e31e3ba1ef36edecd88226240a518f50a4d6edcc195b88ee9dda5b4a1')
version('12.6.4', sha256='1c7104ba60ee8cc4ec0458a1c4f6a26130616bae7580a7b15f2771a955818b73')
version('12.6.3', sha256='4d28298bb4074eef522db6cd1626f1a934e3d80f292caf669b8846c0a458fe81')
version('12.6.2', sha256='8be7e3e1166cc05aea7f856cc8033182e8114aeb8f87184cb38873bfb2061779')
version('12.6.1', sha256='4b38ede471bed0036dcb81a116fba8194f7bf1a9330da4e29c3eb507d2db18db')
version('12.4.2', sha256='fd2c12e87a7cedc058bcb8357107ffa2474997aa7b17b8e37225a1f7c32e6f0e')
version('12.2.1', sha256='088f303e0dc00fb4072b895c6ecb4e2a3ad9a2687b9c62153de05832cf242098')
version('12.0.1', sha256='eee7c19ca108538fa1c77a6651b084e06f59d7c3307dae77144136639ab55980')
version('11.14.3', sha256='e37fa5f69103576c89300e14d43ba77ad75998a54731008b25890d39892e6e60')
version('11.14.2', sha256='f22b2b0df7b88e28b992e19044ba72b845292b93cbbb3a948488199647381119')
version('11.14.1', sha256='f10fc0a496bf49427eb6871c80816d6e26822a39177d850cc62cf1484e4eec07')
# ###################### Variants ##########################
# Build options
variant('complex', default=False, description='Enable complex numbers in Trilinos')
variant('cuda_rdc', default=False, description='turn on RDC for CUDA build')
variant('cxxstd', default='14', values=['11', '14', '17'], multi=False)
variant('debug', default=False, description='Enable runtime safety and debug checks')
variant('explicit_template_instantiation', default=True, description='Enable explicit template instantiation (ETI)')
variant('float', default=False, description='Enable single precision (float) numbers in Trilinos')
variant('fortran', default=True, description='Compile with Fortran support')
variant('gotype', default='long_long',
values=('int', 'long', 'long_long', 'all'),
multi=False,
description='global ordinal type for Tpetra')
variant('openmp', default=False, description='Enable OpenMP')
variant('python', default=False, description='Build PyTrilinos wrappers')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('wrapper', default=False, description="Use nvcc-wrapper for CUDA build")
# TPLs (alphabet order)
variant('adios2', default=False, description='Enable ADIOS2')
variant('boost', default=False, description='Compile with Boost')
variant('hdf5', default=False, description='Compile with HDF5')
variant('hypre', default=False, description='Compile with Hypre preconditioner')
variant('mpi', default=True, description='Compile with MPI parallelism')
variant('mumps', default=False, description='Compile with support for MUMPS solvers')
variant('suite-sparse', default=False, description='Compile with SuiteSparse solvers')
variant('superlu-dist', default=False, description='Compile with SuperluDist solvers')
variant('superlu', default=False, description='Compile with SuperLU solvers')
variant('strumpack', default=False, description='Compile with STRUMPACK solvers')
variant('x11', default=False, description='Compile with X11 when +exodus')
# Package options (alphabet order)
variant('amesos', default=True, description='Compile with Amesos')
variant('amesos2', default=True, description='Compile with Amesos2')
variant('anasazi', default=True, description='Compile with Anasazi')
variant('aztec', default=True, description='Compile with Aztec')
variant('belos', default=True, description='Compile with Belos')
variant('chaco', default=False, description='Compile with Chaco from SEACAS')
variant('epetra', default=True, description='Compile with Epetra')
variant('epetraext', default=True, description='Compile with EpetraExt')
variant('exodus', default=False, description='Compile with Exodus from SEACAS')
variant('ifpack', default=True, description='Compile with Ifpack')
variant('ifpack2', default=True, description='Compile with Ifpack2')
variant('intrepid', default=False, description='Enable Intrepid')
variant('intrepid2', default=False, description='Enable Intrepid2')
variant('isorropia', default=False, description='Compile with Isorropia')
variant('gtest', default=False, description='Build vendored Googletest')
variant('kokkos', default=True, description='Compile with Kokkos')
variant('ml', default=True, description='Compile with ML')
variant('minitensor', default=False, description='Compile with MiniTensor')
variant('muelu', default=True, description='Compile with Muelu')
variant('nox', default=False, description='Compile with NOX')
variant('piro', default=False, description='Compile with Piro')
variant('phalanx', default=False, description='Compile with Phalanx')
variant('rol', default=False, description='Compile with ROL')
variant('rythmos', default=False, description='Compile with Rythmos')
variant('sacado', default=True, description='Compile with Sacado')
variant('stk', default=False, description='Compile with STK')
variant('shards', default=False, description='Compile with Shards')
variant('shylu', default=False, description='Compile with ShyLU')
variant('stokhos', default=False, description='Compile with Stokhos')
variant('stratimikos', default=False, description='Compile with Stratimikos')
variant('teko', default=False, description='Compile with Teko')
variant('tempus', default=False, description='Compile with Tempus')
variant('tpetra', default=True, description='Compile with Tpetra')
variant('trilinoscouplings', default=False, description='Compile with TrilinosCouplings')
variant('zoltan', default=False, description='Compile with Zoltan')
variant('zoltan2', default=False, description='Compile with Zoltan2')
# Internal package options (alphabetical order)
variant('basker', default=False, description='Compile with the Basker solver in Amesos2')
variant('epetraextbtf', default=False, description='Compile with BTF in EpetraExt')
variant('epetraextexperimental', default=False, description='Compile with experimental in EpetraExt')
variant('epetraextgraphreorderings', default=False, description='Compile with graph reorderings in EpetraExt')
# External package options
variant('dtk', default=False, description='Enable DataTransferKit (deprecated)')
variant('scorec', default=False, description='Enable SCOREC')
variant('mesquite', default=False, description='Enable Mesquite (deprecated)')
resource(name='dtk',
git='https://github.com/ornl-cees/DataTransferKit.git',
commit='4fe4d9d56cfd4f8a61f392b81d8efd0e389ee764', # branch dtk-3.0
placement='DataTransferKit',
when='+dtk @12.14.0:12.14')
resource(name='dtk',
git='https://github.com/ornl-cees/DataTransferKit.git',
commit='edfa050cd46e2274ab0a0b7558caca0079c2e4ca', # tag 3.1-rc1
placement='DataTransferKit',
submodules=True,
when='+dtk @12.18.0:12.18')
resource(name='scorec',
git='https://github.com/SCOREC/core.git',
commit='73c16eae073b179e45ec625a5abe4915bc589af2', # tag v2.2.5
placement='SCOREC',
when='+scorec')
resource(name='mesquite',
url='https://github.com/trilinos/mesquite/archive/trilinos-release-12-12-1.tar.gz',
sha256='e0d09b0939dbd461822477449dca611417316e8e8d8268fd795debb068edcbb5',
placement='packages/mesquite',
when='+mesquite @12.12.1:12.16')
resource(name='mesquite',
git='https://github.com/trilinos/mesquite.git',
commit='20a679679b5cdf15bf573d66c5dc2b016e8b9ca1', # branch trilinos-release-12-12-1
placement='packages/mesquite',
when='+mesquite @12.18.1:12.18')
resource(name='mesquite',
git='https://github.com/trilinos/mesquite.git',
tag='develop',
placement='packages/mesquite',
when='+mesquite @master')
# ###################### Conflicts ##########################
# Epetra stack
with when('~epetra'):
conflicts('+amesos')
conflicts('+aztec')
conflicts('+epetraext')
conflicts('+ifpack')
conflicts('+isorropia')
conflicts('+ml', when='@13.2:')
with when('~epetraext'):
conflicts('+isorropia')
conflicts('+teko')
conflicts('+epetraextbtf')
conflicts('+epetraextexperimental')
conflicts('+epetraextgraphreorderings')
with when('+teko'):
conflicts('~stratimikos')
conflicts('@:12 gotype=long')
# Tpetra stack
with when('~kokkos'):
conflicts('+cuda')
conflicts('+rocm')
conflicts('+tpetra')
conflicts('+intrepid2')
conflicts('+phalanx')
with when('~tpetra'):
conflicts('+amesos2')
conflicts('+dtk')
conflicts('+ifpack2')
conflicts('+muelu')
conflicts('+teko')
conflicts('+zoltan2')
with when('~zoltan'):
conflicts('+isorropia')
conflicts('+scorec')
conflicts('+shylu')
conflicts('+zoltan2')
with when('~shards'):
conflicts('+intrepid')
conflicts('+intrepid2')
conflicts('+scorec')
conflicts('+stk')
with when('+scorec'):
conflicts('~mpi')
conflicts('~stk')
# Known requirements from tribits dependencies
conflicts('+aztec', when='~fortran')
conflicts('+basker', when='~amesos2')
conflicts('+ifpack2', when='~belos')
conflicts('+intrepid', when='~sacado')
conflicts('+minitensor', when='~boost')
conflicts('+phalanx', when='~sacado')
conflicts('+stokhos', when='~kokkos')
conflicts('+tempus', when='~nox')
# Only allow DTK with Trilinos 12.14, 12.18
conflicts('+dtk', when='~boost')
conflicts('+dtk', when='~intrepid2')
conflicts('+dtk', when='@:12.12,13:')
# Installed FindTrilinos are broken in SEACAS if Fortran is disabled
# see https://github.com/trilinos/Trilinos/issues/3346
conflicts('+exodus', when='@:13.0.1 ~fortran')
# Only allow Mesquite with Trilinos 12.12 and up, and master
conflicts('+mesquite', when='@:12.10,master')
# Strumpack is only available as of mid-2021
conflicts('+strumpack', when='@:13.0')
# Can only use one type of SuperLU
conflicts('+superlu-dist', when='+superlu')
# For Trilinos v11 we need to force SuperLUDist=OFF, since only the
# deprecated SuperLUDist v3.3 together with an Amesos patch is working.
conflicts('+superlu-dist', when='@11.4.1:11.14.3')
# see https://github.com/trilinos/Trilinos/issues/3566
conflicts('+superlu-dist', when='+float+amesos2+explicit_template_instantiation^superlu-dist@5.3.0:')
# Amesos, conflicting types of double and complex SLU_D
# see https://trilinos.org/pipermail/trilinos-users/2015-March/004731.html
# and https://trilinos.org/pipermail/trilinos-users/2015-March/004802.html
conflicts('+superlu-dist', when='+complex+amesos2')
# https://github.com/trilinos/Trilinos/issues/2994
conflicts(
'+shared', when='+stk platform=darwin',
msg='Cannot build Trilinos with STK as a shared library on Darwin.'
)
conflicts('+adios2', when='@:12.14.1')
conflicts('cxxstd=11', when='@13.2:')
conflicts('cxxstd=17', when='@:12')
conflicts('cxxstd=11', when='+wrapper ^cuda@6.5.14')
conflicts('cxxstd=14', when='+wrapper ^cuda@6.5.14:8.0.61')
conflicts('cxxstd=17', when='+wrapper ^cuda@6.5.14:10.2.89')
# Multi-value gotype only applies to trilinos through 12.14
conflicts('gotype=all', when='@12.15:')
# CUDA without wrapper requires clang
for _compiler in spack.compilers.supported_compilers():
if _compiler != 'clang':
conflicts('+cuda', when='~wrapper %' + _compiler,
msg='trilinos~wrapper+cuda can only be built with the '
'Clang compiler')
conflicts('+cuda_rdc', when='~cuda')
conflicts('+wrapper', when='~cuda')
conflicts('+wrapper', when='%clang')
# Old trilinos fails with new CUDA (see #27180)
conflicts('@:13.0.1 +cuda', when='^cuda@11:')
# Build hangs with CUDA 11.6 (see #28439)
conflicts('+cuda +stokhos', when='^cuda@11.6:')
# stokhos fails on xl/xl_r
conflicts('+stokhos', when='%xl')
conflicts('+stokhos', when='%xl_r')
# Fortran mangling fails on Apple M1 (see spack/spack#25900)
conflicts('@:13.0.1 +fortran', when='target=m1')
# ###################### Dependencies ##########################
depends_on('adios2', when='+adios2')
depends_on('blas')
depends_on('boost', when='+boost')
depends_on('cgns', when='+exodus')
depends_on('hdf5+hl', when='+hdf5')
depends_on('hypre~internal-superlu~int64', when='+hypre')
depends_on('kokkos-nvcc-wrapper', when='+wrapper')
depends_on('lapack')
# depends_on('perl', type=('build',)) # TriBITS finds but doesn't use...
depends_on('libx11', when='+x11')
depends_on('matio', when='+exodus')
depends_on('metis', when='+zoltan')
depends_on('mpi', when='+mpi')
depends_on('netcdf-c', when="+exodus")
depends_on('parallel-netcdf', when='+exodus+mpi')
depends_on('parmetis', when='+mpi +zoltan')
depends_on('parmetis', when='+scorec')
depends_on('py-mpi4py', when='+mpi+python', type=('build', 'run'))
depends_on('py-numpy', when='+python', type=('build', 'run'))
depends_on('python', when='+python')
depends_on('python', when='@13.2: +ifpack +hypre', type='build')
depends_on('python', when='@13.2: +ifpack2 +hypre', type='build')
depends_on('scalapack', when='+mumps')
depends_on('scalapack', when='+strumpack+mpi')
depends_on('strumpack+shared', when='+strumpack')
depends_on('suite-sparse', when='+suite-sparse')
depends_on('superlu-dist', when='+superlu-dist')
depends_on('superlu@4.3 +pic', when='+superlu')
depends_on('swig', when='+python')
depends_on('zlib', when='+zoltan')
# Trilinos' Tribits config system is limited which makes it very tricky to
# link Amesos with static MUMPS, see
# https://trilinos.org/docs/dev/packages/amesos2/doc/html/classAmesos2_1_1MUMPS.html
# One could work it out by getting linking flags from mpif90 --showme:link
# (or alike) and adding results to -DTrilinos_EXTRA_LINK_FLAGS together
# with Blas and Lapack and ScaLAPACK and Blacs and -lgfortran and it may
# work at the end. But let's avoid all this by simply using shared libs
depends_on('mumps@5.0:+shared', when='+mumps')
for _flag in ('~mpi', '+mpi'):
depends_on('hdf5' + _flag, when='+hdf5' + _flag)
depends_on('mumps' + _flag, when='+mumps' + _flag)
for _flag in ('~openmp', '+openmp'):
depends_on('mumps' + _flag, when='+mumps' + _flag)
depends_on('hwloc', when='@13: +kokkos')
depends_on('hwloc+cuda', when='@13: +kokkos+cuda')
depends_on('hypre@develop', when='@master: +hypre')
depends_on('netcdf-c+mpi+parallel-netcdf', when="+exodus+mpi@12.12.1:")
depends_on('superlu-dist@4.4:5.3', when='@12.6.2:12.12.1+superlu-dist')
depends_on('superlu-dist@5.4:6.2.0', when='@12.12.2:13.0.0+superlu-dist')
depends_on('superlu-dist@6.3.0:', when='@13.0.1:99 +superlu-dist')
depends_on('superlu-dist@:4.3', when='@11.14.1:12.6.1+superlu-dist')
depends_on('superlu-dist@develop', when='@master: +superlu-dist')
# ###################### Patches ##########################
patch('umfpack_from_suitesparse.patch', when='@11.14.1:12.8.1')
for _compiler in ['xl', 'xl_r', 'clang']:
patch('xlf_seacas.patch', when='@12.10.1:12.12.1 %' + _compiler)
patch('xlf_tpetra.patch', when='@12.12.1 %' + _compiler)
patch('fix_clang_errors_12_18_1.patch', when='@12.18.1%clang')
patch('cray_secas_12_12_1.patch', when='@12.12.1%cce')
patch('cray_secas.patch', when='@12.14.1:%cce')
# workaround an NVCC bug with c++14 (https://github.com/trilinos/Trilinos/issues/6954)
# avoid calling deprecated functions with CUDA-11
patch('fix_cxx14_cuda11.patch', when='@13.0.0:13.0.1 cxxstd=14 ^cuda@11:')
# Allow building with +teko gotype=long
patch('https://github.com/trilinos/Trilinos/commit/b17f20a0b91e0b9fc5b1b0af3c8a34e2a4874f3f.patch',
sha256='dee6c55fe38eb7f6367e1896d6bc7483f6f9ab8fa252503050cc0c68c6340610',
when='@13.0.0:13.0.1 +teko gotype=long')
def flag_handler(self, name, flags):
is_cce = self.spec.satisfies('%cce')
if name == 'cxxflags':
spec = self.spec
if '+mumps' in spec:
# see https://github.com/trilinos/Trilinos/blob/master/packages/amesos/README-MUMPS
flags.append('-DMUMPS_5_0')
if '+stk platform=darwin' in spec:
flags.append('-DSTK_NO_BOOST_STACKTRACE')
if '+stk%intel' in spec:
# Workaround for Intel compiler segfaults with STK and IPO
flags.append('-no-ipo')
if '+wrapper' in spec:
flags.append('--expt-extended-lambda')
elif name == 'ldflags' and is_cce:
flags.append('-fuse-ld=gold')
if is_cce:
return (None, None, flags)
return (flags, None, None)
def url_for_version(self, version):
url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-{0}.tar.gz"
return url.format(version.dashed)
def setup_dependent_run_environment(self, env, dependent_spec):
if '+cuda' in self.spec:
# currently Trilinos doesn't perform the memory fence so
# it relies on blocking CUDA kernel launch. This is needed
# in case the dependent app also run a CUDA backend via Trilinos
env.set('CUDA_LAUNCH_BLOCKING', '1')
def setup_dependent_package(self, module, dependent_spec):
if '+wrapper' in self.spec:
self.spec.kokkos_cxx = self.spec["kokkos-nvcc-wrapper"].kokkos_cxx
else:
self.spec.kokkos_cxx = spack_cxx
def setup_build_environment(self, env):
spec = self.spec
if '+cuda' in spec and '+wrapper' in spec:
if '+mpi' in spec:
env.set('OMPI_CXX', spec["kokkos-nvcc-wrapper"].kokkos_cxx)
env.set('MPICH_CXX', spec["kokkos-nvcc-wrapper"].kokkos_cxx)
env.set('MPICXX_CXX', spec["kokkos-nvcc-wrapper"].kokkos_cxx)
else:
env.set('CXX', spec["kokkos-nvcc-wrapper"].kokkos_cxx)
if '+rocm' in spec:
if '+mpi' in spec:
env.set('OMPI_CXX', self.spec['hip'].hipcc)
env.set('MPICH_CXX', self.spec['hip'].hipcc)
env.set('MPICXX_CXX', self.spec['hip'].hipcc)
else:
env.set('CXX', self.spec['hip'].hipcc)
if '+stk' in spec:
# Using CXXFLAGS for hipcc which doesn't use flags in the spack wrappers
env.set('CXXFLAGS', '-DSTK_NO_BOOST_STACKTRACE')
def cmake_args(self):
options = []
spec = self.spec
define = CMakePackage.define
define_from_variant = self.define_from_variant
def _make_definer(prefix):
def define_enable(suffix, value=None):
key = prefix + suffix
if value is None:
# Default to lower-case spec
value = suffix.lower()
elif isinstance(value, bool):
# Explicit true/false
return define(key, value)
return define_from_variant(key, value)
return define_enable
# Return "Trilinos_ENABLE_XXX" for spec "+xxx" or boolean value
define_trilinos_enable = _make_definer("Trilinos_ENABLE_")
# Same but for TPLs
define_tpl_enable = _make_definer("TPL_ENABLE_")
# #################### Base Settings #######################
options.extend([
define('Trilinos_VERBOSE_CONFIGURE', False),
define_from_variant('BUILD_SHARED_LIBS', 'shared'),
define_trilinos_enable('ALL_OPTIONAL_PACKAGES', False),
define_trilinos_enable('ALL_PACKAGES', False),
define_trilinos_enable('CXX11', True),
define_trilinos_enable('DEBUG', 'debug'),
define_trilinos_enable('EXAMPLES', False),
define_trilinos_enable('SECONDARY_TESTED_CODE', True),
define_trilinos_enable('TESTS', False),
define_trilinos_enable('Fortran'),
define_trilinos_enable('OpenMP'),
define_trilinos_enable('EXPLICIT_INSTANTIATION',
'explicit_template_instantiation')
])
if spec.version >= Version('13'):
options.append(define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'))
else:
# Prior to version 13, Trilinos would erroneously inject
# '-std=c++11' regardless of CMAKE_CXX_STANDARD value
options.append(define(
'Trilinos_CXX11_FLAGS',
self.compiler.cxx14_flag
if spec.variants['cxxstd'].value == '14'
else self.compiler.cxx11_flag
))
# ################## Trilinos Packages #####################
options.extend([
define_trilinos_enable('Amesos'),
define_trilinos_enable('Amesos2'),
define_trilinos_enable('Anasazi'),
define_trilinos_enable('AztecOO', 'aztec'),
define_trilinos_enable('Belos'),
define_trilinos_enable('Epetra'),
define_trilinos_enable('EpetraExt'),
define_trilinos_enable('FEI', False),
define_trilinos_enable('Gtest'),
define_trilinos_enable('Ifpack'),
define_trilinos_enable('Ifpack2'),
define_trilinos_enable('Intrepid'),
define_trilinos_enable('Intrepid2'),
define_trilinos_enable('Isorropia'),
define_trilinos_enable('Kokkos'),
define_trilinos_enable('MiniTensor'),
define_trilinos_enable('Mesquite'),
define_trilinos_enable('ML'),
define_trilinos_enable('MueLu'),
define_trilinos_enable('NOX'),
define_trilinos_enable('Pamgen', False),
define_trilinos_enable('Panzer', False),
define_trilinos_enable('Pike', False),
define_trilinos_enable('Piro'),
define_trilinos_enable('Phalanx'),
define_trilinos_enable('PyTrilinos', 'python'),
define_trilinos_enable('ROL'),
define_trilinos_enable('Rythmos'),
define_trilinos_enable('Sacado'),
define_trilinos_enable('SCOREC'),
define_trilinos_enable('Shards'),
define_trilinos_enable('ShyLU'),
define_trilinos_enable('STK'),
define_trilinos_enable('Stokhos'),
define_trilinos_enable('Stratimikos'),
define_trilinos_enable('Teko'),
define_trilinos_enable('Tempus'),
define_trilinos_enable('Tpetra'),
define_trilinos_enable('TrilinosCouplings'),
define_trilinos_enable('Zoltan'),
define_trilinos_enable('Zoltan2'),
define_tpl_enable('Cholmod', False),
define_from_variant('EpetraExt_BUILD_BTF', 'epetraextbtf'),
define_from_variant('EpetraExt_BUILD_EXPERIMENTAL',
'epetraextexperimental'),
define_from_variant('EpetraExt_BUILD_GRAPH_REORDERINGS',
'epetraextgraphreorderings'),
define_from_variant('Amesos2_ENABLE_Basker', 'basker'),
define_from_variant('Amesos2_ENABLE_LAPACK', 'amesos2'),
])
if '+dtk' in spec:
options.extend([
define('Trilinos_EXTRA_REPOSITORIES', 'DataTransferKit'),
define_trilinos_enable('DataTransferKit', True),
])
if '+exodus' in spec:
options.extend([
define_trilinos_enable('SEACAS', True),
define_trilinos_enable('SEACASExodus', True),
define_trilinos_enable('SEACASIoss', True),
define_trilinos_enable('SEACASEpu', True),
define_trilinos_enable('SEACASExodiff', True),
define_trilinos_enable('SEACASNemspread', True),
define_trilinos_enable('SEACASNemslice', True),
])
else:
options.extend([
define_trilinos_enable('SEACASExodus', False),
define_trilinos_enable('SEACASIoss', False),
])
if '+chaco' in spec:
options.extend([
define_trilinos_enable('SEACAS', True),
define_trilinos_enable('SEACASChaco', True),
])
else:
# don't disable SEACAS, could be needed elsewhere
options.extend([
define_trilinos_enable('SEACASChaco', False),
define_trilinos_enable('SEACASNemslice', False)
])
if '+stratimikos' in spec:
# Explicitly enable Thyra (ThyraCore is required). If you don't do
# this, then you get "NOT setting ${pkg}_ENABLE_Thyra=ON since
# Thyra is NOT enabled at this point!" leading to eventual build
# errors if using MueLu because `Xpetra_ENABLE_Thyra` is set to
# off.
options.append(define_trilinos_enable('Thyra', True))
# Add thyra adapters based on package enables
options.extend(
define_trilinos_enable('Thyra' + pkg + 'Adapters', pkg.lower())
for pkg in ['Epetra', 'EpetraExt', 'Tpetra'])
# ######################### TPLs #############################
def define_tpl(trilinos_name, spack_name, have_dep):
options.append(define('TPL_ENABLE_' + trilinos_name, have_dep))
if not have_dep:
return
depspec = spec[spack_name]
libs = depspec.libs
try:
options.extend([
define(trilinos_name + '_INCLUDE_DIRS',
depspec.headers.directories),
])
except NoHeadersError:
# Handle case were depspec does not have headers
pass
options.extend([
define(trilinos_name + '_ROOT', depspec.prefix),
define(trilinos_name + '_LIBRARY_NAMES', libs.names),
define(trilinos_name + '_LIBRARY_DIRS', libs.directories),
])
# Enable these TPLs explicitly from variant options.
# Format is (TPL name, variant name, Spack spec name)
tpl_variant_map = [
('ADIOS2', 'adios2', 'adios2'),
('Boost', 'boost', 'boost'),
('CUDA', 'cuda', 'cuda'),
('HDF5', 'hdf5', 'hdf5'),
('HYPRE', 'hypre', 'hypre'),
('MUMPS', 'mumps', 'mumps'),
('UMFPACK', 'suite-sparse', 'suite-sparse'),
('SuperLU', 'superlu', 'superlu'),
('SuperLUDist', 'superlu-dist', 'superlu-dist'),
('X11', 'x11', 'libx11'),
]
if spec.satisfies('@13.0.2:'):
tpl_variant_map.append(('STRUMPACK', 'strumpack', 'strumpack'))
for tpl_name, var_name, spec_name in tpl_variant_map:
define_tpl(tpl_name, spec_name, spec.variants[var_name].value)
# Enable these TPLs based on whether they're in our spec; prefer to
# require this way so that packages/features disable availability
tpl_dep_map = [
('BLAS', 'blas'),
('CGNS', 'cgns'),
('LAPACK', 'lapack'),
('Matio', 'matio'),
('METIS', 'metis'),
('Netcdf', 'netcdf-c'),
('SCALAPACK', 'scalapack'),
('Zlib', 'zlib'),
]
if spec.satisfies('@12.12.1:'):
tpl_dep_map.append(('Pnetcdf', 'parallel-netcdf'))
if spec.satisfies('@13:'):
tpl_dep_map.append(('HWLOC', 'hwloc'))
for tpl_name, dep_name in tpl_dep_map:
define_tpl(tpl_name, dep_name, dep_name in spec)
# MPI settings
options.append(define_tpl_enable('MPI'))
if '+mpi' in spec:
# Force Trilinos to use the MPI wrappers instead of raw compilers
# to propagate library link flags for linkers that require fully
# resolved symbols in shared libs (such as macOS and some newer
# Ubuntu)
options.extend([
define('CMAKE_C_COMPILER', spec['mpi'].mpicc),
define('CMAKE_CXX_COMPILER', spec['mpi'].mpicxx),
define('CMAKE_Fortran_COMPILER', spec['mpi'].mpifc),
define('MPI_BASE_DIR', spec['mpi'].prefix),
])
# ParMETIS dependencies have to be transitive explicitly
have_parmetis = 'parmetis' in spec
options.append(define_tpl_enable('ParMETIS', have_parmetis))
if have_parmetis:
options.extend([
define('ParMETIS_LIBRARY_DIRS', [
spec['parmetis'].prefix.lib, spec['metis'].prefix.lib
]),
define('ParMETIS_LIBRARY_NAMES', ['parmetis', 'metis']),
define('TPL_ParMETIS_INCLUDE_DIRS',
spec['parmetis'].headers.directories +
spec['metis'].headers.directories),
])
if spec.satisfies('^superlu-dist@4.0:'):
options.extend([
define('HAVE_SUPERLUDIST_LUSTRUCTINIT_2ARG', True),
])
if spec.satisfies('^parallel-netcdf'):
options.extend([
define('TPL_Netcdf_Enables_Netcdf4', True),
define('TPL_Netcdf_PARALLEL', True),
define('PNetCDF_ROOT', spec['parallel-netcdf'].prefix),
])
# ################# Explicit template instantiation #################
complex_s = spec.variants['complex'].value
float_s = spec.variants['float'].value
options.extend([
define('Teuchos_ENABLE_COMPLEX', complex_s),
define('Teuchos_ENABLE_FLOAT', float_s),
])
if '+tpetra +explicit_template_instantiation' in spec:
options.append(define_from_variant('Tpetra_INST_OPENMP', 'openmp'))
options.extend([
define('Tpetra_INST_DOUBLE', True),
define('Tpetra_INST_COMPLEX_DOUBLE', complex_s),
define('Tpetra_INST_COMPLEX_FLOAT', float_s and complex_s),
define('Tpetra_INST_FLOAT', float_s),
define('Tpetra_INST_SERIAL', True),
])
gotype = spec.variants['gotype'].value
if gotype == 'all':
# default in older Trilinos versions to enable multiple GOs
options.extend([
define('Tpetra_INST_INT_INT', True),
define('Tpetra_INST_INT_LONG', True),
define('Tpetra_INST_INT_LONG_LONG', True),
])
else:
options.extend([
define('Tpetra_INST_INT_INT', gotype == 'int'),
define('Tpetra_INST_INT_LONG', gotype == 'long'),
define('Tpetra_INST_INT_LONG_LONG', gotype == 'long_long'),
])
# ################# Kokkos ######################
if '+kokkos' in spec:
arch = Kokkos.get_microarch(spec.target)
if arch:
options.append(define("Kokkos_ARCH_" + arch.upper(), True))
define_kok_enable = _make_definer("Kokkos_ENABLE_")
options.extend([
define_kok_enable('CUDA'),
define_kok_enable('OPENMP' if spec.version >= Version('13')
else 'OpenMP'),
])
if '+cuda' in spec:
options.extend([
define_kok_enable('CUDA_UVM', True),
define_kok_enable('CUDA_LAMBDA', True),
define_kok_enable('CUDA_RELOCATABLE_DEVICE_CODE', 'cuda_rdc')
])
arch_map = Kokkos.spack_cuda_arch_map
options.extend(
define("Kokkos_ARCH_" + arch_map[arch].upper(), True)
for arch in spec.variants['cuda_arch'].value
)
if '+rocm' in spec:
options.extend([
define_kok_enable('ROCM', False),
define_kok_enable('HIP', True)
])
if '+tpetra' in spec:
options.append(define('Tpetra_INST_HIP', True))
amdgpu_arch_map = Kokkos.amdgpu_arch_map
for amd_target in spec.variants['amdgpu_target'].value:
try:
arch = amdgpu_arch_map[amd_target]
except KeyError:
pass
else:
options.append(define("Kokkos_ARCH_" + arch.upper(), True))
# ################# System-specific ######################
# Fortran lib (assumes clang is built with gfortran!)
if ('+fortran' in spec
and spec.compiler.name in ['gcc', 'clang', 'apple-clang']):
fc = Executable(spec['mpi'].mpifc) if (
'+mpi' in spec) else Executable(spack_fc)
libgfortran = fc('--print-file-name',
'libgfortran.' + dso_suffix,
output=str).strip()
# if libgfortran is equal to "libgfortran.<dso_suffix>" then
# print-file-name failed, use static library instead
if libgfortran == 'libgfortran.' + dso_suffix:
libgfortran = fc('--print-file-name',
'libgfortran.a',
output=str).strip()
# -L<libdir> -lgfortran required for OSX
# https://github.com/spack/spack/pull/25823#issuecomment-917231118
options.append(
define('Trilinos_EXTRA_LINK_FLAGS',
'-L%s/ -lgfortran' % os.path.dirname(libgfortran)))
if sys.platform == 'darwin' and macos_version() >= Version('10.12'):
# use @rpath on Sierra due to limit of dynamic loader
options.append(define('CMAKE_MACOSX_RPATH', True))
else:
options.append(define('CMAKE_INSTALL_NAME_DIR', self.prefix.lib))
return options
@run_after('install')
def filter_python(self):
# When trilinos is built with Python, libpytrilinos is included
# through cmake configure files. Namely, Trilinos_LIBRARIES in
# TrilinosConfig.cmake contains pytrilinos. This leads to a
        # run-time error: Symbol not found: _PyBool_Type and prevents
        # Trilinos from being used in any C++ code that links an executable
        # against the libraries listed in Trilinos_LIBRARIES. See
# https://github.com/trilinos/Trilinos/issues/569 and
# https://github.com/trilinos/Trilinos/issues/866
        # A workaround is to remove PyTrilinos from the COMPONENTS_LIST
        # and to remove -lpytrilinos from Makefile.export.Trilinos
if '+python' in self.spec:
filter_file(r'(SET\(COMPONENTS_LIST.*)(PyTrilinos;)(.*)',
(r'\1\3'),
'%s/cmake/Trilinos/TrilinosConfig.cmake' %
self.prefix.lib)
filter_file(r'-lpytrilinos', '',
'%s/Makefile.export.Trilinos' %
self.prefix.include)
def setup_run_environment(self, env):
if '+exodus' in self.spec:
env.prepend_path('PYTHONPATH', self.prefix.lib)
if '+cuda' in self.spec:
# currently Trilinos doesn't perform the memory fence so
# it relies on blocking CUDA kernel launch.
env.set('CUDA_LAUNCH_BLOCKING', '1')
| 47.63305
| 120
| 0.609714
|
2ddc83b7c23c1d027fe4230efc8692e9940aedce
| 13,587
|
py
|
Python
|
readthedocs/projects/views/private.py
|
jasongrlicky/readthedocs.org
|
538e9312527c085e665c101d66d37ba44b64e88e
|
[
"MIT"
] | 1
|
2015-11-08T11:31:12.000Z
|
2015-11-08T11:31:12.000Z
|
readthedocs/projects/views/private.py
|
jasongrlicky/readthedocs.org
|
538e9312527c085e665c101d66d37ba44b64e88e
|
[
"MIT"
] | null | null | null |
readthedocs/projects/views/private.py
|
jasongrlicky/readthedocs.org
|
538e9312527c085e665c101d66d37ba44b64e88e
|
[
"MIT"
] | null | null | null |
import os
import shutil
import simplejson
import zipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.markup.templatetags.markup import restructuredtext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.template.defaultfilters import linebreaks
from django.template.loader import render_to_string
from django.views.generic.list_detail import object_list
from bookmarks.models import Bookmark
from builds.forms import AliasForm
from projects import constants
from projects.forms import (FileForm, CreateProjectForm,
ImportProjectForm, FileRevisionForm,
build_versions_form, build_upload_html_form)
from projects.models import Project, File
from projects.tasks import unzip_files
@login_required
def project_dashboard(request):
"""
    A dashboard! If you ain't know what that means, you ain't need to.
Essentially we show you an overview of your content.
"""
marks = Bookmark.objects.filter(user=request.user)[:5]
return object_list(
request,
queryset=request.user.projects.live(),
page=int(request.GET.get('page', 1)),
template_object_name='project',
extra_context={'bookmark_list': marks },
template_name='projects/project_dashboard.html',
)
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
    the project's configuration, edit the files associated with that
project, etc.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
return object_list(
request,
queryset=project.files.live(),
extra_context={'project': project},
page=int(request.GET.get('page', 1)),
template_object_name='file',
template_name='projects/project_manage.html',
)
@login_required
def project_create(request):
"""
The view for creating a new project where the docs will be hosted
as objects and edited through the site
"""
form = CreateProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.user = request.user
project = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/project_create.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if project.is_imported:
form_class = ImportProjectForm
else:
form_class = CreateProjectForm
form = form_class(instance=project, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
    Shows the available versions and lets the user choose which ones they would
like to have built.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
    Mark a project as deleted on POST; otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_import(request):
"""
    Import docs from a repo
"""
form = ImportProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.user = request.user
project = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage + '?docs_not_built=True')
return render_to_response(
'projects/project_import.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def file_add(request, project_slug):
"""
    Add a file to a project, redirecting on success to the project's management page
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = File(project=project)
form = FileForm(instance=file, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.project = project
file = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_add.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def file_edit(request, project_slug, file_id):
"""
Edit an existing file
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
form = FileForm(instance=file, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_edit.html',
{'form': form, 'project': project, 'file': file},
context_instance=RequestContext(request)
)
@login_required
def file_delete(request, project_slug, file_id):
"""
Mark a given file as deleted on POST, otherwise ask for confirmation
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
if request.method == 'POST':
file.status = constants.DELETED_STATUS
file.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_delete.html',
{'project': project, 'file': file},
context_instance=RequestContext(request)
)
@login_required
def file_history(request, project_slug, file_id):
"""
    A view that provides diffing from the current version to any revision and,
    when posted to, allows you to revert
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
form = FileRevisionForm(file, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.cleaned_data['revision'].apply()
history = reverse('projects_file_history', args=[project.slug, file.pk])
return HttpResponseRedirect(history)
return object_list(
request,
queryset=file.revisions.all(),
extra_context={'project': project, 'file': file, 'form': form},
page=int(request.GET.get('page', 1)),
template_object_name='revision',
template_name='projects/file_history.html',
)
@login_required
def file_diff(request, project_slug, file_id, from_id, to_id):
"""
Return the contents of a given revision.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
# grab the requested revisions
from_rev = get_object_or_404(file.revisions.all(), pk=from_id)
to_rev = get_object_or_404(file.revisions.all(), pk=to_id)
# generate a pretty html diff
diff = file.get_html_diff(from_rev.revision_number, to_rev.revision_number)
contents = linebreaks(to_rev.get_file_content())
payload = {
'diff': diff,
'contents': contents,
'display': str(to_rev),
}
# return it assuming json
return HttpResponse(simplejson.dumps(payload), mimetype='text/javascript')
@login_required
def file_preview(request):
"""
Live preview of restructuredtext payload - currently not wired up
"""
f = File(
heading=request.POST['heading'],
content=request.POST['content'],
)
rendered_base = render_to_string('projects/doc_file.rst.html', {'file': f})
rendered = restructuredtext(rendered_base)
json_response = simplejson.dumps({'payload': rendered})
return HttpResponse(json_response, mimetype='text/javascript')
@login_required
def export(request, project_slug):
"""
    Export a project's docs as a .zip file, including the .rst source
"""
project = Project.objects.live().get(user=request.user, slug=project_slug)
os.chdir(project.doc_path)
dir_path = os.path.join(settings.MEDIA_ROOT, 'export', project.user.username)
zip_filename = '%s.zip' % project.slug
file_path = os.path.join(dir_path, zip_filename)
try:
os.makedirs(dir_path)
except OSError:
#Directory already exists
pass
# Create a <slug>.zip file containing all files in file_path
archive = zipfile.ZipFile(zip_filename, 'w')
for root, subfolders, files in os.walk(file_path):
for file in files:
archive.write(os.path.join(root, file))
archive.close()
return HttpResponseRedirect(os.path.join(settings.MEDIA_URL, 'export', project.user.username, zip_filename))
def upload_html(request, project_slug):
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
FormClass = build_upload_html_form(proj)
if request.method == 'POST':
form = FormClass(request.POST, request.FILES, request=request)
if form.is_valid():
file = request.FILES['content']
version_slug = form.cleaned_data['version']
version = proj.versions.get(slug=version_slug)
#Copy file
dest_dir = os.path.join(settings.UPLOAD_ROOT, proj.slug)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_file = os.path.join(dest_dir, file.name)
destination = open(dest_file, 'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
#Mark version active.
version.active = True
version.uploaded = True
version.built = False
version.save()
#Extract file into the correct place.
html_path = proj.rtd_build_path(version.slug)
unzip_files(dest_file, html_path)
return HttpResponseRedirect(proj.get_absolute_url())
else:
form = FormClass(request=request)
return render_to_response(
'projects/upload_html.html',
{'form': form, 'project': proj},
context_instance=RequestContext(request)
)
@login_required
def edit_alias(request, project_slug, id=None):
"""
    Create or edit a URL alias for a project.
"""
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def list_alias(request, project_slug):
"""
    List the URL aliases defined for a project.
"""
proj = get_object_or_404(Project.objects.all(), slug=project_slug)
return object_list(
request,
queryset=proj.aliases.all(),
template_object_name='alias',
template_name='projects/alias_list.html',
)
| 33.798507
| 112
| 0.683006
|
271e2ec24428ea72b01b8f36355208dce7fe8a8f
| 2,398
|
py
|
Python
|
examples/learning_feedback_example.py
|
BRNALX/ChatterBot
|
976b60a86154bffc3c7cf2d7c51e4352bd591d74
|
[
"BSD-3-Clause"
] | null | null | null |
examples/learning_feedback_example.py
|
BRNALX/ChatterBot
|
976b60a86154bffc3c7cf2d7c51e4352bd591d74
|
[
"BSD-3-Clause"
] | null | null | null |
examples/learning_feedback_example.py
|
BRNALX/ChatterBot
|
976b60a86154bffc3c7cf2d7c51e4352bd591d74
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
"""
This example shows how to create a chat bot that
will learn responses based on an additional feedback
element from the user.
"""
# Uncomment the following line to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)
# Create a new instance of a ChatBot
bot = ChatBot(
'Feedback Learning Bot',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
'chatterbot.logic.BestMatch'
],
input_adapter='chatterbot.input.TerminalAdapter',
output_adapter='chatterbot.output.TerminalAdapter'
)
CONVERSATION_ID = bot.storage.create_conversation()
def get_feedback():
from chatterbot.utils import input_function
text = input_function()
if 'yes' in text.lower():
return True
elif 'no' in text.lower():
return False
else:
print('Please type either "Yes" or "No"')
return get_feedback()
print('Type something to begin...')
# The following loop will execute each time the user enters input
while True:
try:
input_statement = bot.input.process_input_statement()
statement, response = bot.generate_response(input_statement, CONVERSATION_ID)
        print('\n Is "{}" a coherent response to "{}"? \n'.format(response, input_statement))
if get_feedback():
bot.learn_response(response, input_statement)
# Update the conversation history for the bot
# It is important that this happens last, after the learning step
bot.storage.add_to_conversation(CONVERSATION_ID, statement, response)
bot.output.process_response(response)
else:
question = input_statement
            print('Please tell me the correct response.')
            response = bot.input.process_input_statement()
            bot.learn_response(response, question)
            bot.storage.add_to_conversation(CONVERSATION_ID, question, response)
            # bot.output.process_response(input_statement)
            print('I understand: "{}" is a coherent response to "{}". \n'.format(response, question))
            print("We can continue, say anything. \n")
# Press ctrl-c or ctrl-d on the keyboard to exit
except (KeyboardInterrupt, EOFError, SystemExit):
break
| 31.973333
| 109
| 0.66764
|
b74cdb17db7b20ab4ff31a704260c2bb0cce9af5
| 1,303
|
py
|
Python
|
algorithms/HeapSort.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
algorithms/HeapSort.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
algorithms/HeapSort.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Heap sort (max-heap): treat the sequence to be sorted as a binary tree stored in
array order and rearrange it into a heap, so the root holds the largest value;
then swap the root with the last node of the heap and re-adjust the first (n-1)
elements into a heap; repeat until only two nodes remain, swap them, and the
result is an ordered sequence of n elements.
The algorithm therefore has two phases: building the heap, and repeatedly swapping
the heap top with the last element. It is implemented with two functions: a
sift-down (adjust) routine and a sort routine that calls it repeatedly.
1. Build max heap: rearrange all data so it satisfies the max-heap property.
2. Max-heap adjust: maintains the max-heap property; the core subroutine of heap building.
3. Heap sort: remove the root (first element) and recursively re-adjust the heap.
Performance:
    Time complexity: O(n log2 n)
    Space complexity: O(1)
    Stability: unstable
"""
def Heap_adjust(lists, parent, size):
    # Sift the value at `parent` down until the max-heap property holds.
    lchild = 2 * parent + 1
    rchild = 2 * parent + 2
    maxN = parent
    if lchild < size and lists[lchild] > lists[maxN]:
        maxN = lchild
    if rchild < size and lists[rchild] > lists[maxN]:
        maxN = rchild
    if maxN != parent:
        lists[maxN], lists[parent] = lists[parent], lists[maxN]
        Heap_adjust(lists, maxN, size)
def Heap_build(lists, size):
    # Build a max heap by sifting down every non-leaf node, bottom-up.
    for i in reversed(range(size // 2)):
        Heap_adjust(lists, i, size)
def Heap_sort(lists):
'''
    :param lists: list of comparable items, sorted in place in ascending order
    :return: the same list, sorted
'''
size = len(lists)
Heap_build(lists, size)
for i in range(0, size)[::-1]:
lists[0], lists[i] = lists[i], lists[0]
Heap_adjust(lists, 0, i)
return lists
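# Minimal usage sketch (not part of the original module): sort a sample list
# in place and print it; the values below are illustrative only.
if __name__ == '__main__':
    sample = [49, 38, 65, 97, 76, 13, 27]
    print(Heap_sort(sample))  # expected output: [13, 27, 38, 49, 65, 76, 97]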
| 27.145833
| 75
| 0.597851
|
53d258be23226311c9a25ad50d5c8b095827ce5e
| 13,189
|
py
|
Python
|
setup.py
|
haroldwoo/airflow
|
e4bf4879e17051066b5244510d8757463f971d97
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
haroldwoo/airflow
|
e4bf4879e17051066b5244510d8757463f971d97
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
haroldwoo/airflow
|
e4bf4879e17051066b5244510d8757463f971d97
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import io
import logging
import os
import sys
import subprocess
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3
if not PY3:
# noinspection PyShadowingBuiltins
FileNotFoundError = IOError
# noinspection PyUnboundLocalVariable
try:
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def __init__(self, dist, **kw):
super().__init__(dist, **kw)
self.test_suite = True
self.test_args = []
self.tox_args = ''
def initialize_options(self):
TestCommand.initialize_options(self)
def finalize_options(self):
TestCommand.finalize_options(self)
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Compile and build the frontend assets using npm and webpack.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.call('./airflow/www/compile_assets.sh')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Cannot compute the git version. {}'.format(e))
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version, sha=sha)
else:
return 'no_git_version'
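# Example return values of git_version() for an illustrative version '1.10.4'
# (sha shown as <sha>): '.release:1.10.4+<sha>' for a clean checkout,
# '.dev0+<sha>.dirty' when uncommitted changes exist, and '' when gitpython
# is unavailable or the repo cannot be read.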
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async_packages = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
aws = [
'boto3>=1.7.0, <1.8.0',
]
azure = [
'azure-storage>=0.34.0',
'azure-mgmt-resource==1.2.2',
'azure-mgmt-datalake-store==0.4.0',
'azure-datalake-store==0.0.19',
'azure-cosmos>=3.0.1',
'azure-mgmt-containerinstance',
]
cassandra = ['cassandra-driver>=3.13.0']
celery = [
'celery~=4.3',
'flower>=0.7.3, <1.0',
'tornado>=4.2.0, <6.0', # Dep of flower. Pin to a version that works on Py3.5.2
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = ['cloudant>=2.0']
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.20.0, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx-argparse>=0.1.13',
'sphinx-autoapi>=0.7.1',
'Sphinx-PyPI-upload>=0.2.1',
'sphinx-rtd-theme>=0.1.6',
'sphinx>=1.2.3',
'sphinxcontrib-httpdomain>=1.7.0',
]
docker = ['docker~=3.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
'elasticsearch>=5.0.0,<6.0.0',
'elasticsearch-dsl>=5.0.0,<6.0.0'
]
gcp = [
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-auth>=1.0.0, <2.0.0dev',
'google-cloud-bigtable==0.31.0',
'google-cloud-container>=0.1.1',
'google-cloud-language>=1.1.1',
'google-cloud-spanner>=1.7.1',
'google-cloud-storage~=1.14',
'google-cloud-translate>=1.3.3',
'google-cloud-videointelligence>=1.7.0',
'google-cloud-vision>=0.35.2',
'google-cloud-texttospeech>=0.4.0',
'google-cloud-speech>=0.36.3',
'grpcio-gcp>=0.2.2',
'httplib2~=0.9.2',
'pandas-gbq',
'PyOpenSSL',
]
grpc = ['grpcio>=1.15.0']
flask_oauth = [
'Flask-OAuthlib>=0.9.1',
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib==1.1.0'
]
hdfs = ['snakebite>=2.7.8']
hive = [
'hmsclient>=0.1.0',
'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=1.0.0']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
'cryptography>=2.0.0']
ldap = ['ldap3>=2.5.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6,<1.4']
oracle = ['cx_Oracle>=5.1.2']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb==0.1.1']
postgres = ['psycopg2>=2.7.4,<2.8']
qds = ['qds-sdk>=1.10.4']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis~=3.2']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0,<6']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0', 'dnspython>=1.13.0,<2.0.0']
snowflake = ['snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9', 'sshtunnel>=0.1.4,<0.2']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
+ cassandra + mongo
devel = [
'beautifulsoup4~=4.7.1',
'click==6.7',
'flake8>=3.6.0',
'freezegun',
'jira',
'mock;python_version<"3.3"',
'mongomock',
'moto==1.3.5',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'paramiko',
'pysftp',
'pywinrm',
'qds-sdk>=1.9.6',
'rednose',
'requests_mock'
]
if PY3:
devel += ['mypy']
else:
devel += ['unittest2']
devel_minreq = devel + kubernetes + mysql + doc + password + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + slack + crypto + oracle +
docker + ssh + kubernetes + celery + redis + gcp + grpc +
datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
druid + pinot + segment + snowflake + elasticsearch +
atlas + azure + aws + salesforce)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
devel_ci = devel_all
def do_setup():
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.9, <1.0',
'cached_property~=1.5',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'dumb-init>=1.2.2',
'flask>=1.0, <2.0',
'flask-appbuilder>=1.12.5, <2.0.0',
'flask-caching>=1.3.3, <1.4.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.5.0, <20.0',
'iso8601>=0.1.12',
'json-merge-patch==0.2',
'jinja2>=2.10.1, <2.11.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'requests>=2.20.0, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy~=1.3',
'tabulate>=0.7.5, <0.9',
'tenacity==4.12.0',
'text-unidecode==1.2',
'typing;python_version<"3.5"',
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async_packages,
'aws': aws,
'azure': azure,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'gcp': gcp,
'gcp_api': gcp, # TODO: remove this in Airflow 2.1
'github_enterprise': flask_oauth,
'google_auth': flask_oauth,
'grpc': grpc,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.apache.org',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
'compile_assets': CompileAssets
},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
if __name__ == "__main__":
do_setup()
| 30.180778
| 90
| 0.561453
|
b143f2493d36ff05b6526d97d5b50570a7a37d62
| 341
|
py
|
Python
|
stalkbroker/messages/_memos.py
|
peake100/stalkbroker-py
|
95bed6e6d89dc00b183b71d5d3fce7908c554ed9
|
[
"MIT"
] | null | null | null |
stalkbroker/messages/_memos.py
|
peake100/stalkbroker-py
|
95bed6e6d89dc00b183b71d5d3fce7908c554ed9
|
[
"MIT"
] | 12
|
2020-04-25T22:13:57.000Z
|
2020-05-24T16:24:59.000Z
|
stalkbroker/messages/_memos.py
|
peake100/stalkbroker-py
|
95bed6e6d89dc00b183b71d5d3fce7908c554ed9
|
[
"MIT"
] | null | null | null |
import random
_MEMOS = [
"401K through the vegetable way.",
"Turn-up your profits.",
"Lets unearth a fortune, together.",
"Not just another piece of shovelware",
]
def random_memo() -> str:
"""
Pick a random memo to add to a report / bulletin.
:returns: random memo.
"""
return random.choice(_MEMOS)
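# Minimal usage sketch (not part of the original module): print one memo.
if __name__ == "__main__":
    print(random_memo())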
| 17.947368
| 53
| 0.627566
|
3dc397a0ba36facd14f9902c50054e9df7bb0252
| 1,687
|
py
|
Python
|
plotter.py
|
hsuRush/PSO
|
ff537413b4f629ba8a483752bc774573eaa50e37
|
[
"MIT"
] | null | null | null |
plotter.py
|
hsuRush/PSO
|
ff537413b4f629ba8a483752bc774573eaa50e37
|
[
"MIT"
] | null | null | null |
plotter.py
|
hsuRush/PSO
|
ff537413b4f629ba8a483752bc774573eaa50e37
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from functions import rosenbrock
ax = None
fig = None
def plot3d_init():
    # Keep the figure and 3D axes at module level so plot_func() can use them.
    global ax, fig
    fig = plt.figure()
    ax = fig.gca(projection='3d')
"""
def log_tick_formatter(val, pos=None):
return "{:.2e}".format(10**val)
ax.zaxis.set_major_formatter(mticker.FuncFormatter(log_tick_formatter))
"""
import matplotlib.ticker as mticker
def plot_func(func=rosenbrock):
s = 0.25 # Try s=1, 0.25, 0.1, or 0.05
X = np.arange(-10, 10.+s, s) #Could use linspace instead if dividing
Y = np.arange(-10, 10.+s, s) #evenly instead of stepping...
#Create the mesh grid(s) for all X/Y combos.
X, Y = np.meshgrid(X, Y)
#Rosenbrock function w/ two parameters using numpy Arrays
Z = func(X,Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False) #Try coolwarm vs jet
#ax.plot(X,Y,Z)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
def plot_3d_dot(xs,ys,zs, **args):
ax.scatter(xs,ys,zs, **args)
def plot(x,y, **args):
plt.plot(x,y, **args)
def plot_show(pause_time=.0, **args):
while True:
try:
plt.show(**args)
#plt.draw()
plt.pause(pause_time)
except UnicodeDecodeError:
continue
break
if __name__ == "__main__":
plot_func()
plot_show()
| 23.109589
| 75
| 0.632484
|
b03584b93b46cb1ed78d3270f3eee0d7ec5bfa72
| 546
|
py
|
Python
|
Sparrow/_const.py
|
eleme/Sparrow
|
4fe10cb57c40dbdca9fe3645477cb7e1dc118d0b
|
[
"MIT"
] | 75
|
2018-10-10T02:05:16.000Z
|
2021-09-09T01:38:38.000Z
|
Sparrow/_const.py
|
eleme/Sparrow
|
4fe10cb57c40dbdca9fe3645477cb7e1dc118d0b
|
[
"MIT"
] | 2
|
2020-06-05T19:06:23.000Z
|
2021-06-10T20:48:32.000Z
|
Sparrow/_const.py
|
eleme/Sparrow
|
4fe10cb57c40dbdca9fe3645477cb7e1dc118d0b
|
[
"MIT"
] | 6
|
2018-09-29T12:07:40.000Z
|
2020-05-28T03:10:38.000Z
|
class Const(object):
class ConstError(PermissionError):
pass
class ConstCaseError(ConstError):
pass
def __setattr__(self, name, value):
if name in self.__dict__.keys():
raise self.ConstError("Can't rebind const(%s)" % name)
self.__dict__[name] = value
def __delattr__(self, name):
if name in self.__dict__:
raise self.ConstError("Can't unbind const(%s)" % name)
raise NameError(name)
import sys
sys.modules[__name__] = Const()
Const.kError = "kError"
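# Usage sketch (illustrative; the import path is assumed from the file name):
#
#   from Sparrow import _const
#   _const.kTimeout = 30   # first binding succeeds
#   _const.kTimeout = 60   # raises ConstError: Can't rebind const(kTimeout)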
| 22.75
| 66
| 0.626374
|
76452416caad58c32c752ed7c073ac70221e78c5
| 2,868
|
py
|
Python
|
crits/komand_crits/actions/add_email/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
crits/komand_crits/actions/add_email/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
crits/komand_crits/actions/add_email/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
FILE = "file"
PARAMS = "params"
SOURCE = "source"
TYPE = "type"
class Output:
RESPONSE = "response"
class AddEmailInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"$ref": "#/definitions/file",
"title": "File",
"description": "The actual file ",
"order": 2
},
"params": {
"type": "object",
"title": "Parameters",
"description": "Object containing related data or metadata",
"order": 4
},
"source": {
"type": "string",
"title": "Source",
"description": "Name of the source which provided this information",
"order": 3
},
"type": {
"type": "string",
"title": "Type",
"description": "Upload type",
"enum": [
"msg",
"eml",
"raw",
"yaml",
"fields"
],
"order": 1
}
},
"required": [
"type",
"file",
"source"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class AddEmailOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"response": {
"$ref": "#/definitions/post_response",
"title": "Response",
"description": "Response",
"order": 1
}
},
"definitions": {
"post_response": {
"type": "object",
"title": "post_response",
"properties": {
"id": {
"type": "string",
"title": "ID",
"order": 1
},
"message": {
"type": "string",
"title": "Message",
"order": 2
},
"return_code": {
"type": "integer",
"title": "Return Code",
"description": "The return_code is usually 0 for success, 1 for failure",
"order": 4
},
"type": {
"type": "string",
"title": "Type",
"description": "The TLO type of the TLO that created or updated",
"order": 3
},
"url": {
"type": "string",
"title": "URL",
"order": 5
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 20.340426
| 83
| 0.456416
|
455868e9197549a85dcaeb96d63d41cd260d2c2e
| 10,779
|
py
|
Python
|
pavement.py
|
echevemaster/fedora-college
|
55033de5dd6f3a1063bf12f0b8937b493ae07692
|
[
"BSD-3-Clause"
] | 4
|
2015-05-16T09:54:18.000Z
|
2016-01-08T16:52:19.000Z
|
pavement.py
|
fedora-infra/fedora-college
|
cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8
|
[
"BSD-3-Clause"
] | 1
|
2015-12-03T21:30:13.000Z
|
2016-01-09T10:47:24.000Z
|
pavement.py
|
fedora-infra/fedora-college
|
cf310dab2e4fea02b9ac5e7f57dc53aafb4834d8
|
[
"BSD-3-Clause"
] | 1
|
2020-12-07T22:14:01.000Z
|
2020-12-07T22:14:01.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf-8; -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
# Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from paver.easy import options, task, needs, consume_args
from paver.setuputils import install_distutils_tasks
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
sys.path.append('.')
from setup import setup_dict
# Constants
CODE_DIRECTORY = 'fedora_college'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk('.'):
for subdir in subdirs:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
def print_passed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
options(setup=setup_dict)
install_distutils_tasks()
# Task-related functions
def _doc_make(*make_args):
"""Run make in sphinx' docs directory.
:return: exit code
"""
if sys.platform == 'win32':
# Windows
make_cmd = ['make.bat']
else:
# Linux, Mac OS X, and others
make_cmd = ['make']
make_cmd.extend(make_args)
return subprocess.call(make_cmd, cwd=DOCS_DIRECTORY)
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
import pytest
# This runs the unit tests.
# It also runs doctest, but only on the modules in TESTS_DIRECTORY.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# Tasks
@task
@needs('doc_html', 'setuptools.command.sdist')
def sdist():
"""Build the HTML docs and the tarball."""
pass
@task
def test():
"""Run the unit tests."""
raise SystemExit(_test())
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
from fedora_college.main import _main
raise SystemExit(_main([CODE_DIRECTORY] + args))
@task
def commit():
"""Commit only if all the tests pass."""
if _test_all() == 0:
subprocess.check_call(['git', 'commit'])
else:
print_failure_message('\nTests failed, not committing.')
@task
def coverage():
"""Run tests and show test coverage report."""
try:
import pytest_cov # NOQA
except ImportError:
print_failure_message(
'Install the pytest coverage plugin to use this task, '
"i.e., `pip install pytest-cov'.")
raise SystemExit(1)
import pytest
pytest.main(PYTEST_FLAGS + [
'--cov', CODE_DIRECTORY,
'--cov-report', 'term-missing',
TESTS_DIRECTORY])
@task # NOQA
def doc_watch():
"""Watch for changes in the docs and rebuild HTML docs when changed."""
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
print_failure_message('Install the watchdog package to use this task, '
"i.e., `pip install watchdog'.")
raise SystemExit(1)
class RebuildDocsEventHandler(FileSystemEventHandler):
def __init__(self, base_paths):
self.base_paths = base_paths
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event: The event object representing the file system event.
:type event: :class:`watchdog.events.FileSystemEvent`
"""
for base_path in self.base_paths:
if event.src_path.endswith(base_path):
super(RebuildDocsEventHandler, self).dispatch(event)
# We found one that matches. We're done.
return
def on_modified(self, event):
print_failure_message('Modification detected. Rebuilding docs.')
# Strip off the path prefix.
# import os
# if event.src_path[len(os.getcwd()) + 1:].startswith(
# CODE_DIRECTORY):
# sphinx-build doesn't always pick up changes on code files,
# even though they are used to generate the documentation. As
# a workaround, just clean before building.
doc_html()
print_success_message('Docs have been rebuilt.')
print_success_message(
'Watching for changes in project files, press Ctrl-C to cancel...')
handler = RebuildDocsEventHandler(get_project_files())
observer = Observer()
observer.schedule(handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
@needs('doc_html')
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform == 'linux2':
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
@task
def get_tasks():
"""Get all paver-defined tasks."""
from paver.tasks import environment
for task in environment.get_tasks():
print(task.shortname)
@task
def doc_html():
"""Build the HTML docs."""
retcode = _doc_make('html')
if retcode:
raise SystemExit(retcode)
@task
def doc_clean():
"""Clean (delete) the built docs."""
retcode = _doc_make('clean')
if retcode:
raise SystemExit(retcode)
| 28.070313
| 79
| 0.638093
|
c47cb2bb475e9295615ec9281503116f8e598433
| 593
|
py
|
Python
|
protector/rules/prevent_drop.py
|
trivago/Protector
|
7ebe7bde965e27737b961a0cb5740724d174fdc7
|
[
"BSD-3-Clause"
] | 54
|
2016-02-23T16:04:11.000Z
|
2021-05-01T06:43:55.000Z
|
protector/rules/prevent_drop.py
|
trivago/Protector
|
7ebe7bde965e27737b961a0cb5740724d174fdc7
|
[
"BSD-3-Clause"
] | 1
|
2016-03-08T15:07:35.000Z
|
2016-06-23T12:52:36.000Z
|
protector/rules/prevent_drop.py
|
trivago/Protector
|
7ebe7bde965e27737b961a0cb5740724d174fdc7
|
[
"BSD-3-Clause"
] | 4
|
2016-06-01T14:22:47.000Z
|
2017-03-09T05:23:08.000Z
|
from result import Ok, Err
from protector.influxdb.keyword import Keyword
from protector.rules.rule import Rule
class RuleChecker(Rule):
@staticmethod
def description():
return "Prevent drop queries"
@staticmethod
def reason():
return ["Drop queries mean data loss. This is a risky operation that should be restricted to admin users"]
def check(self, query):
"""
:param query:
"""
if query.get_type() != Keyword.DROP:
return Ok(True)
return Err("Drop queries are forbidden as they mean data loss.")
| 24.708333
| 114
| 0.647555
|
5718fc24abddde3cd75d24a8b98a26842d6e9555
| 350
|
py
|
Python
|
mesa_SIR/__init__.py
|
projectmesapackages/SIR
|
8ed61c53aaa2e415b8259b988e88a97d646a2caa
|
[
"MIT"
] | 8
|
2020-04-06T12:49:04.000Z
|
2021-11-26T20:17:36.000Z
|
mesa_SIR/__init__.py
|
projectmesapackages/SIR
|
8ed61c53aaa2e415b8259b988e88a97d646a2caa
|
[
"MIT"
] | null | null | null |
mesa_SIR/__init__.py
|
projectmesapackages/SIR
|
8ed61c53aaa2e415b8259b988e88a97d646a2caa
|
[
"MIT"
] | 8
|
2020-04-04T16:51:49.000Z
|
2021-07-31T21:46:24.000Z
|
"""
Mesa Agent-Based Modeling Framework Extension
Core Objects: Model, and Agent.
"""
import datetime
from mesa_SIR import calculations_and_plots
from mesa_SIR import SIR
__all__ = ["Infection"]
__title__ = 'Mesa_SIR'
__version__ = '0.0.1'
__license__ = 'MIT'
__copyright__ = 'Copyright %s Mark Bailey' % datetime.date.today().year
| 21.875
| 71
| 0.725714
|
82580356429bf62fbf0a3f75475befec07443e13
| 5,022
|
py
|
Python
|
code/env/BitcoinTradingEnv.py
|
leokan92/Contextual-bandit-Resnet-trading
|
57fa89b01ba99999b132161efa0eb16b5c32ecf8
|
[
"MIT"
] | null | null | null |
code/env/BitcoinTradingEnv.py
|
leokan92/Contextual-bandit-Resnet-trading
|
57fa89b01ba99999b132161efa0eb16b5c32ecf8
|
[
"MIT"
] | null | null | null |
code/env/BitcoinTradingEnv.py
|
leokan92/Contextual-bandit-Resnet-trading
|
57fa89b01ba99999b132161efa0eb16b5c32ecf8
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
from sklearn.preprocessing import StandardScaler
from empyrical import sortino_ratio, calmar_ratio, omega_ratio, sharpe_ratio
# Delete this if debugging
np.warnings.filterwarnings('ignore')
def calc_S(R,T = 365*24):
sumR = np.cumsum(R[::-1])[::-1]
sumR2 = np.cumsum((R**2)[::-1])[::-1]
A = sumR[0] / T
B = sumR2[0] / T
S = A / np.sqrt(B - A**2)
return S
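# In calc_S above, sumR[0] is the total of R and sumR2[0] the total of R**2,
# so A and B are the mean return and mean squared return over the horizon T;
# S = A / sqrt(B - A**2) is thus a Sharpe-style ratio of mean return to its
# (biased) standard deviation, with no annualization applied.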
class BitcoinTradingEnv(gym.Env):
'''A Trading environment for OpenAI gym'''
metadata = {'render.modes': ['human', 'system', 'none']}
viewer = None
def __init__(self, df, initial_balance=10000, commission=0.000, reward_func='sortino',M = 50,mu = 1,length = 1000, scaling = False):
super(BitcoinTradingEnv, self).__init__()
self.mu = mu
self.initial_balance = initial_balance
self.commission = commission
self.reward_func = reward_func
self.M = M #look back window
self.length = length
self.df = df.fillna(method='bfill').reset_index()
self.r = np.diff(self.df['Close'])
self.scaling = scaling
self.scaler = StandardScaler()
def _next_observation(self):
obs = self.r[self.current_step-self.M-1:self.current_step-1]
if self.scaling:
self.scaler.fit(self.r[:self.current_step-1].reshape(-1,1))
self.r_scaled = self.scaler.transform(self.r[:self.current_step-1].reshape(-1,1))
obs = self.r_scaled[self.current_step-self.M-1:self.current_step-1].T[0]
return obs
def _current_price(self):
return self.df['Close'].values[self.current_step]
def _last_price(self):
return self.df['Close'].values[self.current_step-1]
def _take_action(self, action):
        # Works with discrete actions: -1: short, 0: neutral, 1: long
        action_type = int(round(action, 0))
        if action_type == 1:  # Assume long position
            self.position = 'long'
        elif action_type == -1:  # Assume short position
            self.position = 'short'
        elif action_type == 0:  # Assume neutral position
            self.position = 'neutral'
if (self.current_step == self.M+1): #to give the first position the neutral position
self.agent_returns.append(0)
self.position_history.append(0)
else:
self.agent_returns.append(self.initial_amount*(self.position_history[self.current_step-self.initial_step]*self.r[self.current_step-1] - self.commission*self.df['Close'].values[self.current_step-1]*abs(action - self.position_history[self.current_step-self.initial_step])))
self.price_hist.append(self._current_price())
self.trades.append({'step': self.current_step,'Position': self.position,'action': action})
self.balance = self.balance + self.agent_returns[-1:][0]
self.net_worths.append(self.balance)
self.position_history.append(action)
def _reward(self):
returns = np.array(self.agent_returns[-self.length:])
#returns = np.diff(self.net_worths[-length:])
if np.count_nonzero(returns) < 1:
return 0
if self.reward_func == 'sortino':
            reward = sortino_ratio(returns, annualization=365*24)  # annualization factor for hourly data
elif self.reward_func == 'calmar':
reward = calmar_ratio(returns, annualization=365*24)
elif self.reward_func == 'omega':
reward = omega_ratio(returns, annualization=365*24)
elif self.reward_func == 'sharpe_ratio':
reward = sharpe_ratio(returns, annualization=365*24)
elif self.reward_func == 'differential_sharpe_ratio':
reward = calc_S(returns, T=self.length)
else:
reward = returns[-1]
return reward if np.isfinite(reward) else 0
def _done(self):
return self.current_step == (len(self.df) - self.M -1)
def reset(self):
self.position = 'neutral'
self.initial_step = self.M+1
self.current_step = self.initial_step
self.initial_amount = self.mu
self.balance = self.initial_balance
self.net_worths = []
self.net_worths.append(self.balance)
self.position_history = []
self.trades = []
self.agent_returns = []
self.price_hist = []
return self._next_observation()
def step(self, action):
self._take_action(action)
self.current_step += 1
obs = self._next_observation()
reward = self._reward()
done = self._done()
return obs, reward, done, {}
def render(self, mode='system'):
if mode == 'system':
print('Price: ' + str(self._current_price()))
print('Net worth: ' + str(self.net_worths[-1]))
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
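# Minimal usage sketch (illustrative; assumes a DataFrame with a 'Close' column):
#
#   import pandas as pd
#   df = pd.DataFrame({'Close': 100 + np.cumsum(np.random.randn(2000))})
#   env = BitcoinTradingEnv(df, reward_func='sharpe_ratio', M=50)
#   obs = env.reset()
#   obs, reward, done, _ = env.step(1)   # 1 = long, -1 = short, 0 = neutral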
| 36.656934
| 283
| 0.614297
|
ddb3b41f546b3b6f2103d87aabc9ccb9ae55db7e
| 4,520
|
py
|
Python
|
encoder.py
|
jojogamezHub/VideoPlayerTesting
|
2bc37fbb4637a73e323a1ce843a7480ddc3c2a94
|
[
"MIT"
] | null | null | null |
encoder.py
|
jojogamezHub/VideoPlayerTesting
|
2bc37fbb4637a73e323a1ce843a7480ddc3c2a94
|
[
"MIT"
] | null | null | null |
encoder.py
|
jojogamezHub/VideoPlayerTesting
|
2bc37fbb4637a73e323a1ce843a7480ddc3c2a94
|
[
"MIT"
] | null | null | null |
import json
import cv2 as cv
import math
import numpy as np
import os
def getVideoLocation(videoName):
return "Videos/" + videoName
def rescaleFrame(frame, scale):
width = int(frame.shape[1]* scale)
height = int(frame.shape[0] * scale)
return cv.resize(frame, (width,height), interpolation = cv.INTER_AREA)
def encode(filmName,videoName,resolutionMulti,fpsMulti,packetSize):
videoLocation = getVideoLocation(videoName)
videoCapture = cv.VideoCapture(videoLocation)
if not videoCapture.isOpened():
print("Error opening video")
return False
fpsMultiReciprocal = math.ceil(1/fpsMulti)
realWidth = videoCapture.get(cv.CAP_PROP_FRAME_WIDTH)
scaledWidth = math.floor(realWidth*resolutionMulti)
realHeight = videoCapture.get(cv.CAP_PROP_FRAME_HEIGHT)
scaledHeight = math.floor(realHeight*resolutionMulti)
realFPS = round(videoCapture.get(cv.CAP_PROP_FPS),3)
scaledFPS = round(realFPS*fpsMulti,3)
totalFrames = int(videoCapture.get(cv.CAP_PROP_FRAME_COUNT))
scaledFrames = math.ceil(totalFrames / fpsMultiReciprocal)
print(f"""
Total Frames: {str(totalFrames)}
Scaled Frames: {str(scaledFrames)}
Real Width: {str(realWidth)}
Scaled Width: {str(scaledWidth)}
Real Height: {str(realHeight)}
Scaled Height: {str(scaledHeight)}
Real FPS: {str(realFPS)}
Scaled FPS: {str(scaledFPS)}
""")
prevPercentageCompleted = 0
totalFrameIterations = 0
scaledFrameIterations = 0
packetIndex = 0
packetData = ""
storageFolder = "PacketData/" + filmName
if os.path.exists(storageFolder):
print("Please Delete Old Storage Folder")
return
os.makedirs(storageFolder)
while True:
success, frame = videoCapture.read()
if not success:
print("Frame",str(totalFrameIterations),"failed to read")
continue
if (totalFrameIterations) % fpsMultiReciprocal == 0:
frame = rescaleFrame(frame,resolutionMulti)
row,col,_ = frame.shape
colTable = []
for colI in range(col):
rowTable = []
for rowI in range(row):
colorData = frame[rowI][colI]
colorData = list(colorData)
                    for i, colorValue in enumerate(colorData): colorData[i] = int(colorValue)  # cast to plain int so json can serialise it
rowTable.append([colorData[2],colorData[1],colorData[0]] )
colTable.append(rowTable)
jsonPixelData = json.dumps(colTable)
lastPacket = (scaledFrameIterations + 1) >= scaledFrames
if ((scaledFrameIterations + 1) % packetSize == 0) or lastPacket:
packetData += jsonPixelData
packetIndex += 1
packetData = "[" + packetData + "]" # close table
storageLocation = storageFolder + "/" + str(packetIndex) + ".json"
with open(storageLocation, 'w') as file:
file.write(packetData)
packetData = ""
else:
packetData += jsonPixelData + ", "
scaledFrameIterations += 1
percentageCompleted = round((totalFrameIterations/totalFrames)*100)
if percentageCompleted > prevPercentageCompleted:
print("Progress:",str(percentageCompleted) + "%")
prevPercentageCompleted = percentageCompleted
totalFrameIterations += 1
if totalFrameIterations >= totalFrames:
break
videoCapture.release()
cv.destroyAllWindows()
if scaledFrames != scaledFrameIterations:
print(f"Frame Estimation Error. Predited:{str(scaledFrames)}, Actual:{str(scaledFrameIterations)}")
configData = {
"packets": packetIndex,
"packetSize": packetSize,
"width": scaledWidth,
"height": scaledHeight,
"totalFrames": scaledFrameIterations,
"fps": scaledFPS,
}
with open(storageFolder + "/config.json", 'w') as file:
file.write(json.dumps(configData))
print("Encoding Packets Completed")
print(f"{str(packetIndex)} Packets Created")
| 25.977011
| 122
| 0.584513
|
ac304219a24df0676feb382070af801423957772
| 9,042
|
py
|
Python
|
IMU/HAPT_Dataset.py
|
fcruciani/cnn_rf_har
|
2a7da7ab6eafa749ebbeb7d7f880b6f4b0e9aef7
|
[
"MIT"
] | 1
|
2020-10-27T02:39:18.000Z
|
2020-10-27T02:39:18.000Z
|
IMU/HAPT_Dataset.py
|
fcruciani/cnn_rf_har
|
2a7da7ab6eafa749ebbeb7d7f880b6f4b0e9aef7
|
[
"MIT"
] | null | null | null |
IMU/HAPT_Dataset.py
|
fcruciani/cnn_rf_har
|
2a7da7ab6eafa749ebbeb7d7f880b6f4b0e9aef7
|
[
"MIT"
] | null | null | null |
'''
File: HAPT_Dataset.py
Author: Federico Cruciani
Date: 03/10/2019
Version: 1.0
Description:
utility functions to load the
Human Activities and Postural Transitions (HAPT) dataset
'''
import numpy as np
import pandas as pd
from os.path import expanduser
from keras.utils import to_categorical
from multiprocessing import Pool as ThreadPool
import math
import scipy.signal
home = expanduser("~")
'''
Dataset Info - Labels:
1 WALKING
2 W_UPSTAIRS
3 W_DOWNSTAIRS
4 SITTING
5 STANDING
6 LAYING
7 STAND_TO_SIT
8 SIT_TO_STAND
9 SIT_TO_LIE
10 LIE_TO_SIT
11 STAND_TO_LIE
12 LIE_TO_STAND
'''
#Modify this line with the right path.
#Dataset available at: http://archive.ics.uci.edu/ml/machine-learning-databases/00341/
ucihapt_datapath = home+"/HAPT_Dataset/"
def get_test_uuids():
test_uuids = pd.read_csv(ucihapt_datapath+"Test/subject_id_test.txt",names=["UUID"])
all_test_uuids = np.unique(test_uuids.values)
return all_test_uuids
def get_train_uuids():
train_uuids = pd.read_csv(ucihapt_datapath+"Train/subject_id_train.txt",names=["UUID"])
all_train_uuids = np.unique(train_uuids.values)
return all_train_uuids
#Get Data no resampling
def get_all_data_multi_thread_noresampling_3D(uuids, n_threads):
print("Loading data")
print("Initiating pool")
print("resampling 50 -> 40 Hz disabled. Doing 3D ")
uuids_list = [ [x] for x in uuids ]
pool = ThreadPool(n_threads)
print("Pool map")
test_points = pool.map( get_all_data_noresampling_3D,uuids_list)
print("Pool map")
pool.close()
print("Pool join")
pool.join()
#Merging data from threads
print("Merging threads' data")
X_list = []
y_list = []
for res in test_points:
#dataset_size += len(res[1])
X_list.extend(res[0])
y_list.extend(res[1])
X_es = np.zeros((len(y_list),128,8))
X_es[:,:] = [x for x in X_list ]
y_es = np.zeros(len(y_list))
y_es[:] = [y for y in y_list]
y_scaled = to_categorical(y_es, num_classes=7)
return (X_es, y_scaled)
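# Hedged usage sketch (hypothetical helper, not in the original module): load the train and
# test splits with the multi-threaded, no-resampling loader. It assumes the HAPT archive is
# unpacked at ~/HAPT_Dataset/ as configured above; 4 worker processes is an arbitrary choice.
def _example_load_splits(n_threads=4):
    X_train, y_train = get_all_data_multi_thread_noresampling_3D(get_train_uuids(), n_threads)
    X_test, y_test = get_all_data_multi_thread_noresampling_3D(get_test_uuids(), n_threads)
    # X_*: (n_windows, 128, 8) float arrays, y_*: (n_windows, 7) one-hot label matrices
    return (X_train, y_train), (X_test, y_test)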
def get_all_data_noresampling_3D(uuids):
gt_df = pd.read_csv(ucihapt_datapath+"RawData/labels.txt",sep=r"\s",names=['EXP_ID','USER_ID','LABEL','START','END'],engine='python')
#exclude other uuids
#print( gt_df.head() )
filtered_df = pd.DataFrame(columns=['EXP_ID','USER_ID','LABEL','START','END'])
for uuid in uuids:
data_uuid = gt_df[ gt_df['USER_ID'] == uuid ]
filtered_df = pd.concat([filtered_df,data_uuid], ignore_index=True)
X_list = []
y_list = []
for index, row in filtered_df.iterrows():
exp_id = row['EXP_ID']
user_id = row['USER_ID']
start = row['START']
end = row['END']
label = row['LABEL']
str_user_id = str(user_id)
if user_id < 10:
str_user_id = "0"+str(user_id)
str_exp_id = str(exp_id)
if exp_id < 10:
str_exp_id = "0"+str(exp_id)
accfile = ucihapt_datapath+"RawData/acc_exp"+str_exp_id+"_user"+str_user_id+".txt"
gyrfile = ucihapt_datapath+"RawData/gyro_exp"+str_exp_id+"_user"+str_user_id+".txt"
#print(accfile)
acc_data_df = pd.read_csv(accfile,names=['x','y','z'],sep=r'\s|,', engine='python')
gyr_data_df = pd.read_csv(gyrfile,names=['x','y','z'],sep=r'\s|,', engine='python')
acc_x = acc_data_df['x'].values
acc_y = acc_data_df['y'].values
acc_z = acc_data_df['z'].values
gyr_x = gyr_data_df['x'].values
gyr_y = gyr_data_df['y'].values
gyr_z = gyr_data_df['z'].values
acc_mag = []
gyr_mag = []
for i in range(len(acc_x)):
acc_mag.append( math.sqrt( (acc_x[i]*acc_x[i]) + (acc_y[i]*acc_y[i]) + (acc_z[i]*acc_z[i]) ) )
gyr_mag.append( math.sqrt( (gyr_x[i]*gyr_x[i]) + (gyr_y[i]*gyr_y[i]) + (gyr_z[i]*gyr_z[i]) ) )
until = start + 128
while until < end:
X_point = np.zeros((128,8))
X_point[:,0] = acc_x[start:until]
X_point[:,1] = acc_y[start:until]
X_point[:,2] = acc_z[start:until]
X_point[:,3] = gyr_x[start:until]
X_point[:,4] = gyr_y[start:until]
X_point[:,5] = gyr_z[start:until]
X_point[:,6] = acc_mag[start:until]
X_point[:,7] = gyr_mag[start:until]
X_list.append(X_point)
#Remapping id from 1-12 to 0-6
if label < 7:
y_list.append(label-1)
else:
y_list.append(6) #considering all transitions as NULL class 6
start += 64
until += 64
X_es = np.zeros((len(y_list),128,8))
X_es[:,:] = [x for x in X_list ]
y_es = np.zeros(len(y_list))
y_es[:] = [y for y in y_list]
print("Finished loading: ",uuids)
return (X_es, y_es)
#Loads data resampling from 50 to 40 Hz
def get_all_data_multi_thread_resampling_3D(uuids, n_threads):
print("Loading Test set")
print("Initiating pool")
print("resampling 50 -> 40 Hz Enabled. Doing 3D ")
uuids_list = [ [x] for x in uuids ]
pool = ThreadPool(n_threads)
print("Pool map")
test_points = pool.map( get_all_data_resampling_3D,uuids_list)
print("Pool map")
pool.close()
print("Pool join")
pool.join()
#Merging data from threads
print("Merging threads' data")
X_list = []
y_list = []
for res in test_points:
#dataset_size += len(res[1])
X_list.extend(res[0])
y_list.extend(res[1])
X_es = np.zeros((len(y_list),128,8))
X_es[:,:] = [x for x in X_list ]
y_es = np.zeros(len(y_list))
y_es[:] = [y for y in y_list]
y_scaled = to_categorical(y_es, num_classes=7)
return (X_es, y_scaled)
def get_all_data_resampling_3D(uuids,resampling=True):
#Load groundtruth
gt_df = pd.read_csv(ucihapt_datapath+"RawData/labels.txt",sep=r"\s",names=['EXP_ID','USER_ID','LABEL','START','END'],engine='python')
#Filter data: only uuids
#Empty data frame
filtered_df = pd.DataFrame(columns=['EXP_ID','USER_ID','LABEL','START','END'])
for uuid in uuids:
#add data for user ID is in list
data_uuid = gt_df[ gt_df['USER_ID'] == uuid ]
filtered_df = pd.concat([filtered_df,data_uuid], ignore_index=True)
X_list = []
y_list = []
#Iterating filtered groundtruth
for index, row in filtered_df.iterrows():
exp_id = row['EXP_ID'] #Used to retrieve raw data file
user_id = row['USER_ID'] #Used to retrieve raw data file
start = row['START'] #Start of data segment with this label
end = row['END'] #End of segment
label = row['LABEL'] #Label of this segment
str_user_id = str(user_id)
if user_id < 10:
str_user_id = "0"+str(user_id)
str_exp_id = str(exp_id)
if exp_id < 10:
str_exp_id = "0"+str(exp_id)
#Load raw data file
accfile = ucihapt_datapath+"RawData/acc_exp"+str_exp_id+"_user"+str_user_id+".txt"
gyrfile = ucihapt_datapath+"RawData/gyro_exp"+str_exp_id+"_user"+str_user_id+".txt"
acc_data_df = pd.read_csv(accfile,names=['x','y','z'],sep=r'\s|,', engine='python')
gyr_data_df = pd.read_csv(gyrfile,names=['x','y','z'],sep=r'\s|,', engine='python')
acc_x = acc_data_df['x'].values
acc_y = acc_data_df['y'].values
acc_z = acc_data_df['z'].values
gyr_x = gyr_data_df['x'].values
gyr_y = gyr_data_df['y'].values
gyr_z = gyr_data_df['z'].values
#Isolate relevant data
acc_x = acc_x[ start:end ]
acc_y = acc_y[ start:end ]
acc_z = acc_z[ start:end ]
gyr_x = gyr_x[ start:end ]
gyr_y = gyr_y[ start:end ]
gyr_z = gyr_z[ start:end ]
#Calculate 3D magnitude of the signals
acc_mag = []
gyr_mag = []
for i in range(len(acc_x)):
acc_mag.append( math.sqrt( (acc_x[i]*acc_x[i]) + (acc_y[i]*acc_y[i]) + (acc_z[i]*acc_z[i]) ) )
gyr_mag.append( math.sqrt( (gyr_x[i]*gyr_x[i]) + (gyr_y[i]*gyr_y[i]) + (gyr_z[i]*gyr_z[i]) ) )
#Resampling factor: 50 / 40 = 1.25
#downsampling from 50 to 40 Hz for Extrasensory compatibility
num_samples_50Hz = end - start
num_samples_40Hz = num_samples_50Hz / 1.25
##DOWNSAMPLING from 50 to 40 Hz
acc_x = scipy.signal.resample( acc_x, int(num_samples_40Hz) )
acc_y = scipy.signal.resample( acc_y, int(num_samples_40Hz) )
acc_z = scipy.signal.resample( acc_z, int(num_samples_40Hz) )
gyr_x = scipy.signal.resample( gyr_x, int(num_samples_40Hz) )
gyr_y = scipy.signal.resample( gyr_y, int(num_samples_40Hz) )
gyr_z = scipy.signal.resample( gyr_z, int(num_samples_40Hz) )
acc_mag = scipy.signal.resample( acc_mag, int(num_samples_40Hz) )
gyr_mag = scipy.signal.resample( gyr_mag, int(num_samples_40Hz) )
segment_start = 0
segment_end = int(num_samples_40Hz)
#Performing segmentation: sliding window 50% overlap
until = segment_start + 128
while until < segment_end:
X_point = np.zeros((128,8))
X_point[:,0] = acc_x[segment_start:until]
X_point[:,1] = acc_y[segment_start:until]
X_point[:,2] = acc_z[segment_start:until]
X_point[:,3] = gyr_x[segment_start:until]
X_point[:,4] = gyr_y[segment_start:until]
X_point[:,5] = gyr_z[segment_start:until]
X_point[:,6] = acc_mag[segment_start:until]
X_point[:,7] = gyr_mag[segment_start:until]
X_list.append(X_point)
#All activities + transitions
if label < 7:
#all activities except transitions
y_list.append(label-1)
else:
#putting all transitions in same class
y_list.append(6)
segment_start += 64
until += 64
X_es = np.zeros((len(y_list),128,8))
X_es[:,:] = [x for x in X_list ]
y_es = np.zeros(len(y_list))
y_es[:] = [y for y in y_list]
#y_scaled = to_categorical(y_es, num_classes=7)
print("Finished loading: ",uuids)
return (X_es, y_es)
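# Hedged aside on the 50 -> 40 Hz step used above: scipy.signal.resample performs
# Fourier-domain resampling to a requested number of samples. The signal below is synthetic
# and only illustrates the same 1.25 downsampling factor as the loader.
if __name__ == '__main__':
    fs_in, fs_out, seconds = 50, 40, 2
    t = np.linspace(0, seconds, fs_in * seconds, endpoint=False)
    signal_50hz = np.sin(2 * np.pi * 1.0 * t)            # 1 Hz sine sampled at 50 Hz
    n_out = int(len(signal_50hz) / (fs_in / fs_out))      # 100 / 1.25 = 80 samples
    signal_40hz = scipy.signal.resample(signal_50hz, n_out)
    print(len(signal_50hz), '->', len(signal_40hz))       # 100 -> 80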
| 31.95053
| 133
| 0.692767
|
b39d333cf510ada23673aba270752e6003731d2b
| 2,963
|
py
|
Python
|
tests/system/gapic/v1/test_system_speech_v1.py
|
zmtkr/python-speech
|
6000370242a4c548a3306ae274a0302e2bbb2445
|
[
"Apache-2.0"
] | 263
|
2020-02-05T08:29:22.000Z
|
2022-03-28T06:29:04.000Z
|
tests/system/gapic/v1/test_system_speech_v1.py
|
zmtkr/python-speech
|
6000370242a4c548a3306ae274a0302e2bbb2445
|
[
"Apache-2.0"
] | 162
|
2020-02-03T23:06:13.000Z
|
2022-03-30T22:42:14.000Z
|
tests/system/gapic/v1/test_system_speech_v1.py
|
zmtkr/python-speech
|
6000370242a4c548a3306ae274a0302e2bbb2445
|
[
"Apache-2.0"
] | 214
|
2019-12-19T00:28:52.000Z
|
2022-03-27T16:11:26.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import requests
from google.cloud import speech_v1
class TestSystemSpeech(object):
def test_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = {
"encoding": speech_v1.RecognitionConfig.AudioEncoding.FLAC,
"language_code": "en-US",
"sample_rate_hertz": 16000,
}
uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
audio = {"uri": uri}
response = client.recognize(config=config, audio=audio)
assert response.results[0].alternatives[0].transcript is not None
def test_long_running_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = speech_v1.RecognitionConfig(
encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC,
language_code="en-US",
sample_rate_hertz=16000,
)
uri = "gs://{}/speech/brooklyn.flac".format(BUCKET)
audio = {"uri": uri}
response = client.long_running_recognize(config=config, audio=audio)
assert response.result() is not None
def test_streaming_recognize(self):
try:
BUCKET = os.environ["GOOGLE_CLOUD_TESTS_SPEECH_BUCKET"]
except KeyError:
BUCKET = "cloud-samples-tests"
client = speech_v1.SpeechClient()
config = speech_v1.RecognitionConfig(
encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC,
language_code="en-US",
sample_rate_hertz=16000,
)
streamingConfig = speech_v1.StreamingRecognitionConfig(config=config)
uri = "https://storage.googleapis.com/{}/speech/brooklyn.flac".format(BUCKET)
streaming_requests = [
speech_v1.StreamingRecognizeRequest(audio_content=requests.get(uri).content)
]
responses = client.streaming_recognize(
config=streamingConfig, requests=streaming_requests
)
for response in responses:
for result in response.results:
assert result.alternatives[0].transcript is not None
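# Hedged sketch (not part of the test suite): the same v1 client can transcribe a local FLAC
# file by sending the audio bytes inline instead of a gs:// URI. The path below is a
# placeholder and valid application-default credentials are assumed.
def recognize_local_file(path="brooklyn.flac"):
    client = speech_v1.SpeechClient()
    config = speech_v1.RecognitionConfig(
        encoding=speech_v1.RecognitionConfig.AudioEncoding.FLAC,
        language_code="en-US",
        sample_rate_hertz=16000,
    )
    with io.open(path, "rb") as f:
        audio = {"content": f.read()}
    response = client.recognize(config=config, audio=audio)
    return [result.alternatives[0].transcript for result in response.results]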
| 31.189474
| 88
| 0.658454
|
26f33448f7d9ce6bb0cd1cb8f8cc7f82e45c314c
| 3,804
|
py
|
Python
|
diaparser/modules/dropout.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 38
|
2020-10-21T17:43:19.000Z
|
2022-03-01T15:15:25.000Z
|
diaparser/modules/dropout.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 11
|
2020-11-01T14:55:38.000Z
|
2022-02-03T19:51:06.000Z
|
diaparser/modules/dropout.py
|
zoonru/diaparser
|
afae32ca91b84b64c163c749599dfa264e647773
|
[
"MIT"
] | 7
|
2020-12-18T10:46:51.000Z
|
2022-01-11T22:03:11.000Z
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
r"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f"p={self.p}"
if self.batch_first:
s += f", batch_first={self.batch_first}"
return f"{self.__class__.__name__}({s})"
def forward(self, x):
r"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x *= mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class IndependentDropout(nn.Module):
r"""
For :math:`N` tensors, an independent dropout mask is applied to each.
When :math:`N-M` of them are dropped, the remaining :math:`M` ones are scaled by a factor of :math:`N/M` to compensate,
and when all of them are dropped together, zeros are returned.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
Examples:
>>> x, y = torch.ones(1, 3, 5), torch.ones(1, 3, 5)
>>> x, y = IndependentDropout()(x, y)
>>> x
tensor([[[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.],
[2., 2., 2., 2., 2.]]])
>>> y
tensor([[[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[0., 0., 0., 0., 0.]]])
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def __repr__(self):
return f"{self.__class__.__name__}(p={self.p})"
def forward(self, *items):
r"""
Args:
items (list[~torch.Tensor]):
A list of tensors that have the same shape except the last dimension.
Returns:
The returned tensors are of the same shape as `items`.
"""
if self.training:
masks = [x.new_empty(x.shape[:2]).bernoulli_(1 - self.p)
for x in items]
total = sum(masks)
scale = len(items) / total.max(torch.ones_like(total))
masks = [mask * scale for mask in masks]
items = [item * mask.unsqueeze(dim=-1)
for item, mask in zip(items, masks)]
return items
class TokenDropout(nn.Module):
def __init__(self, p=0.5, value=0):
super(TokenDropout, self).__init__()
self.p = p
self.value = value
def extra_repr(self):
return f"p={self.p}, value={self.value}"
def forward(self, x):
if self.training:
mask = torch.rand_like(x, dtype=torch.float) < self.p
x.masked_fill_(mask, self.value)
return x
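# Hedged usage sketch: how these modules are typically combined in a model's forward pass.
# Tensor sizes below are arbitrary and the modules are in training mode by default, so the
# dropout is actually applied here.
if __name__ == '__main__':
    torch.manual_seed(0)
    word_embed = torch.randn(2, 10, 16)                       # [batch, seq_len, dim]
    char_embed = torch.randn(2, 10, 16)
    word_embed, char_embed = IndependentDropout(p=0.33)(word_embed, char_embed)
    hidden = SharedDropout(p=0.33)(word_embed + char_embed)   # same mask reused at every time step
    token_ids = torch.randint(1, 100, (2, 10))
    token_ids = TokenDropout(p=0.1, value=0)(token_ids)       # randomly replace tokens with index 0
    print(hidden.shape, token_ids.shape)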
| 27.970588
| 123
| 0.493954
|
a8196b30302f6ebc48587ac8a8e55828ac8e01b8
| 9,589
|
py
|
Python
|
homeassistant/components/ring/__init__.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/ring/__init__.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/ring/__init__.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Support for Ring Doorbell/Chimes."""
from __future__ import annotations
import asyncio
from datetime import timedelta
from functools import partial
import logging
from pathlib import Path
from oauthlib.oauth2 import AccessDeniedError
import requests
from ring_doorbell import Auth, Ring
from homeassistant.const import __version__
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.async_ import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Ring.com"
NOTIFICATION_ID = "ring_notification"
NOTIFICATION_TITLE = "Ring Setup"
DOMAIN = "ring"
DEFAULT_ENTITY_NAMESPACE = "ring"
PLATFORMS = ("binary_sensor", "light", "sensor", "switch", "camera")
async def async_setup(hass, config):
"""Set up the Ring component."""
if DOMAIN not in config:
return True
def legacy_cleanup():
"""Clean up old tokens."""
old_cache = Path(hass.config.path(".ring_cache.pickle"))
if old_cache.is_file():
old_cache.unlink()
await hass.async_add_executor_job(legacy_cleanup)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
def token_updater(token):
"""Handle from sync context when token is updated."""
run_callback_threadsafe(
hass.loop,
partial(
hass.config_entries.async_update_entry,
entry,
data={**entry.data, "token": token},
),
).result()
auth = Auth(f"HomeAssistant/{__version__}", entry.data["token"], token_updater)
ring = Ring(auth)
try:
await hass.async_add_executor_job(ring.update_data)
except AccessDeniedError:
_LOGGER.error("Access token is no longer valid. Please set up Ring again")
return False
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"api": ring,
"devices": ring.devices(),
"device_data": GlobalDataUpdater(
hass, "device", entry.entry_id, ring, "update_devices", timedelta(minutes=1)
),
"dings_data": GlobalDataUpdater(
hass,
"active dings",
entry.entry_id,
ring,
"update_dings",
timedelta(seconds=5),
),
"history_data": DeviceDataUpdater(
hass,
"history",
entry.entry_id,
ring,
lambda device: device.history(limit=10),
timedelta(minutes=1),
),
"health_data": DeviceDataUpdater(
hass,
"health",
entry.entry_id,
ring,
lambda device: device.update_health_data(),
timedelta(minutes=1),
),
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
if hass.services.has_service(DOMAIN, "update"):
return True
async def async_refresh_all(_):
"""Refresh all ring data."""
for info in hass.data[DOMAIN].values():
await info["device_data"].async_refresh_all()
await info["dings_data"].async_refresh_all()
await hass.async_add_executor_job(info["history_data"].refresh_all)
await hass.async_add_executor_job(info["health_data"].refresh_all)
# register service
hass.services.async_register(DOMAIN, "update", async_refresh_all)
return True
async def async_unload_entry(hass, entry):
"""Unload Ring entry."""
if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
return False
hass.data[DOMAIN].pop(entry.entry_id)
if len(hass.data[DOMAIN]) != 0:
return True
# Last entry unloaded, clean up service
hass.services.async_remove(DOMAIN, "update")
return True
class GlobalDataUpdater:
"""Data storage for single API endpoint."""
def __init__(
self,
hass: HomeAssistant,
data_type: str,
config_entry_id: str,
ring: Ring,
update_method: str,
update_interval: timedelta,
) -> None:
"""Initialize global data updater."""
self.hass = hass
self.data_type = data_type
self.config_entry_id = config_entry_id
self.ring = ring
self.update_method = update_method
self.update_interval = update_interval
self.listeners = []
self._unsub_interval = None
@callback
def async_add_listener(self, update_callback):
"""Listen for data updates."""
# This is the first listener, set up interval.
if not self.listeners:
self._unsub_interval = async_track_time_interval(
self.hass, self.async_refresh_all, self.update_interval
)
self.listeners.append(update_callback)
@callback
def async_remove_listener(self, update_callback):
"""Remove data update."""
self.listeners.remove(update_callback)
if not self.listeners:
self._unsub_interval()
self._unsub_interval = None
async def async_refresh_all(self, _now: int | None = None) -> None:
"""Time to update."""
if not self.listeners:
return
try:
await self.hass.async_add_executor_job(
getattr(self.ring, self.update_method)
)
except AccessDeniedError:
_LOGGER.error("Ring access token is no longer valid. Set up Ring again")
await self.hass.config_entries.async_unload(self.config_entry_id)
return
except requests.Timeout:
_LOGGER.warning(
"Time out fetching Ring %s data",
self.data_type,
)
return
except requests.RequestException as err:
_LOGGER.warning(
"Error fetching Ring %s data: %s",
self.data_type,
err,
)
return
for update_callback in self.listeners:
update_callback()
class DeviceDataUpdater:
"""Data storage for device data."""
def __init__(
self,
hass: HomeAssistant,
data_type: str,
config_entry_id: str,
ring: Ring,
update_method: str,
update_interval: timedelta,
) -> None:
"""Initialize device data updater."""
self.data_type = data_type
self.hass = hass
self.config_entry_id = config_entry_id
self.ring = ring
self.update_method = update_method
self.update_interval = update_interval
self.devices = {}
self._unsub_interval = None
async def async_track_device(self, device, update_callback):
"""Track a device."""
if not self.devices:
self._unsub_interval = async_track_time_interval(
self.hass, self.refresh_all, self.update_interval
)
if device.device_id not in self.devices:
self.devices[device.device_id] = {
"device": device,
"update_callbacks": [update_callback],
"data": None,
}
# Store task so that other concurrent requests can wait for us to finish and
# data be available.
self.devices[device.device_id]["task"] = asyncio.current_task()
self.devices[device.device_id][
"data"
] = await self.hass.async_add_executor_job(self.update_method, device)
self.devices[device.device_id].pop("task")
else:
self.devices[device.device_id]["update_callbacks"].append(update_callback)
# If someone is currently fetching data as part of the initialization, wait for them
if "task" in self.devices[device.device_id]:
await self.devices[device.device_id]["task"]
update_callback(self.devices[device.device_id]["data"])
@callback
def async_untrack_device(self, device, update_callback):
"""Untrack a device."""
self.devices[device.device_id]["update_callbacks"].remove(update_callback)
if not self.devices[device.device_id]["update_callbacks"]:
self.devices.pop(device.device_id)
if not self.devices:
self._unsub_interval()
self._unsub_interval = None
def refresh_all(self, _=None):
"""Refresh all registered devices."""
for device_id, info in self.devices.items():
try:
data = info["data"] = self.update_method(info["device"])
except AccessDeniedError:
_LOGGER.error("Ring access token is no longer valid. Set up Ring again")
self.hass.add_job(
self.hass.config_entries.async_unload(self.config_entry_id)
)
return
except requests.Timeout:
_LOGGER.warning(
"Time out fetching Ring %s data for device %s",
self.data_type,
device_id,
)
continue
except requests.RequestException as err:
_LOGGER.warning(
"Error fetching Ring %s data for device %s: %s",
self.data_type,
device_id,
err,
)
continue
for update_callback in info["update_callbacks"]:
self.hass.loop.call_soon_threadsafe(update_callback, data)
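# Hedged sketch (illustrative class, not part of this module): the typical way an entity
# hooks into GlobalDataUpdater via the listener API above. The "device_data" key mirrors the
# hass.data layout built in async_setup_entry; everything else is a placeholder.
class ExampleRingEntity:
    def __init__(self, hass, config_entry_id):
        self._device_data = hass.data[DOMAIN][config_entry_id]["device_data"]

    async def async_added_to_hass(self):
        # the first listener starts the polling interval; later ones just join it
        self._device_data.async_add_listener(self._handle_update)

    async def async_will_remove_from_hass(self):
        # removing the last listener cancels the interval
        self._device_data.async_remove_listener(self._handle_update)

    @callback
    def _handle_update(self):
        # re-read state from the shared Ring object and write it to Home Assistant
        pass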
| 31.751656
| 96
| 0.601314
|
9605c31bf5982eabb5f08e48f5ef5f091d36422c
| 15,358
|
py
|
Python
|
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationnegotiateaction.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationnegotiateaction.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationnegotiateaction.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationnegotiateaction(base_resource) :
""" Configuration for Negotiate action resource. """
def __init__(self) :
self._name = None
self._domain = None
self._domainuser = None
self._domainuserpasswd = None
self._ou = None
self._defaultauthenticationgroup = None
self._keytab = None
self._ntlmpath = None
self._kcdspn = None
self.___count = None
@property
def name(self) :
r"""Name for the AD KDC server profile (negotiate action).
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AD KDC server profile is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the AD KDC server profile (negotiate action).
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AD KDC server profile is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def domain(self) :
r"""Domain name of the service principal that represnts Netscaler.<br/>Minimum length = 1.
"""
try :
return self._domain
except Exception as e:
raise e
@domain.setter
def domain(self, domain) :
r"""Domain name of the service principal that represnts Netscaler.<br/>Minimum length = 1
"""
try :
self._domain = domain
except Exception as e:
raise e
@property
def domainuser(self) :
r"""User name of the account that is mapped with Netscaler principal. This can be given along with domain and password when keytab file is not available. If username is given along with keytab file, then that keytab file will be searched for this user's credentials.<br/>Minimum length = 1.
"""
try :
return self._domainuser
except Exception as e:
raise e
@domainuser.setter
def domainuser(self, domainuser) :
r"""User name of the account that is mapped with Netscaler principal. This can be given along with domain and password when keytab file is not available. If username is given along with keytab file, then that keytab file will be searched for this user's credentials.<br/>Minimum length = 1
"""
try :
self._domainuser = domainuser
except Exception as e:
raise e
@property
def domainuserpasswd(self) :
r"""Password of the account that is mapped to the NetScaler principal.<br/>Minimum length = 1.
"""
try :
return self._domainuserpasswd
except Exception as e:
raise e
@domainuserpasswd.setter
def domainuserpasswd(self, domainuserpasswd) :
r"""Password of the account that is mapped to the NetScaler principal.<br/>Minimum length = 1
"""
try :
self._domainuserpasswd = domainuserpasswd
except Exception as e:
raise e
@property
def ou(self) :
r"""Active Directory organizational units (OU) attribute.<br/>Minimum length = 1.
"""
try :
return self._ou
except Exception as e:
raise e
@ou.setter
def ou(self, ou) :
r"""Active Directory organizational units (OU) attribute.<br/>Minimum length = 1
"""
try :
self._ou = ou
except Exception as e:
raise e
@property
def defaultauthenticationgroup(self) :
r"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.
"""
try :
return self._defaultauthenticationgroup
except Exception as e:
raise e
@defaultauthenticationgroup.setter
def defaultauthenticationgroup(self, defaultauthenticationgroup) :
r"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.
"""
try :
self._defaultauthenticationgroup = defaultauthenticationgroup
except Exception as e:
raise e
@property
def keytab(self) :
r"""The path to the keytab file that is used to decrypt kerberos tickets presented to Netscaler. If keytab is not available, domain/username/password can be specified in the negotiate action configuration.<br/>Minimum length = 1.
"""
try :
return self._keytab
except Exception as e:
raise e
@keytab.setter
def keytab(self, keytab) :
r"""The path to the keytab file that is used to decrypt kerberos tickets presented to Netscaler. If keytab is not available, domain/username/password can be specified in the negotiate action configuration.<br/>Minimum length = 1
"""
try :
self._keytab = keytab
except Exception as e:
raise e
@property
def ntlmpath(self) :
r"""The path to the site that is enabled for NTLM authentication, including FQDN of the server. This is used when clients fallback to NTLM.<br/>Minimum length = 1.
"""
try :
return self._ntlmpath
except Exception as e:
raise e
@ntlmpath.setter
def ntlmpath(self, ntlmpath) :
r"""The path to the site that is enabled for NTLM authentication, including FQDN of the server. This is used when clients fallback to NTLM.<br/>Minimum length = 1
"""
try :
self._ntlmpath = ntlmpath
except Exception as e:
raise e
@property
def kcdspn(self) :
r"""Host SPN extracted from keytab file.
"""
try :
return self._kcdspn
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationnegotiateaction_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationnegotiateaction
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add authenticationnegotiateaction.
"""
try :
if type(resource) is not list :
addresource = authenticationnegotiateaction()
addresource.name = resource.name
addresource.domain = resource.domain
addresource.domainuser = resource.domainuser
addresource.domainuserpasswd = resource.domainuserpasswd
addresource.ou = resource.ou
addresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
addresource.keytab = resource.keytab
addresource.ntlmpath = resource.ntlmpath
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].domain = resource[i].domain
addresources[i].domainuser = resource[i].domainuser
addresources[i].domainuserpasswd = resource[i].domainuserpasswd
addresources[i].ou = resource[i].ou
addresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
addresources[i].keytab = resource[i].keytab
addresources[i].ntlmpath = resource[i].ntlmpath
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete authenticationnegotiateaction.
"""
try :
if type(resource) is not list :
deleteresource = authenticationnegotiateaction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
r""" Use this API to update authenticationnegotiateaction.
"""
try :
if type(resource) is not list :
updateresource = authenticationnegotiateaction()
updateresource.name = resource.name
updateresource.domain = resource.domain
updateresource.domainuser = resource.domainuser
updateresource.domainuserpasswd = resource.domainuserpasswd
updateresource.ou = resource.ou
updateresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
updateresource.keytab = resource.keytab
updateresource.ntlmpath = resource.ntlmpath
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].domain = resource[i].domain
updateresources[i].domainuser = resource[i].domainuser
updateresources[i].domainuserpasswd = resource[i].domainuserpasswd
updateresources[i].ou = resource[i].ou
updateresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
updateresources[i].keytab = resource[i].keytab
updateresources[i].ntlmpath = resource[i].ntlmpath
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of authenticationnegotiateaction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = authenticationnegotiateaction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationnegotiateaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the authenticationnegotiateaction resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationnegotiateaction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationnegotiateaction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationnegotiateaction() for _ in range(len(name))]
obj = [authenticationnegotiateaction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationnegotiateaction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of authenticationnegotiateaction resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationnegotiateaction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the authenticationnegotiateaction resources configured on NetScaler.
"""
try :
obj = authenticationnegotiateaction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of authenticationnegotiateaction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationnegotiateaction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class authenticationnegotiateaction_response(base_response) :
def __init__(self, length=1) :
self.authenticationnegotiateaction = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationnegotiateaction = [authenticationnegotiateaction() for _ in range(length)]
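# Hedged usage sketch (not part of the generated bindings): connect with nitro_service and
# add/count negotiate actions. Host, credentials and the keytab path are placeholders.
def _example_usage():
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("ns.example.com", "https")
    client.login("nsroot", "password", 3600)
    try:
        new_action = authenticationnegotiateaction()
        new_action.name = "negotiate_act_1"
        new_action.domain = "EXAMPLE.COM"
        new_action.keytab = "/nsconfig/krb/ns.keytab"
        authenticationnegotiateaction.add(client, new_action)
        print(authenticationnegotiateaction.count(client))
    finally:
        client.logout()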
| 35.468822
| 293
| 0.719365
|
dcc0d328db6ea81b64e1fd1fd7af946fd70cf4d5
| 15,165
|
py
|
Python
|
src/solutions/common/integrations/cirklo/cirklo.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/integrations/cirklo/cirklo.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/integrations/cirklo/cirklo.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import urllib
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.ext import ndb, db
from typing import List, Optional
from mcfw.cache import cached
from mcfw.rpc import arguments, returns
from mcfw.utils import Enum
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.maps.services import search_services_by_tags, SearchTag
from rogerthat.bizz.opening_hours import get_opening_hours_info
from rogerthat.consts import DEBUG
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import OpeningHours, ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.to import convert_to_unicode, TO
from rogerthat.to.service import SendApiCallCallbackResultTO, UserDetailsTO
from rogerthat.utils.service import get_service_user_from_service_identity_user
from solution_server_settings import get_solution_server_settings, SolutionServerSettings
from solutions import translate
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.models import CirkloUserVouchers, VoucherProviderId, \
CirkloCity
from solutions.common.integrations.cirklo.to import AppVoucher, AppVoucherList
from solutions.common.models import SolutionBrandingSettings
class CirkloApiMethod(Enum):
GET_VOUCHERS = 'integrations.cirklo.getvouchers'
ADD_VOUCHER = 'integrations.cirklo.addvoucher'
DELETE_VOUCHER = 'integrations.cirklo.deletevoucher'
GET_TRANSACTIONS = 'integrations.cirklo.gettransactions'
GET_MERCHANTS = 'integrations.cirklo.getmerchants'
class UnknownMethodException(Exception):
def __init__(self, method):
super(UnknownMethodException, self).__init__('Unknown cirklo method: ' + method)
class TranslatedException(Exception):
def __init__(self, msg):
super(TranslatedException, self).__init__(msg)
def _cirklo_api_call(settings, url, method, payload=None, staging=False):
# type: (SolutionServerSettings, str, str, dict) -> UserRPC
url_params = ('?' + urllib.urlencode(payload)) if payload and method == urlfetch.GET else ''
url = settings.cirklo_server_url + url + url_params
if staging and 'staging-app' not in url:
url = url.replace('https://', 'https://staging-app-')
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'x-api-key': settings.cirklo_api_key_staging if staging else settings.cirklo_api_key
}
if method in (urlfetch.PUT, urlfetch.POST) and payload:
payload = json.dumps(payload)
else:
payload = None
if DEBUG:
logging.debug('%s %s', method, url)
rpc = urlfetch.create_rpc(30)
return urlfetch.make_fetch_call(rpc, url, payload, method, headers, follow_redirects=False)
def list_cirklo_cities(staging):
# type: (bool) -> List[dict]
rpc = _cirklo_api_call(get_solution_server_settings(), '/cities', urlfetch.GET, staging=staging)
result = rpc.get_result() # type: urlfetch._URLFetchResult
if result.status_code == 200:
parsed = json.loads(result.content)
if staging:
for city in parsed:
city['id'] = 'staging-' + city['id']
return parsed
else:
logging.debug('%s\n%s', result.status_code, result.content)
raise Exception('Unexpected result from cirklo api')
def list_whitelisted_merchants(city_id):
staging = city_id.startswith('staging-')
payload = {'cityId': city_id.replace('staging-', ''),
'includeShops': True}
rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.GET, payload, staging)
result = rpc.get_result() # type: urlfetch._URLFetchResult
if result.status_code == 200:
return json.loads(result.content)
else:
logging.debug('%s\n%s', result.status_code, result.content)
raise Exception('Unexpected result from cirklo api')
def check_merchant_whitelisted(city_id, email):
staging = city_id.startswith('staging-')
payload = {'cityId': city_id.replace('staging-', ''),
'emails': email}
rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.GET, payload, staging)
result = rpc.get_result() # type: urlfetch._URLFetchResult
if result.status_code == 200:
merchant_list = json.loads(result.content)
return len(merchant_list) > 0
else:
logging.debug('%s\n%s', result.status_code, result.content)
return False
def whitelist_merchant(city_id, email):
staging = city_id.startswith('staging-')
payload = {'cityId': city_id.replace('staging-', ''),
'whitelistEntries': [{'email': email}]}
rpc = _cirklo_api_call(get_solution_server_settings(), '/whitelists', urlfetch.POST, payload, staging)
result = rpc.get_result() # type: urlfetch._URLFetchResult
if result.status_code != 201:
logging.debug('%s\n%s', result.status_code, result.content)
raise Exception('Unexpected result from cirklo api')
def add_voucher(service_user, app_user, qr_content):
# type: (users.User, users.User, str) -> dict
try:
parsed = json.loads(qr_content)
voucher_id = parsed.get('voucher')
except ValueError:
if len(qr_content) == 36:
# Some QRs for Dilbeek made in December 2020 contained just the voucher id and no JSON
voucher_id = qr_content
else:
voucher_id = None
voucher_details = None
if voucher_id:
rpc = _cirklo_api_call(get_solution_server_settings(), '/vouchers/' + voucher_id, urlfetch.GET)
result = rpc.get_result() # type: urlfetch._URLFetchResult
if result.status_code == 200:
voucher_details = json.loads(result.content)
voucher_details['id'] = voucher_id
elif result.status_code in (400, 404):
logging.debug('%s\n%s', result.status_code, result.content)
voucher_id = None
else:
logging.debug('%s\n%s', result.status_code, result.content)
raise Exception('Unexpected result from cirklo api')
if not voucher_id:
sln_settings = get_solution_settings(service_user)
msg = translate(sln_settings.main_language, 'not_a_valid_cirklo_qr_code')
raise TranslatedException(msg)
key = CirkloUserVouchers.create_key(app_user)
vouchers = key.get() or CirkloUserVouchers(key=key) # type: CirkloUserVouchers
if voucher_id not in vouchers.voucher_ids:
vouchers.voucher_ids.append(voucher_id)
vouchers.put()
else:
sln_settings = get_solution_settings(service_user)
msg = translate(sln_settings.main_language, 'duplicate_cirklo_voucher')
raise TranslatedException(msg)
voucher = AppVoucher.from_cirklo(voucher_id, voucher_details, datetime.utcnow())
return {
'voucher': voucher.to_dict(),
'city': {
'city_id': voucher.cityId,
'logo_url': get_logo_url_for_city_id(voucher.cityId),
}
}
def delete_voucher(app_user, voucher_id):
vouchers = CirkloUserVouchers.create_key(app_user).get() # type: CirkloUserVouchers
if vouchers and voucher_id in vouchers.voucher_ids:
vouchers.voucher_ids.remove(voucher_id)
vouchers.put()
def get_user_vouchers_ids(app_user):
vouchers = CirkloUserVouchers.create_key(app_user).get() # type: CirkloUserVouchers
return vouchers.voucher_ids if vouchers else []
def get_logo_url_for_city_id(city_id):
return get_logo_url_for_city_ids([city_id])[city_id]
def get_logo_url_for_city_ids(city_ids):
city_keys = [CirkloCity.create_key(city_id) for city_id in city_ids]
cities = ndb.get_multi(city_keys) # type: List[CirkloCity]
logos = {}
for city_id, city in zip(city_ids, cities):
if city:
if city.logo_url:
logos[city_id] = city.logo_url
else:
branding_settings = db.get(SolutionBrandingSettings.create_key(users.User(city.service_user_email)))
logos[city_id] = branding_settings.avatar_url
else:
logos[city_id] = 'https://storage.googleapis.com/oca-files/misc/vouchers_default_city.png'
return logos
@cached(0)
@returns(unicode)
@arguments(service_email=unicode)
def get_city_id_by_service_email(service_email):
cirklo_city = CirkloCity.get_by_service_email(service_email)
return cirklo_city.city_id if cirklo_city else None
def get_vouchers(service_user, app_user):
# type: (users.User, users.User) -> AppVoucherList
ids = get_user_vouchers_ids(app_user)
settings = get_solution_server_settings()
rpcs = [(voucher_id, _cirklo_api_call(settings, '/vouchers/' + voucher_id, urlfetch.GET)) for voucher_id in ids]
vouchers = [] # type: List[AppVoucher]
current_date = datetime.utcnow()
for voucher_id, rpc in rpcs:
result = rpc.get_result() # type: urlfetch._URLFetchResult
logging.debug('%s: %s', result.status_code, result.content)
if result.status_code == 200:
vouchers.append(AppVoucher.from_cirklo(voucher_id, json.loads(result.content), current_date))
else:
logging.error('Invalid cirklo api response: %s', result.status_code)
try:
main_city_id = get_city_id_by_service_email(service_user.email())
except:
main_city_id = None
if not main_city_id:
logging.error('No cityId found for service %s' % service_user.email())
sln_settings = get_solution_settings(service_user)
msg = translate(sln_settings.main_language, 'cirklo_vouchers_not_live_yet')
raise TranslatedException(msg)
city_ids = {voucher.cityId for voucher in vouchers}
city_ids.add(main_city_id)
logos = get_logo_url_for_city_ids(list(city_ids))
voucher_list = AppVoucherList()
voucher_list.results = vouchers
voucher_list.main_city_id = main_city_id
voucher_list.cities = {}
for city_id, logo_url in logos.iteritems():
voucher_list.cities[city_id] = {'logo_url': logo_url}
return voucher_list
def get_merchants_by_community(community_id, language, cursor, page_size, query):
# type: (int, str, Optional[str], int, str) -> dict
community = get_community(community_id)
# Always filter by community id
tags = [
SearchTag.community(community_id),
SearchTag.environment(community.demo),
SearchTag.vouchers(VoucherProviderId.CIRKLO)
]
service_identity_users, new_cursor = search_services_by_tags(tags, cursor, page_size, query)
service_users = [get_service_user_from_service_identity_user(service_user)
for service_user in service_identity_users]
info_keys = [ServiceInfo.create_key(service_user, ServiceIdentity.DEFAULT) for service_user in service_users]
hours_keys = [OpeningHours.create_key(service_user, ServiceIdentity.DEFAULT) for service_user in service_users]
models = ndb.get_multi(info_keys + hours_keys)
infos = models[0: len(info_keys)]
hours = models[len(info_keys):]
results = []
for service_info, opening_hours in zip(infos, hours): # type: ServiceInfo, Optional[OpeningHours]
opening_hours_dict = None
if opening_hours:
now_open, title, subtitle, weekday_text = get_opening_hours_info(opening_hours, service_info.timezone,
language)
opening_hours_dict = {
'open_now': now_open,
'title': title,
'subtitle': subtitle,
'weekday_text': [t.to_dict() for t in weekday_text]
}
results.append({
'id': service_info.service_user.email(),
'name': service_info.name,
'address': service_info.addresses[0].to_dict() if service_info.addresses else None,
'email_addresses': [{'name': email.name, 'value': email.value} for email in service_info.email_addresses],
'websites': [{'name': website.name, 'value': website.value} for website in service_info.websites],
'phone_numbers': [{'name': phone.name, 'value': phone.value} for phone in service_info.phone_numbers],
'opening_hours': opening_hours_dict,
})
return {
'results': results,
'cursor': new_cursor,
'more': new_cursor is not None,
}
def handle_method(service_user, email, method, params, tag, service_identity, user_details):
# type: (users.User, str, str, str, str, str, List[UserDetailsTO]) -> SendApiCallCallbackResultTO
response = SendApiCallCallbackResultTO()
try:
json_data = json.loads(params) if params else {}
user = user_details[0]
app_user = user.toAppUser()
if method == CirkloApiMethod.GET_VOUCHERS:
result = get_vouchers(service_user, app_user)
elif method == CirkloApiMethod.ADD_VOUCHER:
qr_content = json_data['qrContent']
result = add_voucher(service_user, app_user, qr_content)
elif method == CirkloApiMethod.DELETE_VOUCHER:
delete_voucher(app_user, json_data['id'])
result = {}
elif method == CirkloApiMethod.GET_TRANSACTIONS:
# Not implemented yet
result = {'results': []}
elif method == CirkloApiMethod.GET_MERCHANTS:
language = get_user_profile(app_user).language
cursor = json_data.get('cursor')
page_size = json_data.get('page_size', 20)
query = (json_data.get('query') or '').strip()
user_profile = get_user_profile(app_user)
result = get_merchants_by_community(user_profile.community_id, language, cursor, page_size, query)
else:
raise UnknownMethodException(method)
response.result = convert_to_unicode(json.dumps(result.to_dict() if isinstance(result, TO) else result))
except TranslatedException as e:
logging.debug('User error while handling cirklo callback: %s', e.message)
response.error = e.message
except Exception:
logging.error('Error while handling cirklo call %s' % method, exc_info=True)
sln_settings = get_solution_settings(service_user)
response.error = translate(sln_settings.main_language, 'error-occured-unknown')
return response
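# Hedged sketch: how the whitelisting helpers above are typically combined when a merchant
# signs up for a city. city_id and email are placeholders; ids prefixed with 'staging-' are
# routed to the staging Cirklo environment inside the helpers themselves.
def _ensure_merchant_whitelisted(city_id, email):
    if not check_merchant_whitelisted(city_id, email):
        whitelist_merchant(city_id, email)
    return list_whitelisted_merchants(city_id)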
| 43.82948
| 118
| 0.696406
|
8bb927548f2ed7cda811df262fa312222a09f723
| 15,288
|
py
|
Python
|
tests/functional/clients/standalone/package_list/fixtures/alpine.py
|
rbrady/anchore-engine
|
5a5c492d76b5f911e60be422912fe8d42a74872b
|
[
"Apache-2.0"
] | 1
|
2021-09-12T07:44:44.000Z
|
2021-09-12T07:44:44.000Z
|
tests/functional/clients/standalone/package_list/fixtures/alpine.py
|
rbrady/anchore-engine
|
5a5c492d76b5f911e60be422912fe8d42a74872b
|
[
"Apache-2.0"
] | 3
|
2021-07-15T19:58:01.000Z
|
2021-09-16T09:39:46.000Z
|
tests/functional/clients/standalone/package_list/fixtures/alpine.py
|
rbrady/anchore-engine
|
5a5c492d76b5f911e60be422912fe8d42a74872b
|
[
"Apache-2.0"
] | null | null | null |
pkgfiles_all = {
"/bin": "APKFILE",
"/bin/busybox": "APKFILE",
"/bin/sh": "APKFILE",
"/dev": "APKFILE",
"/dev/pts": "APKFILE",
"/dev/shm": "APKFILE",
"/etc": "APKFILE",
"/etc/apk": "APKFILE",
"/etc/apk/keys": "APKFILE",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-4a6a0840.rsa.pub": "APKFILE",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-5243ef4b.rsa.pub": "APKFILE",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-5261cecb.rsa.pub": "APKFILE",
"/etc/apk/protected_paths.d": "APKFILE",
"/var/local": "APKFILE",
"/var/lock": "APKFILE",
"/var/lock/subsys": "APKFILE",
"/var/log": "APKFILE",
"/var/mail": "APKFILE",
"/var/opt": "APKFILE",
"/var/run": "APKFILE",
"/var/spool": "APKFILE",
"/var/spool/cron": "APKFILE",
"/var/spool/cron/crontabs": "APKFILE",
"/var/spool/mail": "APKFILE",
"/var/tmp": "APKFILE",
}
pkgs_all = {
"alpine-baselayout": "3.2.0-r6",
"alpine-keys": "2.2-r0",
"apk-tools": "2.10.5-r1",
"busybox": "1.31.1-r16",
"ca-certificates-bundle": "20191127-r2",
"libc-utils": "0.7.2-r3",
"libcrypto1.1": "1.1.1g-r0",
"libssl1.1": "1.1.1g-r0",
"libtls-standalone": "2.9.1-r1",
"musl": "1.1.24-r8",
"musl-utils": "1.1.24-r8",
"scanelf": "1.2.6-r0",
"ssl_client": "1.31.1-r16",
"zlib": "1.2.11-r3",
}
pkgs_allinfo = {
"alpine-baselayout": {
"version": "3.2.0",
"sourcepkg": "alpine-baselayout",
"release": "r6",
"origin": "Natanael Copa <ncopa@alpinelinux.org>",
"arch": "x86_64",
"license": "GPL-2.0-only",
"size": "409600",
"type": "APKG",
"name": "alpine-baselayout",
"cpes": [
"cpe:2.3:a:alpine-baselayout:alpine-baselayout:3.2.0-r6:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine_baselayout:alpine-baselayout:3.2.0-r6:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine-baselayout:alpine_baselayout:3.2.0-r6:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine_baselayout:alpine_baselayout:3.2.0-r6:*:*:*:*:*:*:*",
"cpe:2.3:a:*:alpine-baselayout:3.2.0-r6:*:*:*:*:*:*:*",
"cpe:2.3:a:*:alpine_baselayout:3.2.0-r6:*:*:*:*:*:*:*",
],
"files": [
"/dev",
"/dev/pts",
"/dev/shm",
"/etc/fstab",
"/etc/group",
"/etc/hostname",
"/etc/hosts",
"/etc/inittab",
"/etc/modules",
"/etc/motd",
"/etc/mtab",
"/etc/passwd",
"/etc/profile",
"/etc/protocols",
"/etc/services",
"/etc/shadow",
"/etc/shells",
"/etc/sysctl.conf",
"/etc",
"/etc/apk",
"/etc/conf.d",
"/etc/crontabs/root",
"/etc/crontabs",
"/etc/init.d",
"/etc/modprobe.d/aliases.conf",
"/etc/modprobe.d/blacklist.conf",
"/etc/modprobe.d/i386.conf",
"/etc/modprobe.d/kms.conf",
"/etc/modprobe.d",
"/etc/modules-load.d",
"/etc/network",
"/etc/network/if-down.d",
"/etc/network/if-post-down.d",
"/etc/network/if-pre-up.d",
"/etc/network/if-up.d",
"/etc/opt",
"/etc/periodic",
"/etc/periodic/15min",
"/etc/periodic/daily",
"/etc/periodic/hourly",
"/etc/periodic/monthly",
"/etc/periodic/weekly",
"/etc/profile.d/color_prompt",
"/etc/profile.d/locale",
"/etc/profile.d",
"/etc/sysctl.d",
"/home",
"/lib",
"/lib/firmware",
"/lib/mdev",
"/lib/modules-load.d",
"/lib/sysctl.d/00-alpine.conf",
"/lib/sysctl.d",
"/media",
"/media/cdrom",
"/media/floppy",
"/media/usb",
"/mnt",
"/opt",
"/proc",
"/root",
"/run",
"/sbin/mkmntdirs",
"/sbin",
"/srv",
"/sys",
"/tmp",
"/usr",
"/usr/lib",
"/usr/lib/modules-load.d",
"/usr/local",
"/usr/local/bin",
"/usr/local/lib",
"/usr/local/share",
"/usr/sbin",
"/usr/share",
"/usr/share/man",
"/usr/share/misc",
"/var/run",
"/var",
"/var/cache",
"/var/cache/misc",
"/var/empty",
"/var/lib",
"/var/lib/misc",
"/var/local",
"/var/lock",
"/var/lock/subsys",
"/var/log",
"/var/mail",
"/var/opt",
"/var/spool/mail",
"/var/spool",
"/var/spool/cron/crontabs",
"/var/spool/cron",
"/var/tmp",
],
},
"alpine-keys": {
"version": "2.2",
"sourcepkg": "alpine-keys",
"release": "r0",
"origin": "Natanael Copa <ncopa@alpinelinux.org>",
"arch": "x86_64",
"license": "MIT",
"size": "106496",
"type": "APKG",
"name": "alpine-keys",
"cpes": [
"cpe:2.3:a:alpine-keys:alpine-keys:2.2-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine_keys:alpine-keys:2.2-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine-keys:alpine_keys:2.2-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:alpine_keys:alpine_keys:2.2-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:*:alpine-keys:2.2-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:*:alpine_keys:2.2-r0:*:*:*:*:*:*:*",
],
"files": [
"/etc",
"/etc/apk",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-4a6a0840.rsa.pub",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-5243ef4b.rsa.pub",
"/etc/apk/keys/alpine-devel@lists.alpinelinux.org-5261cecb.rsa.pub",
"/etc/apk/keys",
"/usr",
"/usr/share",
"/usr/share/apk",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-4a6a0840.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-5243ef4b.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-524d27bb.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-5261cecb.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-58199dcc.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-58cbb476.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-58e4f17d.rsa.pub",
"/usr/share/apk/keys/alpine-devel@lists.alpinelinux.org-5e69ca50.rsa.pub",
"/usr/share/apk/keys",
"/usr/share/apk/keys/aarch64/alpine-devel@lists.alpinelinux.org-58199dcc.rsa.pub",
"/usr/share/apk/keys/aarch64",
"/usr/share/apk/keys/armhf/alpine-devel@lists.alpinelinux.org-524d27bb.rsa.pub",
"/usr/share/apk/keys/armhf",
"/usr/share/apk/keys/mips64/alpine-devel@lists.alpinelinux.org-5e69ca50.rsa.pub",
"/usr/share/apk/keys/mips64",
"/usr/share/apk/keys/ppc64le/alpine-devel@lists.alpinelinux.org-58cbb476.rsa.pub",
"/usr/share/apk/keys/ppc64le",
"/usr/share/apk/keys/s390x/alpine-devel@lists.alpinelinux.org-58e4f17d.rsa.pub",
"/usr/share/apk/keys/s390x",
"/usr/share/apk/keys/x86/alpine-devel@lists.alpinelinux.org-4a6a0840.rsa.pub",
"/usr/share/apk/keys/x86/alpine-devel@lists.alpinelinux.org-5243ef4b.rsa.pub",
"/usr/share/apk/keys/x86",
"/usr/share/apk/keys/x86_64/alpine-devel@lists.alpinelinux.org-4a6a0840.rsa.pub",
"/usr/share/apk/keys/x86_64/alpine-devel@lists.alpinelinux.org-5261cecb.rsa.pub",
"/usr/share/apk/keys/x86_64",
],
},
"busybox": {
"version": "1.31.1",
"sourcepkg": "busybox",
"release": "r16",
"origin": "Natanael Copa <ncopa@alpinelinux.org>",
"arch": "x86_64",
"license": "GPL-2.0-only",
"size": "962560",
"type": "APKG",
"name": "busybox",
"cpes": [
"cpe:2.3:a:busybox:busybox:1.31.1-r16:*:*:*:*:*:*:*",
"cpe:2.3:a:*:busybox:1.31.1-r16:*:*:*:*:*:*:*",
],
"files": [
"/bin/busybox",
"/bin/sh",
"/bin",
"/etc/securetty",
"/etc/udhcpd.conf",
"/etc",
"/etc/logrotate.d/acpid",
"/etc/logrotate.d",
"/etc/network",
"/etc/network/if-down.d",
"/etc/network/if-post-down.d",
"/etc/network/if-post-up.d",
"/etc/network/if-pre-down.d",
"/etc/network/if-pre-up.d",
"/etc/network/if-up.d/dad",
"/etc/network/if-up.d",
"/sbin",
"/tmp",
"/usr",
"/usr/sbin",
"/usr/share",
"/usr/share/udhcpc/default.script",
"/usr/share/udhcpc",
"/var",
"/var/cache",
"/var/cache/misc",
"/var/lib",
"/var/lib/udhcpd",
],
},
"ca-certificates-bundle": {
"version": "20191127",
"sourcepkg": "ca-certificates",
"release": "r2",
"origin": "Natanael Copa <ncopa@alpinelinux.org>",
"arch": "x86_64",
"license": "MPL-2.0 GPL-2.0-or-later",
"size": "233472",
"type": "APKG",
"name": "ca-certificates-bundle",
"cpes": [
"cpe:2.3:a:ca-certificates-bundle:ca-certificates-bundle:20191127-r2:*:*:*:*:*:*:*",
"cpe:2.3:a:ca_certificates_bundle:ca-certificates-bundle:20191127-r2:*:*:*:*:*:*:*",
"cpe:2.3:a:ca-certificates-bundle:ca_certificates_bundle:20191127-r2:*:*:*:*:*:*:*",
"cpe:2.3:a:ca_certificates_bundle:ca_certificates_bundle:20191127-r2:*:*:*:*:*:*:*",
"cpe:2.3:a:*:ca-certificates-bundle:20191127-r2:*:*:*:*:*:*:*",
"cpe:2.3:a:*:ca_certificates_bundle:20191127-r2:*:*:*:*:*:*:*",
],
"files": [
"/etc",
"/etc/ssl/cert.pem",
"/etc/ssl",
"/etc/ssl/certs/ca-certificates.crt",
"/etc/ssl/certs",
],
},
"libcrypto1.1": {
"version": "1.1.1g",
"sourcepkg": "openssl",
"release": "r0",
"origin": "Timo Teras <timo.teras@iki.fi>",
"arch": "x86_64",
"license": "OpenSSL",
"size": "2760704",
"type": "APKG",
"name": "libcrypto1.1",
"cpes": [
"cpe:2.3:a:libcrypto1.1:libcrypto1.1:1.1.1g-r0:*:*:*:*:*:*:*",
"cpe:2.3:a:*:libcrypto1.1:1.1.1g-r0:*:*:*:*:*:*:*",
],
"files": [
"/etc",
"/etc/ssl/ct_log_list.cnf",
"/etc/ssl/ct_log_list.cnf.dist",
"/etc/ssl/openssl.cnf",
"/etc/ssl/openssl.cnf.dist",
"/etc/ssl",
"/etc/ssl/certs",
"/etc/ssl/misc/CA.pl",
"/etc/ssl/misc/tsget",
"/etc/ssl/misc/tsget.pl",
"/etc/ssl/misc",
"/etc/ssl/private",
"/lib/libcrypto.so.1.1",
"/lib",
"/usr",
"/usr/lib/libcrypto.so.1.1",
"/usr/lib",
"/usr/lib/engines-1.1/afalg.so",
"/usr/lib/engines-1.1/capi.so",
"/usr/lib/engines-1.1/padlock.so",
"/usr/lib/engines-1.1",
],
},
"libtls-standalone": {
"version": "2.9.1",
"sourcepkg": "libtls-standalone",
"release": "r1",
"origin": "N/A",
"arch": "x86_64",
"license": "ISC",
"size": "110592",
"type": "APKG",
"name": "libtls-standalone",
"cpes": [
"cpe:2.3:a:libtls-standalone:libtls-standalone:2.9.1-r1:*:*:*:*:*:*:*",
"cpe:2.3:a:libtls_standalone:libtls-standalone:2.9.1-r1:*:*:*:*:*:*:*",
"cpe:2.3:a:libtls-standalone:libtls_standalone:2.9.1-r1:*:*:*:*:*:*:*",
"cpe:2.3:a:libtls_standalone:libtls_standalone:2.9.1-r1:*:*:*:*:*:*:*",
"cpe:2.3:a:*:libtls-standalone:2.9.1-r1:*:*:*:*:*:*:*",
"cpe:2.3:a:*:libtls_standalone:2.9.1-r1:*:*:*:*:*:*:*",
],
"files": [
"/usr",
"/usr/lib/libtls-standalone.so.1",
"/usr/lib/libtls-standalone.so.1.0.0",
"/usr/lib",
],
},
"musl": {
"version": "1.1.24",
"sourcepkg": "musl",
"release": "r8",
"origin": "Timo Teräs <timo.teras@iki.fi>",
"arch": "x86_64",
"license": "MIT",
"size": "614400",
"type": "APKG",
"name": "musl",
"cpes": [
"cpe:2.3:a:musl:musl:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:*:musl:1.1.24-r8:*:*:*:*:*:*:*",
],
"files": ["/lib/ld-musl-x86_64.so.1", "/lib/libc.musl-x86_64.so.1", "/lib"],
},
"musl-utils": {
"version": "1.1.24",
"sourcepkg": "musl",
"release": "r8",
"origin": "Timo Teräs <timo.teras@iki.fi>",
"arch": "x86_64",
"license": "MIT BSD GPL2+",
"size": "151552",
"type": "APKG",
"name": "musl-utils",
"cpes": [
"cpe:2.3:a:musl-utils:musl-utils:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:musl_utils:musl-utils:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:musl-utils:musl_utils:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:musl_utils:musl_utils:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:*:musl-utils:1.1.24-r8:*:*:*:*:*:*:*",
"cpe:2.3:a:*:musl_utils:1.1.24-r8:*:*:*:*:*:*:*",
],
"files": [
"/sbin/ldconfig",
"/sbin",
"/usr",
"/usr/bin/getconf",
"/usr/bin/getent",
"/usr/bin/iconv",
"/usr/bin/ldd",
"/usr/bin",
],
},
"zlib": {
"version": "1.2.11",
"sourcepkg": "zlib",
"release": "r3",
"origin": "Natanael Copa <ncopa@alpinelinux.org>",
"arch": "x86_64",
"license": "Zlib",
"size": "110592",
"type": "APKG",
"name": "zlib",
"cpes": [
"cpe:2.3:a:zlib:zlib:1.2.11-r3:*:*:*:*:*:*:*",
"cpe:2.3:a:*:zlib:1.2.11-r3:*:*:*:*:*:*:*",
],
"files": ["/lib/libz.so.1", "/lib/libz.so.1.2.11", "/lib"],
},
}
pkgs_plus_source_all = {
"alpine-baselayout": "3.2.0-r6",
"alpine-keys": "2.2-r0",
"apk-tools": "2.10.5-r1",
"busybox": "1.31.1-r16",
"ca-certificates": "20191127-r2",
"ca-certificates-bundle": "20191127-r2",
"libc-dev": "0.7.2-r3",
"libc-utils": "0.7.2-r3",
"libcrypto1.1": "1.1.1g-r0",
"libssl1.1": "1.1.1g-r0",
"libtls-standalone": "2.9.1-r1",
"musl": "1.1.24-r8",
"musl-utils": "1.1.24-r8",
"openssl": "1.1.1g-r0",
"pax-utils": "1.2.6-r0",
"scanelf": "1.2.6-r0",
"ssl_client": "1.31.1-r16",
"zlib": "1.2.11-r3",
}
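# Illustrative sketch (not part of the original fixture): the CPE permutation
# pattern visible in the "cpes" lists above -- dash/underscore spellings of the
# package name crossed over the vendor and product fields, plus a wildcard
# vendor, with the version rendered as "<version>-<release>". This is only a
# reading of the data; the tool that actually generated these strings is not
# shown here, and the ordering of entries may differ.
def _example_cpe_permutations(name, version, release):
    spellings = sorted({name, name.replace("-", "_")})
    cpes = []
    for vendor in spellings + ["*"]:
        for product in spellings:
            cpes.append(
                "cpe:2.3:a:{}:{}:{}-{}:*:*:*:*:*:*:*".format(
                    vendor, product, version, release
                )
            )
    return cpes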
| 35.06422
| 96
| 0.454343
|
5feb570a61e4e3a558f6f60a614d7663b8d175d3
| 12,944
|
py
|
Python
|
model-optimizer/extensions/front/caffe/conv_ext_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/caffe/conv_ext_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/extensions/front/caffe/conv_ext_test.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-28T17:30:46.000Z
|
2021-07-28T17:30:46.000Z
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
import numpy as np
from extensions.front.caffe.conv_ext import ConvFrontExtractor, DeconvFrontExtractor, conv_create_attrs, conv_set_params
from mo.front.caffe.extractors.utils import get_list_from_container
from mo.utils.error import Error
from mo.utils.unittest.extractors import PB, FakeParam, FakeMultiParam
class FakeConvProtoLayer:
def __init__(self, val):
self.convolution_param = val
self.bottom = [0]
class TestConvShapesParsing(unittest.TestCase):
def test_conv_no_pb_no_ml(self):
node = PB({'pb': None})
self.assertRaises(Error, ConvFrontExtractor.extract, node)
@patch('extensions.front.caffe.conv_ext.weights_biases')
@patch('extensions.front.caffe.conv_ext.layout_attrs')
def test_conv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock):
weights_biases_mock.return_value = {}
layout_attrs_mock.return_value = {}
params = {
'pad': 10,
'kernel_size': 11,
'stride': 12,
'dilation': 13,
'group': 14,
'num_output': 15,
'bias_term': True
}
node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
ConvFrontExtractor.extract(node)
res = node
exp_res = {
'op': 'Conv2D',
'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
'stride': np.array([1, 1, 12, 12]),
'kernel_spatial': np.array([11, 11]),
'dilation': np.array([1, 1, 13, 13]),
'group': 14,
'bias_addable': True,
'bias_term': True,
}
self.assertTrue(weights_biases_mock.called)
self.assertTrue(layout_attrs_mock.called)
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
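    # Illustrative helper (not part of the original tests): the per-key
    # comparison loop above repeats in several tests and could be factored
    # out like this -- array-valued attributes go through np.testing, the
    # rest through plain assertEqual. Name and signature are made up.
    def _assert_node_attrs(self, node, expected, array_keys):
        for key, value in expected.items():
            if key in array_keys:
                np.testing.assert_equal(node[key], value)
            else:
                self.assertEqual(node[key], value)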
@patch('extensions.front.caffe.conv_ext.weights_biases')
@patch('extensions.front.caffe.conv_ext.layout_attrs')
def test_conv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock):
weights_biases_mock.return_value = {}
layout_attrs_mock.return_value = {}
params = {
'pad': None,
'kernel_size': None,
'stride': None,
'dilation': None,
'group': 14,
'num_output': 15,
'bias_term': True,
'pad_w': 3,
'pad_h': 4,
'kernel_w': 5,
'kernel_h': 6,
'stride_h': 3,
'stride_w': 2,
}
node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
ConvFrontExtractor.extract(node)
res = node
exp_res = {
'op': 'Conv2D',
'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]),
'pad_spatial_shape': np.array([[4, 4], [3, 3]]),
'stride': np.array([1, 1, 3, 2]),
'kernel_spatial': np.array([6, 5]),
'dilation': np.array([1, 1, 1, 1]),
'group': 14,
'bias_addable': True,
'bias_term': True,
}
self.assertTrue(weights_biases_mock.called)
self.assertTrue(layout_attrs_mock.called)
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
def test_attrs(self):
params = {
'type_str': 'Conv2D',
'padding': [10, 10],
'stride': [12, 12],
'kernel': [11, 11],
'dilate': [13, 13],
'group': 14,
'output': 13,
'bias_term': True
}
res = conv_create_attrs(params)
exp_res = {
'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
'stride': np.array([1, 1, 12, 12]),
'kernel_spatial': np.array([11, 11]),
'dilation': np.array([1, 1, 13, 13]),
'group': 14,
'bias_addable': True,
'bias_term': True,
'output_spatial_shape': None,
'output_shape': None,
'output': 13,
}
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
def test_get_list_from_container_no_existing_param(self):
res = get_list_from_container(FakeParam("p", "1"), 'prop', int)
self.assertEqual(res, [])
def test_get_list_from_container_no_param(self):
res = get_list_from_container(None, 'prop', int)
self.assertEqual(res, [])
def test_get_list_from_container_simple_type_match(self):
res = get_list_from_container(FakeParam('prop', 10), 'prop', int)
self.assertEqual(res, [10])
def test_get_list_from_container_list_match(self):
res = get_list_from_container(FakeParam('prop', [10, 11]), 'prop', int)
self.assertEqual(res, [10, 11])
def test_get_list_from_container_list_match_empty(self):
res = get_list_from_container(FakeParam('prop', []), 'prop', int)
self.assertEqual(res, [])
def test_params_creation(self):
params = {
'pad': None,
'kernel_size': None,
'stride': None,
'dilation': None,
'group': 14,
'num_output': 15,
'bias_term': True,
'pad_w': 3,
'pad_h': 4,
'kernel_w': 5,
'kernel_h': 6,
'stride_h': 3,
'stride_w': 2,
}
exp_res = {
'padding': [3, 4],
'stride': [2, 3],
'kernel': [5, 6],
'dilate': [1, 1],
'group': 14,
'output': 15
}
res = conv_set_params(FakeConvProtoLayer(FakeMultiParam(params)).convolution_param, 'Conv2D')
for key in exp_res.keys():
            if key in ('padding', 'stride', 'kernel', 'dilate'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
class TestDeconvShapesParsing(unittest.TestCase):
def test_deconv_no_pb_no_ml(self):
node = PB({'pb': None})
self.assertRaises(Error, DeconvFrontExtractor.extract, node)
@patch('extensions.front.caffe.conv_ext.weights_biases')
@patch('extensions.front.caffe.conv_ext.layout_attrs')
def test_conv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock):
weights_biases_mock.return_value = {}
layout_attrs_mock.return_value = {}
params = {
'pad': 10,
'kernel_size': 11,
'stride': 12,
'dilation': 13,
'group': 14,
'num_output': 15,
'bias_term': True
}
node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
        DeconvFrontExtractor.extract(node)
        res = node
exp_res = {
'op': 'Deconv2D',
'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
'stride': np.array([1, 1, 12, 12]),
'kernel_spatial': np.array([11, 11]),
'dilation': np.array([1, 1, 13, 13]),
'group': 14,
'bias_addable': True,
}
self.assertTrue(weights_biases_mock.called)
self.assertTrue(layout_attrs_mock.called)
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
@patch('extensions.front.caffe.conv_ext.weights_biases')
@patch('extensions.front.caffe.conv_ext.layout_attrs')
def test_conv_ext_false_bias_term(self, weights_biases_mock, layout_attrs_mock):
weights_biases_mock.return_value = {}
layout_attrs_mock.return_value = {}
params = {
'pad': 10,
'kernel_size': 11,
'stride': 12,
'dilation': 13,
'group': 14,
'num_output': 15,
'bias_term': False
}
node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
        DeconvFrontExtractor.extract(node)
        res = node
exp_res = {
'op': 'Deconv2D',
'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
'stride': np.array([1, 1, 12, 12]),
'kernel_spatial': np.array([11, 11]),
'dilation': np.array([1, 1, 13, 13]),
'group': 14,
'bias_addable': True,
'bias_term': False,
}
self.assertTrue(weights_biases_mock.called)
self.assertTrue(layout_attrs_mock.called)
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'bias_term'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
@patch('extensions.front.caffe.conv_ext.weights_biases')
@patch('extensions.front.caffe.conv_ext.layout_attrs')
def test_conv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock):
weights_biases_mock.return_value = {}
layout_attrs_mock.return_value = {}
params = {
'pad': None,
'kernel_size': None,
'stride': None,
'dilation': None,
'group': 14,
'num_output': 15,
'bias_term': True,
'pad_w': 3,
'pad_h': 4,
'kernel_w': 5,
'kernel_h': 6,
'stride_h': 3,
'stride_w': 2,
}
node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
        DeconvFrontExtractor.extract(node)
        res = node
exp_res = {
'op': 'Deconv2D',
'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]),
'pad_spatial_shape': np.array([[4, 4], [3, 3]]),
'stride': np.array([1, 1, 3, 2]),
'kernel_spatial': np.array([6, 5]),
'dilation': np.array([1, 1, 1, 1]),
'group': 14,
'bias_addable': True,
}
self.assertTrue(weights_biases_mock.called)
self.assertTrue(layout_attrs_mock.called)
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
def test_attrs(self):
params = {
'type_str': 'Deconv2D',
'padding': [10, 10],
'stride': [12, 12],
'kernel': [11, 11],
'dilate': [13, 13],
'group': 14,
'output': 13,
'bias_term': True
}
res = conv_create_attrs(params)
exp_res = {
'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
'stride': np.array([1, 1, 12, 12]),
'kernel_spatial': np.array([11, 11]),
'dilation': np.array([1, 1, 13, 13]),
'group': 14,
'bias_addable': True,
'output_spatial_shape': None,
'output_shape': None,
'output': 13,
}
for key in exp_res.keys():
if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
np.testing.assert_equal(res[key], exp_res[key])
else:
self.assertEqual(res[key], exp_res[key])
| 36.982857
| 120
| 0.541023
|
cdfff9a6e3a8e5c1cefd321143fc2559e2d537f1
| 116
|
py
|
Python
|
Tests/Methods/Import/__init__.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | 2
|
2020-06-29T13:48:37.000Z
|
2021-06-15T07:34:05.000Z
|
Tests/Methods/Import/__init__.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Import/__init__.py
|
Superomeg4/pyleecan
|
2b695b5f39e77475a07aa0ea89489fb0a9659337
|
[
"Apache-2.0"
] | null | null | null |
from pyleecan.Tests import TEST_DIR
from os.path import join
test_file = join(TEST_DIR, "Data", "TestXlsLoad.xls")
| 23.2
| 53
| 0.775862
|
ef3cb7f3b9870012c29aa95bfc683269e2bee6be
| 3,997
|
py
|
Python
|
config/pk_avg_2d_desi_choices.py
|
nam8/Barry
|
5deb15d71d620079aa46ced73e23b1da8b9c4e57
|
[
"MIT"
] | 13
|
2019-07-29T20:39:20.000Z
|
2021-09-26T09:20:52.000Z
|
config/pk_avg_2d_desi_choices.py
|
nam8/Barry
|
5deb15d71d620079aa46ced73e23b1da8b9c4e57
|
[
"MIT"
] | 1
|
2021-02-11T10:54:58.000Z
|
2021-02-11T10:54:58.000Z
|
config/pk_avg_2d_desi_choices.py
|
nam8/Barry
|
5deb15d71d620079aa46ced73e23b1da8b9c4e57
|
[
"MIT"
] | 7
|
2019-08-26T04:54:00.000Z
|
2022-01-20T14:47:47.000Z
|
import sys
import os
import pandas as pd
from scipy.interpolate import interp1d
from scipy.stats import norm
import numpy as np
sys.path.append("..")
from barry.datasets.dataset_power_spectrum import PowerSpectrum_DESIMockChallenge0_Z01
from barry.cosmology.camb_generator import getCambGenerator
from barry.postprocessing import BAOExtractor
from barry.config import setup
from barry.models import PowerSeo2016, PowerBeutler2017, PowerDing2018
from barry.samplers import DynestySampler
from barry.fitter import Fitter
from barry.models.model import Correction
if __name__ == "__main__":
pfn, dir_name, file = setup(__file__)
c = getCambGenerator()
r_s = c.get_data()["r_s"]
p = BAOExtractor(r_s)
sampler = DynestySampler(temp_dir=dir_name, nlive=1000)
fitter = Fitter(dir_name)
cs = ["#262232", "#116A71", "#48AB75", "#D1E05B"]
for r in [False]:
t = "Recon" if r else "Prerecon"
# Changing fitting range and sigma values to match Hee-Jong
d = PowerSpectrum_DESIMockChallenge0_Z01(recon=r, isotropic=False, realisation="data", min_k=0.001, max_k=0.30)
model = PowerBeutler2017(recon=r, isotropic=False, correction=Correction.NONE)
model.set_default("sigma_nl_par", 6.2)
model.set_default("sigma_nl_perp", 2.9)
model.set_default("sigma_s", 0.0)
model.set_fix_params(["om", "sigma_nl_par", "sigma_nl_perp", "sigma_s"])
fitter.add_model_and_dataset(model, d, name=f"Beutler 2017 New $\\Sigma_{{nl}}$ {t}")
# Now change linear and smooth spectra to Hee-Jong's inputs
pklin = np.array(pd.read_csv("../barry/data/desi_mock_challenge_0/mylinearmatterpkL900.dat", delim_whitespace=True, header=None))
pksmooth = np.array(pd.read_csv("../barry/data/desi_mock_challenge_0/Psh_mylinearmatterpkL900.dat", delim_whitespace=True, header=None, skiprows=2))
model2 = PowerBeutler2017(recon=False, isotropic=False, correction=Correction.NONE)
model2.set_default("sigma_nl_par", 6.2)
model2.set_default("sigma_nl_perp", 2.9)
model2.set_default("sigma_s", 0.0)
model2.set_fix_params(["om", "sigma_nl_par", "sigma_nl_perp", "sigma_s"])
model2.set_data(d.get_data())
model2.kvals = pklin[:, 0]
model2.pkratio = pklin[:, 1] / pksmooth[:, 1] - 1.0
model2.pksmooth = pksmooth[:, 1]
fitter.add_model_and_dataset(model2, d, name=f"Beutler 2017 New Template$ {t}")
fitter.set_sampler(sampler)
fitter.set_num_walkers(10)
fitter.fit(file)
if fitter.should_plot():
import logging
logging.info("Creating plots")
from chainconsumer import ChainConsumer
c = ChainConsumer()
for posterior, weight, chain, evidence, model, data, extra in fitter.load():
chain_conv = chain
chain_conv[:, 0], chain_conv[:, 2] = model.get_alphas(chain[:, 0], chain[:, 2])
parameters = model.get_labels()
parameters[0] = r"$\alpha_{par}$"
parameters[2] = r"$\alpha_{perp}$"
c.add_chain(chain, weights=weight, parameters=parameters, **extra)
max_post = posterior.argmax()
ps = chain_conv[max_post, :]
for l, p in zip(parameters, ps):
print(l, p)
c.configure(shade=True, bins=20, legend_artists=True, max_ticks=4)
# truth = {"$\\Omega_m$": 0.3121, "$\\alpha$": 1.0, "$\\epsilon$": 0}
truth = {"$\\Omega_m$": 0.3121, "$\\alpha_{par}$": 1.0, "$\\alpha_{perp}$": 1.0}
c.plotter.plot_summary(filename=[pfn + "_summary.png", pfn + "_summary.pdf"], errorbar=True, truth=truth)
c.plotter.plot(filename=[pfn + "_contour.png", pfn + "_contour.pdf"], truth=truth, parameters=3)
c.plotter.plot(filename=[pfn + "_contour2.png", pfn + "_contour.pdf"], truth=truth, parameters=10)
c.plotter.plot_walks(filename=pfn + "_walks.png", truth=truth)
c.analysis.get_latex_table(filename=pfn + "_params.txt")
| 44.411111
| 156
| 0.665499
|
ffc5bf66143ae0317ec7dddfe539278848efd933
| 1,130
|
py
|
Python
|
setup.py
|
adefossez/prioheap
|
b60d673ad0356bd92ecddf27b3e67e33c916aebb
|
[
"Unlicense"
] | 1
|
2020-12-05T17:48:23.000Z
|
2020-12-05T17:48:23.000Z
|
setup.py
|
adefossez/prioheap
|
b60d673ad0356bd92ecddf27b3e67e33c916aebb
|
[
"Unlicense"
] | null | null | null |
setup.py
|
adefossez/prioheap
|
b60d673ad0356bd92ecddf27b3e67e33c916aebb
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# Inspired from https://github.com/kennethreitz/setup.py
from pathlib import Path
from setuptools import setup
NAME = 'prioheap'
DESCRIPTION = 'Priority queue with a sane API'
URL = 'https://github.com/adefossez/prioheap'
EMAIL = 'alexandre.defossez@gmail.com'
AUTHOR = 'Alexandre Défossez'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = "0.0.2"
HERE = Path(__file__).parent
REQUIRED = []
try:
with open(HERE / "README.md", encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
py_modules=['prioheap'],
install_requires=REQUIRED,
include_package_data=True,
license='Unlicense license',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Topic :: Scientific/Engineering'
],
)
| 24.565217
| 76
| 0.70177
|
3cd4f23599d42d9b51adafa4ab3f2c7323301382
| 3,649
|
py
|
Python
|
efficiency_statics/modified_package/torch_stat/compute_flops.py
|
MendelXu/ANN
|
f4eabeb27dbba5c9bdcf83d03776bffa34995666
|
[
"Apache-2.0"
] | 308
|
2019-08-11T02:12:37.000Z
|
2022-03-30T07:20:41.000Z
|
efficiency_statics/modified_package/torch_stat/compute_flops.py
|
pinglmlcv/ANN
|
f4eabeb27dbba5c9bdcf83d03776bffa34995666
|
[
"Apache-2.0"
] | 19
|
2019-08-22T04:57:33.000Z
|
2022-03-27T10:59:23.000Z
|
efficiency_statics/modified_package/torch_stat/compute_flops.py
|
pinglmlcv/ANN
|
f4eabeb27dbba5c9bdcf83d03776bffa34995666
|
[
"Apache-2.0"
] | 64
|
2019-08-17T07:09:50.000Z
|
2022-03-27T11:23:39.000Z
|
import torch.nn as nn
import torch
import numpy as np
def compute_flops(module, inp, out):
if isinstance(module, nn.Conv2d):
return compute_Conv2d_flops(module, inp, out)
elif isinstance(module, nn.BatchNorm2d):
return compute_BatchNorm2d_flops(module, inp, out)
elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
return compute_Pool2d_flops(module, inp, out)
elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
return compute_ReLU_flops(module, inp, out)
elif isinstance(module, nn.Upsample):
return compute_Upsample_flops(module, inp, out)
elif isinstance(module, nn.Linear):
return compute_Linear_flops(module, inp, out)
    elif type(module).__name__ == 'MatMul':
        return compute_matmul_flops(module, inp, out)
    elif isinstance(module, nn.AdaptiveAvgPool2d):
        return compute_adap_avgpool(module, inp, out)
    else:
        print(f"[Flops]: {type(module).__name__} is not supported!")
        return 0
def compute_Conv2d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * k_w * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return total_flops
def compute_BatchNorm2d_flops(module, inp, out):
assert isinstance(module, nn.BatchNorm2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
in_c, in_h, in_w = inp.size()[1:]
batch_flops = np.prod(inp.shape)
if module.affine:
batch_flops *= 2
return batch_flops
def compute_ReLU_flops(module, inp, out):
assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))
batch_size = inp.size()[0]
active_elements_count = batch_size
for s in inp.size()[1:]:
active_elements_count *= s
return active_elements_count
def compute_Pool2d_flops(module, inp, out):
assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
return np.prod(inp.shape)
def compute_Linear_flops(module, inp, out):
assert isinstance(module, nn.Linear)
assert len(inp.size()) == 2 and len(out.size()) == 2
batch_size = inp.size()[0]
return batch_size * inp.size()[1] * out.size()[1]
def compute_Upsample_flops(module, inp, out):
assert isinstance(module, nn.Upsample)
output_size = out[0]
batch_size = inp.size()[0]
output_elements_count = batch_size
for s in output_size.shape[1:]:
output_elements_count *= s
return output_elements_count
def compute_adap_avgpool(m, x, y):
kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()
total_add = torch.prod(kernel)
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
total_ops = kernel_ops * num_elements
return total_ops
def compute_matmul_flops(module, inp, out):
    # inp is expected to be a pair of 3-D tensors shaped (batch, l, m) and
    # (batch, m, n); count one multiply and one add per inner-product term.
    x, y = inp
    batch_size = x.size(0)
    _, l, m = x.size()
    _, _, n = y.size()
    return batch_size * 2 * l * m * n
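# Illustrative sanity check (not part of the original module): exercises the
# Conv2d branch on a small layer. Per output position there are
# 3*3*16*(32/groups) multiply-accumulates, 8*8 output positions, plus one
# bias add per output element. The layer sizes here are arbitrary.
if __name__ == '__main__':
    _conv = nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=True)
    _x = torch.randn(1, 16, 8, 8)
    _y = _conv(_x)
    _expected = 3 * 3 * 16 * 32 * 8 * 8 + 32 * 8 * 8
    assert compute_flops(_conv, _x, _y) == _expected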
| 32.580357
| 96
| 0.679638
|
d705d67c509c0b07fa068e7903002176ba749533
| 190
|
py
|
Python
|
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | 3
|
2020-12-15T02:40:43.000Z
|
2021-01-14T02:32:13.000Z
|
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | null | null | null |
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | 4
|
2021-01-07T05:41:38.000Z
|
2021-04-07T08:02:22.000Z
|
from at_speech.classifier.sklearn_lr import SLLRLiblinear, SLLRSag
from at_speech.classifier.cnn import CNNClassifier
from at_speech.classifier.thinresnet34_cls import ThinResnet34Classifier
| 63.333333
| 72
| 0.9
|
abd62581a48747855d6f8d2701f66fe4ce14c0da
| 5,110
|
py
|
Python
|
electrode/clients/lib/displays/display_gui_elements.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 1
|
2020-11-30T01:45:08.000Z
|
2020-11-30T01:45:08.000Z
|
electrode/clients/lib/displays/display_gui_elements.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 8
|
2021-02-23T00:18:12.000Z
|
2022-03-12T00:54:50.000Z
|
electrode/clients/lib/displays/display_gui_elements.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 1
|
2020-11-08T14:54:21.000Z
|
2020-11-08T14:54:21.000Z
|
import json
import time
import numpy as np
import os
import sys
from PyQt4 import QtGui, QtCore, Qt
from PyQt4.QtCore import pyqtSignal
from twisted.internet.defer import inlineCallbacks
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib import cm, colors, patches, collections
sys.path.append('./../../../')
from calibrations import *
ROD_LIMIT = DAC_LIMIT * 2000.
COLORMAP = 'RdBu'
SEP = os.path.sep
GEOMETRY = {
'ES' : {'width': 400},
}
ROD_COORDS = [
# Lower West Rod
{'center': (-3,-2.), 'rad': 0.5, 'name': 'LW'},
# Lower East Rod
{'center': (3,-2.), 'rad': 0.5, 'name': 'LE'},
# Upper West Rod
{'center': (-3,2.), 'rad': 0.5, 'name': 'UW'},
# Upper East Rod
{'center': (3,2.), 'rad': 0.5, 'name': 'UE'},
]
PLATE_COORDS = [
# Lower Plate
{'x0': (-6, -4), 'width': 12, 'height': 1, 'name': 'LP'},
# Upper Plate
{'x0': (-6, 3), 'width': 12, 'height': 1, 'name': 'UP'},
]
class ElectrodeSchematic(QtGui.QWidget):
def __init__(self):
super(ElectrodeSchematic, self).__init__()
self.populate()
self.setFixedWidth(GEOMETRY['ES']['width'])
self.setupColorbar(-1, 1)
def populate(self):
self.layout = QtGui.QHBoxLayout()
self.figure = Figure()
self.canvas = FigureCanvas(self.figure)
self.layout.addWidget(self.canvas)
self.setLayout(self.layout)
def setupColorbar(self, vmin, vmax):
norm = colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
self.mapper = cm.ScalarMappable(norm=norm, cmap=COLORMAP)
self.mapper.set_array(np.arange(vmin, vmax))
self.colorbar = self.figure.colorbar(self.mapper, orientation='vertical')
self.colorbar.set_label('Volts')
def redraw(self, values):
self.figure.clear()
self.ax = self.figure.gca()
ps = []
vals = []
for x in PLATE_COORDS:
ps.append(patches.Rectangle(x['x0'], x['width'], x['height'], ec='k'))
vals.append(values[x['name']])
for x in ROD_COORDS:
ps.append(patches.Circle(x['center'], x['rad'], ec='k'))
vals.append(values[x['name']])
vals = np.array(vals)
if np.min(vals) != np.max(vals):
vmin = np.min(vals)
vmax = np.max(vals)
else:
vmin = vals[0] - 1
vmax = vals[0] + 1
self.setupColorbar(vmin, vmax)
self.ax.axis('equal')
self.ax.axis('off')
collection = collections.PatchCollection(ps, cmap=COLORMAP, clim=(vmin, vmax), alpha=1)
collection.set_array(vals)
self.ax.add_collection(collection)
self.ax.set_xlim(-8, 8)
self.ax.set_ylim(-5, 5)
self.figure.tight_layout()
self.canvas.draw()
class FieldSlicesWindow(QtGui.QWidget):
def __init__(self, calculator):
super(FieldSlicesWindow, self).__init__()
self.calculator = calculator
self.populate()
def populate(self):
self.layout = QtGui.QHBoxLayout()
self.E = EWindow(self.calculator)
self.E.setToolTip("Field cuts, assuming CompShim has zeroed linear gradient")
self.U = UWindow(self.calculator)
self.U.setToolTip("Potential cuts, assuming CompShim has zeroed linear gradient")
self.layout.addWidget(self.E)
self.layout.addWidget(self.U)
self.setLayout(self.layout)
def update(self, ev):
self.E.updatePlot(ev)
self.U.updatePlot(ev)
class AbstractWindow(QtGui.QWidget):
def __init__(self, calculator):
super(AbstractWindow, self).__init__()
self.calculator = calculator
self.populate()
self.setupFigure()
def populate(self):
self.layout = QtGui.QHBoxLayout()
self.figure = Figure()
self.canvas = FigureCanvas(self.figure)
self.layout.addWidget(self.canvas)
self.setLayout(self.layout)
def setupFigure(self):
self.ax = self.figure.gca()
class EWindow(AbstractWindow):
xr = [-1, 1] # mm
yr = [-1, 1] # mm
def __init__(self, calculator):
super(EWindow, self).__init__(calculator)
def updatePlot(self, ev):
self.ax.clear()
x = np.arange(self.xr[0], self.xr[1], 0.01)
y = np.arange(self.yr[0], self.yr[1], 0.01)
self.ax.plot(x, self.calculator.E(ev)(x,0), 'y-', label='x')
self.ax.plot(y, self.calculator.E(ev)(0,y), 'g-', label='y')
self.ax.legend()
self.ax.set_xlabel('Position (mm)')
self.ax.set_ylabel('|E| (V/cm)')
self.figure.tight_layout()
self.canvas.draw()
class UWindow(AbstractWindow):
xr = [-100, 100] # um
yr = [-100, 100] # um
def __init__(self, calculator):
super(UWindow, self).__init__(calculator)
def updatePlot(self, ev):
self.ax.clear()
x = np.arange(self.xr[0], self.xr[1], 1.0)
y = np.arange(self.yr[0], self.yr[1], 1.0)
U0 = self.calculator.U(ev)(0,0)
self.ax.plot(x, self.calculator.U(ev)(x/1000.0,0) - U0, 'y-', label='x')
self.ax.plot(y, self.calculator.U(ev)(0,y/1000.0) - U0, 'g-', label='y')
self.ax.legend()
self.ax.set_xlabel(r'Position ($\mu$m)')
self.ax.set_ylabel(r'$U - U_0$ ($\mu$K)')
self.figure.tight_layout()
self.canvas.draw()
| 26.894737
| 90
| 0.651076
|
85671b3e156793095852946fcfa8a1fa56e561bf
| 2,752
|
py
|
Python
|
mep/people/tests/test_people_commands.py
|
making-books-ren-today/test_eval_3_shxco
|
5a6427abeb4aec1aa70c0d9a4b32d028012780c8
|
[
"Apache-2.0"
] | 3
|
2020-05-12T19:19:41.000Z
|
2021-04-07T13:56:32.000Z
|
mep/people/tests/test_people_commands.py
|
making-books-ren-today/test_eval_3_shxco
|
5a6427abeb4aec1aa70c0d9a4b32d028012780c8
|
[
"Apache-2.0"
] | 736
|
2017-06-21T16:24:42.000Z
|
2022-02-26T17:46:10.000Z
|
mep/people/tests/test_people_commands.py
|
making-books-ren-today/test_eval_3_shxco
|
5a6427abeb4aec1aa70c0d9a4b32d028012780c8
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from io import StringIO
from django.test import TestCase
from mep.accounts.models import Event
from mep.people.management.commands import export_members
from mep.people.models import Person
class TestExportMembers(TestCase):
fixtures = ['sample_people']
def setUp(self):
self.cmd = export_members.Command()
self.cmd.stdout = StringIO()
def test_get_queryset(self):
# queryset should only include library members
member = Person.objects.get(pk=189) # francisque gay, member
author = Person.objects.get(pk=7152) # aeschylus, non-member
qs = self.cmd.get_queryset()
assert member in qs
assert author not in qs
def test_get_object_data(self):
# fetch some example people from fixture & call get_object_data
gay = Person.objects.get(name='Francisque Gay')
hemingway = Person.objects.get(name='Ernest Hemingway')
gay_data = self.cmd.get_object_data(gay)
hemingway_data = self.cmd.get_object_data(hemingway)
# check some basic data
assert gay_data['name'] == 'Francisque Gay'
assert gay_data['gender'] == 'Male'
assert gay_data['birth_year'] == 1885
assert hemingway_data['sort_name'] == 'Hemingway, Ernest'
assert hemingway_data['death_year'] == 1961
assert 'title' not in hemingway_data # empty fields not present
# fixture has no events, so no years are set
assert hemingway_data['membership_years'] == []
# check nationalities
assert 'France' in gay_data['nationalities']
assert 'United States' in hemingway_data['nationalities']
# check viaf & wikipedia urls
assert hemingway_data['wikipedia_url'] == \
'https://en.wikipedia.org/wiki/Ernest_Hemingway'
assert gay_data['viaf_url'] == 'http://viaf.org/viaf/9857613'
# check addresses & coordinates
assert '3 Rue Garancière, Paris' in gay_data['addresses']
assert '48.85101, 2.33590' in gay_data['coordinates']
assert '75006' in gay_data['postal_codes']
assert 6 in gay_data['arrondissements']
assert gay_data['updated'] == gay.updated_at.isoformat()
assert hemingway_data['updated'] == hemingway.updated_at.isoformat()
# add events to check membership years
account = gay.account_set.first()
Event.objects.create(
account=account, start_date=datetime.date(1920, 5, 1),
end_date=datetime.date(1921, 2, 1))
Event.objects.create(
account=account, start_date=datetime.date(1935, 5, 1))
gay_data = self.cmd.get_object_data(gay)
assert gay_data['membership_years'] == [1920, 1921, 1935]
| 39.314286
| 76
| 0.662427
|
ab7434daee3f5d52f82bc375044d0ccce35ac6ab
| 7,368
|
py
|
Python
|
pytest_cases/case_funcs_new.py
|
chinghwayu/python-pytest-cases
|
a95f2a50c201a10c6a2aa2544bd1ea39aab23a47
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_cases/case_funcs_new.py
|
chinghwayu/python-pytest-cases
|
a95f2a50c201a10c6a2aa2544bd1ea39aab23a47
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_cases/case_funcs_new.py
|
chinghwayu/python-pytest-cases
|
a95f2a50c201a10c6a2aa2544bd1ea39aab23a47
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import copy
from decopatch import function_decorator, DECORATED
# try: # python 3.2+
# from functools import lru_cache as lru
# except ImportError:
# from functools32 import lru_cache as lru # noqa
try: # python 3.5+
from typing import Type, Callable, Union, Optional, Any, Tuple, Dict, Iterable, List, Set
except ImportError:
pass
from .common_mini_six import string_types
from .common_pytest import safe_isclass
CASE_FIELD = '_pytestcase'
class CaseInfo(object):
"""
Contains all information available about a case.
It is attached to a case function as an attribute
"""
__slots__ = ('id', 'marks', 'tags')
def __init__(self,
id=None, # type: str
marks=(), # type: Tuple[MarkDecorator]
tags=() # type: Tuple[Any]
):
self.id = id
self.marks = marks
self.tags = ()
self.add_tags(tags)
@classmethod
def get_from(cls, case_func, create=False, prefix_for_ids='case_'):
"""
Returns the CaseInfo associated with case_fun ; creates it and attaches it if needed and required.
If not present, a case id is automatically created from the function name based on the collection prefix.
:param case_func:
:param create:
:param prefix_for_ids:
:return:
"""
case_info = getattr(case_func, CASE_FIELD, None)
if create:
if case_info is None:
case_info = CaseInfo()
case_info.attach_to(case_func)
if case_info.id is None:
# default test id from function name
if case_func.__name__.startswith(prefix_for_ids):
case_info.id = case_func.__name__[len(prefix_for_ids):]
else:
case_info.id = case_func.__name__
return case_info
def attach_to(self,
case_func # type: Callable
):
"""attach this case_info to the given case function"""
setattr(case_func, CASE_FIELD, self)
def add_tags(self,
tags # type: Union[Any, Union[List, Set, Tuple]]
):
"""add the given tag or tags"""
if tags:
if isinstance(tags, string_types) or not isinstance(tags, (set, list, tuple)):
# a single tag, create a tuple around it
tags = (tags,)
self.tags += tuple(tags)
def matches_tag_query(self,
has_tag=None, # type: Union[str, Iterable[str]]
):
"""
Returns True if the case function with this case_info is selected by the query
:param has_tag:
:return:
"""
if has_tag is None:
return True
if not isinstance(has_tag, (tuple, list, set)):
has_tag = (has_tag,)
return all(t in self.tags for t in has_tag)
@classmethod
def copy_info(cls, from_case_func, to_case_func):
case_info = cls.get_from(from_case_func)
if case_info is not None:
cp = copy(case_info)
cp.attach_to(to_case_func)
def matches_tag_query(case_fun,
has_tag=None, # type: Union[str, Iterable[str]]
filter=None, # type: Union[Callable[[Iterable[Any]], bool], Iterable[Callable[[Iterable[Any]], bool]]] # noqa
):
"""
Returns True if the case function is selected by the query:
- if `has_tag` contains one or several tags, they should ALL be present in the tags
set on `case_fun` (`case_fun._pytestcase.tags`)
- if `filter` contains one or several filter callables, they are all called in sequence and the
case_fun is only selected if ALL of them return a True truth value
:param case_fun:
:param has_tag:
:param filter:
:return: True if the case_fun is selected by the query.
"""
selected = True
# query on tags
if has_tag is not None:
selected = selected and CaseInfo.get_from(case_fun).matches_tag_query(has_tag)
# filter function
if filter is not None:
if not isinstance(filter, (tuple, set, list)):
filter = (filter,)
for _filter in filter:
# break if already unselected
if not selected:
return selected
# try next filter
try:
res = _filter(case_fun)
# keep this in the try catch in case there is an issue with the truth value of result
selected = selected and res
except: # noqa
# any error leads to a no-match
selected = False
return selected
@function_decorator
def case(id=None, # type: str # noqa
tags=None, # type: Union[Any, Iterable[Any]]
marks=(), # type: Union[MarkDecorator, Iterable[MarkDecorator]]
case_func=DECORATED # noqa
):
"""
Optional decorator for case functions so as to customize some information.
```python
@case(id='hey')
def case_hi():
return 1
```
:param id: the custom pytest id that should be used when this case is active. Replaces the deprecated `@case_name`
decorator from v1. If no id is provided, the id is generated from case functions by removing their prefix,
see `@parametrize_with_cases(prefix='case_')`.
:param tags: custom tags to be used for filtering in `@parametrize_with_cases(has_tags)`. Replaces the deprecated
`@case_tags` and `@target` decorators.
:param marks: optional pytest marks to add on the case. Note that decorating the function directly with the mark
also works, and if marks are provided in both places they are merged.
:return:
"""
case_info = CaseInfo(id, marks, tags)
case_info.attach_to(case_func)
return case_func
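# Illustrative sketch (not part of the original module): pairing `@case` with
# `matches_tag_query` to select tagged case functions. The case names and
# tags below are made up.
def _example_tag_filtering():
    @case(id='small', tags=('fast',))
    def case_small():
        return 1

    @case(id='large', tags=('slow',))
    def case_large():
        return 10 ** 6

    # Only functions carrying *all* requested tags are kept.
    return [f for f in (case_small, case_large)
            if matches_tag_query(f, has_tag='fast')]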
CASE_PREFIX_CLS = 'Case'
"""Prefix used by default to identify case classes"""
CASE_PREFIX_FUN = 'case_'
"""Prefix used by default to identify case functions within a module"""
def is_case_class(cls, case_marker_in_name=CASE_PREFIX_CLS, check_name=True):
"""
Returns True if the given object is a class and, if `check_name=True` (default), if its name contains
`case_marker_in_name`.
:param cls: the object to check
:param case_marker_in_name: the string that should be present in a class name so that it is selected. Default is
'Case'.
:param check_name: a boolean (default True) to enforce that the name contains the word `case_marker_in_name`.
If False, all classes will lead to a `True` result whatever their name.
:return: True if this is a case class
"""
return safe_isclass(cls) and (not check_name or case_marker_in_name in cls.__name__)
def is_case_function(f, prefix=CASE_PREFIX_FUN, check_prefix=True):
"""
Returns True if the provided object is a function or callable and, if `check_prefix=True` (default), if it starts
with `prefix`.
:param f:
:param prefix:
:param check_prefix:
:return:
"""
if not callable(f):
return False
elif safe_isclass(f):
return False
else:
return f.__name__.startswith(prefix) if check_prefix else True
| 33.339367
| 134
| 0.616042
|
1ee8bcff3db1a902043f572a4dcdc3f4697712e1
| 236
|
py
|
Python
|
handbook/urls.py
|
UICHCC/uicCourse
|
3c34d0f765e583be05f084df1e6ab63b1ed62ed6
|
[
"MIT"
] | 3
|
2018-03-13T02:00:43.000Z
|
2019-03-24T02:46:56.000Z
|
handbook/urls.py
|
UICHCC/uicCourse
|
3c34d0f765e583be05f084df1e6ab63b1ed62ed6
|
[
"MIT"
] | 65
|
2018-02-08T16:01:53.000Z
|
2021-11-10T14:59:37.000Z
|
handbook/urls.py
|
UICHCC/uicCourse
|
3c34d0f765e583be05f084df1e6ab63b1ed62ed6
|
[
"MIT"
] | 2
|
2018-06-02T06:06:22.000Z
|
2019-04-18T03:27:16.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('handbook/', views.handbook, name='handbook_home'),
path('handbook/<str:r_major>/<int:admission_year>/', views.handbook_content, name='handbook_content')
]
| 26.222222
| 105
| 0.728814
|
a185fc979dc20f08a08e6ec3c4ec20007ea2b21e
| 467
|
py
|
Python
|
src/senor_octopus/sources/static.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | 7
|
2021-03-24T18:29:21.000Z
|
2021-11-15T21:13:25.000Z
|
src/senor_octopus/sources/static.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | null | null | null |
src/senor_octopus/sources/static.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from datetime import timezone
from senor_octopus.types import Stream
async def static(name: str, value: str) -> Stream:
"""
Generate static events.
Parameters
----------
name
Name of the event
value
Value of the event
Yields
------
Event
Static event
"""
yield {
"timestamp": datetime.now(timezone.utc),
"name": name,
"value": value,
}
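# Illustrative usage sketch (not part of the original module): drains the
# async generator once with asyncio; the event name and value are arbitrary.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        async for event in static("greeting", "hello"):
            print(event["timestamp"], event["name"], event["value"])

    asyncio.run(_demo())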
| 16.678571
| 50
| 0.567452
|
dd6b0456350bc14aa3ae735f16460f72e02a1716
| 374
|
py
|
Python
|
30 Days of Code/26NestedLogic.py
|
devansh-pratap-singh/hackerrank-solutions
|
227817d90846424cd3078e60b225eb201e906cf9
|
[
"MIT"
] | 1
|
2020-10-15T14:03:52.000Z
|
2020-10-15T14:03:52.000Z
|
30 Days of Code/26NestedLogic.py
|
devansh-pratap-singh/HackerRank-Solutions
|
227817d90846424cd3078e60b225eb201e906cf9
|
[
"MIT"
] | null | null | null |
30 Days of Code/26NestedLogic.py
|
devansh-pratap-singh/HackerRank-Solutions
|
227817d90846424cd3078e60b225eb201e906cf9
|
[
"MIT"
] | null | null | null |
actually = str(input()).split(" ")
da = int(actually[0])
ma = int(actually[1])
ya = int(actually[2])
expected = str(input()).split(" ")
de = int(expected[0])
me = int(expected[1])
ye = int(expected[2])
fine = 0
# Fine rules: returned in a later year -> flat 10000; same year but a later
# month -> 500 per month late; same year and month but a later day -> 15 per
# day late; on time or early -> no fine.
if ya > ye:
    fine = 10000
elif ya == ye:
    if ma > me:
        fine = (ma - me) * 500
    elif ma == me and da > de:
        fine = (da - de) * 15
print(fine)
| 22
| 34
| 0.548128
|
fc040f7fa72d51bdc3e56c7991173ab769c2eedf
| 7,446
|
py
|
Python
|
hh_deep_deep/dd_crawl.py
|
TeamHG-Memex/hh-deep-deep
|
a4629392115580dec48b468850e48f53e9f3547d
|
[
"MIT"
] | 1
|
2017-11-14T10:13:49.000Z
|
2017-11-14T10:13:49.000Z
|
hh_deep_deep/dd_crawl.py
|
afcarl/hh-deep-deep
|
a4629392115580dec48b468850e48f53e9f3547d
|
[
"MIT"
] | null | null | null |
hh_deep_deep/dd_crawl.py
|
afcarl/hh-deep-deep
|
a4629392115580dec48b468850e48f53e9f3547d
|
[
"MIT"
] | 2
|
2018-06-14T18:32:16.000Z
|
2020-05-29T14:14:30.000Z
|
from collections import deque
import json
import logging
from pathlib import Path
import re
import math
import multiprocessing
import subprocess
from typing import Any, Dict, Optional
from .crawl_utils import JsonLinesFollower
from .dd_utils import BaseDDPaths, BaseDDCrawlerProcess, is_running
from .deepdeep_crawl import DEFAULT_TRAINER_PAGE_LIMIT
class DDCrawlerPaths(BaseDDPaths):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.page_clf = self.root.joinpath('page_clf.joblib')
self.link_clf = self.root.joinpath('Q.joblib')
class DDCrawlerProcess(BaseDDCrawlerProcess):
paths_cls = DDCrawlerPaths
crawler_name = 'deepdeep'
def __init__(self, *,
page_clf_data: bytes,
link_clf_data: bytes,
broadness: str='BROAD',
**kwargs):
super().__init__(**kwargs)
self.page_clf_data = page_clf_data
self.link_clf_data = link_clf_data
self.broadness = broadness
@classmethod
def load_running(cls, root: Path, **kwargs) -> Optional['DDCrawlerProcess']:
""" Initialize a process from a directory.
"""
paths = cls.paths_cls(root)
if not all(p.exists() for p in [paths.pid, paths.meta, paths.seeds,
paths.page_clf, paths.link_clf]):
return
if not is_running(paths.root):
logging.warning('Cleaning up job in {}.'.format(paths.root))
subprocess.check_call(
['docker-compose', 'down', '-v'], cwd=str(paths.root))
paths.pid.unlink()
return
with paths.seeds.open('rt', encoding='utf8') as f:
seeds = [line.strip() for line in f]
if paths.login_credentials.exists():
with paths.login_credentials.open('rt', encoding='utf8') as f:
login_credentials = json.load(f)
else:
login_credentials = None
meta = json.loads(paths.meta.read_text('utf8'))
return cls(
id_=meta['id'],
workspace_id=meta['workspace_id'],
seeds=seeds,
login_credentials=login_credentials,
page_clf_data=paths.page_clf.read_bytes(),
link_clf_data=paths.link_clf.read_bytes(),
root=root,
**kwargs)
def start(self):
self.paths.mkdir()
self.paths.meta.write_text(json.dumps({
'id': self.id_,
'workspace_id': self.workspace_id,
}), encoding='utf8')
self.paths.page_clf.write_bytes(self.page_clf_data)
self.paths.link_clf.write_bytes(self.link_clf_data)
# Create out/media beforehand to prevent a race condition
self.paths.out.joinpath('media').mkdir(parents=True)
self.paths.seeds.write_text(
'\n'.join(url for url in self.seeds), encoding='utf8')
with self.paths.login_credentials.open('wt', encoding='utf8') as f:
json.dump(self.login_credentials, f)
n_processes = multiprocessing.cpu_count()
if self.max_workers:
n_processes = min(self.max_workers, n_processes)
cur_dir = Path(__file__).parent # type: Path
compose_templates = (
cur_dir.joinpath('dd-crawler-compose.template.yml').read_text())
self.paths.root.joinpath('docker-compose.yml').write_text(
compose_templates.format(
docker_image=self.docker_image,
page_limit=int(math.ceil(self.page_limit / n_processes)),
max_relevant_domains=self._max_relevant_domains(self.broadness),
relevancy_threshold=0.8, # just a heuristics
external_links=self.external_links,
proxy=self.proxy,
**{p: self.to_host_path(getattr(self.paths, p)) for p in [
'seeds', 'page_clf', 'link_clf', 'redis_conf', 'out',
'models', 'login_credentials',
]}
))
redis_config = cur_dir.joinpath('redis.conf').read_text()
self.paths.redis_conf.write_text(redis_config)
logging.info('Starting crawl in {}'.format(self.paths.root))
self._compose_call('up', '-d')
self._compose_call('scale', 'crawler={}'.format(n_processes))
self.paths.pid.write_text(self.id_)
logging.info('Crawl "{}" started'.format(self.id_))
@staticmethod
def _max_relevant_domains(broadness: str) -> str:
if broadness == 'DEEP':
return '0'
elif broadness == 'BROAD':
return ''
else:
            return re.match(r'N(\d+)$', broadness).groups()[0]
def _get_updates(self) -> Dict[str, Any]:
n_last = self.get_n_last()
log_paths = list(self.paths.out.glob('*.log.jl'))
updates = {}
if log_paths:
n_last_per_file = int(math.ceil(n_last / len(log_paths)))
all_last_items = []
total_score = n_crawled = n_domains = n_relevant_domains = 0
for path in log_paths:
follower = self._log_followers.setdefault(
path, JsonLinesFollower(path))
last_items = deque(maxlen=n_last_per_file)
for item in follower.get_new_items(at_least_last=True):
if item.get('has_login_form'):
updates.setdefault('login_urls', []).append(item['url'])
if 'login_success' in item:
self._add_login_state_update(item, updates)
if 'url' in item:
last_items.append(item)
if last_items:
all_last_items.extend(last_items)
last = last_items[-1]
total_score += last['total_score']
n_crawled += last['n_crawled']
# A very small fraction (before "scale crawler=N")
# might overlap between workers, more might overlap
# in case some workers die.
n_domains += last['n_domains']
n_relevant_domains += last['n_relevant_domains']
all_last_items.sort(key=lambda x: x['time'])
updates['pages'] = [
{'url': it['url'], 'score': 100 * it['score']}
for it in all_last_items[-n_last:]]
if n_crawled > 0:
updates['progress'] = (
'{n_crawled:,} pages processed from {n_domains:,} domains '
'({n_relevant_domains:,} relevant), '
'average score {mean_score:.1f}.'.format(
n_crawled=n_crawled,
n_domains=n_domains,
n_relevant_domains=n_relevant_domains,
mean_score=100 * total_score / n_crawled,
))
# This is correct as long as trainer crawler is really run
# for DEFAULT_TRAINER_PAGE_LIMIT. It's not the case in tests,
# but is true in production, where we don't set a custom limit.
updates['percentage_done'] = 100 * (
(n_crawled + DEFAULT_TRAINER_PAGE_LIMIT) /
(self.page_limit + DEFAULT_TRAINER_PAGE_LIMIT))
else:
updates['progress'] = 'Crawl is not running yet'
return updates
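# Illustrative sanity check (not part of the original module): the three
# broadness forms accepted by DDCrawlerProcess._max_relevant_domains --
# 'DEEP' maps to '0', 'BROAD' to an empty string, and 'N<k>' to '<k>'.
def _example_broadness_values():
    return {
        'DEEP': DDCrawlerProcess._max_relevant_domains('DEEP'),    # '0'
        'BROAD': DDCrawlerProcess._max_relevant_domains('BROAD'),  # ''
        'N10': DDCrawlerProcess._max_relevant_domains('N10'),      # '10'
    }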
| 43.54386
| 80
| 0.571985
|
076cbc86cebb590cc9686130a8c3f42a7c4485f8
| 9,524
|
py
|
Python
|
sdk/python/feast/inference.py
|
ibnummuhammad/feast
|
1fd9c2def1fbaca68e865a7c67336793ddb25582
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/inference.py
|
ibnummuhammad/feast
|
1fd9c2def1fbaca68e865a7c67336793ddb25582
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/inference.py
|
ibnummuhammad/feast
|
1fd9c2def1fbaca68e865a7c67336793ddb25582
|
[
"Apache-2.0"
] | null | null | null |
import re
from typing import List
from feast import BigQuerySource, Entity, FileSource, RedshiftSource, SnowflakeSource
from feast.data_source import DataSource, PushSource, RequestSource
from feast.errors import RegistryInferenceFailure
from feast.feature_view import FeatureView
from feast.field import Field, from_value_type
from feast.repo_config import RepoConfig
from feast.value_type import ValueType
def update_entities_with_inferred_types_from_feature_views(
entities: List[Entity], feature_views: List[FeatureView], config: RepoConfig
) -> None:
"""
Infers the types of the entities by examining the schemas of feature view batch sources.
Args:
entities: The entities to be updated.
feature_views: A list containing feature views associated with the entities.
config: The config for the current feature store.
"""
incomplete_entities = {
entity.name: entity
for entity in entities
if entity.value_type == ValueType.UNKNOWN
}
incomplete_entities_keys = incomplete_entities.keys()
for view in feature_views:
if not (incomplete_entities_keys & set(view.entities)):
continue # skip if view doesn't contain any entities that need inference
col_names_and_types = list(
view.batch_source.get_table_column_names_and_types(config)
)
for entity_name in view.entities:
if entity_name in incomplete_entities:
entity = incomplete_entities[entity_name]
# get entity information from information extracted from the view batch source
extracted_entity_name_type_pairs = list(
filter(lambda tup: tup[0] == entity.join_key, col_names_and_types,)
)
if len(extracted_entity_name_type_pairs) == 0:
# Doesn't mention inference error because would also be an error without inferencing
raise ValueError(
f"""No column in the batch source for the {view.name} feature view matches
its entity's name."""
)
inferred_value_type = view.batch_source.source_datatype_to_feast_value_type()(
extracted_entity_name_type_pairs[0][1]
)
if (
entity.value_type != ValueType.UNKNOWN
and entity.value_type != inferred_value_type
) or (len(extracted_entity_name_type_pairs) > 1):
raise RegistryInferenceFailure(
"Entity",
f"""Entity value_type inference failed for {entity_name} entity.
Multiple viable matches.
""",
)
entity.value_type = inferred_value_type
def update_data_sources_with_inferred_event_timestamp_col(
data_sources: List[DataSource], config: RepoConfig
) -> None:
ERROR_MSG_PREFIX = "Unable to infer DataSource timestamp_field"
for data_source in data_sources:
if isinstance(data_source, RequestSource):
continue
if isinstance(data_source, PushSource):
data_source = data_source.batch_source
if data_source.timestamp_field is None or data_source.timestamp_field == "":
# prepare right match pattern for data source
ts_column_type_regex_pattern = ""
# TODO(adchia): Move Spark source inference out of this logic
if (
isinstance(data_source, FileSource)
or "SparkSource" == data_source.__class__.__name__
):
ts_column_type_regex_pattern = r"^timestamp"
elif isinstance(data_source, BigQuerySource):
ts_column_type_regex_pattern = "TIMESTAMP|DATETIME"
elif isinstance(data_source, RedshiftSource):
ts_column_type_regex_pattern = "TIMESTAMP[A-Z]*"
elif isinstance(data_source, SnowflakeSource):
ts_column_type_regex_pattern = "TIMESTAMP_[A-Z]*"
else:
raise RegistryInferenceFailure(
"DataSource",
f"""
DataSource inferencing of timestamp_field is currently only supported
for FileSource, SparkSource, BigQuerySource, RedshiftSource, and SnowflakeSource.
Attempting to infer from {data_source}.
""",
)
# for informing the type checker
assert (
isinstance(data_source, FileSource)
or isinstance(data_source, BigQuerySource)
or isinstance(data_source, RedshiftSource)
or isinstance(data_source, SnowflakeSource)
or "SparkSource" == data_source.__class__.__name__
)
# loop through table columns to find singular match
timestamp_fields = []
for (
col_name,
col_datatype,
) in data_source.get_table_column_names_and_types(config):
if re.match(ts_column_type_regex_pattern, col_datatype):
timestamp_fields.append(col_name)
if len(timestamp_fields) > 1:
raise RegistryInferenceFailure(
"DataSource",
f"""{ERROR_MSG_PREFIX}; found multiple possible columns of timestamp type.
Data source type: {data_source.__class__.__name__},
Timestamp regex: `{ts_column_type_regex_pattern}`, columns: {timestamp_fields}""",
)
elif len(timestamp_fields) == 1:
data_source.timestamp_field = timestamp_fields[0]
else:
raise RegistryInferenceFailure(
"DataSource",
f"""
{ERROR_MSG_PREFIX}; Found no columns of timestamp type.
Data source type: {data_source.__class__.__name__},
Timestamp regex: `{ts_column_type_regex_pattern}`.
""",
)
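# Editor note: a minimal, self-contained sketch of the regex matching used above to find
# timestamp-typed columns. The column type strings below are illustrative assumptions and
# are not guaranteed to match what any particular offline store actually reports.
def _example_timestamp_column_regex():
    import re
    # FileSource / SparkSource: column types starting with "timestamp"
    assert re.match(r"^timestamp", "timestamp[us, tz=UTC]")
    # BigQuerySource: TIMESTAMP or DATETIME
    assert re.match("TIMESTAMP|DATETIME", "DATETIME")
    # RedshiftSource: TIMESTAMP, TIMESTAMPTZ, ...
    assert re.match("TIMESTAMP[A-Z]*", "TIMESTAMPTZ")
    # SnowflakeSource: TIMESTAMP_NTZ, TIMESTAMP_LTZ, ...
    assert re.match("TIMESTAMP_[A-Z]*", "TIMESTAMP_NTZ")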
def update_feature_views_with_inferred_features(
fvs: List[FeatureView], entities: List[Entity], config: RepoConfig
) -> None:
"""
Infers the set of features associated to each FeatureView and updates the FeatureView with those features.
    Inference treats each column of the underlying data source as a feature, except columns
    that are associated with the data source's timestamp columns and the FeatureView's entity columns.
Args:
fvs: The feature views to be updated.
entities: A list containing entities associated with the feature views.
config: The config for the current feature store.
"""
entity_name_to_join_key_map = {entity.name: entity.join_key for entity in entities}
join_keys = entity_name_to_join_key_map.values()
for fv in fvs:
# First drop all Entity fields. Then infer features if necessary.
fv.schema = [field for field in fv.schema if field.name not in join_keys]
fv.features = [field for field in fv.features if field.name not in join_keys]
if not fv.features:
columns_to_exclude = {
fv.batch_source.timestamp_field,
fv.batch_source.created_timestamp_column,
} | {
entity_name_to_join_key_map[entity_name] for entity_name in fv.entities
}
if fv.batch_source.timestamp_field in fv.batch_source.field_mapping:
columns_to_exclude.add(
fv.batch_source.field_mapping[fv.batch_source.timestamp_field]
)
if (
fv.batch_source.created_timestamp_column
in fv.batch_source.field_mapping
):
columns_to_exclude.add(
fv.batch_source.field_mapping[
fv.batch_source.created_timestamp_column
]
)
for (
col_name,
col_datatype,
) in fv.batch_source.get_table_column_names_and_types(config):
if col_name not in columns_to_exclude and not re.match(
"^__|__$",
col_name, # double underscores often signal an internal-use column
):
feature_name = (
fv.batch_source.field_mapping[col_name]
if col_name in fv.batch_source.field_mapping
else col_name
)
field = Field(
name=feature_name,
dtype=from_value_type(
fv.batch_source.source_datatype_to_feast_value_type()(
col_datatype
)
),
)
# Note that schema and features are two different attributes of a
# FeatureView, and that features should be present in both.
fv.schema.append(field)
fv.features.append(field)
if not fv.features:
raise RegistryInferenceFailure(
"FeatureView",
f"Could not infer Features for the FeatureView named {fv.name}.",
)
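# Editor note: an illustrative, dependency-free sketch of the column filtering performed by
# update_feature_views_with_inferred_features above. The column names and types are assumed
# demonstration values only.
def _example_feature_column_filtering():
    import re
    columns = [
        ("driver_id", "INT64"),            # entity join key -> excluded
        ("event_timestamp", "TIMESTAMP"),  # timestamp field -> excluded
        ("conv_rate", "DOUBLE"),           # remaining column -> inferred feature
        ("__log_date", "DATE"),            # leading double underscore -> excluded
    ]
    columns_to_exclude = {"driver_id", "event_timestamp"}
    inferred = [
        name
        for name, _dtype in columns
        if name not in columns_to_exclude and not re.match("^__|__$", name)
    ]
    assert inferred == ["conv_rate"]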
| 43.889401
| 119
| 0.591348
|
08f366c273224e96a8c3281648742e5f5f4d506d
| 1,204
|
py
|
Python
|
ciphey/basemods/Decoders/tap_code.py
|
AlexandruValeanu/Ciphey
|
58323db4443b0d29d7797dadab88955a02da8812
|
[
"MIT"
] | 9,908
|
2020-06-06T01:06:50.000Z
|
2022-03-31T21:22:57.000Z
|
ciphey/basemods/Decoders/tap_code.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 423
|
2020-05-30T11:44:37.000Z
|
2022-03-18T03:15:30.000Z
|
ciphey/basemods/Decoders/tap_code.py
|
ScarlettHoefler/Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
[
"MIT"
] | 714
|
2020-06-09T20:24:41.000Z
|
2022-03-29T15:28:53.000Z
|
# by https://github.com/RustyDucky and https://github.com/lukasgabriel
from typing import Dict, Optional
from ciphey.iface import Config, Decoder, ParamSpec, T, Translation, U, registry
@registry.register
class Tap_code(Decoder[str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs Tap code decoding
"""
try:
result = ""
combinations = ctext.split(" ")
for fragment in combinations:
result += self.TABLE.get(fragment)
return result
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.06
def __init__(self, config: Config):
super().__init__(config)
self.TABLE = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The table of letters used for the tap code interpretation.",
req=False,
default="cipheydists::translate::tap_code",
)
}
@staticmethod
def getTarget() -> str:
return "tap_code"
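# Editor note: a minimal sketch of the table-lookup decoding used by Tap_code above. The key
# format of the real cipheydists translation table is not shown in this file, so the fragment
# keys below ("1,1", "1,2", ...) are assumptions for illustration only.
def _example_table_lookup_decode(ctext: str) -> Optional[str]:
    table = {"1,1": "A", "1,2": "B", "2,1": "F"}
    try:
        # An unknown fragment makes table.get return None, and joining None raises,
        # mirroring the try/except fallback of Tap_code.decode.
        return "".join(table.get(fragment) for fragment in ctext.split(" "))
    except TypeError:
        return None
# For example, _example_table_lookup_decode("1,1 1,2") would return "AB".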
| 27.363636
| 82
| 0.577243
|
cb808b5484d8346313c8c6e3b441fc243d667200
| 17,785
|
py
|
Python
|
src/main/python/utils/dave_reader.py
|
godslayer201/dave
|
cc084e84327ab94dda0b2228f955d42cc152bb45
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/utils/dave_reader.py
|
godslayer201/dave
|
cc084e84327ab94dda0b2228f955d42cc152bb45
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/utils/dave_reader.py
|
godslayer201/dave
|
cc084e84327ab94dda0b2228f955d42cc152bb45
|
[
"Apache-2.0"
] | null | null | null |
import os
import utils.dave_logger as logging
import utils.exception_helper as ExHelper
import magic
import model.dataset as DataSet
import numpy as np
from astropy.io import fits
from hendrics.io import load_events_and_gtis
from stingray.gti import get_gti_from_all_extensions
from hendrics.lcurve import lcurve_from_fits
from hendrics.io import load_data
import utils.dataset_cache as DsCache
from config import CONFIG
def get_cache_key_for_destination (destination, time_offset):
if os.path.isfile(destination):
        # If destination is a valid file, it is not a cache key
return DsCache.get_key(destination + "|" + str(time_offset), True)
else:
return destination # If destination is a cache key
def get_hdu_string_from_hdulist (hdu_string, hdulist):
supported_hdus = hdu_string.split(",")
for hdu in hdulist:
if hdu.name in supported_hdus:
return hdu.name
return ""
def get_file_dataset(destination, time_offset=0):
dataset = None
cache_key = ""
try:
if destination:
cache_key = get_cache_key_for_destination(destination, time_offset)
if DsCache.contains(cache_key):
logging.debug("get_file_dataset: returned cached dataset, cache_key: " + str(cache_key))
return DsCache.get(cache_key), cache_key
logging.debug("get_file_dataset: reading destination: " + str(destination))
filename = os.path.splitext(destination)[0]
file_extension_from_file = os.path.splitext(destination)[1]
file_extension = magic.from_file(destination)
logging.debug("File extension: %s" % file_extension)
if file_extension.find("ASCII") == 0:
table_id = "EVENTS"
header_names = [CONFIG.TIME_COLUMN, "PHA", "Color1", "Color2"]
dataset = get_txt_dataset(destination, table_id, header_names)
table = dataset.tables[table_id]
table.add_columns(["AMPLITUDE"])
numValues = len(table.columns[CONFIG.TIME_COLUMN].values)
random_values = np.random.uniform(-1, 1, size=numValues)
table.columns["AMPLITUDE"].values = random_values
elif file_extension.find("FITS") == 0 \
or file_extension.find("gzip") > -1:
# Opening Fits
hdulist = fits.open(destination, memmap=True)
if get_hdu_string_from_hdulist(CONFIG.EVENTS_STRING, hdulist) != "":
# If EVENTS extension found, consider the Fits as EVENTS Fits
dataset = get_events_fits_dataset_with_stingray(destination, hdulist, dsId='FITS',
hduname=get_hdu_string_from_hdulist(CONFIG.EVENTS_STRING, hdulist),
column=CONFIG.TIME_COLUMN, gtistring=CONFIG.GTI_STRING,
extra_colums=['PI', "PHA"], time_offset=time_offset)
elif 'RATE' in hdulist:
# If RATE extension found, consider the Fits as LIGHTCURVE Fits
dataset = get_lightcurve_fits_dataset_with_stingray(destination, hdulist, hduname='RATE',
column=CONFIG.TIME_COLUMN, gtistring=CONFIG.GTI_STRING, time_offset=time_offset)
elif 'EBOUNDS' in hdulist:
# If EBOUNDS extension found, consider the Fits as RMF Fits
dataset = get_fits_dataset(hdulist, "RMF", ["EBOUNDS"])
elif get_hdu_string_from_hdulist(CONFIG.GTI_STRING, hdulist) != "":
                # If no EVENTS or RATE extension was found, check whether it is a GTI Fits
dataset = get_gti_fits_dataset_with_stingray(hdulist,gtistring=CONFIG.GTI_STRING, time_offset=time_offset)
else:
                    logging.warn("Unsupported FITS type! No supported table found: " + CONFIG.EVENTS_STRING + ", RATE, EBOUNDS or " + CONFIG.GTI_STRING)
elif file_extension == "data" and (file_extension_from_file in [".p", ".nc"]):
# If file is pickle object, tries to parse it as dataset
dataset = load_dataset_from_intermediate_file(destination)
else:
logging.warn("Unknown file extension: " + str(file_extension) + " , " + str(file_extension_from_file))
if dataset:
DsCache.add(cache_key, dataset)
logging.debug("get_file_dataset, dataset added to cache, cache_key: " + str(cache_key))
else:
logging.error("get_file_dataset: Destination is empty")
except:
logging.error(ExHelper.getException('get_file_dataset'))
return dataset, cache_key
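# Editor note: a hedged usage sketch for get_file_dataset. The path below is hypothetical;
# when the file cannot be read, the function logs the exception and returns (None, cache_key).
def _example_get_file_dataset_usage():
    dataset, cache_key = get_file_dataset("/tmp/example_events.fits", time_offset=0)
    if dataset:
        logging.debug("Loaded dataset, cached under key: %s" % cache_key)
    return dataset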
def get_txt_dataset(destination, table_id, header_names):
data = np.loadtxt(destination)
dataset = DataSet.get_hdu_type_dataset(table_id, header_names, hduname="EVENTS")
# Column1, Column1Err, Column2, Column2Err .. header order expected
for i in range(len(header_names)):
header_name = header_names[i]
column = dataset.tables[table_id].columns[header_name]
column.values = data[0:len(data), i * 2]
column.error_values = data[0:len(data), (i * 2) + 1]
logging.debug("Read txt file successfully: %s" % destination)
return dataset
# Returns a dataset by reading a Fits file, returns all tables
def get_fits_dataset(hdulist, dsId, table_ids):
dataset = DataSet.get_empty_dataset(dsId)
for t in range(len(hdulist)):
if isinstance(hdulist[t], fits.hdu.table.BinTableHDU):
if hdulist[t].name in table_ids:
table_id = hdulist[t].name
header_names = hdulist[t].columns.names
tbdata = hdulist[t].data
dataset.add_table(table_id, header_names)
header, header_comments = get_header(hdulist, table_id)
dataset.tables[table_id].set_header_info(header, header_comments)
for i in range(len(header_names)):
header_name = header_names[i]
dataset.tables[table_id].columns[header_name].add_values(np.nan_to_num(tbdata.field(i)))
else:
logging.warn("Ignored table data: %s" % hdulist[t].name)
else:
logging.warn("No valid data on: %s" % t)
logging.warn("Type of Data: %s" % type(hdulist[t]))
hdulist.close()
logging.debug("Read fits file successfully: %s" % dsId)
return dataset
# Returns the column's names of a given table of Fits file
def get_fits_table_column_names(hdulist, table_id):
if table_id in hdulist:
if isinstance(hdulist[table_id], fits.hdu.table.BinTableHDU):
return hdulist[table_id].columns.names
return None
# Returns a dataset containing HDU("EVENTS") table and GTI table
# with the Fits data using Stingray library
def get_events_fits_dataset_with_stingray(destination, hdulist, dsId='FITS',
hduname='EVENTS', column=CONFIG.TIME_COLUMN,
gtistring=CONFIG.GTI_STRING, extra_colums=[], time_offset=0):
# Gets columns from fits hdu table
logging.debug("Reading Events Fits columns")
columns = get_fits_table_column_names(hdulist, hduname)
header, header_comments = get_header(hdulist, hduname)
# Closes the FITS file, further file data reads will be done via Stingray
hdulist.close()
# Prepares additional_columns
additional_columns = []
for i in range(len(columns)):
if columns[i] != column:
if len(extra_colums) == 0 or columns[i] in extra_colums:
additional_columns.append(columns[i])
# Reads fits data
logging.debug("Reading Events Fits columns's data")
fits_data = load_events_and_gtis(destination,
additional_columns=additional_columns,
gtistring=gtistring,
hduname=hduname, column=column)
event_list, events_start_time = substract_tstart_from_events(fits_data, time_offset)
    # Gets PI column data from eventlist if required and PHA not in additional_data
if "PI" in additional_columns \
and "PI" not in fits_data.additional_data \
and "PHA" not in fits_data.additional_data:
fits_data.additional_data["PI"] = event_list.pi
dataset = DataSet.get_dataset_applying_gtis(dsId, header, header_comments,
fits_data.additional_data, [],
event_list.time, [],
event_list.gti[:, 0], event_list.gti[:, 1],
None, None, "EVENTS", column)
# Stores the events_start_time in time column extra
dataset.tables["EVENTS"].columns[column].set_extra("TSTART", events_start_time)
logging.debug("Read Events fits with stingray file successfully: " + str(destination) + ", tstart: " + str(events_start_time))
return dataset
# Returns a dataset containing GTI table using Stingray library
def get_gti_fits_dataset_with_stingray(hdulist, gtistring=CONFIG.GTI_STRING, time_offset=0):
st_gtis = get_gti_from_all_extensions(hdulist, accepted_gtistrings=[gtistring])
if time_offset != 0:
st_gtis[:, 0] = st_gtis[:, 0] - time_offset
st_gtis[:, 1] = st_gtis[:, 1] - time_offset
return DataSet.get_gti_dataset_from_stingray_gti(st_gtis)
# Returns a dataset containing LIGHTCURVE table and GTI table
# with the Fits data using Stingray library
def get_lightcurve_fits_dataset_with_stingray(destination, hdulist, hduname='RATE',
column=CONFIG.TIME_COLUMN, gtistring=CONFIG.GTI_STRING, time_offset=0):
supported_rate_columns = set(['RATE', 'RATE1', 'COUNTS'])
found_rate_columns = set(hdulist[hduname].data.names)
intersection_columns = supported_rate_columns.intersection(found_rate_columns)
#Check if HDUCLAS1 = LIGHTCURVE column exists
logging.debug("Reading Lightcurve Fits columns")
if "HDUCLAS1" not in hdulist[hduname].header:
logging.warn("HDUCLAS1 not found in header: " + hduname)
return None
elif hdulist[hduname].header["HDUCLAS1"] != "LIGHTCURVE":
logging.warn("HDUCLAS1 is not LIGHTCURVE")
return None
elif len(intersection_columns) == 0:
logging.warn("RATE, RATE1 or COUNTS columns not found in " + str(hduname) + " HDU, found columns: " + str(hdulist[hduname].data.names))
return None
elif len(intersection_columns) > 1:
logging.warn("RATE, RATE1 or COUNTS ambiguous columns found in " + str(hduname) + " HDU, found columns: " + str(hdulist[hduname].data.names))
return None
ratecolumn = list(intersection_columns)[0]
if len(hdulist[hduname].data[ratecolumn].shape) != 1 \
or not (isinstance(hdulist[hduname].data[ratecolumn][0], int) \
or isinstance(hdulist[hduname].data[ratecolumn][0], np.integer) \
or isinstance(hdulist[hduname].data[ratecolumn][0], float) \
or isinstance(hdulist[hduname].data[ratecolumn][0], np.floating)):
logging.warn("Wrong data type found for column: " + str(ratecolumn) + " in " + str(hduname) + " HDU, expected Integer or Float.")
return None
header, header_comments = get_header(hdulist, hduname)
# Reads the lightcurve with HENDRICS
outfile = lcurve_from_fits(destination, gtistring=get_hdu_string_from_hdulist(gtistring, hdulist),
timecolumn=column, ratecolumn=ratecolumn, ratehdu=1,
fracexp_limit=CONFIG.FRACEXP_LIMIT)[0]
lcurve, events_start_time = substract_tstart_from_lcurve(load_data(outfile), time_offset)
dataset = DataSet.get_lightcurve_dataset_from_stingray_lcurve(lcurve, header, header_comments,
hduname, column)
# Stores the events_start_time in time column extra
dataset.tables[hduname].columns[column].set_extra("TSTART", events_start_time)
logging.debug("Read Lightcurve fits with stingray file successfully: " + str(destination) + ", tstart: " + str(events_start_time) + ", rate: " + str(len(lcurve["counts"])))
return dataset
def substract_tstart_from_events(fits_data, time_offset=0):
# Adds the lag of the first event to the start time of observation
if time_offset == 0:
events_start_time = fits_data.t_start
else:
events_start_time = fits_data.t_start - (fits_data.t_start - time_offset)
event_list = fits_data.ev_list
event_list.gti[:, 0] = event_list.gti[:, 0] - events_start_time
event_list.gti[:, 1] = event_list.gti[:, 1] - events_start_time
event_list.time = event_list.time - events_start_time
return event_list, fits_data.t_start
def substract_tstart_from_lcurve(lcurve, time_offset=0):
    # Gets start time of observation and subtracts it from all time data;
    # this could be done in lcurve_from_fits, but doing it here is cleaner
events_start_time = 0
real_start_time = 0
if "tstart" in lcurve:
real_start_time = lcurve["tstart"]
if time_offset == 0:
events_start_time = real_start_time
else:
events_start_time = real_start_time - (real_start_time - time_offset)
lcurve["time"] = lcurve["time"] - events_start_time
lcurve["gti"][:, 0] = lcurve["gti"][:, 0] - events_start_time
lcurve["gti"][:, 1] = lcurve["gti"][:, 1] - events_start_time
else:
        logging.warn("TSTART not read from lightcurve Fits")
return lcurve, real_start_time
# Gets FITS header properties
def get_header(hdulist, hduname):
header = dict()
header_comments = dict()
for header_column in hdulist[hduname].header:
header[header_column] = str(hdulist[hduname].header[header_column])
header_comments[header_column] = str(hdulist[hduname].header.comments[header_column])
return header, header_comments
def get_stingray_object(destination, time_offset=0):
if not destination:
return None
filename = os.path.splitext(destination)[0]
file_extension = magic.from_file(destination)
logging.debug("File extension: %s" % file_extension)
if file_extension.find("FITS") == 0:
# Opening Fits
hdulist = fits.open(destination, memmap=True)
if 'EVENTS' in hdulist:
# If EVENTS extension found, consider the Fits as EVENTS Fits
fits_data = load_events_and_gtis(destination,
additional_columns=['PI', "PHA"],
gtistring=CONFIG.GTI_STRING,
hduname='EVENTS', column=CONFIG.TIME_COLUMN)
return substract_tstart_from_events(fits_data, time_offset)
elif 'RATE' in hdulist:
# If RATE extension found, consider the Fits as LIGHTCURVE Fits
# Reads the lightcurve with hendrics
outfile = lcurve_from_fits(destination, gtistring=get_hdu_string_from_hdulist(CONFIG.GTI_STRING, hdulist),
timecolumn=CONFIG.TIME_COLUMN, ratecolumn=None, ratehdu=1,
fracexp_limit=CONFIG.FRACEXP_LIMIT)[0]
            return substract_tstart_from_lcurve(load_data(outfile), time_offset)
else:
logging.error("Unsupported FITS type!")
else:
logging.error("Unknown file extension: %s" % file_extension)
return None
def save_to_intermediate_file(stingray_object, fname):
"""Save Stingray object to intermediate file."""
from stingray.lightcurve import Lightcurve
from stingray.events import EventList
from stingray.crossspectrum import Crossspectrum
from hendrics.io import save_lcurve, save_events, save_pds
if isinstance(stingray_object, Lightcurve):
save_lcurve(stingray_object, fname)
elif isinstance(stingray_object, EventList):
save_events(stingray_object, fname)
    # This also works for Powerspectrum and AveragedCrossspectrum
elif isinstance(stingray_object, Crossspectrum):
save_pds(stingray_object, fname)
else:
logging.error("save_to_intermediate_file: Unknown object type: %s" % type(stingray_object).__name__)
return False
return True
def load_dataset_from_intermediate_file(fname):
    """Load a dataset from an intermediate file."""
from stingray.lightcurve import Lightcurve
from stingray.events import EventList
from stingray.crossspectrum import Crossspectrum
from hendrics.io import get_file_type
from stingray.io import _retrieve_pickle_object
# This will return an EventList, a light curve, a Powerspectrum, ...
# depending on the contents of the file
try:
ftype, contents = get_file_type(fname)
except:
contents = _retrieve_pickle_object(fname)
if isinstance(contents, Lightcurve):
return DataSet.get_lightcurve_dataset_from_stingray_Lightcurve(contents)
elif isinstance(contents, EventList):
return DataSet.get_eventlist_dataset_from_stingray_Eventlist(contents)
    # This also works for Powerspectrum and AveragedCrossspectrum
elif isinstance(contents, Crossspectrum):
logging.error("Unsupported intermediate file type: Crossspectrum")
else:
        logging.error("Unsupported intermediate file type: %s" % type(contents).__name__)
return None
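# Editor note: a hedged sketch of the intermediate-file round trip provided by
# save_to_intermediate_file and load_dataset_from_intermediate_file. The output path is
# hypothetical and the Lightcurve values are arbitrary demonstration data.
def _example_intermediate_file_roundtrip():
    from stingray.lightcurve import Lightcurve
    lc = Lightcurve([0.5, 1.5, 2.5], [10, 12, 11])
    if save_to_intermediate_file(lc, "/tmp/example_lc.p"):
        return load_dataset_from_intermediate_file("/tmp/example_lc.p")
    return None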
| 41.456876
| 176
| 0.654709
|
a6f2dac0dbbaef1cd4e79c32f4d6646abcb828bd
| 9,988
|
py
|
Python
|
scripts/convert_to_nwb.py
|
vathes/najafi-2018-nwb
|
28c9deb1e8be55c8a1bbdcb2d899dae725e04534
|
[
"MIT"
] | 4
|
2018-12-12T21:54:07.000Z
|
2019-08-15T15:06:02.000Z
|
scripts/convert_to_nwb.py
|
deeptimittal12/najafi-2018-nwb
|
28c9deb1e8be55c8a1bbdcb2d899dae725e04534
|
[
"MIT"
] | 8
|
2019-01-03T00:34:51.000Z
|
2020-03-23T15:10:42.000Z
|
scripts/convert_to_nwb.py
|
deeptimittal12/najafi-2018-nwb
|
28c9deb1e8be55c8a1bbdcb2d899dae725e04534
|
[
"MIT"
] | 8
|
2018-12-06T23:08:40.000Z
|
2020-03-31T20:01:24.000Z
|
#!/usr/bin/env python3
import os
import sys
from datetime import datetime
from dateutil.tz import tzlocal
from pathlib import Path
import pytz
import re
import json
import numpy as np
import scipy.io as sio
import pynwb
from pynwb import NWBFile, NWBHDF5IO, ophys as nwb_ophys
from collections import defaultdict
import tqdm
from hdmf.backends.hdf5 import H5DataIO
# Read configuration
try:
config_file = sys.argv[1]
except IndexError:
config_file = 'conversion_config.json'
with open(config_file) as f:
config = json.load(f)
# Read the list of .mat files from manifest file
file_pattern = re.compile(
r'(?P<checksum>[0-9a-f]{32}) (?P<path>.+)(?P<prefix>(post|more))_(?P<session>.+)(?P<ext>\.mat)')
mat_file_pairs = defaultdict(dict)
with open(Path(config['manifest']), 'r') as f:
for line in f:
match = file_pattern.match(line)
if match:
mat_file_pairs[match['session']][match['prefix']] = Path('{root}/{path}/{prefix}_{session}{ext}'.format(
root=os.path.dirname(os.path.abspath(Path(config['manifest']))), **match.groupdict()))
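# Editor note: an illustrative check of the manifest line format expected by file_pattern
# above. The checksum and file name are placeholder values, not real data.
def _example_manifest_line_match():
    line = ("0" * 32) + " some/dir/more_mouse1-151102-1.mat"
    match = file_pattern.match(line)
    # Expected groups: checksum, path, prefix ("more" or "post"), session, ext
    return match.groupdict() if match else None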
# save an NWB file for each session
save_path = os.path.abspath(Path(config['output_dir']))
for session, file_pair in tqdm.tqdm(list(mat_file_pairs.items())):
moremat, postmat = (sio.loadmat(file_pair[x], struct_as_record=False, squeeze_me=True)
for x in ('more', 'post'))
mouse_folder, session_folder = file_pair['more'].parts[-3:-1]
nwbfile = NWBFile(
session_description=session,
identifier=session,
session_id=''.join(session.split('-')[-2:]), # use the last 12-digits as the session id
session_start_time=datetime.strptime(session_folder, '%y%m%d').astimezone(pytz.timezone('US/Eastern')),
file_create_date=datetime.now(tzlocal()),
**config['general'])
nwbfile.subject = pynwb.file.Subject(
subject_id=mouse_folder, age='', description='', genotype='', sex='', species='', weight='')
# -- imaging plane - the plane info ophys was performed on (again hard-coded here)
device = pynwb.device.Device('img_device')
nwbfile.add_device(device)
imaging_plane = nwbfile.create_imaging_plane(
name='ImagingPlane',
optical_channel=nwb_ophys.OpticalChannel('Green', 'Green (ET 525/50m)', 525.), # (nm)
device=device,
description='imaging plane',
excitation_lambda=930., # (nm)
imaging_rate=30.,
indicator='GCaMP6f',
location='left PPC',
conversion=1e-6,
unit='micrometer')
# Epochs: define custom epoch columns
trial_columns = dict(
trial_type='high-rate, low-rate',
trial_pulse_rate='ranged from 5-27Hz, 16Hz boundary for high/low rate',
trial_response='correct, incorrect, no center lick, no go-tone lick',
trial_is_good='good, bad',
init_tone='(sec) time of initiation tone w.r.t the start of the trial (t=0)',
stim_onset='(sec) time of stimulus onset w.r.t the start of the trial (t=0)',
stim_offset='(sec) time of stimulus offset w.r.t the start of the trial (t=0)',
go_tone='(sec) time of go tone w.r.t the start of the trial (t=0)',
first_commit='(sec) time of first commit w.r.t the start of the trial (t=0)',
second_commit='(sec) time of second commit w.r.t the start of the trial (t=0)'
)
for k, v in trial_columns.items():
nwbfile.add_trial_column(name=k, description=v)
# - read and condition data
outcomes = postmat['outcomes'] # 1: correct, 0: incorrect, nan: no trial, -3: no center lick to start stimulus, -1: no go-tone lick
outcomes[np.isnan(outcomes)] = -10 # replace nan with -10 to easily make outcome dict
trial_response_dict = {
1: 'correct',
0: 'incorrect',
-1: 'no go-tone lick (-1)', # because from the data, it seems like code(-1) and code(-4) both refer to 'no go-tone lick'
-4: 'no go-tone lick (-4)',
-3: 'no center lick',
-2: 'no first commit',
-5: 'no second commit',
-10: 'no decision'}
# get timeInitTone and handle some timeInitTone elements being vectors instead of scalars (get [0] of that vector)
init_tone = [a if np.isscalar(a) else a[0] for a in postmat['timeInitTone']]
    # merge timeReward and timeCommitIncorrResp to get the overall second-commit times
second_commit_times = postmat['timeReward']
ix = ~np.isnan(postmat['timeCommitIncorrResp'])
second_commit_times[ix] = postmat['timeCommitIncorrResp'][ix]
# get trial start stop times
if 'alldata_frameTimes' not in postmat:
start_time = np.full(outcomes.shape, np.nan)
stop_time = np.full(outcomes.shape, np.nan)
else:
alldata_frameTimes = postmat['alldata_frameTimes'] # timestamps of each trial for all trials
start_time = [t[0] for t in alldata_frameTimes]
stop_time = [t[-1] for t in alldata_frameTimes]
# - now insert each trial into trial table
for k in range(outcomes.size):
nwbfile.add_trial(
start_time=start_time[k]/1000,
stop_time=stop_time[k]/1000,
trial_type=('High-rate' if postmat['stimrate'][k] >= 16 else 'Low-rate'),
trial_pulse_rate=postmat['stimrate'][k],
trial_response=trial_response_dict[outcomes[k]],
trial_is_good=(outcomes[k] >= 0),
init_tone=init_tone[k]/1000, # in seconds
stim_onset=postmat['timeStimOnsetAll'][k]/1000,
stim_offset=postmat['timeSingleStimOffset'][k]/1000,
go_tone=postmat['timeCommitCL_CR_Gotone'][k]/1000,
first_commit=postmat['time1stSideTry'][k]/1000,
second_commit=second_commit_times[k]/1000)
# ------ Image Segmentation processing module ------
img_seg_mod = nwbfile.create_processing_module(
'Ophys', 'Plane segmentation and ROI information')
img_segmentation = nwb_ophys.ImageSegmentation(name='ImageSegmentation')
img_seg_mod.add_data_interface(img_segmentation)
plane_segmentation = nwb_ophys.PlaneSegmentation(
name='PlaneSegmentation',
description='description here',
imaging_plane=imaging_plane)
img_segmentation.add_plane_segmentation([plane_segmentation])
# add segmentation columns
for k, v in dict(
roi_id='roi id',
roi_status='good or bad ROI',
neuron_type='excitatory or inhibitory',
fitness='',
roi2surr_sig='',
offsets_ch1_pix='').items():
plane_segmentation.add_column(name=k, description=v)
# insert ROI mask
bad_roi_mask = np.where(moremat['badROIs01'] == 0)
neuron_type = np.full_like(moremat['idx_components'], np.nan)
neuron_type[bad_roi_mask] = moremat['inhibitRois_pix']
roi2surr_sig = np.full_like(moremat['idx_components'], np.nan)
roi2surr_sig[bad_roi_mask] = moremat['roi2surr_sig']
offsets_ch1_pix = np.full_like(moremat['idx_components'], np.nan)
offsets_ch1_pix[bad_roi_mask] = moremat['offsets_ch1_pix']
neuron_type_dict = {0: 'excitatory', 1: 'inhibitory'}
neuron_status_dict = {0: 'good', 1: 'bad'}
for idx in range(moremat['idx_components'].size):
plane_segmentation.add_roi(
roi_id=moremat['idx_components'][idx],
image_mask=moremat['mask'][:, :, idx],
roi_status=neuron_status_dict.get(moremat['badROIs01'][idx]),
fitness=moremat['fitness'][idx],
neuron_type=neuron_type_dict.get(neuron_type[idx], 'unknown'),
roi2surr_sig=roi2surr_sig[idx],
offsets_ch1_pix=offsets_ch1_pix[idx])
# create a ROI region table
roi_region = plane_segmentation.create_roi_table_region(
description='good roi region table',
region=(np.where(moremat['badROIs01'] == 0)[0]).tolist())
# ingest each trial-based dataset, time-lock to different event types
for data_name in ('firstSideTryAl', 'firstSideTryAl_COM', 'goToneAl', 'rewardAl', 'commitIncorrAl',
'initToneAl', 'stimAl_allTrs', 'stimAl_noEarlyDec', 'stimOffAl'):
try:
dF_F = nwb_ophys.DfOverF(name=f'dFoF_{data_name}')
for tr_idx, d in enumerate(postmat[data_name].traces.transpose([2, 1, 0])):
dF_F.add_roi_response_series(
nwb_ophys.RoiResponseSeries(
name=f'Trial_{tr_idx:02d}',
data=H5DataIO(d.T, compression=True),
rois=roi_region,
unit='au',
starting_time=postmat[data_name].time[0]/1000,
rate=(postmat[data_name].time[1] - postmat[data_name].time[0])/1000,
description=f'(ROIs x time), aligned to event_id: {postmat[data_name].eventI}'))
img_seg_mod.add_data_interface(dF_F)
except Exception as e:
print(f'Error adding roi_response_series: {data_name}\n\t\tErrorMsg: {str(e)}\n', file=sys.stderr)
# ------ Behavior processing module ------
behavior_mod = nwbfile.create_processing_module(
'Behavior', 'Behavior data (e.g. wheel revolution, lick traces)')
behavior_epoch = pynwb.behavior.BehavioralTimeSeries(
name='Epoched_behavioral_series')
behavior_mod.add_data_interface(behavior_epoch)
for behavior in ['firstSideTryAl_wheelRev', 'firstSideTryAl_lick']:
behavior_epoch.create_timeseries(
name=behavior,
data=H5DataIO(postmat[behavior].traces, compression=True),
unit='unknown',
starting_time=postmat[behavior].time[0]/1000, # in seconds
rate=(postmat[behavior].time[1] - postmat[behavior].time[0])/1000,
description=f'(time x trial), aligned to event_id: {postmat[behavior].eventI}')
with NWBHDF5IO(os.path.join(save_path, mouse_folder + '_' + session + '.nwb'), mode='w') as io:
io.write(nwbfile)
| 46.02765
| 136
| 0.64998
|
572069496de1d3f36a652c763d091f904c2ec6a2
| 2,001
|
py
|
Python
|
src/sources/flyinglines.py
|
diogenes895/lightnovel-crawler
|
69162799b05c7a84845e87f4d95041715c77c2ba
|
[
"Apache-2.0"
] | null | null | null |
src/sources/flyinglines.py
|
diogenes895/lightnovel-crawler
|
69162799b05c7a84845e87f4d95041715c77c2ba
|
[
"Apache-2.0"
] | null | null | null |
src/sources/flyinglines.py
|
diogenes895/lightnovel-crawler
|
69162799b05c7a84845e87f4d95041715c77c2ba
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import re
from urllib.parse import urlparse
from ..utils.crawler import Crawler
logger = logging.getLogger('FLYING LINES')
chapter_body_url = 'https://www.flying-lines.com/h5/novel/%s/%s?accessToken=&isFirstEnter=1'
class FlyingLinesCrawler(Crawler):
base_url = 'https://www.flying-lines.com/'
def read_novel_info(self):
        '''Get novel title, author, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select_one('.novel-info .title h2').text
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
soup.select_one('.novel .novel-thumb img')['data-src'])
logger.info('Novel cover: %s', self.novel_cover)
authors = [x.text.strip()
for x in soup.select('.novel-info ul.profile li')]
self.novel_author = ', '.join(authors)
logger.info('%s', self.novel_author)
self.novel_id = urlparse(self.novel_url).path.split('/')[2]
logger.info("Novel id: %s", self.novel_id)
for a in soup.select('ul.volume-chapters li a'):
chap_id = int(a['data-chapter-number'])
vol_id = 1 + (chap_id - 1) // 100
if len(self.chapters) % 100 == 0:
self.volumes.append({'id': vol_id})
# end if
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'title': a.text.strip(),
'url': self.absolute_url(a['href']),
})
# end for
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
url = chapter_body_url % (self.novel_id, chapter['id'])
logger.info('Downloading %s', url)
data = self.get_json(url)
return data['data']['content']
# end def
# end class
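# Editor note: a small, dependency-free sketch of the chapter-to-volume grouping used in
# read_novel_info above, where every 100 chapters are packed into one volume.
def _example_volume_grouping():
    assert 1 + (1 - 1) // 100 == 1     # chapter 1   -> volume 1
    assert 1 + (100 - 1) // 100 == 1   # chapter 100 -> volume 1
    assert 1 + (101 - 1) // 100 == 2   # chapter 101 -> volume 2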
| 33.915254
| 92
| 0.586707
|
384b1e61c792199dd2f2f979a80900fb460127b5
| 4,523
|
py
|
Python
|
plot/silence-attack/silence-attack.py
|
tncheng/bamboo
|
2e52d3728733fb07f25d9ff6207cba871127af34
|
[
"MIT"
] | 15
|
2020-09-29T11:30:28.000Z
|
2022-03-30T07:51:39.000Z
|
plot/silence-attack/silence-attack.py
|
tncheng/bamboo
|
2e52d3728733fb07f25d9ff6207cba871127af34
|
[
"MIT"
] | 5
|
2020-11-09T09:22:20.000Z
|
2021-11-23T02:46:24.000Z
|
plot/silence-attack/silence-attack.py
|
tncheng/bamboo
|
2e52d3728733fb07f25d9ff6207cba871127af34
|
[
"MIT"
] | 8
|
2020-10-26T13:11:36.000Z
|
2022-02-15T19:34:55.000Z
|
import matplotlib.pyplot as plt
# Measurements from forking-attack.data
cgr = [
('HotStuff',[
1.0, 0.93, 0.864, 0.807, 0.738, 0.637
], '-o', 'coral'),
('2CHS',[
1.0, 0.935, 0.872, 0.810, 0.742, 0.643
], '-^', 'darkseagreen'),
('Streamlet',[
1.0, 1.0, 1.0, 1.0, 1.0, 1.0
], '-s', 'steelblue')
]
bi = [
('HotStuff',[
3.0, 3.557, 4.286, 5.122, 6.676, 9.531
], '-o', 'coral'),
('2CHS',[
2.0, 2.218, 2.496, 2.854, 3.371, 4.496
], '-^', 'darkseagreen'),
('Streamlet',[
2.0, 2.29, 2.662, 3.153, 3.820, 5.688
], '-s', 'steelblue')
]
thru = [
('HotStuff',[
[49.14, 49.1],
[36.474, 37.31],
[25.279, 26.14],
[18.2, 18.5],
[13.4, 13.5],
[9.9, 10.0]
], '-o', 'coral'),
('2CHS',[
[50.90, 50.99],
[36.0, 36.5],
[25.6, 25.9],
[17.5, 17.7],
[12.6, 12.7],
[9.5, 9.7]
], '-^', 'darkseagreen'),
('Streamlet',[
[14.1, 14.2],
[12.5, 12.5],
[11.6, 11.7],
[9.5, 9.7],
[7.9, 8.1],
[5.7, 5.8],
], '-s', 'steelblue')
]
lat = [
('HotStuff',[
[213, 222],
[416, 443],
[735, 776],
[1073, 1160],
[1537, 1581],
[2137, 2221],
], '-o', 'coral'),
('2C-HS',[
[216, 220],
[402, 419],
[576, 612],
[1029, 1074],
[1488, 1535],
[2053, 2097],
], '-^', 'darkseagreen'),
('Streamlet',[
[597, 630],
[707, 740],
[765, 826],
[856, 911],
[1050, 1155],
[1400, 1438],
], '-s', 'steelblue')
]
def do_plot():
f, ax = plt.subplots(2,2, figsize=(8,6))
byzNo = [0, 2, 4, 6, 8, 10]
for name, entries, style, color in cgr:
cgrs = []
for item in entries:
cgrs.append(item)
ax[1][0].plot(byzNo, cgrs, style, color=color, label='%s' % name, markersize=8, alpha=0.8)
ax[1][0].set_ylabel("Chain growth rate")
ax[1][0].set_ylim([0,1.0])
ax[1][0].legend(loc='best', fancybox=True,frameon=False,framealpha=0.8)
for name, entries, style, color in bi:
bis = []
for item in entries:
bis.append(item)
ax[1][1].plot(byzNo, bis, style, color=color, label='%s' % name, markersize=8, alpha=0.8)
ax[1][1].set_ylabel("Block intervals")
ax[1][1].yaxis.set_label_position("right")
ax[1][1].yaxis.tick_right()
ax[1][1].set_ylim([0,10.0])
for name, entries, style, color in thru:
throughput = []
errs = []
for item in entries:
throughput.append((item[0]+item[1])/2.0)
errs.append(abs(item[0]-item[1]))
ax[0][0].errorbar(byzNo, throughput, yerr=errs, fmt=style, mec=color, color=color, mfc='none', label='%s'%name, markersize=6)
ax[0][0].set_ylabel("Throughput (KTx/s)")
ax[0][0].legend(loc='best', fancybox=True,frameon=False,framealpha=0.8)
    # ax[0][0].set_xticks(xticks)
ax[0][0].set_ylim([0,60])
ax[0][0].set_xticklabels(("", "", "", "", "", ""))
ax[0][0].set_xlim([0,10])
    # ax[0][0].set_xticklabels(xticks_label)
for name, entries, style, color in lat:
latency = []
errs = []
for item in entries:
latency.append((item[0]+item[1])/2.0)
errs.append(abs(item[0]-item[1]))
ax[0][1].errorbar(byzNo, latency, yerr=errs, fmt=style, mec=color, color=color, mfc='none', label='%s'%name, markersize=6)
ax[0][1].set_ylabel("Latency (ms)")
# ax[0][1].legend(loc='best', fancybox=True,frameon=False,framealpha=0.8)
ax[0][1].yaxis.set_label_position("right")
ax[0][1].yaxis.tick_right()
ax[0][1].set_xticklabels(("", "", "", "", "", ""))
    # ax[0][1].set_xticks(xticks)
ax[0][1].set_xlim([0,10])
# ax[0][1].set_ylim([100,1000])
# ax[1][1].set_xticklabels(xticks_label)
# plt.legend(loc='best', fancybox=True,frameon=False,framealpha=0.8)
f.text(0.5, 0.04, 'Byz. number', ha='center', va='center')
plt.subplots_adjust(wspace=0.1)
plt.subplots_adjust(hspace=0.1)
ax[0][0].grid(linestyle='--', alpha=0.3)
ax[1][0].grid(linestyle='--', alpha=0.3)
ax[0][1].grid(linestyle='--', alpha=0.3)
ax[1][1].grid(linestyle='--', alpha=0.3)
plt.savefig('silence-attack-data.pdf', format='pdf')
plt.show()
if __name__ == '__main__':
do_plot()
| 31.193103
| 133
| 0.488835
|
1634c90e2095fffbc4b82e192c96271305556a79
| 1,165
|
py
|
Python
|
tests/stream_test.py
|
aplanas/aioredis
|
be5ae76ce2c43a1316e6e86365215a0e26be49b3
|
[
"MIT"
] | 18
|
2020-10-05T05:38:39.000Z
|
2022-01-06T07:40:09.000Z
|
tests/stream_test.py
|
aplanas/aioredis
|
be5ae76ce2c43a1316e6e86365215a0e26be49b3
|
[
"MIT"
] | 4
|
2020-10-22T19:27:22.000Z
|
2022-03-10T02:03:40.000Z
|
tests/stream_test.py
|
aplanas/aioredis
|
be5ae76ce2c43a1316e6e86365215a0e26be49b3
|
[
"MIT"
] | 8
|
2021-01-03T16:56:50.000Z
|
2021-08-20T09:07:12.000Z
|
import pytest
from aioredis.stream import StreamReader
from aioredis.parser import PyReader
from aioredis.errors import (
ProtocolError,
ReplyError
)
@pytest.fixture
def reader(loop):
reader = StreamReader(loop=loop)
reader.set_parser(
PyReader(protocolError=ProtocolError, replyError=ReplyError)
)
return reader
async def test_feed_and_parse(reader):
reader.feed_data(b'+PONG\r\n')
assert (await reader.readobj()) == b'PONG'
async def test_buffer_available_after_RST(reader):
reader.feed_data(b'+PONG\r\n')
reader.set_exception(Exception())
assert (await reader.readobj()) == b'PONG'
with pytest.raises(Exception):
await reader.readobj()
def test_feed_with_eof(reader):
reader.feed_eof()
with pytest.raises(AssertionError):
reader.feed_data(b'+PONG\r\n')
def test_feed_no_data(reader):
assert not reader.feed_data(None)
@pytest.mark.parametrize(
'read_method',
['read', 'readline', 'readuntil', 'readexactly']
)
async def test_read_flavors_not_supported(reader, read_method):
with pytest.raises(RuntimeError):
await getattr(reader, read_method)()
| 23.3
| 68
| 0.717597
|
96d32e1db12299dccc064d49dec8f315d6dc452c
| 983
|
py
|
Python
|
dizoo/mujoco/entry/mujoco_cql_generation_main.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | 1
|
2022-03-21T16:15:39.000Z
|
2022-03-21T16:15:39.000Z
|
dizoo/mujoco/entry/mujoco_cql_generation_main.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
dizoo/mujoco/entry/mujoco_cql_generation_main.py
|
jayyoung0802/DI-engine
|
efbb35ddaf184d1009291e6842fbbae09f193492
|
[
"Apache-2.0"
] | null | null | null |
from dizoo.mujoco.config.hopper_sac_data_generation_config import main_config, create_config
from ding.entry import collect_demo_data, eval
import torch
import copy
def eval_ckpt(args):
config = copy.deepcopy([main_config, create_config])
eval(config, seed=args.seed, load_path=main_config.policy.learn.learner.hook.load_ckpt_before_run)
def generate(args):
config = copy.deepcopy([main_config, create_config])
state_dict = torch.load(main_config.policy.learn.learner.load_path, map_location='cpu')
collect_demo_data(
config,
collect_count=main_config.policy.other.replay_buffer.replay_buffer_size,
seed=args.seed,
expert_data_path=main_config.policy.collect.save_path,
state_dict=state_dict
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', '-s', type=int, default=0)
args = parser.parse_args()
eval_ckpt(args)
generate(args)
| 29.787879
| 102
| 0.741607
|
0e69ad2b0fdfa4c3585930246012411b66e51c4d
| 1,715
|
py
|
Python
|
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/recognizers_choice/choice/recognizers_choice.py
|
luzeunice/BotBuilder-Samples
|
b62be4e8863125a567902b736b7b74313d9d4f28
|
[
"MIT"
] | 10
|
2019-05-11T18:07:14.000Z
|
2021-08-20T03:02:47.000Z
|
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/recognizers_choice/choice/recognizers_choice.py
|
luzeunice/BotBuilder-Samples
|
b62be4e8863125a567902b736b7b74313d9d4f28
|
[
"MIT"
] | 1
|
2020-07-10T08:25:36.000Z
|
2020-07-10T08:25:36.000Z
|
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/recognizers_choice/choice/recognizers_choice.py
|
luzeunice/BotBuilder-Samples
|
b62be4e8863125a567902b736b7b74313d9d4f28
|
[
"MIT"
] | 18
|
2019-08-19T12:11:00.000Z
|
2021-10-12T09:36:27.000Z
|
from enum import IntFlag
from typing import List
from recognizers_choice.choice.extractors import BooleanExtractor
from recognizers_choice.choice.english import EnglishBooleanExtractorConfiguration
from recognizers_choice.choice.models import BooleanModel
from recognizers_choice.choice.parsers import BooleanParser
from recognizers_text import Culture, Recognizer, ModelResult, Model
class ChoiceOptions(IntFlag):
NONE = 0
def recognize_boolean(query: str,
culture: str,
options: ChoiceOptions = ChoiceOptions.NONE,
fallback_to_default_culture: bool = True) -> List[ModelResult]:
recognizer = ChoiceRecognizer(culture, options)
model = recognizer.get_boolean_model(culture, fallback_to_default_culture)
return model.parse(query)
class ChoiceRecognizer (Recognizer[ChoiceOptions]):
def __init__(self, target_culture: str = None, options: ChoiceOptions = ChoiceOptions.NONE, lazy_initialization: bool = False):
if options < ChoiceOptions.NONE or options > ChoiceOptions.NONE:
raise ValueError()
super().__init__(target_culture, options, lazy_initialization)
def initialize_configuration(self):
self.register_model('BooleanModel', Culture.English, lambda options: BooleanModel(
BooleanParser(), BooleanExtractor(EnglishBooleanExtractorConfiguration())))
@staticmethod
def is_valid_option(options: int) -> bool:
        return 0 <= options <= ChoiceOptions.NONE
def get_boolean_model(self, culture: str = None, fallback_to_default_culture: bool = True) -> Model:
return self.get_model('BooleanModel', culture, fallback_to_default_culture)
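# Editor note: a hedged usage sketch for recognize_boolean. The query text is arbitrary and
# each ModelResult is only printed via repr(), since its payload is not inspected here.
def _example_recognize_boolean():
    results = recognize_boolean("yes please", Culture.English)
    for result in results:
        print(repr(result))
    return results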
| 41.829268
| 131
| 0.746356
|
57bbcccc4f66fc5ff1fc7f9accec441f1ee23f63
| 26,562
|
py
|
Python
|
pyformlang/finite_automaton/tests/test_epsilon_nfa.py
|
YaccConstructor/pyformlang
|
df640e13524c5d835ddcdedf25d8246fc73d7b88
|
[
"MIT"
] | null | null | null |
pyformlang/finite_automaton/tests/test_epsilon_nfa.py
|
YaccConstructor/pyformlang
|
df640e13524c5d835ddcdedf25d8246fc73d7b88
|
[
"MIT"
] | 1
|
2020-07-22T11:40:30.000Z
|
2020-07-22T11:40:30.000Z
|
pyformlang/finite_automaton/tests/test_epsilon_nfa.py
|
YaccConstructor/pyformlang
|
df640e13524c5d835ddcdedf25d8246fc73d7b88
|
[
"MIT"
] | null | null | null |
"""
Tests for epsilon NFA
"""
import unittest
import networkx
from pyformlang.finite_automaton import EpsilonNFA, State, Symbol, Epsilon
from ..regexable import Regexable
class TestEpsilonNFA(unittest.TestCase):
""" Tests epsilon NFA """
def test_eclose(self):
""" Test of the epsilon closure """
states = [State(x) for x in range(8)]
epsilon = Epsilon()
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa = EpsilonNFA()
enfa.add_transition(states[1], epsilon, states[2])
enfa.add_transition(states[1], epsilon, states[4])
enfa.add_transition(states[2], epsilon, states[3])
enfa.add_transition(states[3], epsilon, states[6])
enfa.add_transition(states[5], epsilon, states[7])
enfa.add_transition(states[4], symb_a, states[5])
enfa.add_transition(states[5], symb_b, states[6])
self.assertEqual(len(enfa.eclose(states[1])), 5)
self.assertEqual(len(enfa.eclose(states[2])), 3)
self.assertEqual(len(enfa.eclose(states[5])), 2)
self.assertEqual(len(enfa.eclose(states[6])), 1)
self.assertEqual(len(list(enfa._transition_function.get_edges())), 7)
self.assertEqual(enfa.remove_transition(states[1], epsilon, states[4]),
1)
self.assertFalse(enfa.is_deterministic())
def test_accept(self):
""" Test the acceptance """
self._perform_tests_digits(False)
def test_copy(self):
""" Tests the copy of enda """
self._perform_tests_digits(True)
def _perform_tests_digits(self, copy=False):
enfa, digits, epsilon, plus, minus, point = get_digits_enfa()
if copy:
enfa = enfa.copy()
self.assertTrue(enfa.accepts([plus, digits[1], point, digits[9]]))
self.assertTrue(enfa.accepts([minus, digits[1], point, digits[9]]))
self.assertTrue(enfa.accepts([digits[1], point, digits[9]]))
self.assertTrue(enfa.accepts([digits[1], point]))
self.assertTrue(enfa.accepts([digits[1], point, epsilon]))
self.assertTrue(enfa.accepts([point, digits[9]]))
self.assertFalse(enfa.accepts([point]))
self.assertFalse(enfa.accepts([plus]))
self.assertFalse(enfa.is_deterministic())
self.assertTrue(enfa.accepts(["+", digits[1], ".", digits[9]]))
self.assertTrue(enfa.accepts(["-", digits[1], ".", digits[9]]))
self.assertTrue(enfa.accepts([digits[1], ".", digits[9]]))
self.assertTrue(enfa.accepts([digits[1], "."]))
self.assertTrue(enfa.accepts([digits[1], ".", "epsilon"]))
self.assertTrue(enfa.accepts([".", digits[9]]))
self.assertFalse(enfa.accepts(["."]))
self.assertFalse(enfa.accepts(["+"]))
def test_deterministic(self):
""" Tests the transformation to a dfa"""
enfa, digits, _, plus, minus, point = get_digits_enfa()
dfa = enfa.to_deterministic()
self.assertTrue(dfa.is_deterministic())
self.assertEqual(len(dfa.states), 6)
self.assertEqual(dfa.get_number_transitions(), 65)
self.assertEqual(len(dfa.final_states), 2)
self.assertTrue(dfa.accepts([plus, digits[1], point, digits[9]]))
self.assertTrue(dfa.accepts([minus, digits[1], point, digits[9]]))
self.assertTrue(dfa.accepts([digits[1], point, digits[9]]))
self.assertTrue(dfa.accepts([digits[1], point]))
self.assertTrue(dfa.accepts([digits[1], point]))
self.assertTrue(dfa.accepts([point, digits[9]]))
self.assertFalse(dfa.accepts([point]))
self.assertFalse(dfa.accepts([plus]))
    def test_remove_state(self):
        """ Tests the removal of a state """
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
state2 = State(2)
symb02 = Symbol("a+b")
symb01 = Symbol("c*")
symb11 = Symbol("b+(c.d)")
symb12 = Symbol("a.b.c")
enfa.add_start_state(state0)
enfa.add_final_state(state2)
enfa.add_transition(state0, symb01, state1)
enfa.add_transition(state0, symb02, state2)
enfa.add_transition(state1, symb11, state1)
enfa.add_transition(state1, symb12, state2)
enfa.remove_all_basic_states()
self.assertEqual(enfa.get_number_transitions(), 1)
self.assertEqual(len(enfa.states), 2)
def test_to_regex(self):
""" Tests the transformation to regex """
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
state2 = State(2)
symb_e = Symbol("e")
symb_f = Symbol("f")
symb_g = Symbol("g")
enfa.add_start_state(state0)
enfa.add_final_state(state2)
enfa.add_transition(state0, symb_e, state1)
enfa.add_transition(state1, symb_f, state2)
enfa.add_transition(state0, symb_g, state2)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertTrue(enfa2.accepts([symb_e, symb_f]))
self.assertTrue(enfa2.accepts([symb_g]))
self.assertFalse(enfa2.accepts([]))
self.assertFalse(enfa2.accepts([symb_e]))
self.assertFalse(enfa2.accepts([symb_f]))
enfa.add_final_state(state0)
with self.assertRaises(ValueError) as _:
enfa.get_regex_simple()
regex = enfa.to_regex()
enfa3 = regex.to_epsilon_nfa()
self.assertTrue(enfa3.accepts([symb_e, symb_f]))
self.assertTrue(enfa3.accepts([symb_g]))
self.assertTrue(enfa3.accepts([]))
self.assertFalse(enfa3.accepts([symb_e]))
self.assertFalse(enfa3.accepts([symb_f]))
enfa.remove_start_state(state0)
regex = enfa.to_regex()
enfa3 = regex.to_epsilon_nfa()
self.assertFalse(enfa3.accepts([symb_e, symb_f]))
self.assertFalse(enfa3.accepts([symb_g]))
self.assertFalse(enfa3.accepts([]))
self.assertFalse(enfa3.accepts([symb_e]))
self.assertFalse(enfa3.accepts([symb_f]))
enfa.add_start_state(state0)
enfa.add_transition(state0, symb_f, state0)
regex = enfa.to_regex()
enfa3 = regex.to_epsilon_nfa()
self.assertTrue(enfa3.accepts([symb_e, symb_f]))
self.assertTrue(enfa3.accepts([symb_f, symb_e, symb_f]))
self.assertTrue(enfa3.accepts([symb_g]))
self.assertTrue(enfa3.accepts([symb_f, symb_f, symb_g]))
self.assertTrue(enfa3.accepts([]))
self.assertFalse(enfa3.accepts([symb_e]))
self.assertTrue(enfa3.accepts([symb_f]))
def test_to_regex2(self):
""" Tests the transformation to regex """
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
symb_a = Symbol("0")
symb_b = Symbol("1")
enfa.add_start_state(state0)
enfa.add_final_state(state1)
enfa.add_transition(state0, symb_a, state0)
enfa.add_transition(state0, symb_a, state1)
enfa.add_transition(state1, symb_b, state0)
enfa.add_transition(state1, symb_b, state1)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertTrue(enfa2.accepts([symb_a]))
self.assertTrue(enfa2.accepts([symb_a, symb_a]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b, symb_b]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a, symb_b]))
self.assertFalse(enfa2.accepts([symb_b]))
def test_to_regex3(self):
""" Tests the transformation to regex """
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
symb_a = Symbol("0")
symb_b = Symbol("1")
enfa.add_start_state(state0)
enfa.add_final_state(state1)
enfa.add_transition(state0, symb_a, state0)
enfa.add_transition(state1, symb_b, state0)
enfa.add_transition(state1, symb_b, state1)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertFalse(enfa2.accepts([symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a, symb_b]))
self.assertFalse(enfa2.accepts([symb_b]))
epsilon = Epsilon()
enfa.add_transition(state0, epsilon, state1)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertTrue(enfa.accepts([]))
self.assertTrue(enfa.accepts([symb_a]))
self.assertTrue(enfa2.accepts([symb_a]))
self.assertTrue(enfa2.accepts([symb_a, symb_a]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b, symb_b]))
self.assertTrue(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a, symb_b]))
self.assertTrue(enfa2.accepts([symb_b]))
self.assertTrue(enfa2.accepts([]))
enfa.remove_transition(state0, symb_a, state0)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertFalse(enfa2.accepts([symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a]))
self.assertFalse(enfa2.accepts([symb_a, symb_a, symb_b, symb_b, symb_a, symb_b]))
self.assertTrue(enfa2.accepts([symb_b]))
self.assertTrue(enfa2.accepts([]))
enfa.remove_transition(state1, symb_b, state1)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertTrue(enfa2.accepts([symb_b, symb_b]))
enfa.add_transition(state0, symb_a, state0)
regex = enfa.to_regex()
enfa2 = regex.to_epsilon_nfa()
self.assertTrue(enfa2.accepts([symb_a, symb_b]))
def test_union(self):
""" Tests the union of two epsilon NFA """
with self.assertRaises(NotImplementedError) as _:
Regexable().to_regex()
enfa0 = get_enfa_example0()
enfa1 = get_enfa_example1()
symb_a = Symbol("a")
symb_b = Symbol("b")
symb_c = Symbol("c")
enfa = enfa0.union(enfa1)
self.assertTrue(enfa.accepts([symb_b]))
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_c]))
self.assertFalse(enfa.accepts([symb_a]))
self.assertFalse(enfa.accepts([]))
def test_concatenate(self):
""" Tests the concatenation of two epsilon NFA """
enfa0 = get_enfa_example0()
enfa1 = get_enfa_example1()
symb_a = Symbol("a")
symb_b = Symbol("b")
symb_c = Symbol("c")
enfa = enfa0.concatenate(enfa1)
self.assertTrue(enfa.accepts([symb_b, symb_c]))
self.assertTrue(enfa.accepts([symb_a, symb_b, symb_c]))
self.assertTrue(enfa.accepts([symb_a, symb_a, symb_b, symb_c]))
self.assertFalse(enfa.accepts([symb_c]))
self.assertFalse(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([]))
def test_kleene(self):
""" Tests the kleene star of an epsilon NFA """
enfa0 = get_enfa_example0()
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa = enfa0.kleene_star()
self.assertTrue(enfa.accepts([symb_b]))
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_a, symb_b, symb_a, symb_b]))
self.assertTrue(enfa.accepts([]))
self.assertTrue(enfa.accepts([symb_b, symb_b]))
self.assertFalse(enfa.accepts([symb_a]))
self.assertFalse(enfa.accepts([symb_a, symb_b, symb_a]))
def test_complement(self):
""" Tests the complement operation """
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
state2 = State(2)
symb_a = Symbol("a")
enfa.add_start_state(state0)
enfa.add_final_state(state2)
enfa.add_transition(state0, Epsilon(), state1)
enfa.add_transition(state1, symb_a, state2)
enfa_comp = enfa.get_complement()
self.assertFalse(enfa_comp.accepts([symb_a]))
def test_intersection(self):
""" Tests the intersection of two enfas """
enfa0 = get_enfa_example0()
symb_a = Symbol("a")
symb_b = Symbol("b")
eps = Epsilon()
enfa1 = EpsilonNFA()
state0 = State(10)
state1 = State(11)
state2 = State(12)
state3 = State(13)
state4 = State(14)
enfa1.add_start_state(state0)
enfa1.add_final_state(state3)
enfa1.add_final_state(state4)
enfa1.add_transition(state0, eps, state1)
enfa1.add_transition(state1, symb_a, state2)
enfa1.add_transition(state2, eps, state3)
enfa1.add_transition(state3, symb_b, state4)
enfa = enfa0.get_intersection(enfa1)
self.assertEqual(len(enfa.start_states), 4)
self.assertEqual(len(enfa.final_states), 2)
self.assertEqual(len(enfa.symbols), 2)
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertFalse(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([symb_a]))
self.assertFalse(enfa.accepts([]))
self.assertFalse(enfa.accepts([symb_a, symb_a, symb_b]))
def test_difference(self):
""" Tests the intersection of two languages """
enfa0 = get_enfa_example0()
enfa1 = get_enfa_example1()
symb_a = Symbol("a")
symb_b = Symbol("b")
symb_c = Symbol("c")
enfa = enfa0.get_difference(enfa1)
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([symb_c]))
self.assertFalse(enfa.accepts([]))
enfa2 = EpsilonNFA()
state0 = State(0)
enfa2.add_start_state(state0)
enfa2.add_final_state(state0)
enfa2.add_transition(state0, symb_b, state0)
enfa = enfa0.get_difference(enfa2)
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertFalse(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([symb_c]))
def test_reverse(self):
""" Test the reversal of a language """
enfa0 = get_enfa_example0()
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa = enfa0.reverse()
self.assertTrue(enfa.accepts([symb_b]))
self.assertTrue(enfa.accepts([symb_b, symb_a]))
self.assertTrue(enfa.accepts([symb_b, symb_a, symb_a]))
self.assertFalse(enfa.accepts([symb_a, symb_b]))
self.assertFalse(enfa.accepts([symb_a]))
self.assertFalse(enfa.accepts([]))
def test_empty(self):
""" Tests the emptiness of a finite automaton """
self.assertFalse(get_enfa_example0().is_empty())
self.assertFalse(get_enfa_example1().is_empty())
enfa = EpsilonNFA()
state0 = State(0)
enfa.add_start_state(state0)
self.assertTrue(enfa.is_empty())
state1 = State(1)
symb_a = Symbol('a')
enfa.add_transition(state0, symb_a, state1)
self.assertTrue(enfa.is_empty())
enfa.add_final_state(state1)
self.assertFalse(enfa.is_empty())
def test_minimization(self):
""" Tests the minimization algorithm """
enfa = get_enfa_example0_bis()
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa = enfa.minimize()
self.assertTrue(enfa.is_deterministic())
self.assertEqual(len(enfa.states), 2)
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_a, symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([symb_a]))
enfa = get_example_non_minimal()
enfa = enfa.minimize()
self.assertTrue(enfa.is_deterministic())
self.assertEqual(len(enfa.states), 3)
self.assertTrue(enfa.accepts([symb_a, symb_b]))
self.assertTrue(enfa.accepts([symb_a, symb_a, symb_b]))
self.assertFalse(enfa.accepts([symb_b]))
self.assertFalse(enfa.accepts([symb_a]))
enfa = EpsilonNFA()
enfa = enfa.minimize()
self.assertTrue(enfa.is_deterministic())
self.assertEqual(len(enfa.states), 0)
self.assertFalse(enfa.accepts([]))
def test_to_fst(self):
""" Tests to turn a ENFA into a FST """
enfa = EpsilonNFA()
fst = enfa.to_fst()
self.assertEqual(len(fst.states), 0)
self.assertEqual(len(fst.final_states), 0)
self.assertEqual(len(fst.start_states), 0)
self.assertEqual(len(fst.input_symbols), 0)
self.assertEqual(len(fst.output_symbols), 0)
self.assertEqual(fst.get_number_transitions(), 0)
s0 = State("q0")
s0bis = State("q0bis")
enfa.add_start_state(s0)
enfa.add_start_state(s0bis)
fst = enfa.to_fst()
self.assertEqual(len(fst.states), 2)
self.assertEqual(len(fst.final_states), 0)
self.assertEqual(len(fst.start_states), 2)
self.assertEqual(len(fst.input_symbols), 0)
self.assertEqual(len(fst.output_symbols), 0)
self.assertEqual(fst.get_number_transitions(), 0)
sfinal = State("qfinal")
sfinalbis = State("qfinalbis")
enfa.add_final_state(sfinal)
enfa.add_final_state(sfinalbis)
fst = enfa.to_fst()
self.assertEqual(len(fst.states), 4)
self.assertEqual(len(fst.final_states), 2)
self.assertEqual(len(fst.start_states), 2)
self.assertEqual(len(fst.input_symbols), 0)
self.assertEqual(len(fst.output_symbols), 0)
self.assertEqual(fst.get_number_transitions(), 0)
enfa.add_transition(s0, Symbol("a"), sfinal)
enfa.add_transition(sfinal, Symbol("b"), sfinal)
enfa.add_transition(s0, Symbol("c"), sfinalbis)
fst = enfa.to_fst()
self.assertEqual(len(fst.states), 4)
self.assertEqual(len(fst.final_states), 2)
self.assertEqual(len(fst.start_states), 2)
self.assertEqual(len(fst.input_symbols), 3)
self.assertEqual(len(fst.output_symbols), 3)
self.assertEqual(fst.get_number_transitions(), 3)
enfa.add_transition(s0, Epsilon(), sfinalbis)
fst = enfa.to_fst()
self.assertEqual(len(fst.states), 4)
self.assertEqual(len(fst.final_states), 2)
self.assertEqual(len(fst.start_states), 2)
self.assertEqual(len(fst.input_symbols), 3)
self.assertEqual(len(fst.output_symbols), 3)
self.assertEqual(fst.get_number_transitions(), 4)
trans0 = list(fst.translate(["a"]))
self.assertEqual(trans0, [["a"]])
trans0 = list(fst.translate(["a", "b", "b"]))
self.assertEqual(trans0, [["a", "b", "b"]])
trans0 = list(fst.translate(["b", "b"]))
self.assertEqual(trans0, [])
trans0 = list(fst.translate(["c"]))
self.assertEqual(trans0, [["c"]])
def test_cyclic(self):
enfa = EpsilonNFA()
state0 = State(0)
state1 = State(1)
symb_a = Symbol('a')
enfa.add_start_state(state0)
enfa.add_transition(state0, symb_a, state1)
enfa.add_transition(state1, Epsilon(), state0)
self.assertFalse(enfa.is_acyclic())
def test_export_networkx(self):
enfa = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa.add_start_state(state0)
enfa.add_final_state(state1)
enfa.add_transition(state0, symb_a, state1)
enfa.add_transition(state1, Epsilon(), state0)
graph = enfa.to_networkx()
self.assertTrue(isinstance(graph, networkx.MultiDiGraph))
self.assertTrue("0" in graph)
self.assertTrue(("0", 1) in graph.edges)
self.assertIn("a", [x["label"] for x in graph["0"][1].values()])
self.assertTrue(graph.nodes["0"]["is_start"])
self.assertFalse(graph.nodes["0"]["is_final"])
self.assertFalse(graph.nodes[1]["is_start"])
self.assertTrue(graph.nodes[1]["is_final"])
enfa.write_as_dot("enfa.dot")
def test_import_networkx(self):
enfa = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa.add_start_state(state0)
enfa.add_final_state(state1)
enfa.add_transition(state0, symb_a, state1)
enfa.add_transition(state1, Epsilon(), state0)
graph = enfa.to_networkx()
enfa_from_nx = EpsilonNFA.from_networkx(graph)
self.assertTrue(enfa_from_nx.accepts([symb_a]))
self.assertTrue(enfa_from_nx.accepts([symb_a, symb_a]))
self.assertFalse(enfa_from_nx.accepts([]))
def test_iter(self):
enfa = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa.add_start_state(state0)
enfa.add_final_state(state1)
enfa.add_transition(state0, symb_a, state1)
enfa.add_transition(state1, Epsilon(), state0)
counter = 0
for s_from, symb, s_to in enfa:
counter += 1
self.assertIn((s_from, symb, s_to), enfa)
self.assertNotIn((state1, symb_a, state1), enfa)
self.assertIn(("0", "a", 1), enfa)
self.assertEqual(counter, 2)
def test_equivalent(self):
enfa0 = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa0.add_start_state(state0)
enfa0.add_final_state(state1)
enfa0.add_transition(state0, symb_a, state1)
enfa0.add_transition(state1, Epsilon(), state0)
enfa1 = EpsilonNFA()
enfa1.add_start_state(state0)
enfa1.add_final_state(state1)
enfa1.add_transition(state0, symb_a, state1)
enfa1.add_transition(state1, symb_a, state1)
self.assertTrue(enfa0.is_equivalent_to(enfa1))
def test_non_equivalent(self):
enfa0 = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa0.add_start_state(state0)
enfa0.add_final_state(state1)
enfa0.add_transition(state0, symb_a, state1)
enfa0.add_transition(state1, Epsilon(), state0)
enfa1 = EpsilonNFA()
enfa1.add_start_state(state0)
enfa1.add_final_state(state1)
enfa1.add_transition(state0, symb_a, state1)
enfa1.add_transition(state1, symb_a, state0)
self.assertFalse(enfa0.is_equivalent_to(enfa1))
def test_get_as_dict(self):
enfa0 = EpsilonNFA()
state0 = State("0")
state1 = State(1)
symb_a = Symbol('a')
enfa0.add_start_state(state0)
enfa0.add_final_state(state1)
enfa0.add_transition(state0, symb_a, state1)
enfa0.add_transition(state1, Epsilon(), state0)
d_enfa = enfa0.to_dict()
self.assertIn(state0, d_enfa)
self.assertIn(symb_a, d_enfa[state0])
self.assertIn(state1, d_enfa[state0][symb_a])
def get_digits_enfa():
""" An epsilon NFA to recognize digits """
epsilon = Epsilon()
plus = Symbol("+")
minus = Symbol("-")
point = Symbol(".")
digits = [Symbol(x) for x in range(10)]
states = [State("q" + str(x)) for x in range(6)]
enfa = EpsilonNFA()
enfa.add_start_state(states[0])
enfa.add_final_state(states[5])
enfa.add_transition(states[0], epsilon, states[1])
enfa.add_transition(states[0], plus, states[1])
enfa.add_transition(states[0], minus, states[1])
for digit in digits:
enfa.add_transition(states[1], digit, states[1])
enfa.add_transition(states[1], digit, states[4])
enfa.add_transition(states[2], digit, states[3])
enfa.add_transition(states[3], digit, states[3])
enfa.add_transition(states[1], point, states[2])
enfa.add_transition(states[4], point, states[3])
enfa.add_transition(states[3], epsilon, states[5])
return enfa, digits, epsilon, plus, minus, point
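# Illustrative sketch (not part of the original test suite): the digits ENFA
# above should accept an optionally signed decimal such as "+1.5". The helper
# below is never called by the tests; it only documents the intended usage.
def _digits_enfa_usage_sketch():
    enfa, digits, epsilon, plus, minus, point = get_digits_enfa()
    # "+1.5" corresponds to the symbol sequence [plus, Symbol(1), point, Symbol(5)]
    assert enfa.accepts([plus, digits[1], point, digits[5]])
    # A bare sign with no digits should be rejected
    assert not enfa.accepts([plus])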
def get_enfa_example0():
""" Gives an example ENFA
Accepts a*b
"""
enfa0 = EpsilonNFA()
state0 = State(0)
state1 = State(1)
state2 = State(2)
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa0.add_start_state(state0)
enfa0.add_final_state(state2)
enfa0.add_transition(state0, symb_a, state0)
enfa0.add_transition(state0, Epsilon(), state1)
enfa0.add_transition(state1, symb_b, state2)
return enfa0
def get_enfa_example1():
""" Gives and example ENFA
Accepts c
"""
enfa1 = EpsilonNFA()
state2 = State(2)
state3 = State(3)
symb_c = Symbol("c")
enfa1.add_start_state(state2)
enfa1.add_final_state(state3)
enfa1.add_transition(state2, symb_c, state3)
return enfa1
def get_enfa_example0_bis():
""" A non minimal NFA, equivalent to example0 """
enfa0 = EpsilonNFA()
state3 = State(3)
state4 = State(4)
state0 = State(0)
state1 = State(1)
state2 = State(2)
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa0.add_start_state(state0)
enfa0.add_final_state(state2)
enfa0.add_final_state(state4)
enfa0.add_transition(state0, symb_a, state0)
enfa0.add_transition(state0, Epsilon(), state1)
enfa0.add_transition(state1, symb_b, state2)
# New part
enfa0.add_transition(state0, Epsilon(), state3)
enfa0.add_transition(state3, symb_a, state3)
enfa0.add_transition(state3, symb_b, state4)
return enfa0
def get_example_non_minimal():
""" A non minimal example a.a*.b"""
enfa0 = EpsilonNFA()
state0 = State(0)
state3 = State(3)
state4 = State(4)
state5 = State(5)
state6 = State(6)
state1 = State(1)
state2 = State(2)
symb_a = Symbol("a")
symb_b = Symbol("b")
enfa0.add_start_state(state0)
enfa0.add_final_state(state3)
enfa0.add_final_state(state4)
enfa0.add_transition(state0, symb_a, state1)
enfa0.add_transition(state1, symb_a, state2)
enfa0.add_transition(state2, symb_a, state5)
enfa0.add_transition(state5, symb_a, state6)
enfa0.add_transition(state6, symb_a, state1)
enfa0.add_transition(state1, symb_b, state3)
enfa0.add_transition(state2, symb_b, state4)
enfa0.add_transition(state5, symb_b, state3)
enfa0.add_transition(state6, symb_b, state4)
return enfa0
| 39.23486
| 89
| 0.630035
|
29083c95c37c729da72bea259c7be42f433d9c31
| 1,566
|
py
|
Python
|
znail/ui/api/disciplines/packet_loss.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 4
|
2019-02-20T09:40:49.000Z
|
2019-11-19T21:18:44.000Z
|
znail/ui/api/disciplines/packet_loss.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 4
|
2019-03-11T15:24:17.000Z
|
2019-06-14T14:31:01.000Z
|
znail/ui/api/disciplines/packet_loss.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 2
|
2019-03-05T19:04:06.000Z
|
2019-09-08T13:53:10.000Z
|
import flask_restplus
import marshmallow
from znail.netem.disciplines import PacketLoss
from znail.netem.tc import Tc
from znail.ui import api
from znail.ui.util import NoneAttributes, json_request_handler
class PacketLossSchema(marshmallow.Schema):
percent = marshmallow.fields.Float(required=True, validate=lambda n: n >= 0 and n <= 100)
packet_loss_schema = PacketLossSchema()
packet_loss_model = api.model(
'PacketLoss', {
'percent': flask_restplus.fields.Float(min=0, max=100)
})
@api.route('/api/disciplines/packet_loss')
class PacketLossResource(flask_restplus.Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tc = Tc.adapter('eth1')
@api.response(200, 'Success', packet_loss_model)
def get(self):
loss = self.tc.disciplines.get('loss', NoneAttributes)
return {'percent': loss.percent}, 200
@json_request_handler(packet_loss_schema, packet_loss_model)
def post(self, data):
disciplines = self.tc.disciplines
disciplines['loss'] = PacketLoss(data['percent'])
self.tc.apply(disciplines)
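# Usage note (illustrative, not from the original module): a client would set
# 2% packet loss on eth1 by POSTing JSON matching PacketLossSchema, e.g.
#   POST /api/disciplines/packet_loss         {"percent": 2.0}
# and remove the discipline again with
#   POST /api/disciplines/packet_loss/clear   {}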
@api.route('/api/disciplines/packet_loss/clear')
class ClearPacketLossResource(flask_restplus.Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tc = Tc.adapter('eth1')
@json_request_handler()
def post(self, data):
disciplines = self.tc.disciplines
if 'loss' in disciplines:
del disciplines['loss']
self.tc.apply(disciplines)
| 29.54717
| 93
| 0.693487
|
8872ed7a466ebc245183bf28a4c9a492d71048d7
| 4,628
|
py
|
Python
|
ckan/lib/dictization/__init__.py
|
derhecht/ckan
|
27f0b8cf52bb2140f16e2f09f3c42ed15e7e9b99
|
[
"BSD-3-Clause"
] | 6
|
2015-11-09T00:44:51.000Z
|
2019-11-21T14:56:01.000Z
|
ckan/lib/dictization/__init__.py
|
derhecht/ckan
|
27f0b8cf52bb2140f16e2f09f3c42ed15e7e9b99
|
[
"BSD-3-Clause"
] | 39
|
2015-02-18T17:32:23.000Z
|
2022-03-11T18:03:36.000Z
|
ckan/lib/dictization/__init__.py
|
derhecht/ckan
|
27f0b8cf52bb2140f16e2f09f3c42ed15e7e9b99
|
[
"BSD-3-Clause"
] | 17
|
2015-03-13T18:05:05.000Z
|
2020-11-06T13:55:32.000Z
|
# encoding: utf-8
import datetime
from sqlalchemy.orm import class_mapper
import sqlalchemy
from six import text_type
from ckan.model.core import State
try:
RowProxy = sqlalchemy.engine.result.RowProxy
except AttributeError:
RowProxy = sqlalchemy.engine.base.RowProxy
try:
long # Python 2
except NameError:
long = int # Python 3
# NOTE
# The functions in this file contain very generic methods for dictizing objects
# and saving dictized objects. If a specialised use is needed please do NOT extend
# these functions. Copy code from here as needed.
legacy_dict_sort = lambda x: (len(x), dict.items(x))
def table_dictize(obj, context, **kw):
'''Get any model object and represent it as a dict'''
result_dict = {}
model = context["model"]
session = model.Session
if isinstance(obj, RowProxy):
fields = obj.keys()
else:
ModelClass = obj.__class__
table = class_mapper(ModelClass).mapped_table
fields = [field.name for field in table.c]
for field in fields:
name = field
if name in ('current', 'expired_timestamp', 'expired_id'):
continue
if name in ('continuity_id', 'revision_id'):
continue
value = getattr(obj, name)
if value is None:
result_dict[name] = value
elif isinstance(value, dict):
result_dict[name] = value
elif isinstance(value, int):
result_dict[name] = value
elif isinstance(value, long):
result_dict[name] = value
elif isinstance(value, datetime.datetime):
result_dict[name] = value.isoformat()
elif isinstance(value, list):
result_dict[name] = value
else:
result_dict[name] = text_type(value)
result_dict.update(kw)
## HACK: optimisation so that metadata_modified is computed faster.
context['metadata_modified'] = max(result_dict.get('revision_timestamp', ''),
context.get('metadata_modified', ''))
return result_dict
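# Illustrative note (not from the original module): for a typical mapped row,
# table_dictize() above returns a plain dict keyed by column name, with
# datetimes rendered via isoformat(), other non-trivial values coerced to
# text_type, and any extra keyword arguments merged in at the end.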
def obj_list_dictize(obj_list, context, sort_key=legacy_dict_sort):
'''Get a list of model objects and represent it as a list of dicts'''
result_list = []
active = context.get('active', True)
for obj in obj_list:
if context.get('with_capacity'):
obj, capacity = obj
dictized = table_dictize(obj, context, capacity=capacity)
else:
dictized = table_dictize(obj, context)
if active and obj.state != 'active':
continue
result_list.append(dictized)
return sorted(result_list, key=sort_key)
def obj_dict_dictize(obj_dict, context, sort_key=lambda x:x):
'''Get a dict whose values are model objects
and represent it as a list of dicts'''
result_list = []
for key, obj in obj_dict.items():
result_list.append(table_dictize(obj, context))
return sorted(result_list, key=sort_key)
def get_unique_constraints(table, context):
'''Get a list of unique constraints for a sqlalchemy table'''
list_of_constraints = []
for constraint in table.constraints:
if isinstance(constraint, sqlalchemy.UniqueConstraint):
columns = [column.name for column in constraint.columns]
list_of_constraints.append(columns)
return list_of_constraints
def table_dict_save(table_dict, ModelClass, context, extra_attrs=()):
'''Given a dict and a model class, update or create a sqlalchemy object.
This will use an existing object if "id" is supplied OR if any unique
constraints are met, e.g. supplying just a tag name will fetch that tag object.
'''
model = context["model"]
session = context["session"]
table = class_mapper(ModelClass).mapped_table
obj = None
id = table_dict.get("id")
if id:
obj = session.query(ModelClass).get(id)
if not obj:
unique_constraints = get_unique_constraints(table, context)
for constraint in unique_constraints:
params = dict((key, table_dict.get(key)) for key in constraint)
obj = session.query(ModelClass).filter_by(**params).first()
if obj:
if 'name' in params and getattr(obj, 'state', None) == State.DELETED:
obj.name = obj.id
obj = None
else:
break
if not obj:
obj = ModelClass()
obj.from_dict(table_dict)
for a in extra_attrs:
if a in table_dict:
setattr(obj, a, table_dict[a])
session.add(obj)
return obj
| 29.291139
| 85
| 0.638073
|
76d61acf90d2dbc79be30ebb7740d560ef46cb63
| 7,438
|
py
|
Python
|
util/cpt_upgrader.py
|
volnxebec/CC_Fused
|
e2b805e3475bd275409379c41eaeeb1a565cbdef
|
[
"BSD-3-Clause"
] | 5
|
2017-03-11T05:02:47.000Z
|
2020-10-29T07:16:33.000Z
|
util/cpt_upgrader.py
|
volnxebec/CC_Fused
|
e2b805e3475bd275409379c41eaeeb1a565cbdef
|
[
"BSD-3-Clause"
] | 4
|
2015-01-13T18:27:31.000Z
|
2015-01-13T18:27:57.000Z
|
util/cpt_upgrader.py
|
volnxebec/CC_Fused
|
e2b805e3475bd275409379c41eaeeb1a565cbdef
|
[
"BSD-3-Clause"
] | 3
|
2020-01-17T02:25:00.000Z
|
2021-09-18T21:51:16.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
#
# This Python code is used to migrate checkpoints that were created in one
# version of the simulator to a newer version. As features are added or bugs are
# fixed some of the state that needs to be checkpointed can change. If you have
# many historic checkpoints that you use, manually editing them to fix them is
# both time consuming and error-prone.
# This script provides a way to migrate checkpoints to the newer repository in
# a programmatic way. It can be imported into another script or used on the
# command line. From the command line the script will either migrate every
# checkpoint it finds recursively (-r option) or a single checkpoint. When a
# change is made to the gem5 repository that breaks previous checkpoints a
# from_N() method should be implemented here and the gem5CheckpointVersion
# variable in src/sim/serialize.hh should be incremented. For each version
# between the checkpoints current version and the new version the from_N()
# method will be run, passing in a ConfigParser object which contains the open
# file. As these operations can be isa specific the method can verify the isa
# and use regexes to find the correct sections that need to be updated.
import ConfigParser
import sys, os
import os.path as osp
def from_0(cpt):
pass
# An example of a translator
def from_1(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
# Search for all the execution contexts
if re.search('.*sys.*\.cpu.*\.x.\..*', sec):
# Update each one
mr = cpt.get(sec, 'miscRegs').split()
#mr.insert(21,0)
#mr.insert(26,0)
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
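# Illustrative sketch only: a hypothetical from_2() would follow the same
# pattern as from_1() above -- inspect the ConfigParser object, rewrite the
# affected sections, then be appended to the migrations list below (with
# gem5CheckpointVersion bumped in src/sim/serialize.hh), e.g. roughly:
#
# def from_2(cpt):
#     for sec in cpt.sections():
#         if sec.endswith('.newUnit'):        # hypothetical section name
#             cpt.set(sec, 'newField', '0')   # hypothetical default value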
migrations = []
migrations.append(from_0)
migrations.append(from_1)
verbose_print = False
def verboseprint(*args):
if not verbose_print:
return
for arg in args:
print arg,
print
def process_file(path, **kwargs):
if not osp.isfile(path):
import errno
raise IOError(errno.ENOENT, "No such file", path)
verboseprint("Processing file %s...." % path)
if kwargs.get('backup', True):
import shutil
shutil.copyfile(path, path + '.bak')
cpt = ConfigParser.SafeConfigParser()
# gem5 is case sensitive with parameters
cpt.optionxform = str
# Read the current data
cpt_file = file(path, 'r')
cpt.readfp(cpt_file)
cpt_file.close()
# Make sure we know what we're starting from
if not cpt.has_option('root','cpt_ver'):
raise LookupError("cannot determine version of checkpoint")
cpt_ver = cpt.getint('root','cpt_ver')
# If the current checkpoint is longer than the migrations list, we have a problem
# and someone didn't update this file
if cpt_ver > len(migrations):
raise ValueError("upgrade script is too old and needs updating")
verboseprint("\t...file is at version %#x" % cpt_ver)
if cpt_ver == len(migrations):
verboseprint("\t...nothing to do")
return
# Walk through every function from now until the end fixing the checkpoint
for v in xrange(cpt_ver,len(migrations)):
verboseprint("\t...migrating to version %#x" % (v + 1))
migrations[v](cpt)
cpt.set('root','cpt_ver', str(v + 1))
# Write the old data back
verboseprint("\t...completed")
cpt.write(file(path, 'w'))
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] <filename or directory>")
parser.add_option("-r", "--recurse", action="store_true",
help="Recurse through all subdirectories modifying "\
"each checkpoint that is found")
parser.add_option("-N", "--no-backup", action="store_false",
dest="backup", default=True,
help="Do no backup each checkpoint before modifying it")
parser.add_option("-v", "--verbose", action="store_true",
help="Print out debugging information as")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("You must specify a checkpoint file to modify or a "\
"directory of checkpoints to recursively update")
verbose_print = options.verbose
# Deal with shell variables and ~
path = osp.expandvars(osp.expanduser(args[0]))
# Process a single file if we have it
if osp.isfile(path):
process_file(path, **vars(options))
# Process an entire directory
elif osp.isdir(path):
cpt_file = osp.join(path, 'm5.cpt')
if options.recurse:
# Visit every file and see if it matches
for root,dirs,files in os.walk(path):
for name in files:
if name == 'm5.cpt':
process_file(osp.join(root,name), **vars(options))
for dir in dirs:
pass
# Maybe someone passed a cpt.XXXXXXX directory and not m5.cpt
elif osp.isfile(cpt_file):
process_file(cpt_file, **vars(options))
else:
print "Error: checkpoint file not found at in %s " % path,
print "and recurse not specified"
sys.exit(1)
sys.exit(0)
| 39.775401
| 85
| 0.682307
|
147baae6b02fc702c8f6b25ed2c068527a7bc888
| 689
|
py
|
Python
|
setup.py
|
DarthQadir/cardinality_cs110
|
5593251146d08ae215f746302d981e529e799afe
|
[
"MIT"
] | null | null | null |
setup.py
|
DarthQadir/cardinality_cs110
|
5593251146d08ae215f746302d981e529e799afe
|
[
"MIT"
] | null | null | null |
setup.py
|
DarthQadir/cardinality_cs110
|
5593251146d08ae215f746302d981e529e799afe
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="cardinality_cs110",
version="0.0.8",
author="Abdul Qadir",
author_email="abdul.qadir@minerva.kgi",
description="A cardinality estimator using the Flajolet-Martin algorithm",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DarthQadir/cardinality_cs110.git",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
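# Typical workflow (illustrative, not part of the original file): install the
# package locally with `pip install .`, or build an sdist/wheel and upload it
# with twine for a PyPI release.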
| 32.809524
| 79
| 0.667634
|
ccdd6e81e10c818e03b2cda2a631875d2ab95b71
| 41,002
|
py
|
Python
|
lib/sqlalchemy/orm/persistence.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | 2
|
2015-11-07T11:55:45.000Z
|
2017-09-04T07:56:34.000Z
|
lib/sqlalchemy/orm/persistence.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/persistence.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | null | null | null |
# orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.items():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.items():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
has_all_defaults = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(None)
params[col.key] = val
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif col.server_default is not None and \
mapper.base_mapper.eager_defaults:
has_all_defaults = False
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
if mapper.version_id_generator is not False:
val = mapper.version_id_generator(params[col._label])
params[col.key] = val
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.values():
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
if hasdata:
if hasnull:
raise orm_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
if mapper.base_mapper.eager_defaults:
stmt = stmt.return_defaults()
elif mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
list(rec[2].keys()),
bool(rec[5]),
rec[6], rec[7])
):
if \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper_rec._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], list(rec[2].keys()))
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.items():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
if need_version_id:
# TODO: need test coverage for this [ticket:1761]
if connection.dialect.supports_sane_rowcount:
rows = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows += c.rowcount
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults and state.unloaded:
toload_now.extend(state.unloaded)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
prop = mapper._columntoproperty[mapper.version_id_col]
if prop.key in state.unloaded:
toload_now.extend([prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.prefetch_cols
postfetch_cols = result.context.postfetch_cols
returning_cols = result.context.returning_cols
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
mapper._set_state_attr_by_column(state, dict_, col, row[col])
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _do_pre(self):
query = self.query
self.context = context = query._compile_context()
if len(context.statement.froms) != 1 or \
not isinstance(context.statement.froms[0], schema.Table):
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
else:
self.primary_table = context.statement.froms[0]
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
try:
evaluator_compiler = evaluator.EvaluatorCompiler()
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
target_cls = query._mapper_zero().class_
#TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
select_stmt = self.context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
@classmethod
def factory(cls, query, synchronize_session, values):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values)
def _do_exec(self):
update_stmt = sql.update(self.primary_table,
self.context.whereclause, self.values)
self.result = self.query.session.execute(
update_stmt, params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
self.query._no_select_modifiers("delete")
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(delete_stmt,
params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
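# Usage note (illustrative, not from the original module): these BulkUD
# subclasses are normally reached through the Query API rather than directly,
# e.g. roughly, for a hypothetical mapped class User:
#   session.query(User).filter(User.age > 30).update(
#       {"active": False}, synchronize_session="evaluate")
# which BulkUpdate.factory() above resolves to BulkUpdateEvaluate (defined below).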
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
for key, value in self.values.items():
key = _attr_as_key(key)
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
| 37.755064
| 84
| 0.555997
|
9e6955230140b36903b090508845f354931d67d5
| 1,476
|
py
|
Python
|
monailabel/utils/scoring/dice.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | 1
|
2021-07-27T12:45:36.000Z
|
2021-07-27T12:45:36.000Z
|
monailabel/utils/scoring/dice.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | null | null | null |
monailabel/utils/scoring/dice.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | 1
|
2021-07-27T12:45:38.000Z
|
2021-07-27T12:45:38.000Z
|
import logging
import numpy as np
from monai.transforms import LoadImage
from monailabel.interfaces.datastore import Datastore, DefaultLabelTag
from monailabel.interfaces.tasks import ScoringMethod
logger = logging.getLogger(__name__)
class Dice(ScoringMethod):
"""
Compute Dice between the final and the original label tags
"""
def __init__(self):
super().__init__("Compute Dice for predicated label vs submitted")
def __call__(self, request, datastore: Datastore):
loader = LoadImage(image_only=True)
tag_y = request.get("y", DefaultLabelTag.FINAL)
tag_y_pred = request.get("y_pred", DefaultLabelTag.ORIGINAL)
result = {}
for image_id in datastore.list_images():
y_i = datastore.get_label_by_image_id(image_id, tag_y) if tag_y else None
y_pred_i = datastore.get_label_by_image_id(image_id, tag_y_pred) if tag_y_pred else None
if y_i and y_pred_i:
y = loader(datastore.get_label_uri(y_i))
y_pred = loader(datastore.get_label_uri(y_pred_i))
y = y.flatten()
y_pred = y_pred.flatten()
union = np.sum(y) + np.sum(y_pred)
dice = 2.0 * np.sum(y * y_pred) / union if union != 0 else 1
logger.info(f"Dice Score for {image_id} is {dice}")
datastore.update_image_info(image_id, {"dice": dice})
result[image_id] = dice
return result
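# Minimal sketch (not part of the original module): the same Dice formula on
# toy binary masks, to make the computation in __call__ concrete. This helper
# is never called by MONAI Label itself.
def _dice_toy_example():
    y = np.array([1, 1, 0, 0])
    y_pred = np.array([1, 0, 0, 0])
    union = np.sum(y) + np.sum(y_pred)
    # one overlapping voxel out of three foreground voxels -> 2/3
    return 2.0 * np.sum(y * y_pred) / union if union != 0 else 1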
| 33.545455
| 100
| 0.640244
|
f403d9af744e172bc2d6a756449c54a23cd4542f
| 426
|
py
|
Python
|
Flask/app.py
|
Ahmad-Fahad/python-practice
|
bea29e1eeb691b6c457d1e47129f6c308a091f44
|
[
"Apache-2.0"
] | null | null | null |
Flask/app.py
|
Ahmad-Fahad/python-practice
|
bea29e1eeb691b6c457d1e47129f6c308a091f44
|
[
"Apache-2.0"
] | null | null | null |
Flask/app.py
|
Ahmad-Fahad/python-practice
|
bea29e1eeb691b6c457d1e47129f6c308a091f44
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, redirect, url_for
app = Flask(__name__)
@app.route("/")
def welcome():
return "<h1>Welcome to Flask Journey</h1>"
@app.route("/dashboard")
def dashboard():
return "<h2> Welcome to dashboard </h2>"
@app.route("/<name>")
def user(name):
return f"Hy {name} !"
@app.route("/admin")
def admin():
return redirect(url_for("user", name="admin!"))
if __name__ == "__main__":
app.run()
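# Quick sanity check (illustrative, not from the original file): with the app
# running locally, GET / returns the welcome heading, GET /dashboard the
# dashboard heading, GET /<name> greets that name, and GET /admin redirects to
# the user route with name "admin!".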
| 19.363636
| 51
| 0.638498
|
bd9c13a1b70195b31d8147cb019bbe2294c1e94b
| 10,865
|
py
|
Python
|
venv/Lib/site-packages/matplotlib/tests/test_tightlayout.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 4
|
2021-09-13T07:48:44.000Z
|
2021-11-03T13:41:29.000Z
|
venv/Lib/site-packages/matplotlib/tests/test_tightlayout.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/matplotlib/tests/test_tightlayout.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 1
|
2021-09-27T07:12:39.000Z
|
2021-09-27T07:12:39.000Z
|
import warnings
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import matplotlib as mpl
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
from matplotlib.patches import Rectangle
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
@image_comparison(['tight_layout1'], tol=1.9)
def test_tight_layout1():
"""Test tight_layout for a single subplot."""
fig, ax = plt.subplots()
example_plot(ax, fontsize=24)
plt.tight_layout()
@image_comparison(['tight_layout2'])
def test_tight_layout2():
"""Test tight_layout for multiple subplots."""
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(['tight_layout3'])
def test_tight_layout3():
"""Test tight_layout for multiple subplots."""
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
@image_comparison(['tight_layout4'], freetype_version=('2.5.5', '2.6.1'),
tol=0.015)
def test_tight_layout4():
"""Test tight_layout for subplot2grid."""
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(['tight_layout5'])
def test_tight_layout5():
"""Test tight_layout for image."""
ax = plt.subplot()
arr = np.arange(100).reshape((10, 10))
ax.imshow(arr, interpolation="none")
plt.tight_layout()
@image_comparison(['tight_layout6'])
def test_tight_layout6():
"""Test tight_layout for gridspec."""
# This raises warnings since tight layout cannot
# do this fully automatically. But the test is
# correct since the layout is manually edited
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
fig = plt.figure()
gs1 = mpl.gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = mpl.gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.45)
@image_comparison(['tight_layout7'], tol=1.9)
def test_tight_layout7():
# tight layout with left and right titles
fontsize = 24
fig, ax = plt.subplots()
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Left Title', loc='left', fontsize=fontsize)
ax.set_title('Right Title', loc='right', fontsize=fontsize)
plt.tight_layout()
@image_comparison(['tight_layout8'])
def test_tight_layout8():
"""Test automatic use of tight_layout."""
fig = plt.figure()
fig.set_tight_layout({'pad': .1})
ax = fig.add_subplot()
example_plot(ax, fontsize=24)
@image_comparison(['tight_layout9'])
def test_tight_layout9():
# Test tight_layout for non-visible subplots
# GH 8244
f, axarr = plt.subplots(2, 2)
axarr[1][1].set_visible(False)
plt.tight_layout()
def test_outward_ticks():
"""Test automatic use of tight_layout."""
fig = plt.figure()
ax = fig.add_subplot(221)
ax.xaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.xaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
ax.yaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
ax.xaxis.set_ticks([0], minor=True)
ax.yaxis.set_ticks([0], minor=True)
ax = fig.add_subplot(222)
ax.xaxis.set_tick_params(tickdir='in', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='in', length=32, width=3)
ax = fig.add_subplot(223)
ax.xaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax = fig.add_subplot(224)
ax.xaxis.set_tick_params(tickdir='out', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=32, width=3)
plt.tight_layout()
# These values were obtained after visual checking that they correspond
# to a tight layouting that did take the ticks into account.
ans = [[[0.091, 0.607], [0.433, 0.933]],
[[0.579, 0.607], [0.922, 0.933]],
[[0.091, 0.140], [0.433, 0.466]],
[[0.579, 0.140], [0.922, 0.466]]]
for nn, ax in enumerate(fig.axes):
assert_array_equal(np.round(ax.get_position().get_points(), 3),
ans[nn])
def add_offsetboxes(ax, size=10, margin=.1, color='black'):
"""
Surround ax with OffsetBoxes
"""
m, mp = margin, 1+margin
anchor_points = [(-m, -m), (-m, .5), (-m, mp),
(mp, .5), (.5, mp), (mp, mp),
(.5, -m), (mp, -m), (.5, -m)]
for point in anchor_points:
da = DrawingArea(size, size)
background = Rectangle((0, 0), width=size,
height=size,
facecolor=color,
edgecolor='None',
linewidth=0,
antialiased=False)
da.add_artist(background)
anchored_box = AnchoredOffsetbox(
loc='center',
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=point,
bbox_transform=ax.transAxes,
borderpad=0.)
ax.add_artist(anchored_box)
return anchored_box
@image_comparison(['tight_layout_offsetboxes1', 'tight_layout_offsetboxes2'])
def test_tight_layout_offsetboxes():
# 1.
# - Create 4 subplots
# - Plot a diagonal line on them
# - Surround each plot with 7 boxes
# - Use tight_layout
# - See that the squares are included in the tight_layout
# and that the squares in the middle do not overlap
#
# 2.
# - Make the squares around the right side axes invisible
# - See that the invisible squares do not affect the
# tight_layout
rows = cols = 2
colors = ['red', 'blue', 'green', 'yellow']
x = y = [0, 1]
def _subplots():
_, axs = plt.subplots(rows, cols)
axs = axs.flat
for ax, color in zip(axs, colors):
ax.plot(x, y, color=color)
add_offsetboxes(ax, 20, color=color)
return axs
# 1.
axs = _subplots()
plt.tight_layout()
# 2.
axs = _subplots()
for ax in (axs[cols-1::rows]):
for child in ax.get_children():
if isinstance(child, AnchoredOffsetbox):
child.set_visible(False)
plt.tight_layout()
def test_empty_layout():
"""Test that tight layout doesn't cause an error when there are no axes."""
fig = plt.gcf()
fig.tight_layout()
@pytest.mark.parametrize("label", ["xlabel", "ylabel"])
def test_verybig_decorators(label):
"""Test that no warning emitted when xlabel/ylabel too big."""
fig, ax = plt.subplots(figsize=(3, 2))
ax.set(**{label: 'a' * 100})
def test_big_decorators_horizontal():
"""Test that doesn't warn when xlabel too big."""
fig, axs = plt.subplots(1, 2, figsize=(3, 2))
axs[0].set_xlabel('a' * 30)
axs[1].set_xlabel('b' * 30)
def test_big_decorators_vertical():
"""Test that doesn't warn when ylabel too big."""
fig, axs = plt.subplots(2, 1, figsize=(3, 2))
axs[0].set_ylabel('a' * 20)
axs[1].set_ylabel('b' * 20)
def test_badsubplotgrid():
# test that we get a warning for mismatched subplot grids, not an error
plt.subplot2grid((4, 5), (0, 0))
# this is the bad entry:
plt.subplot2grid((5, 5), (0, 3), colspan=3, rowspan=5)
with pytest.warns(UserWarning):
plt.tight_layout()
def test_collapsed():
# test that if the amount of space required to make all the axes
# decorations fit would mean that the actual Axes would end up with size
# zero (i.e. margins add up to more than the available width) that a call
# to tight_layout will not get applied:
fig, ax = plt.subplots(tight_layout=True)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.annotate('BIG LONG STRING', xy=(1.25, 2), xytext=(10.5, 1.75),
annotation_clip=False)
p1 = ax.get_position()
with pytest.warns(UserWarning):
plt.tight_layout()
p2 = ax.get_position()
assert p1.width == p2.width
# test that passing a rect doesn't crash...
with pytest.warns(UserWarning):
plt.tight_layout(rect=[0, 0, 0.8, 0.8])
def test_suptitle():
fig, ax = plt.subplots(tight_layout=True)
st = fig.suptitle("foo")
t = ax.set_title("bar")
fig.canvas.draw()
assert st.get_window_extent().y0 > t.get_window_extent().y1
@pytest.mark.backend("pdf")
def test_non_agg_renderer(monkeypatch, recwarn):
unpatched_init = mpl.backend_bases.RendererBase.__init__
def __init__(self, *args, **kwargs):
# Check that we don't instantiate any other renderer than a pdf
# renderer to perform pdf tight layout.
assert isinstance(self, mpl.backends.backend_pdf.RendererPdf)
unpatched_init(self, *args, **kwargs)
monkeypatch.setattr(mpl.backend_bases.RendererBase, "__init__", __init__)
fig, ax = plt.subplots()
fig.tight_layout()
| 32.725904 | 80 | 0.603682 |
ac524442cf0014ddeef356abe38adf1712ab77a2 | 2,320 | py | Python | processVideo.py | DylanGreene/working-title-ai-driving | a879f95a749d606785b3d143f7bf89de1231867b | ["MIT"] | null | null | null | processVideo.py | DylanGreene/working-title-ai-driving | a879f95a749d606785b3d143f7bf89de1231867b | ["MIT"] | null | null | null | processVideo.py | DylanGreene/working-title-ai-driving | a879f95a749d606785b3d143f7bf89de1231867b | ["MIT"] | null | null | null |
"""
processVideo.py
Date: 9 November 2018
This file will contain the entire pipeline for processing a video to determine the locations of the lane lines.
It will split the video into separate images and then determine the location of the lanes in each before regenerating a video with
the lane locations added.
"""
import numpy as np
import cv2
import laneDetect
import shiftPerspective
import preprocess
import darknet
if __name__ == "__main__" :
# Initialize dict of objects and colors for drawing object boxes
labels = {}
# Load yolov3 cnn
net = darknet.load_net(b"yolov3.cfg", b"yolov3.weights", 0)
meta = darknet.load_meta(b"coco.data")
########## SPLIT VIDEO INTO FRAMES ##########
# Load input video
drivingVideo = cv2.VideoCapture('test.mp4')
# Define codec and initialize videowriter
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
# 3 = FrameWidth; 4 = FrameHeight; 5 = FPS
fps = drivingVideo.get(5)
output = cv2.VideoWriter("outVid.mp4", fourcc, fps, (int(drivingVideo.get(3)), int(drivingVideo.get(4))))
# Track the frame number so the elapsed time can be computed as frameNumber / fps
frameNumber = 0
while (drivingVideo.isOpened()):
# Display total time calculated so far
print(frameNumber / fps)
# Read the next frame
ret, frame = drivingVideo.read()
# Stop once the video has no frames left; frame is None when ret is False
if not ret:
break
workingCopy = frame.copy()
frameNumber += 1
########## PERSPECTIVE TRANSFORM ##########
workingCopy = shiftPerspective.shift_perspective(workingCopy, 0)
########## PRE PROCESSING ##########
workingCopy = preprocess.preprocess(workingCopy)
########## LANE DETECTION ##########
mask = laneDetect.laneDetect(workingCopy)
########## LANE AREA OVERLAY ##########
mask = shiftPerspective.shift_perspective(mask, 1)
newFrame = cv2.addWeighted(frame, 1, mask, 0.3, 0)
########## OBJECT DETECTION WITH DARKNET ##########
res = darknet.detect(net, meta, newFrame)
newFrame = darknet.draw_bounding_box(newFrame, res, labels)
#cv2.imshow('image',newFrame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
########## ADD IMAGE TO OUTPUT VIDEO ##########
output.write(newFrame)
########## RELEASE MEMORY ##########
drivingVideo.release()
output.release()
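# --- Illustrative sketch (not part of the original processVideo.py) ----------
# The loop above reads VideoCapture properties by their numeric indices
# (3, 4, 5). A minimal, hypothetical variant using OpenCV's named constants
# expresses the same setup more readably; 'test.mp4' and 'outVid.mp4' are just
# placeholder file names.
def _open_video_writer_sketch():
    import cv2
    capture = cv2.VideoCapture('test.mp4')
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = capture.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter('outVid.mp4', fourcc, fps, (width, height))
    return capture, writer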
| 30.12987 | 130 | 0.625862 |
67c1f83d13d367a603b543da266096803cfc5229 | 68,856 | py | Python | django/db/models/query.py | romulorosa/django | 12d0567aa5e82322543f0c0c126ba18c91a1e439 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2021-02-21T15:26:05.000Z | 2021-02-21T15:26:05.000Z | django/db/models/query.py | romulorosa/django | 12d0567aa5e82322543f0c0c126ba18c91a1e439 | ["PSF-2.0", "BSD-3-Clause"] | null | null | null | django/db/models/query.py | romulorosa/django | 12d0567aa5e82322543f0c0c126ba18c91a1e439 | ["PSF-2.0", "BSD-3-Clause"] | 1 | 2017-08-03T13:06:19.000Z | 2017-08-03T13:06:19.000Z |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F
from django.db.models.fields import AutoField
from django.db.models.functions import Trunc
from django.db.models.query_utils import InvalidQuery, Q
from django.db.models.sql.constants import CURSOR
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
- Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
- Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self, chunked_fetch=True))
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
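# Illustrative sketch only (not part of the original Django source): how the
# aggregate() method above is typically called. `Book` and its `price` field
# are hypothetical; the return value is a plain dict keyed by alias.
def _example_aggregate_usage():
    from django.db.models import Avg, Max
    from myapp.models import Book  # hypothetical model
    return Book.objects.aggregate(Avg('price'), highest=Max('price'))
    # e.g. {'price__avg': 34.35, 'highest': 81.20}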
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size)
if connection.features.can_return_ids_from_bulk_insert:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
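# Illustrative sketch only (not part of the original Django source): a typical
# bulk_create() call. `Entry` and its `headline` field are hypothetical; note
# that save() is not called and pre/post_save signals are not sent.
def _example_bulk_create_usage():
    from myapp.models import Entry  # hypothetical model
    entries = [Entry(headline='Entry %d' % i) for i in range(1000)]
    return Entry.objects.bulk_create(entries, batch_size=100)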
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
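# Illustrative sketch only (not part of the original Django source): the usual
# get_or_create() pattern. `Author` with `name`/`age` fields is hypothetical;
# values in `defaults` are only used when a new row has to be created.
def _example_get_or_create_usage():
    from myapp.models import Author  # hypothetical model
    obj, created = Author.objects.get_or_create(name='Ada', defaults={'age': 36})
    return obj, created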
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params):
"""
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create() and update_or_create().
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
if param != 'pk': # It's okay to use a model's pk property.
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Return the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""Return the first object of a query or None if no match is found."""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""Return the last object of a query or None if no match is found."""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset:offset + batch_size]
qs += tuple(self.filter(pk__in=batch).order_by())
else:
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
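# Illustrative sketch only (not part of the original Django source): in_bulk()
# returns a {pk: instance} mapping, which avoids issuing one query per id.
# `Author` is a hypothetical model.
def _example_in_bulk_usage():
    from myapp.models import Author  # hypothetical model
    return Author.objects.in_bulk([1, 2, 3])
    # e.g. {1: <Author: Ada>, 2: <Author: Grace>, 3: <Author: Alan>}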
def delete(self):
"""Delete the records in the current QuerySet."""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
def _values(self, *fields, **expressions):
clone = self._clone()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False):
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
_fields = []
expressions = {}
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id = str(id(field))
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
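# Illustrative sketch only (not part of the original Django source): values()
# yields dicts, values_list() yields tuples, and flat=True collapses a single
# requested field to bare values. `Entry` with a `headline` field is
# hypothetical.
def _example_values_usage():
    from myapp.models import Entry  # hypothetical model
    dicts = Entry.objects.values('id', 'headline')         # [{'id': 1, 'headline': '...'}, ...]
    tuples = Entry.objects.values_list('id', 'headline')   # [(1, '...'), ...]
    ids = Entry.objects.values_list('id', flat=True)       # [1, 2, 3, ...]
    return dicts, tuples, ids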
def dates(self, field_name, kind, order='ASC'):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
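# Illustrative sketch only (not part of the original Django source): dates()
# returns distinct, truncated date objects for the requested kind. `Entry`
# with a `pub_date` field is hypothetical.
def _example_dates_usage():
    from myapp.models import Entry  # hypothetical model
    return Entry.objects.dates('pub_date', 'year', order='DESC')
    # e.g. [datetime.date(2017, 1, 1), datetime.date(2016, 1, 1)]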
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""Return an empty QuerySet."""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._clone()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
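# Illustrative sketch only (not part of the original Django source):
# select_related() follows foreign keys in the same query via a JOIN, so the
# related objects below do not trigger extra queries. `Entry`/`blog`/`author`
# are hypothetical relations.
def _example_select_related_usage():
    from myapp.models import Entry  # hypothetical model
    entry = Entry.objects.select_related('blog', 'author').get(pk=1)
    return entry.blog, entry.author  # already loaded, no additional queries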
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
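# Illustrative sketch only (not part of the original Django source):
# prefetch_related() issues one extra query per lookup and joins the results
# in Python; a Prefetch object (defined further down in this module) lets a
# custom queryset and to_attr be supplied. `Author`/`books`/`published` are
# hypothetical.
def _example_prefetch_related_usage():
    from myapp.models import Author, Book  # hypothetical models
    return Author.objects.prefetch_related(
        Prefetch('books',
                 queryset=Book.objects.filter(published=True),
                 to_attr='published_books'))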
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
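# Illustrative sketch only (not part of the original Django source): annotate()
# attaches a computed value to every returned instance; aggregating over a
# relation implies a GROUP BY. `Author`/`book` are hypothetical.
def _example_annotate_usage():
    from django.db.models import Count
    from myapp.models import Author  # hypothetical model
    authors = Author.objects.annotate(num_books=Count('book'))
    return [(a.name, a.num_books) for a in authors]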
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""Add extra SQL fragments to the query."""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed.
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
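# Illustrative sketch only (not part of the original Django source): defer()
# postpones loading the named columns until first access, while only() loads
# just the named columns (plus the pk) immediately. `Entry` with `headline`
# and `body` fields is hypothetical.
def _example_defer_only_usage():
    from myapp.models import Entry  # hypothetical model
    light = Entry.objects.defer('body')       # body fetched lazily on access
    minimal = Entry.objects.only('headline')  # everything else deferred
    return light, minimal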
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
A helper method for bulk_create() to insert the objects one batch at a
time. It slices the given list into batches of at most batch_size objects
and inserts each batch with a separate _insert() call.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if connections[self.db].features.can_return_ids_from_bulk_insert:
inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
if isinstance(inserted_id, list):
inserted_ids.extend(inserted_id)
else:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db)
return inserted_ids
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare_as_filter_value(self):
if self._fields is None:
queryset = self.values('pk')
queryset.query._forced_pk = True
else:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
if len(self._fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
queryset = self._clone()
return queryset.query.as_subquery_filter(queryset._db)
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
equivalent with checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = columns.index(query_name)
columns[index] = model_name
except ValueError:
# Ignore translations for nonexistent column names
pass
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.table_name_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and queryset._iterable_class is not ModelIterable:
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._clone(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if len(model_instances) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
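

# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (not part of Django itself) of the public
# API that prefetch_one_level() serves. The Author/Book models, the in-memory
# SQLite settings and the demo data below are assumptions made only for this
# example. It shows that a Prefetch(..., to_attr=...) lookup ends up assigning
# the fetched rows to the attribute named by to_attr, while a plain lookup
# fills the related manager's result cache -- the two branches implemented
# above.
import django
from django.conf import settings

if not settings.configured:
    settings.configure(
        INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
        DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3",
                               "NAME": ":memory:"}},
    )
    django.setup()

from django.db import connection, models


class Author(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = "auth"  # reuse an installed app label; example only


class Book(models.Model):
    title = models.CharField(max_length=50)
    author = models.ForeignKey(Author, related_name="books",
                               on_delete=models.CASCADE)

    class Meta:
        app_label = "auth"  # example only


def prefetch_related_demo():
    """Two queries per prefetch: one for authors, one for all related books."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    author = Author.objects.create(name="a")
    Book.objects.create(title="t1", author=author)
    Book.objects.create(title="t2", author=author)

    authors = list(Author.objects.prefetch_related(
        models.Prefetch("books",
                        queryset=Book.objects.order_by("title"),
                        to_attr="sorted_books")
    ))
    # to_attr branch: results land on a plain list attribute.
    assert [b.title for b in authors[0].sorted_books] == ["t1", "t2"]

    authors = list(Author.objects.prefetch_related("books"))
    # Manager branch: results land in the manager's cached queryset,
    # so iterating .all() here does not issue another query.
    assert len(authors[0].books.all()) == 2
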
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
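

# Illustrative sketch (assumes the hypothetical Author/Book models from the
# prefetch_related example earlier in this document, with tables already
# created and rows inserted). RelatedPopulator backs the public
# select_related() API: a single JOIN query is issued and populate() fills
# book.author from the extra columns of each row, so the attribute access in
# the loop below does not hit the database again.
def select_related_demo():
    books = list(Book.objects.select_related("author"))  # one SQL query with a JOIN
    return [(book.title, book.author.name) for book in books]  # no further queries
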
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
avg_line_length: 40.912656 | max_line_length: 115 | alphanum_fraction: 0.613774

hexsha: 4a7036cc92df6c06944addba1db7773e8b8a3172 | size: 4,576 | ext: py | lang: Python
max_stars:  path django_quicky/models.py | repo sametmax/django-quicky | head 2a87dbdcc6db400aff5a9119533bd3784fc4afb4 | licenses ["Zlib"] | count 149 | 2015-01-02T19:48:47.000Z .. 2022-02-18T15:43:34.000Z
max_issues: path django_quicky/models.py | repo keshapps/django-quicky | head 2a87dbdcc6db400aff5a9119533bd3784fc4afb4 | licenses ["Zlib"] | count 3 | 2015-01-28T18:44:42.000Z .. 2017-05-23T18:50:02.000Z
max_forks:  path django_quicky/models.py | repo keshapps/django-quicky | head 2a87dbdcc6db400aff5a9119533bd3784fc4afb4 | licenses ["Zlib"] | count 11 | 2015-01-05T19:22:16.000Z .. 2021-01-25T13:06:20.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import types
from random import randint
__all__ = ['get_random_objects', 'get_object_or_none', 'patch_model']
def get_random_objects(model=None, queryset=None, count=float('+inf')):
"""
    Get `count` random objects from a model class `model` or from
    a queryset. Returns an iterator that yields one object at a time.
    Your model must have an auto-increment `id` for this to work, and it
    should be available on the `id` attribute.
"""
from django.db.models import Max
if not queryset:
try:
queryset = model.objects.all()
except AttributeError:
raise ValueError("You must provide a model or a queryset")
max_ = queryset.aggregate(Max('id'))['id__max']
i = 0
while i < count:
try:
yield queryset.get(pk=randint(1, max_))
i += 1
except queryset.model.DoesNotExist:
pass
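

# Usage sketch for get_random_objects(). Assumptions (not part of
# django-quicky): a configured Django project with an `Article` model whose
# primary key is an auto-increment `id`, plus a boolean `published` field.
# The generator is lazy and, with the default count, infinite, so always
# bound it with `count` or itertools.islice().
def _get_random_objects_example(Article):
    three_random = list(get_random_objects(model=Article, count=3))
    # Restrict the candidate pool by passing a queryset instead of a model:
    published = Article.objects.filter(published=True)
    one_random = next(get_random_objects(queryset=published, count=1))
    return three_random, one_random
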
def get_object_or_none(klass, *args, **kwargs):
"""
Uses get() to return an object or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
"""
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
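

# Usage sketch for get_object_or_none(). Assumption (not part of
# django-quicky): an `Article` model in a configured Django project. Unlike
# Django's get_object_or_404(), a miss returns None instead of raising
# Http404.
def _get_object_or_none_example(Article):
    article = get_object_or_none(Article, pk=42)
    return "no such article" if article is None else article
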
def patch_model(model_to_patch, class_to_patch_with):
"""
Adapted from https://gist.github.com/1402045
Monkey patch a django model with additional or
replacement fields and methods.
- All fields and methods that didn't exist previously are added.
- Existing methods with the same names are renamed with
      <methodname>__overridden, so they are still accessible,
then the new ones are added.
- Existing fields with the same name are deleted and replaced with
the new fields.
The class used to patch the model MUST be an old-style class (so
this may not work with Python 3).
Example (in your models.py):
from django.contrib.auth.models import User
from django_quicky.models import patch_model
class UserOverride: # we don't need to inherit from anything
email = models.EmailField(_('e-mail address'), unique=True)
new_field = models.CharField(_('new field'), max_length=10)
def save(self, *args, **kwargs):
# Call original save() method
self.save__overridden(*args, **kwargs)
# More custom save
patch_model(User, UserOverride)
"""
from django.db.models.fields import Field
# The _meta attribute is where the definition of the fields is stored in
# django model classes.
patched_meta = getattr(model_to_patch, '_meta')
field_lists = (patched_meta.local_fields, patched_meta.local_many_to_many)
for name, obj in class_to_patch_with.__dict__.iteritems():
# If the attribute is a field, delete any field with the same name.
if isinstance(obj, Field):
for field_list in field_lists:
match = ((i, f) for i, f in enumerate(field_list) if f.name == name)
try:
i, field = match.next()
# The creation_counter is used by django to know in
# which order the database columns are declared. We
# get it to ensure that when we override a field it
# will be declared in the same position as before.
obj.creation_counter = field.creation_counter
field_list.pop(i)
finally:
break
# Add "__overridden" to method names if they already exist.
elif isinstance(obj, (types.FunctionType, property,
staticmethod, classmethod)):
# rename the potential old method
attr = getattr(model_to_patch, name, None)
if attr:
setattr(model_to_patch, name + '__overridden', attr)
# bind the new method to the object
if isinstance(obj, types.FunctionType):
obj = types.UnboundMethodType(obj, None, model_to_patch)
# Add the new field/method name and object to the model.
model_to_patch.add_to_class(name, obj)
avg_line_length: 33.40146 | max_line_length: 84 | alphanum_fraction: 0.609484

hexsha: 3ca6cf9252efe6cebd871cf6443614eacb42b391 | size: 161 | ext: py | lang: Python
max_stars:  path tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_MonthOfYear_LSTM.py | repo shaido987/pyaf | head b9afd089557bed6b90b246d3712c481ae26a1957 | licenses ["BSD-3-Clause"] | count 377 | 2016-10-13T20:52:44.000Z .. 2022-03-29T18:04:14.000Z
max_issues: path tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_MonthOfYear_LSTM.py | repo ysdede/pyaf | head b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | licenses ["BSD-3-Clause"] | count 160 | 2016-10-13T16:11:53.000Z .. 2022-03-28T04:21:34.000Z
max_forks:  path tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_MonthOfYear_LSTM.py | repo ysdede/pyaf | head b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | licenses ["BSD-3-Clause"] | count 63 | 2017-03-09T14:51:18.000Z .. 2022-03-27T20:52:57.000Z
content:
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['PolyTrend'] , ['Seasonal_MonthOfYear'] , ['LSTM'] );
avg_line_length: 40.25 | max_line_length: 88 | alphanum_fraction: 0.757764

hexsha: ac4b596a23a53e64178b9e68372b8c51f26fc7ed | size: 78 | ext: py | lang: Python
max_stars:  path plugins/google_directory/icon_google_directory/actions/get_all_domain_users/__init__.py | repo lukaszlaszuk/insightconnect-plugins | head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses ["MIT"] | count 46 | 2019-06-05T20:47:58.000Z .. 2022-03-29T10:18:01.000Z
max_issues: path plugins/google_directory/icon_google_directory/actions/get_all_domain_users/__init__.py | repo lukaszlaszuk/insightconnect-plugins | head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses ["MIT"] | count 386 | 2019-06-07T20:20:39.000Z .. 2022-03-30T17:35:01.000Z
max_forks:  path plugins/google_directory/icon_google_directory/actions/get_all_domain_users/__init__.py | repo lukaszlaszuk/insightconnect-plugins | head 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses ["MIT"] | count 43 | 2019-07-09T14:13:58.000Z .. 2022-03-28T12:04:46.000Z
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import GetAllDomainUsers
avg_line_length: 26 | max_line_length: 39 | alphanum_fraction: 0.794872

hexsha: 3994d92dce4474c91d39918ee5459d6f697f23f1 | size: 6,392 | ext: py | lang: Python
max_stars:  path src/organisms/curator.py | repo flaviuvadan/proteoglycan-pathway-evolution | head 9fa9c457946eaf932860b9c0c9ab5b0b37d02e6e | licenses ["MIT"] | count 1 | 2020-01-12T01:42:32.000Z .. 2020-01-12T01:42:32.000Z
max_issues: path src/organisms/curator.py | repo flaviuvadan/proteoglycan-pathway-evolution | head 9fa9c457946eaf932860b9c0c9ab5b0b37d02e6e | licenses ["MIT"] | count null | null .. null
max_forks:  path src/organisms/curator.py | repo flaviuvadan/proteoglycan-pathway-evolution | head 9fa9c457946eaf932860b9c0c9ab5b0b37d02e6e | licenses ["MIT"] | count null | null .. null
content:
import csv
import json
import os
from src import exceptions
class Curator:
""" Curator is responsible for loading the 51 genes of interest, reading their orthologs, and parsing their
    Ensembl results with the intention of creating files of unique organisms for those genes """
GENE_NAME_IDX = 0
GENE_ID_IDX = 1
def __init__(self, ):
"""
Constructor
"""
self.gene_file_path = self._get_gene_file_path()
self.genes = self.load_genes()
def _get_gene_file_path(self):
""" Builds and returns the genes file path """
return os.path.join(os.getcwd(), "src", "data", "genes", "genes.txt")
def load_genes(self):
""" Loads the genes into the class gene_ids """
with open(self.gene_file_path, 'r') as gene_file:
csv_reader = csv.reader(gene_file, delimiter=',')
for gene in csv_reader:
yield (gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])
def _build_gene_file_path(self, gene_id):
"""
Builds the path to a gene orthologs file
:param gene_id: Ensembl ID of the gene
:return: path to file
"""
return os.path.join(os.getcwd(), "src", "data", "orthologs", "{}.txt".format(gene_id))
def curate(self):
""" Curates organisms """
for gene in self.genes:
gene_id = gene[self.GENE_ID_IDX]
gene_name = gene[self.GENE_NAME_IDX]
organisms = {}
with open(self._build_gene_file_path(gene_id), "r") as orthologs:
loaded = json.load(orthologs)
data = loaded.get('data')
if not data:
raise exceptions.EmptyOrthologData("gene {} orthologs not found".format(gene_id))
homologies = data[0].get('homologies')
if not homologies:
raise exceptions.EmptyOrthologData("gene {} ortholog homologies not found".format(gene_id))
for hom in homologies:
try:
source_species, source_seq, target_species, target_seq = self._parse_homologies(gene_id, hom)
except exceptions.ProjectedBuildOrganismException:
continue
# want to keep seqs around for multiple sequence alignment
organisms[source_species] = source_seq
organisms[target_species] = target_seq
self._create_organisms_file(gene_name, gene_id, organisms)
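
    # Illustrative sketch of the minimal Ensembl orthologs JSON shape that
    # curate() expects (the keys mirror the parsing in _parse_homologies();
    # the species names and sequences are invented for the example only):
    _EXAMPLE_ORTHOLOG_FILE = {
        "data": [
            {
                "homologies": [
                    {
                        "source": {"species": "homo_sapiens", "align_seq": "MV-LS"},
                        "target": {"species": "mus_musculus", "align_seq": "MVKLS"},
                    },
                ],
            },
        ],
    }
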
def _parse_homologies(self, gene_id, homology):
"""
Parses the homologies dictionary of the orthologs of a gene to get the source and target species, along with
their sequences
:param str gene_id: Ensembl ID of the gene
:param dict homology: information of a gene
        :return: source species, source sequence, target species, target sequence
"""
source = homology.get('source')
if not source:
raise exceptions.EmptyHomologyInformation("gene {} ortholog has no source".format(gene_id))
target = homology.get('target')
if not target:
raise exceptions.EmptyHomologyInformation("gene {} ortholog has no target".format(gene_id))
source_species = source.get('species')
if not source_species:
raise exceptions.EmptyHomologyInformation(
"gene {} ortholog has not source target".format(gene_id))
target_species = target.get('species')
if not target_species:
raise exceptions.EmptyHomologyInformation(
"gene {} ortholog has not source species".format(gene_id))
source_seq = source.get('align_seq')
if not source_seq:
raise exceptions.EmptySquence("gene {} source seq not found".format(gene_id))
target_seq = target.get('align_seq')
if not target_seq:
raise exceptions.EmptySquence("gene {} target seq not found".format(gene_id))
if self._is_projection_build(target_species):
# source species is always Homo Sapiens b/c that's our reference
raise exceptions.ProjectedBuildOrganismException("projected build species: {}".format(target_species))
return source_species, source_seq, target_species, target_seq
def _is_projection_build(self, species):
"""
Tells whether a given species's genome is listed as a "Projection Build" in Ensembl.
Note, we do not want to include low-coverage genomes in the analysis as we cannot make any biologically-sound
claims about those organisms. By filtering out those organisms here, we allow other packages to use clean data
:param str species: species name
:return: True if species has a projection build, False otherwise
"""
projection_build_species = ["vicugna_pacos", "tursiops_truncatus", "erinaceus_europaeus", "procavia_capensis",
"echinops_telfairi", "pteropus_vampyrus", "pongo_abelii", "ochotona_princeps",
"sorex_araneus", "choloepus_hoffmanni", "tupaia_belangeri", "notamacropus_eugenii"]
        return species in projection_build_species
def _get_organisms_file_path(self, gene_name, gene_id):
""" Builds the file path to the organisms file of the given gene name and gene ID """
return os.path.join(os.getcwd(), "src", "data", "organisms", "{}_{}.txt".format(gene_name, gene_id))
def _create_organisms_file(self, gene_name, gene_id, organisms):
"""
Creates the organisms files associated with the orthologs of a gene
:param str gene_name: name of the gene being processed
:param str gene_id: Ensembl ID of the gene
:param dict organisms: dictionary of organisms keyed on species
"""
organisms_file_path = self._get_organisms_file_path(gene_name, gene_id)
with open(organisms_file_path, "w") as out:
for species, sequence in organisms.items():
# no point in having the dashes (-) from the alignment as the seqs for Homo Sapiens get overwritten
# and we have to re-compute the pair-wise alignment again, anyway
out.write(">{}\n{}\n".format(species, sequence.replace("-", "")))
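

# Sketch of the file produced by _create_organisms_file(): a FASTA-like text
# file with one ">species" header per organism followed by its ungapped
# sequence. The species names and sequences below are invented for
# illustration only.
_EXAMPLE_ORGANISMS_FILE = (
    ">homo_sapiens\n"
    "MVLS\n"
    ">mus_musculus\n"
    "MVKLS\n"
)
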
if __name__ == "__main__":
curator = Curator()
curator.curate()
avg_line_length: 47.348148 | max_line_length: 119 | alphanum_fraction: 0.63689

hexsha: ad59d44bc3969235a7b6ebe5c48a76ec6e134f76 | size: 300 | ext: wsgi | lang: Python
max_stars:  path SetupLab/files/helloWorld.wsgi | repo david618/Centos | head ca88a094424a32b064107e658e8e7b6fae7619b2 | licenses ["Apache-2.0"] | count null | null .. null
max_issues: path SetupLab/files/helloWorld.wsgi | repo david618/Centos | head ca88a094424a32b064107e658e8e7b6fae7619b2 | licenses ["Apache-2.0"] | count null | null .. null
max_forks:  path SetupLab/files/helloWorld.wsgi | repo david618/Centos | head ca88a094424a32b064107e658e8e7b6fae7619b2 | licenses ["Apache-2.0"] | count null | null .. null
content:
def application(environ, start_response):
status = '200 OK'
output = b'Hello World! Python is so easy!\n'
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
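

# Minimal sketch of serving this WSGI callable locally with the standard
# library, for quick manual testing only; a .wsgi file like this is typically
# loaded by Apache/mod_wsgi instead. The host and port are arbitrary choices
# made for the example.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('127.0.0.1', 8000, application)
    print('Serving on http://127.0.0.1:8000 ...')
    httpd.serve_forever()
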
avg_line_length: 27.272727 | max_line_length: 61 | alphanum_fraction: 0.623333

hexsha: f6407f344c6b5e4d60c46e055589a9fa38623765 | size: 56,890 | ext: py | lang: Python
max_stars:  path corehq/apps/sms/tests/test_backends.py | repo dborowiecki/commcare-hq | head f2f4fa67faec09040a98502f5657444075b63f2e | licenses ["BSD-3-Clause"] | count null | null .. null
max_issues: path corehq/apps/sms/tests/test_backends.py | repo dborowiecki/commcare-hq | head f2f4fa67faec09040a98502f5657444075b63f2e | licenses ["BSD-3-Clause"] | count null | null .. null
max_forks:  path corehq/apps/sms/tests/test_backends.py | repo dborowiecki/commcare-hq | head f2f4fa67faec09040a98502f5657444075b63f2e | licenses ["BSD-3-Clause"] | count null | null .. null
content:
import json
import uuid
from datetime import datetime
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from six.moves.urllib.parse import urlencode
from dimagi.utils.couch.cache.cache_core import get_redis_client
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import clear_plan_version_cache
from corehq.apps.api.models import PERMISSION_POST_SMS, ApiUser
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.api import (
send_sms,
send_sms_to_verified_number,
send_sms_with_backend,
send_sms_with_backend_name,
)
from corehq.apps.sms.mixin import BadSMSConfigException
from corehq.apps.sms.models import (
SMS,
BackendMap,
MobileBackendInvitation,
PhoneLoadBalancingMixin,
QueuedSMS,
SQLMobileBackend,
SQLMobileBackendMapping,
)
from corehq.apps.sms.tasks import (
get_connection_slot_from_phone_number,
get_connection_slot_lock,
handle_outgoing,
)
from corehq.apps.sms.tests.util import BaseSMSTest, delete_domain_phone_numbers
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.messaging.smsbackends.airtel_tcl.models import AirtelTCLBackend
from corehq.messaging.smsbackends.apposit.models import SQLAppositBackend
from corehq.messaging.smsbackends.grapevine.models import SQLGrapevineBackend
from corehq.messaging.smsbackends.http.models import SQLHttpBackend
from corehq.messaging.smsbackends.icds_nic.models import SQLICDSBackend
from corehq.messaging.smsbackends.ivory_coast_mtn.models import (
IvoryCoastMTNBackend,
)
from corehq.messaging.smsbackends.karix.models import KarixBackend
from corehq.messaging.smsbackends.mach.models import SQLMachBackend
from corehq.messaging.smsbackends.megamobile.models import SQLMegamobileBackend
from corehq.messaging.smsbackends.push.models import PushBackend
from corehq.messaging.smsbackends.sislog.models import SQLSislogBackend
from corehq.messaging.smsbackends.smsgh.models import SQLSMSGHBackend
from corehq.messaging.smsbackends.start_enterprise.models import (
StartEnterpriseBackend,
)
from corehq.messaging.smsbackends.telerivet.models import SQLTelerivetBackend
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
from corehq.messaging.smsbackends.unicel.models import (
InboundParams,
SQLUnicelBackend,
)
from corehq.messaging.smsbackends.vertex.models import VertexBackend
from corehq.messaging.smsbackends.yo.models import SQLYoBackend
from corehq.util.test_utils import create_test_case
class AllBackendTest(DomainSubscriptionMixin, TestCase):
@classmethod
def setUpClass(cls):
super(AllBackendTest, cls).setUpClass()
cls.domain_obj = Domain(name='all-backend-test')
cls.domain_obj.save()
cls.setup_subscription(cls.domain_obj.name, SoftwarePlanEdition.ADVANCED)
cls.domain_obj = Domain.get(cls.domain_obj.get_id)
cls.test_phone_number = '99912345'
cls.unicel_backend = SQLUnicelBackend(
name='UNICEL',
is_global=True,
hq_api_id=SQLUnicelBackend.get_api_id()
)
cls.unicel_backend.save()
cls.mach_backend = SQLMachBackend(
name='MACH',
is_global=True,
hq_api_id=SQLMachBackend.get_api_id()
)
cls.mach_backend.save()
cls.http_backend = SQLHttpBackend(
name='HTTP',
is_global=True,
hq_api_id=SQLHttpBackend.get_api_id()
)
cls.http_backend.save()
cls.telerivet_backend = SQLTelerivetBackend(
name='TELERIVET',
is_global=True,
hq_api_id=SQLTelerivetBackend.get_api_id()
)
cls.telerivet_backend.set_extra_fields(webhook_secret='telerivet-webhook-secret')
cls.telerivet_backend.save()
cls.test_backend = SQLTestSMSBackend(
name='TEST',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.test_backend.save()
cls.grapevine_backend = SQLGrapevineBackend(
name='GRAPEVINE',
is_global=True,
hq_api_id=SQLGrapevineBackend.get_api_id()
)
cls.grapevine_backend.save()
cls.twilio_backend = SQLTwilioBackend(
name='TWILIO',
is_global=True,
hq_api_id=SQLTwilioBackend.get_api_id()
)
cls.twilio_backend.save()
cls.megamobile_backend = SQLMegamobileBackend(
name='MEGAMOBILE',
is_global=True,
hq_api_id=SQLMegamobileBackend.get_api_id()
)
cls.megamobile_backend.save()
cls.smsgh_backend = SQLSMSGHBackend(
name='SMSGH',
is_global=True,
hq_api_id=SQLSMSGHBackend.get_api_id()
)
cls.smsgh_backend.save()
cls.apposit_backend = SQLAppositBackend(
name='APPOSIT',
is_global=True,
hq_api_id=SQLAppositBackend.get_api_id()
)
cls.apposit_backend.save()
cls.sislog_backend = SQLSislogBackend(
name='SISLOG',
is_global=True,
hq_api_id=SQLSislogBackend.get_api_id()
)
cls.sislog_backend.save()
cls.yo_backend = SQLYoBackend(
name='YO',
is_global=True,
hq_api_id=SQLYoBackend.get_api_id()
)
cls.yo_backend.save()
cls.push_backend = PushBackend(
name='PUSH',
is_global=True,
hq_api_id=PushBackend.get_api_id()
)
cls.push_backend.save()
cls.icds_backend = SQLICDSBackend(
name="ICDS",
is_global=True,
hq_api_id=SQLICDSBackend.get_api_id()
)
cls.icds_backend.save()
cls.vertext_backend = VertexBackend(
name="VERTEX",
is_global=True,
hq_api_id=VertexBackend.get_api_id()
)
cls.vertext_backend.save()
cls.start_enterprise_backend = StartEnterpriseBackend(
name="START_ENT",
is_global=True,
hq_api_id=StartEnterpriseBackend.get_api_id()
)
cls.start_enterprise_backend.save()
cls.ivory_coast_mtn_backend = IvoryCoastMTNBackend(
name="IVORY_COAST_MTN",
is_global=True,
hq_api_id=IvoryCoastMTNBackend.get_api_id()
)
cls.ivory_coast_mtn_backend.save()
cls.karix_backend = KarixBackend(
name='KARIX',
is_global=True,
hq_api_id=KarixBackend.get_api_id()
)
cls.karix_backend.save()
cls.airtel_tcl_backend = AirtelTCLBackend(
name='AIRTEL_TCL',
is_global=True,
hq_api_id=AirtelTCLBackend.get_api_id()
)
cls.airtel_tcl_backend.save()
@classmethod
def tearDownClass(cls):
cls.teardown_subscription()
cls.domain_obj.delete()
cls.unicel_backend.delete()
cls.mach_backend.delete()
cls.http_backend.delete()
cls.telerivet_backend.delete()
cls.test_backend.delete()
cls.grapevine_backend.delete()
cls.twilio_backend.delete()
cls.megamobile_backend.delete()
cls.smsgh_backend.delete()
cls.apposit_backend.delete()
cls.sislog_backend.delete()
cls.yo_backend.delete()
cls.push_backend.delete()
cls.icds_backend.delete()
cls.vertext_backend.delete()
cls.start_enterprise_backend.delete()
cls.ivory_coast_mtn_backend.delete()
cls.karix_backend.delete()
cls.airtel_tcl_backend.delete()
clear_plan_version_cache()
super(AllBackendTest, cls).tearDownClass()
def tearDown(self):
SMS.objects.filter(domain=self.domain_obj.name).delete()
def _test_outbound_backend(self, backend, msg_text, mock_send):
SQLMobileBackendMapping.set_default_domain_backend(self.domain_obj.name, backend)
send_sms(self.domain_obj.name, None, self.test_phone_number, msg_text)
sms = SMS.objects.get(
domain=self.domain_obj.name,
direction='O',
text=msg_text
)
self.assertTrue(mock_send.called)
msg_arg = mock_send.call_args[0][0]
self.assertEqual(msg_arg.date, sms.date)
self.assertEqual(sms.backend_api, backend.hq_api_id)
self.assertEqual(sms.backend_id, backend.couch_id)
def _verify_inbound_request(self, backend_api_id, msg_text, backend_couch_id=None):
sms = SMS.objects.get(
domain=self.domain_obj.name,
direction='I',
text=msg_text
)
self.assertEqual(sms.backend_api, backend_api_id)
if backend_couch_id:
self.assertEqual(sms.backend_id, backend_couch_id)
def _simulate_inbound_request_with_payload(self, url,
content_type, payload):
with create_test_case(
self.domain_obj.name,
'participant',
'contact',
case_properties={
'contact_phone_number': self.test_phone_number,
'contact_phone_number_is_verified': '1',
},
drop_signals=False):
response = Client().post(url, payload, content_type=content_type)
self.assertEqual(response.status_code, 200)
def _simulate_inbound_request(self, url, phone_param,
msg_param, msg_text, post=False, additional_params=None,
expected_response_code=200, is_megamobile=False):
fcn = Client().post if post else Client().get
payload = {
phone_param: self.test_phone_number,
msg_param: msg_text,
}
if additional_params:
payload.update(additional_params)
contact_phone_prefix = '63' if is_megamobile else ''
with create_test_case(
self.domain_obj.name,
'participant',
'contact',
case_properties={
'contact_phone_number': contact_phone_prefix + self.test_phone_number,
'contact_phone_number_is_verified': '1',
},
drop_signals=False):
response = fcn(url, payload)
self.assertEqual(response.status_code, expected_response_code)
@patch('corehq.messaging.smsbackends.unicel.models.SQLUnicelBackend.send')
@patch('corehq.messaging.smsbackends.mach.models.SQLMachBackend.send')
@patch('corehq.messaging.smsbackends.http.models.SQLHttpBackend.send')
@patch('corehq.messaging.smsbackends.telerivet.models.SQLTelerivetBackend.send')
@patch('corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send')
@patch('corehq.messaging.smsbackends.grapevine.models.SQLGrapevineBackend.send')
@patch('corehq.messaging.smsbackends.twilio.models.SQLTwilioBackend.send')
@patch('corehq.messaging.smsbackends.megamobile.models.SQLMegamobileBackend.send')
@patch('corehq.messaging.smsbackends.smsgh.models.SQLSMSGHBackend.send')
@patch('corehq.messaging.smsbackends.apposit.models.SQLAppositBackend.send')
@patch('corehq.messaging.smsbackends.sislog.models.SQLSislogBackend.send')
@patch('corehq.messaging.smsbackends.yo.models.SQLYoBackend.send')
@patch('corehq.messaging.smsbackends.push.models.PushBackend.send')
@patch('corehq.messaging.smsbackends.icds_nic.models.SQLICDSBackend.send')
@patch('corehq.messaging.smsbackends.vertex.models.VertexBackend.send')
@patch('corehq.messaging.smsbackends.start_enterprise.models.StartEnterpriseBackend.send')
@patch('corehq.messaging.smsbackends.ivory_coast_mtn.models.IvoryCoastMTNBackend.send')
@patch('corehq.messaging.smsbackends.karix.models.KarixBackend.send')
@patch('corehq.messaging.smsbackends.airtel_tcl.models.AirtelTCLBackend.send')
def test_outbound_sms(
self,
airtel_tcl_send,
karix_send,
ivory_coast_mtn_send,
start_ent_send,
vertex_send,
icds_send,
push_send,
yo_send,
sislog_send,
apposit_send,
smsgh_send,
megamobile_send,
twilio_send,
grapevine_send,
test_send,
telerivet_send,
http_send,
mach_send,
unicel_send):
self._test_outbound_backend(self.unicel_backend, 'unicel test', unicel_send)
self._test_outbound_backend(self.mach_backend, 'mach test', mach_send)
self._test_outbound_backend(self.http_backend, 'http test', http_send)
self._test_outbound_backend(self.telerivet_backend, 'telerivet test', telerivet_send)
self._test_outbound_backend(self.test_backend, 'test test', test_send)
self._test_outbound_backend(self.grapevine_backend, 'grapevine test', grapevine_send)
self._test_outbound_backend(self.twilio_backend, 'twilio test', twilio_send)
self._test_outbound_backend(self.megamobile_backend, 'megamobile test', megamobile_send)
self._test_outbound_backend(self.smsgh_backend, 'smsgh test', smsgh_send)
self._test_outbound_backend(self.apposit_backend, 'apposit test', apposit_send)
self._test_outbound_backend(self.sislog_backend, 'sislog test', sislog_send)
self._test_outbound_backend(self.yo_backend, 'yo test', yo_send)
self._test_outbound_backend(self.push_backend, 'push test', push_send)
self._test_outbound_backend(self.icds_backend, 'icds test', icds_send)
self._test_outbound_backend(self.vertext_backend, 'vertex_test', vertex_send)
self._test_outbound_backend(self.start_enterprise_backend, 'start_ent_test', start_ent_send)
self._test_outbound_backend(self.ivory_coast_mtn_backend, 'ivory_coast_mtn_test', ivory_coast_mtn_send)
self._test_outbound_backend(self.karix_backend, 'karix test', karix_send)
self._test_outbound_backend(self.airtel_tcl_backend, 'airtel tcl test', airtel_tcl_send)
@run_with_all_backends
def test_unicel_inbound_sms(self):
self._simulate_inbound_request(
'/unicel/in/%s/' % self.unicel_backend.inbound_api_key,
phone_param=InboundParams.SENDER,
msg_param=InboundParams.MESSAGE,
msg_text='unicel test'
)
self._verify_inbound_request(self.unicel_backend.get_api_id(), 'unicel test')
@run_with_all_backends
def test_telerivet_inbound_sms(self):
additional_params = {
'event': 'incoming_message',
'message_type': 'sms',
'secret': self.telerivet_backend.config.webhook_secret
}
self._simulate_inbound_request('/telerivet/in/', phone_param='from_number_e164',
msg_param='content', msg_text='telerivet test', post=True,
additional_params=additional_params)
self._verify_inbound_request(self.telerivet_backend.get_api_id(), 'telerivet test')
@run_with_all_backends
@override_settings(SIMPLE_API_KEYS={'grapevine-test': 'grapevine-api-key'})
def test_grapevine_inbound_sms(self):
xml = """
<gviSms>
<smsDateTime>2015-10-12T12:00:00</smsDateTime>
<cellNumber>99912345</cellNumber>
<content>grapevine test</content>
</gviSms>
"""
payload = urlencode({'XML': xml})
self._simulate_inbound_request_with_payload(
'/gvi/api/sms/?apiuser=grapevine-test&apikey=grapevine-api-key',
content_type='application/x-www-form-urlencoded', payload=payload)
self._verify_inbound_request(self.grapevine_backend.get_api_id(), 'grapevine test')
@run_with_all_backends
def test_twilio_inbound_sms(self):
url = '/twilio/sms/%s' % self.twilio_backend.inbound_api_key
self._simulate_inbound_request(url, phone_param='From',
msg_param='Body', msg_text='twilio test', post=True)
self._verify_inbound_request(self.twilio_backend.get_api_id(), 'twilio test',
backend_couch_id=self.twilio_backend.couch_id)
@run_with_all_backends
def test_twilio_401_response(self):
start_count = SMS.objects.count()
self._simulate_inbound_request('/twilio/sms/xxxxx', phone_param='From',
msg_param='Body', msg_text='twilio test', post=True,
expected_response_code=401)
end_count = SMS.objects.count()
self.assertEqual(start_count, end_count)
@run_with_all_backends
def test_sislog_inbound_sms(self):
self._simulate_inbound_request(
'/sislog/in/%s/' % self.sislog_backend.inbound_api_key,
phone_param='sender',
msg_param='msgdata',
msg_text='sislog test'
)
self._verify_inbound_request(self.sislog_backend.get_api_id(), 'sislog test')
@run_with_all_backends
def test_yo_inbound_sms(self):
self._simulate_inbound_request(
'/yo/sms/%s/' % self.yo_backend.inbound_api_key,
phone_param='sender',
msg_param='message',
msg_text='yo test'
)
self._verify_inbound_request(self.yo_backend.get_api_id(), 'yo test')
@run_with_all_backends
def test_smsgh_inbound_sms(self):
self._simulate_inbound_request(
'/smsgh/sms/{}/'.format(self.smsgh_backend.inbound_api_key),
phone_param='snr',
msg_param='msg',
msg_text='smsgh test'
)
self._verify_inbound_request('SMSGH', 'smsgh test')
@run_with_all_backends
def test_apposit_inbound_sms(self):
self._simulate_inbound_request_with_payload(
'/apposit/in/%s/' % self.apposit_backend.inbound_api_key,
'application/json',
json.dumps({
'from': self.test_phone_number,
'message': 'apposit test',
})
)
self._verify_inbound_request('APPOSIT', 'apposit test',
backend_couch_id=self.apposit_backend.couch_id)
@run_with_all_backends
def test_push_inbound_sms(self):
xml = """<?xml version="1.0" encoding="UTF-8"?>
<bspostevent>
<field name="MobileNumber" type="string">99912345</field>
<field name="Text" type="string">push test</field>
</bspostevent>
"""
self._simulate_inbound_request_with_payload(
'/push/sms/%s/' % self.push_backend.inbound_api_key,
content_type='application/xml', payload=xml)
self._verify_inbound_request(self.push_backend.get_api_id(), 'push test',
backend_couch_id=self.push_backend.couch_id)
class OutgoingFrameworkTestCase(DomainSubscriptionMixin, TestCase):
@classmethod
def setUpClass(cls):
super(OutgoingFrameworkTestCase, cls).setUpClass()
cls.domain = "test-domain"
cls.domain2 = "test-domain2"
cls.domain_obj = Domain(name=cls.domain)
cls.domain_obj.save()
cls.setup_subscription(cls.domain, SoftwarePlanEdition.ADVANCED)
cls.domain_obj = Domain.get(cls.domain_obj._id)
cls.backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND1',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND2',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend3 = SQLTestSMSBackend.objects.create(
name='BACKEND3',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend4 = SQLTestSMSBackend.objects.create(
name='BACKEND4',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend5 = SQLTestSMSBackend.objects.create(
name='BACKEND5',
domain=cls.domain,
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend6 = SQLTestSMSBackend.objects.create(
name='BACKEND6',
domain=cls.domain2,
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend6.set_shared_domains([cls.domain])
cls.backend7 = SQLTestSMSBackend.objects.create(
name='BACKEND7',
domain=cls.domain2,
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend8 = SQLTestSMSBackend.objects.create(
name='BACKEND',
domain=cls.domain,
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend9 = SQLTestSMSBackend.objects.create(
name='BACKEND',
domain=cls.domain2,
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend9.set_shared_domains([cls.domain])
cls.backend10 = SQLTestSMSBackend.objects.create(
name='BACKEND',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id()
)
cls.backend_mapping1 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='*',
backend=cls.backend1
)
cls.backend_mapping2 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='1',
backend=cls.backend2
)
cls.backend_mapping3 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='91',
backend=cls.backend3
)
cls.backend_mapping4 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='265',
backend=cls.backend4
)
cls.backend_mapping5 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='256',
backend=cls.backend5
)
cls.backend_mapping6 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='25670',
backend=cls.backend6
)
cls.backend_mapping7 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='25675',
backend=cls.backend7
)
@classmethod
def tearDownClass(cls):
delete_domain_phone_numbers(cls.domain)
delete_domain_phone_numbers(cls.domain2)
cls.backend1.delete()
cls.backend2.delete()
cls.backend3.delete()
cls.backend4.delete()
cls.backend5.delete()
cls.backend6.delete()
cls.backend7.delete()
cls.backend8.delete()
cls.backend9.delete()
cls.backend10.delete()
cls.teardown_subscription()
cls.domain_obj.delete()
clear_plan_version_cache()
super(OutgoingFrameworkTestCase, cls).tearDownClass()
def test_multiple_country_prefixes(self):
self.assertEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'256800000000'
).pk,
self.backend5.pk
)
self.assertEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'256700000000'
).pk,
self.backend6.pk
)
self.assertEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'256750000000'
).pk,
self.backend7.pk
)
def __test_global_backend_map(self):
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '15551234567', 'Test for BACKEND2'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend2.pk)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '9100000000', 'Test for BACKEND3'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend3.pk)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '26500000000', 'Test for BACKEND4'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend4.pk)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '25800000000', 'Test for BACKEND1'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend1.pk)
def __test_domain_default(self):
# Test overriding with domain-level backend
SQLMobileBackendMapping.set_default_domain_backend(self.domain, self.backend5)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '15551234567', 'Test for BACKEND5'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend5.pk)
def __test_shared_backend(self):
# Test use of backend that another domain owns but has granted access
SQLMobileBackendMapping.set_default_domain_backend(self.domain, self.backend6)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms(self.domain, None, '25800000000', 'Test for BACKEND6'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend6.pk)
# Test trying to use a backend that another domain owns but has not granted access
SQLMobileBackendMapping.set_default_domain_backend(self.domain, self.backend7)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertFalse(send_sms(self.domain, None, '25800000000', 'Test Unauthorized'))
self.assertEqual(mock_send.call_count, 0)
def __test_verified_number_with_map(self, contact):
# Test sending to verified number with backend map
SQLMobileBackendMapping.unset_default_domain_backend(self.domain)
verified_number = contact.get_phone_number()
self.assertTrue(verified_number is not None)
self.assertTrue(verified_number.backend_id is None)
self.assertEqual(verified_number.phone_number, '15551234567')
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms_to_verified_number(verified_number, 'Test for BACKEND2'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend2.pk)
# Test sending to verified number with default domain backend
SQLMobileBackendMapping.set_default_domain_backend(self.domain, self.backend5)
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms_to_verified_number(verified_number, 'Test for BACKEND5'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend5.pk)
def __test_contact_level_backend(self, contact):
# Test sending to verified number with a contact-level backend owned by the domain
update_case(self.domain, contact.case_id, case_properties={'contact_backend_id': 'BACKEND'})
contact = CaseAccessors(self.domain).get_case(contact.case_id)
verified_number = contact.get_phone_number()
self.assertTrue(verified_number is not None)
self.assertEqual(verified_number.backend_id, 'BACKEND')
self.assertEqual(verified_number.phone_number, '15551234567')
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms_to_verified_number(verified_number, 'Test for domain BACKEND'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend8.pk)
# Test sending to verified number with a contact-level backend granted to the domain by another domain
self.backend8.name = 'BACKEND8'
self.backend8.save()
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms_to_verified_number(verified_number, 'Test for shared domain BACKEND'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend9.pk)
# Test sending to verified number with a contact-level global backend
self.backend9.name = 'BACKEND9'
self.backend9.save()
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(send_sms_to_verified_number(verified_number, 'Test for global BACKEND'))
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend10.pk)
# Test raising exception if contact-level backend is not found
self.backend10.name = 'BACKEND10'
self.backend10.save()
with self.assertRaises(BadSMSConfigException):
send_sms_to_verified_number(verified_number, 'Test for unknown BACKEND')
def __test_send_sms_with_backend(self):
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(
send_sms_with_backend(self.domain, '+15551234567', 'Test for BACKEND3', self.backend3.couch_id)
)
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend3.pk)
def __test_send_sms_with_backend_name(self):
with patch(
'corehq.messaging.smsbackends.test.models.SQLTestSMSBackend.send',
autospec=True
) as mock_send:
self.assertTrue(
send_sms_with_backend_name(self.domain, '+15551234567', 'Test for BACKEND3', 'BACKEND3')
)
self.assertEqual(mock_send.call_count, 1)
self.assertEqual(mock_send.call_args[0][0].pk, self.backend3.pk)
def test_choosing_appropriate_backend_for_outgoing(self):
with create_test_case(
self.domain,
'participant',
'contact',
case_properties={
'contact_phone_number': '15551234567',
'contact_phone_number_is_verified': '1',
},
drop_signals=False) as contact:
self.__test_global_backend_map()
self.__test_domain_default()
self.__test_shared_backend()
self.__test_verified_number_with_map(contact)
self.__test_contact_level_backend(contact)
self.__test_send_sms_with_backend()
self.__test_send_sms_with_backend_name()
SQLMobileBackendMapping.unset_default_domain_backend(self.domain)
def test_reserving_connection_slots(self):
random_slot = get_connection_slot_from_phone_number(uuid.uuid4().hex, 4)
self.assertGreaterEqual(random_slot, 0)
self.assertLessEqual(random_slot, 3)
self.assertEqual(get_connection_slot_from_phone_number('999000001', 4), 0)
self.assertEqual(get_connection_slot_from_phone_number('999000002', 4), 1)
self.assertEqual(get_connection_slot_from_phone_number('999000003', 4), 0)
lock_999000001 = get_connection_slot_lock('999000001', self.backend1, 4)
lock_999000002 = get_connection_slot_lock('999000002', self.backend1, 4)
lock_999000003 = get_connection_slot_lock('999000003', self.backend1, 4)
self.assertTrue(lock_999000001.acquire(blocking=False))
self.assertFalse(lock_999000003.acquire(blocking=False))
self.assertTrue(lock_999000002.acquire(blocking=False))
lock_999000001.release()
self.assertTrue(lock_999000003.acquire(blocking=False))
lock_999000002.release()
lock_999000003.release()
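

# Generic sketch of the idea exercised by test_reserving_connection_slots()
# above, NOT CommCare HQ's actual implementation: hash a phone number
# deterministically into one of max_slots buckets so that messages to the
# same number always contend for the same connection slot, while different
# numbers spread across the available slots.
import hashlib


def _example_connection_slot(phone_number, max_slots):
    digest = hashlib.md5(phone_number.encode('utf-8')).hexdigest()
    return int(digest, 16) % max_slots
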
class SQLMobileBackendTestCase(TestCase):
def assertBackendsEqual(self, backend1, backend2):
self.assertEqual(backend1.pk, backend2.pk)
self.assertEqual(backend1.__class__, backend2.__class__)
def test_domain_is_shared(self):
backend = SQLTestSMSBackend.objects.create(
name='BACKEND',
domain='shared-test-1',
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
self.assertFalse(backend.domain_is_shared('shared-test-2'))
backend.set_shared_domains(['shared-test-2'])
self.assertTrue(backend.domain_is_shared('shared-test-2'))
backend.soft_delete()
self.assertFalse(backend.domain_is_shared('shared-test-2'))
backend.delete()
def test_domain_is_authorized(self):
backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND1',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND2',
domain='auth-test-1',
is_global=False,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
self.assertTrue(backend1.domain_is_authorized('auth-test-1'))
self.assertTrue(backend1.domain_is_authorized('auth-test-2'))
self.assertTrue(backend1.domain_is_authorized('auth-test-3'))
self.assertTrue(backend2.domain_is_authorized('auth-test-1'))
self.assertFalse(backend2.domain_is_authorized('auth-test-2'))
self.assertFalse(backend2.domain_is_authorized('auth-test-3'))
backend2.set_shared_domains(['auth-test-2'])
self.assertTrue(backend2.domain_is_authorized('auth-test-1'))
self.assertTrue(backend2.domain_is_authorized('auth-test-2'))
self.assertFalse(backend2.domain_is_authorized('auth-test-3'))
backend1.delete()
backend2.delete()
def test_load_default_by_phone_and_domain(self):
backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND1',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND2',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend3 = SQLTestSMSBackend.objects.create(
name='BACKEND3',
is_global=False,
domain='load-default-test',
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend4 = SQLTestSMSBackend.objects.create(
name='BACKEND4',
is_global=False,
domain='load-default-test',
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='*',
backend=backend1
)
SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='27',
backend=backend2
)
SQLMobileBackendMapping.objects.create(
is_global=False,
domain='load-default-test',
backend_type=SQLMobileBackend.SMS,
prefix='*',
backend=backend3
)
SQLMobileBackendMapping.objects.create(
is_global=False,
domain='load-default-test',
backend_type=SQLMobileBackend.SMS,
prefix='27',
backend=backend4
)
# Test global prefix map
self.assertBackendsEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test-2'
),
backend2
)
# Test domain-level prefix map
self.assertBackendsEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test'
),
backend4
)
# Test domain catch-all
backend4.soft_delete()
self.assertBackendsEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test'
),
backend3
)
# Test global prefix map
backend3.soft_delete()
self.assertBackendsEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test'
),
backend2
)
# Test global catch-all
backend2.soft_delete()
self.assertBackendsEqual(
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test'
),
backend1
)
# Test raising exception if nothing found
backend1.soft_delete()
with self.assertRaises(BadSMSConfigException):
SQLMobileBackend.load_default_by_phone_and_domain(
SQLMobileBackend.SMS,
'2700000000',
domain='load-default-test'
)
backend1.delete()
backend2.delete()
backend3.delete()
backend4.delete()
def test_get_backend_api_id(self):
backend = SQLTestSMSBackend.objects.create(
name='BACKEND',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
        self.assertEqual(
            SQLMobileBackend.get_backend_api_id(backend.pk),
            SQLTestSMSBackend.get_api_id()
        )
        self.assertEqual(
            SQLMobileBackend.get_backend_api_id(backend.couch_id, is_couch_id=True),
            SQLTestSMSBackend.get_api_id()
        )
backend.soft_delete()
with self.assertRaises(SQLMobileBackend.DoesNotExist):
SQLMobileBackend.get_backend_api_id(backend.pk)
with self.assertRaises(SQLMobileBackend.DoesNotExist):
SQLMobileBackend.get_backend_api_id(backend.couch_id, is_couch_id=True)
backend.delete()
def test_load(self):
backend = SQLTestSMSBackend.objects.create(
name='BACKEND',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
self.assertBackendsEqual(
SQLMobileBackend.load(backend.pk),
backend
)
self.assertBackendsEqual(
SQLMobileBackend.load(backend.pk, api_id=SQLTestSMSBackend.get_api_id()),
backend
)
self.assertBackendsEqual(
SQLMobileBackend.load(backend.couch_id, is_couch_id=True),
backend
)
self.assertBackendsEqual(
SQLMobileBackend.load(
backend.couch_id,
api_id=SQLTestSMSBackend.get_api_id(),
is_couch_id=True
),
backend
)
backend.soft_delete()
with self.assertRaises(SQLMobileBackend.DoesNotExist):
SQLMobileBackend.load(backend.pk, api_id=SQLTestSMSBackend.get_api_id())
with self.assertRaises(SQLMobileBackend.DoesNotExist):
SQLMobileBackend.load(
backend.couch_id,
api_id=SQLTestSMSBackend.get_api_id(),
is_couch_id=True
)
with self.assertRaises(BadSMSConfigException):
SQLMobileBackend.load(backend.pk, api_id='this-api-id-does-not-exist')
backend.delete()
def test_load_by_name(self):
backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND_BY_NAME_TEST',
is_global=False,
domain='backend-by-name-test-1',
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND_BY_NAME_TEST',
is_global=False,
domain='backend-by-name-test-2',
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2.set_shared_domains(['backend-by-name-test-1'])
backend3 = SQLTestSMSBackend.objects.create(
name='BACKEND_BY_NAME_TEST',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-1',
'BACKEND_BY_NAME_TEST'
),
backend1
)
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-3',
'BACKEND_BY_NAME_TEST'
),
backend3
)
backend1.soft_delete()
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-1',
'BACKEND_BY_NAME_TEST'
),
backend2
)
backend2.set_shared_domains([])
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-1',
'BACKEND_BY_NAME_TEST'
),
backend3
)
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-2',
'BACKEND_BY_NAME_TEST'
),
backend2
)
backend2.soft_delete()
self.assertBackendsEqual(
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-2',
'BACKEND_BY_NAME_TEST'
),
backend3
)
backend3.soft_delete()
with self.assertRaises(BadSMSConfigException):
SQLMobileBackend.load_by_name(
SQLMobileBackend.SMS,
'backend-by-name-test-1',
'BACKEND_BY_NAME_TEST'
)
backend1.delete()
backend2.delete()
backend3.delete()
class LoadBalanceBackend(SQLTestSMSBackend, PhoneLoadBalancingMixin):
class Meta(object):
proxy = True
@classmethod
def get_api_id(cls):
return 'LOAD_BALANCE'
class RateLimitBackend(SQLTestSMSBackend):
class Meta(object):
proxy = True
def get_sms_rate_limit(self):
return 10
@classmethod
def get_api_id(cls):
return 'RATE_LIMIT'
class LoadBalanceAndRateLimitBackend(SQLTestSMSBackend, PhoneLoadBalancingMixin):
class Meta(object):
proxy = True
def get_sms_rate_limit(self):
return 10
@classmethod
def get_api_id(cls):
return 'LOAD_BALANCE_RATE_LIMIT'
def mock_get_sms_backend_classes():
return {
LoadBalanceBackend.get_api_id(): LoadBalanceBackend,
RateLimitBackend.get_api_id(): RateLimitBackend,
LoadBalanceAndRateLimitBackend.get_api_id(): LoadBalanceAndRateLimitBackend,
}
@patch('corehq.apps.sms.util.get_sms_backend_classes', new=mock_get_sms_backend_classes)
class LoadBalancingAndRateLimitingTestCase(BaseSMSTest):
def setUp(self):
super(LoadBalancingAndRateLimitingTestCase, self).setUp()
self.domain = 'load-balance-rate-limit'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.create_account_and_subscription(self.domain)
self.domain_obj = Domain.get(self.domain_obj.get_id)
def tearDown(self):
QueuedSMS.objects.all().delete()
self.domain_obj.delete()
super(LoadBalancingAndRateLimitingTestCase, self).tearDown()
def create_outgoing_sms(self, backend, phone_number):
sms = QueuedSMS(
domain=self.domain,
date=datetime.utcnow(),
direction='O',
phone_number=phone_number,
text='message',
backend_id=backend.couch_id
)
sms.save()
return sms
def assertRequeue(self, backend, phone_number):
requeue_flag = handle_outgoing(self.create_outgoing_sms(backend, phone_number))
self.assertTrue(requeue_flag)
def assertNotRequeue(self, backend, phone_number):
requeue_flag = handle_outgoing(self.create_outgoing_sms(backend, phone_number))
self.assertFalse(requeue_flag)
def test_load_balance(self):
backend = LoadBalanceBackend.objects.create(
name='BACKEND',
is_global=True,
load_balancing_numbers=['+9990001', '+9990002', '+9990003'],
hq_api_id=LoadBalanceBackend.get_api_id()
)
self.addCleanup(backend.delete)
for i in range(2):
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9991111111')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990002')
for i in range(2):
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9992222222')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990001')
for i in range(2):
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9993333333')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990003')
def test_rate_limit(self):
backend = RateLimitBackend.objects.create(
name='BACKEND',
is_global=True,
hq_api_id=RateLimitBackend.get_api_id()
)
self.addCleanup(backend.delete)
# Requeue flag should be False until we hit the limit
for i in range(backend.get_sms_rate_limit()):
with patch('corehq.apps.sms.tests.test_backends.RateLimitBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9991111111')
self.assertTrue(mock_send.called)
# Requeue flag should be True after hitting the limit
with patch('corehq.apps.sms.tests.test_backends.RateLimitBackend.send') as mock_send:
self.assertRequeue(backend, '+9991111111')
self.assertFalse(mock_send.called)
def test_load_balance_and_rate_limit(self):
backend = LoadBalanceAndRateLimitBackend.objects.create(
name='BACKEND',
is_global=True,
load_balancing_numbers=['+9990001', '+9990002', '+9990003'],
hq_api_id=LoadBalanceAndRateLimitBackend.get_api_id()
)
self.addCleanup(backend.delete)
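        # First exhaust the backend's rate limit while checking the load-balancing
        # rotation, then verify that any further sends are requeued.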
for i in range(backend.get_sms_rate_limit()):
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9991111111')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990002')
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9992222222')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990001')
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertNotRequeue(backend, '+9993333333')
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[1]['orig_phone_number'], '+9990003')
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertRequeue(backend, '+9991111111')
self.assertFalse(mock_send.called)
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertRequeue(backend, '+9992222222')
self.assertFalse(mock_send.called)
with patch('corehq.apps.sms.tests.test_backends.LoadBalanceAndRateLimitBackend.send') as mock_send:
self.assertRequeue(backend, '+9993333333')
self.assertFalse(mock_send.called)
class SQLMobileBackendMappingTestCase(TestCase):
def test_backend_map(self):
backend_map = BackendMap(
1, {
'1': 2,
'27': 3,
'256': 4,
'25670': 5,
'25675': 6,
}
)
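        # The longest matching prefix wins; numbers that match no prefix fall back
        # to the catchall backend id (1).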
self.assertEqual(backend_map.get_backend_id_by_prefix('910000000'), 1)
self.assertEqual(backend_map.get_backend_id_by_prefix('100000000'), 2)
self.assertEqual(backend_map.get_backend_id_by_prefix('200000000'), 1)
self.assertEqual(backend_map.get_backend_id_by_prefix('250000000'), 1)
self.assertEqual(backend_map.get_backend_id_by_prefix('270000000'), 3)
self.assertEqual(backend_map.get_backend_id_by_prefix('256000000'), 4)
self.assertEqual(backend_map.get_backend_id_by_prefix('256700000'), 5)
self.assertEqual(backend_map.get_backend_id_by_prefix('256750000'), 6)
def assertNoDomainDefaultBackend(self, domain):
self.assertEqual(
SQLMobileBackendMapping.objects.filter(domain=domain).count(),
0
)
def assertDomainDefaultBackend(self, domain, backend):
mapping = SQLMobileBackendMapping.objects.get(domain=domain)
self.assertFalse(mapping.is_global)
self.assertEqual(mapping.domain, domain)
self.assertEqual(mapping.backend_type, SQLMobileBackend.SMS)
self.assertEqual(mapping.prefix, '*')
self.assertEqual(mapping.backend_id, backend.pk)
def test_set_default_domain_backend(self):
backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND1',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND2',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
domain = 'domain-default-backend-test'
self.assertNoDomainDefaultBackend(domain)
SQLMobileBackendMapping.set_default_domain_backend(domain, backend1)
self.assertDomainDefaultBackend(domain, backend1)
SQLMobileBackendMapping.set_default_domain_backend(domain, backend2)
self.assertDomainDefaultBackend(domain, backend2)
SQLMobileBackendMapping.unset_default_domain_backend(domain)
self.assertNoDomainDefaultBackend(domain)
backend1.delete()
backend2.delete()
def test_get_prefix_to_backend_map(self):
backend1 = SQLTestSMSBackend.objects.create(
name='BACKEND1',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend2 = SQLTestSMSBackend.objects.create(
name='BACKEND2',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend3 = SQLTestSMSBackend.objects.create(
name='BACKEND3',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend4 = SQLTestSMSBackend.objects.create(
name='BACKEND4',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend5 = SQLTestSMSBackend.objects.create(
name='BACKEND5',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend6 = SQLTestSMSBackend.objects.create(
name='BACKEND6',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
)
backend_mapping1 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='*',
backend=backend1
)
backend_mapping2 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='27',
backend=backend2
)
backend_mapping3 = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='1',
backend=backend3
)
backend_mapping4 = SQLMobileBackendMapping.objects.create(
is_global=False,
domain='prefix-backend-map-test',
backend_type=SQLMobileBackend.SMS,
prefix='*',
backend=backend4
)
backend_mapping5 = SQLMobileBackendMapping.objects.create(
is_global=False,
domain='prefix-backend-map-test',
backend_type=SQLMobileBackend.SMS,
prefix='256',
backend=backend5
)
backend_mapping6 = SQLMobileBackendMapping.objects.create(
is_global=False,
domain='prefix-backend-map-test',
backend_type=SQLMobileBackend.SMS,
prefix='25670',
backend=backend6
)
global_backend_map = SQLMobileBackendMapping.get_prefix_to_backend_map(SQLMobileBackend.SMS)
self.assertEqual(global_backend_map.catchall_backend_id, backend1.pk)
self.assertEqual(global_backend_map.backend_map_dict, {
'27': backend2.pk,
'1': backend3.pk,
})
domain_backend_map = SQLMobileBackendMapping.get_prefix_to_backend_map(
SQLMobileBackend.SMS,
domain='prefix-backend-map-test'
)
self.assertEqual(domain_backend_map.catchall_backend_id, backend4.pk)
self.assertEqual(domain_backend_map.backend_map_dict, {
'256': backend5.pk,
'25670': backend6.pk,
})
backend_mapping1.delete()
backend_mapping2.delete()
backend_mapping3.delete()
backend_mapping4.delete()
backend_mapping5.delete()
backend_mapping6.delete()
backend1.delete()
backend2.delete()
backend3.delete()
backend4.delete()
backend5.delete()
backend6.delete()
| 36.281888
| 111
| 0.643593
|
8999893aa633a5d1bff3f32e6e34765bc4649e25
| 668
|
py
|
Python
|
manage.py
|
Paul-Ngigi/joy-childrens-home-backend
|
9a631e37724c54e008ceda7ad205e753419e873b
|
[
"MIT"
] | null | null | null |
manage.py
|
Paul-Ngigi/joy-childrens-home-backend
|
9a631e37724c54e008ceda7ad205e753419e873b
|
[
"MIT"
] | null | null | null |
manage.py
|
Paul-Ngigi/joy-childrens-home-backend
|
9a631e37724c54e008ceda7ad205e753419e873b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'joy_children.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.043478
| 76
| 0.681138
|
6dbb9579710e16f33918c485420e06328b9e9ef5
| 1,650
|
py
|
Python
|
code/char_rnn.py
|
shubhamagarwal92/deep-learning-nlp-sentiment-analysis
|
527bc76a7e106071e33140d67f70500a93666326
|
[
"MIT"
] | null | null | null |
code/char_rnn.py
|
shubhamagarwal92/deep-learning-nlp-sentiment-analysis
|
527bc76a7e106071e33140d67f70500a93666326
|
[
"MIT"
] | null | null | null |
code/char_rnn.py
|
shubhamagarwal92/deep-learning-nlp-sentiment-analysis
|
527bc76a7e106071e33140d67f70500a93666326
|
[
"MIT"
] | null | null | null |
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
max_features = 68
embedding_dims = 32
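# NOTE: this snippet assumes `maxlen`, `X_train`, `y_train`, `X_test` and `y_test`
# are prepared elsewhere (e.g. via sequence.pad_sequences); they are not defined
# in this file.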
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model = Sequential()
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen,
dropout=0.2))
model.add(LSTM(embedding_dims, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun
model.add(Dense(1))
model.add(Activation('relu'))
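# NOTE: a 'sigmoid' output activation is the usual pairing with binary_crossentropy;
# 'relu' is kept here as in the original experiment (see the reported accuracies below).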
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
batch_size = 32
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5)
# validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size)
# Epoch 1/5
# 25000/25000 [==============================] - 204s - loss: 0.7605 - acc: 0.5069
# Epoch 2/5
# 25000/25000 [==============================] - 209s - loss: 0.6904 - acc: 0.5283
# Epoch 3/5
# 25000/25000 [==============================] - 215s - loss: 0.6875 - acc: 0.5334
# Epoch 4/5
# 25000/25000 [==============================] - 210s - loss: 0.6840 - acc: 0.5462
# Epoch 5/5
# 25000/25000 [==============================] - 211s - loss: 0.6790 - acc: 0.5560
| 35.106383
| 97
| 0.612727
|
6db256f66c7fdbf9a6d67425101f7f8891f60f19
| 1,734
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 2
|
2020-01-17T09:11:24.000Z
|
2021-12-30T04:57:15.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 7
|
2021-11-10T20:21:23.000Z
|
2022-03-22T19:18:39.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 3
|
2021-05-09T13:41:29.000Z
|
2021-06-24T06:12:05.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the UniqueDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class UniqueDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testUnique(self):
def build_dataset(num_elements, unique_elem_range):
return dataset_ops.Dataset.range(num_elements).map(
lambda x: x % unique_elem_range).apply(unique.unique())
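    # range(200) mapped modulo 100 yields each value exactly twice, so the unique
    # dataset contains 100 elements, matching the expected count passed below.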
self.run_core_tests(lambda: build_dataset(200, 100), 100)
if __name__ == "__main__":
test.main()
| 38.533333
| 80
| 0.745098
|
fa4177443f583329fb7186f209650d569f4973e5
| 5,132
|
py
|
Python
|
nmtpytorch/models/mnmt.py
|
fmetze/nmtpytorch
|
658a39a2c50e4e9e2fde69b520ddac7efc083257
|
[
"MIT"
] | null | null | null |
nmtpytorch/models/mnmt.py
|
fmetze/nmtpytorch
|
658a39a2c50e4e9e2fde69b520ddac7efc083257
|
[
"MIT"
] | null | null | null |
nmtpytorch/models/mnmt.py
|
fmetze/nmtpytorch
|
658a39a2c50e4e9e2fde69b520ddac7efc083257
|
[
"MIT"
] | 1
|
2020-07-22T19:25:53.000Z
|
2020-07-22T19:25:53.000Z
|
# -*- coding: utf-8 -*-
import torch
import logging
from .nmt import NMT
from ..layers import MultimodalTextEncoder
from ..layers import ConditionalDecoder
logger = logging.getLogger('nmtpytorch')
class MultimodalNMT(NMT):
"""A encoder/decoder enriched multimodal NMT.
Integration types (feat_fusion argument)
'encinit': Initialize RNNs in the encoder
'decinit': Initializes first decoder RNN.
'encdecinit': Initializes RNNs in the encoder & first decoder RNN.
'trgmul': Multiplicative interaction with trg embs.
'srcmul': Multiplicative interaction with src embs.
'ctxmul': Multiplicative interaction with src encodings.
'concat': Concat the embeddings and features (doubles RNN input)
'sum': Sum the embeddings with projected features
'prepend': Input sequence: [vis, embs, eos]
'append': Input sequence: [embs, vis, eos]
"""
def __init__(self, opts):
super().__init__(opts)
def set_defaults(self):
# Set parent defaults
super().set_defaults()
# NOTE: You should not use dec_init == feats with this model.
# Use "feat_fusion:decinit" instead.
self.defaults.update({
'feat_dim': 2048, # Feature dimension for multimodal encoder
'feat_activ': None, # Feature non-linearity for multimodal encoder
'feat_fusion': 'encinit', # By default initialize only the encoder
})
def reset_parameters(self):
super().reset_parameters()
# Reset padding embeddings to 0
with torch.no_grad():
self.enc.emb.weight.data[0].fill_(0)
def setup(self, is_train=True):
"""Sets up NN topology by creating the layers."""
# Hack to sync enc-decinit computation
self.dec_requires_img = False
if self.opts.model['feat_fusion']:
if 'decinit' in self.opts.model['feat_fusion']:
self.opts.model['dec_init'] = 'feats'
self.opts.model['dec_init_size'] = self.opts.model['feat_dim']
self.opts.model['dec_init_activ'] = self.opts.model['feat_activ']
self.dec_requires_img = True
elif self.opts.model['feat_fusion'].startswith('trg'):
self.dec_requires_img = True
self.enc = MultimodalTextEncoder(
input_size=self.opts.model['emb_dim'],
hidden_size=self.opts.model['enc_dim'],
n_vocab=self.n_src_vocab,
rnn_type=self.opts.model['enc_type'],
dropout_emb=self.opts.model['dropout_emb'],
dropout_ctx=self.opts.model['dropout_ctx'],
dropout_rnn=self.opts.model['dropout_enc'],
num_layers=self.opts.model['n_encoders'],
emb_maxnorm=self.opts.model['emb_maxnorm'],
emb_gradscale=self.opts.model['emb_gradscale'],
layer_norm=self.opts.model['enc_lnorm'],
feat_size=self.opts.model['feat_dim'],
feat_activ=self.opts.model['feat_activ'],
feat_fusion=self.opts.model['feat_fusion'])
self.dec = ConditionalDecoder(
input_size=self.opts.model['emb_dim'],
hidden_size=self.opts.model['dec_dim'],
n_vocab=self.n_trg_vocab,
rnn_type=self.opts.model['dec_type'],
ctx_size_dict=self.ctx_sizes,
ctx_name=str(self.sl),
tied_emb=self.opts.model['tied_emb'],
dec_init=self.opts.model['dec_init'],
dec_init_size=self.opts.model['dec_init_size'],
dec_init_activ=self.opts.model['dec_init_activ'],
att_type=self.opts.model['att_type'],
att_temp=self.opts.model['att_temp'],
att_activ=self.opts.model['att_activ'],
transform_ctx=self.opts.model['att_transform_ctx'],
mlp_bias=self.opts.model['att_mlp_bias'],
att_bottleneck=self.opts.model['att_bottleneck'],
dropout_out=self.opts.model['dropout_out'],
emb_maxnorm=self.opts.model['emb_maxnorm'],
emb_gradscale=self.opts.model['emb_gradscale'],
sched_sample=self.opts.model['sched_sampling'],
out_logic=self.opts.model['out_logic'],
emb_interact=self.opts.model['feat_fusion'],
emb_interact_dim=self.opts.model['feat_dim'],
emb_interact_activ=self.opts.model['feat_activ'])
# Share encoder and decoder weights
if self.opts.model['tied_emb'] == '3way':
self.enc.emb.weight = self.dec.emb.weight
# Use the same representation everywhere
if self.opts.model['feat_fusion'] == 'encdecinit':
self.enc.ff_vis.weight = self.dec.ff_dec_init.weight
def encode(self, batch, **kwargs):
d = {str(self.sl): self.enc(batch[self.sl], v=batch.get('feats', None))}
        # It may also be decoder-side integration
if self.dec_requires_img:
d['feats'] = (batch['feats'], None)
return d
| 43.491525
| 86
| 0.609509
|
5b99b64916d736176349d9e676591dd5ca479ce8
| 3,081
|
py
|
Python
|
qa/rpc-tests/invalidateblock.py
|
stakecom/stakework
|
a2110b0ba6aa9638a18c2e7ae12f0f229e074f35
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/invalidateblock.py
|
stakecom/stakework
|
a2110b0ba6aa9638a18c2e7ae12f0f229e074f35
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/invalidateblock.py
|
stakecom/stakework
|
a2110b0ba6aa9638a18c2e7ae12f0f229e074f35
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import StakeWorkTestFramework
from test_framework.util import *
class InvalidateTest(StakeWorkTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
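        # Nodes start unconnected, so node 0 and node 1 can mine competing chains
        # until connect_nodes_bi() joins them in run_test().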
def run_test(self):
print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
print("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print("\nMake sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
print("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print("..and then mine a block")
self.nodes[2].generate(1)
print("Verify all nodes are at the right height")
time.sleep(5)
for i in range(3):
print(i,self.nodes[i].getblockcount())
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
| 40.012987
| 95
| 0.642324
|
b7a6b27b0eb8bda8a22bd8d2ceba0c5de4d111b6
| 1,179
|
py
|
Python
|
NMTK_apps/NMTK_apps/wsgi.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
NMTK_apps/NMTK_apps/wsgi.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
NMTK_apps/NMTK_apps/wsgi.py
|
jrawbits/nmtk-1
|
759781770b5f2464008ceb5376fd3922b1b877fa
|
[
"Unlicense"
] | null | null | null |
"""
WSGI config for NMTK_apps project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import djcelery
djcelery.setup_loader()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NMTK_apps.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 39.3
| 79
| 0.810008
|
918da32e48cf3e128cfec79d4e6b97eaa9923a04
| 208
|
py
|
Python
|
DIR.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
DIR.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
DIR.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
def DIR(L): #L is dir()
D = {}
for i in L:
if i[:2] != '__':
D[i] = str(eval(i))
    return D
# Demonstrate DIR() on the current module namespace
L = dir()
D = DIR(L)
for i in D:
    print(i + '\t--->\t' + D[i])
input()
| 14.857143
| 38
| 0.346154
|
d83ff78f690d2c365bbf105245407998bba506d1
| 3,004
|
py
|
Python
|
tests/create-test.py
|
mgorny/glep63-check
|
820dd55e3c667edfaeff165cd990c121f436c108
|
[
"BSD-2-Clause"
] | null | null | null |
tests/create-test.py
|
mgorny/glep63-check
|
820dd55e3c667edfaeff165cd990c121f436c108
|
[
"BSD-2-Clause"
] | 4
|
2018-07-21T20:04:37.000Z
|
2019-05-06T12:26:56.000Z
|
tests/create-test.py
|
mgorny/glep63-check
|
820dd55e3c667edfaeff165cd990c121f436c108
|
[
"BSD-2-Clause"
] | 1
|
2018-07-21T19:54:38.000Z
|
2018-07-21T19:54:38.000Z
|
#!/usr/bin/env python
# Write a test case based on key file.
# (c) 2018 Michał Górny
# Released under the terms of 2-clause BSD license.
import enum
import io
import os.path
import subprocess
import sys
sys.path.insert(0, '.')
from glep63.check import (check_key,)
from glep63.gnupg import (process_gnupg_colons,)
from glep63.specs import (SPECS,)
def pretty_key(key, indent=4):
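    # Render a key/issue namedtuple as Python source text so it can be embedded in
    # the generated test case; nested values and lists are indented recursively.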
out = '{}('.format(key.__class__.__name__)
for k, v in key._asdict().items():
# issue class special cases
if k == 'key':
v = 'KEY'
elif k == 'subkey':
v = 'KEY.subkeys[0]'
elif k == 'uid':
v = 'KEY.uids[0]'
elif k == 'long_desc':
v = repr('')
elif isinstance(v, enum.Enum):
v = '{}.{}'.format(v.__class__.__name__, v.name)
elif isinstance(v, list):
lv = '['
for e in v:
lv += ('\n{_:{padding}}{item},'
.format(_=' ', padding=indent+8,
item=pretty_key(e, indent=indent+8)))
v = '{}\n{_:{padding}}]'.format(lv, _=' ', padding=indent+4)
else:
v = repr(v)
out += '\n{_:{padding}}{k}={v},'.format(k=k, v=v, _=' ',
padding=indent+4)
out += '\n{_:{padding}})'.format(_=' ', padding=indent)
return out
def pretty_result(result):
if not result:
return ''
out = ''
for r in result:
out += '\n{_:{padding}}{v},'.format(v=pretty_key(r, indent=12),
_=' ', padding=12)
return '{}\n{_:{padding}}'.format(out, _=' ', padding=8)
def pretty_results(results):
out = '{'
for k, v in sorted(results.items()):
out += '\n{_:{indent}}{k}: [{v}],'.format(k=repr(k),
v=pretty_result(v),
_=' ', indent=8)
out += '\n }'
return out
def main(key_path, test_name):
with subprocess.Popen(['gpg', '--no-default-keyring',
'--keyring', key_path, '--list-key', '--with-colons'],
stdout=subprocess.PIPE) as s:
key_colons, _ = s.communicate()
assert s.wait() == 0
key_colons = key_colons.decode('ASCII')
with io.StringIO(key_colons) as f:
key_cls = process_gnupg_colons(f)
assert len(key_cls) == 1
key_cls = key_cls[0]
results = {}
for k, spec in SPECS.items():
results[k] = check_key(key_cls, spec)
print('''
class {test_name}(tests.key_base.BaseKeyTest):
KEY_FILE = '{key_file}'
GPG_COLONS = \'\'\'
{gpg_colons}\'\'\'
KEY = {key_cls}
EXPECTED_RESULTS = {expected}'''.format(
test_name=test_name,
key_file=os.path.relpath(key_path, 'tests'),
gpg_colons=key_colons,
key_cls=pretty_key(key_cls),
expected=pretty_results(results)))
if __name__ == '__main__':
main(*sys.argv[1:])
| 27.559633
| 72
| 0.514647
|
cd536f2d215dda7d7517b6f1e00ae18e9b0770bb
| 1,896
|
py
|
Python
|
plugins/schnitzelplats.py
|
nyson/mat
|
d74bde5c164fdf3d387626aa1856ae70acdb1667
|
[
"MIT"
] | null | null | null |
plugins/schnitzelplats.py
|
nyson/mat
|
d74bde5c164fdf3d387626aa1856ae70acdb1667
|
[
"MIT"
] | null | null | null |
plugins/schnitzelplats.py
|
nyson/mat
|
d74bde5c164fdf3d387626aa1856ae70acdb1667
|
[
"MIT"
] | null | null | null |
import datetime
import json
from functools import reduce
__days = [
"Måndag",
"Tisdag",
"Onsdag",
"Torsdag",
"Fredag"
]
__weekly_headers = [
"Alltid på Platz",
"Veckans Schnitzel",
"Veckans vegetariska"
]
def name():
return "Schnitzelplatz"
def food(api, date):
def collapse_paragraphs(ps):
return dict(map(
lambda kv: (
kv[0],
reduce(
lambda acc, s: acc + " " + s,
kv[1],
""
).replace("\n", " ")
),
ps.items()))
def categorize(menu_soup):
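        # Walk the menu soup in document order: each <h4> starts a new section and
        # the <p> tags that follow are collected under that heading.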
menu = {}
current_index = None
for item in menu_soup:
if item.name == "h4":
current_index = item.get_text()
elif item.name == "p":
                if current_index not in menu:
menu[current_index] = []
menu[current_index].append(item.get_text())
return menu
if (not api.is_current_week(date)
or not api.is_weekday(date)
or not api.soup):
return []
response = api.requests.get('https://schnitzelplatz.se/lunch/')
soup = api.soup(response.content, 'html.parser')
    food_menu = soup.find_all("div", {"class": "foodmenu section-padding--medium"})[0].find_all()
parsed_menu = collapse_paragraphs(categorize(food_menu))
# Assert that all expected headings exists in the parsed menu
assert all(heading in parsed_menu
for heading in __days + __weekly_headers)
return [
api.food("Alltid på Platz: ", parsed_menu["Alltid på Platz"]),
api.food("Veckans Schnitzel: ", parsed_menu["Veckans Schnitzel"]),
api.food("Veckans Vegetariska: ", parsed_menu["Veckans vegetariska"]),
api.food("Dagens: ", parsed_menu[__days[date.weekday()]])
]
| 27.478261
| 97
| 0.552215
|
d5e39c84197c85468fbbaec022fd298d1df492f1
| 248
|
py
|
Python
|
parsers/faparser.py
|
sWski/beerlist
|
4279bd37f6ee15aadadfabe978f12af734fe12d2
|
[
"Unlicense"
] | 1
|
2020-02-25T11:08:45.000Z
|
2020-02-25T11:08:45.000Z
|
parsers/faparser.py
|
sWski/beerlist
|
4279bd37f6ee15aadadfabe978f12af734fe12d2
|
[
"Unlicense"
] | 1
|
2020-06-25T07:20:19.000Z
|
2020-06-25T07:20:19.000Z
|
parsers/faparser.py
|
sWski/beerlist
|
4279bd37f6ee15aadadfabe978f12af734fe12d2
|
[
"Unlicense"
] | 4
|
2020-04-16T18:39:34.000Z
|
2021-11-12T15:20:44.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import common as beerlib
html = beerlib.download_html('https://untappd.com/v/fa-bar-oranzova/1728532')
if not html:
exit(-1)
beerlib.process_untappd(html, 'F.A. Bar Oranžová', sys.argv)
| 20.666667
| 77
| 0.709677
|
306368ba13eac86188bc92a8cee1252afa5facb1
| 4,467
|
py
|
Python
|
landlab/graph/structured_quad/tests/test_quad.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
landlab/graph/structured_quad/tests/test_quad.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | 1
|
2016-03-16T02:34:08.000Z
|
2016-04-20T19:31:30.000Z
|
landlab/graph/structured_quad/tests/test_quad.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
"""Test StructuredQuadGraph."""
from numpy.testing import assert_array_almost_equal, assert_array_equal
from landlab.graph import StructuredQuadGraph
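# All tests below build the same 3x3 structured quad grid with non-uniform node
# spacing: rows at y = 0, 1, 3 and columns at x = 3, 4, 6.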
def test_create():
"""Test creating a quad graph."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert graph.number_of_nodes == 9
assert graph.number_of_links == 12
assert graph.number_of_patches == 4
def test_perimeter_nodes():
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(graph.perimeter_nodes, [2, 5, 8, 7, 6, 3, 0, 1])
def test_length_of_link():
"""Test length of links."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_almost_equal(
graph.length_of_link,
[1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0],
)
def test_area_of_patch():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_almost_equal(graph.area_of_patch, [1.0, 2.0, 2.0, 4.0])
def test_nodes_at_patch():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.nodes_at_patch, [[4, 3, 0, 1], [5, 4, 1, 2], [7, 6, 3, 4], [8, 7, 4, 5]]
)
def test_patches_at_node():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.patches_at_node,
[
[0, -1, -1, -1],
[1, 0, -1, -1],
[-1, 1, -1, -1],
[2, -1, -1, 0],
[3, 2, 0, 1],
[-1, 3, 1, -1],
[-1, -1, -1, 2],
[-1, -1, 2, 3],
[-1, -1, 3, -1],
],
)
def test_patches_at_link():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.patches_at_link,
[
[-1, 0],
[-1, 1],
[0, -1],
[1, 0],
[-1, 1],
[0, 2],
[1, 3],
[2, -1],
[3, 2],
[-1, 3],
[2, -1],
[3, -1],
],
)
def test_links_at_patch():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.links_at_patch, [[3, 5, 2, 0], [4, 6, 3, 1], [8, 10, 7, 5], [9, 11, 8, 6]]
)
def test_nodes_at_link():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.nodes_at_link,
[
[0, 1],
[1, 2],
[0, 3],
[1, 4],
[2, 5],
[3, 4],
[4, 5],
[3, 6],
[4, 7],
[5, 8],
[6, 7],
[7, 8],
],
)
def test_links_at_node():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.links_at_node,
[
[0, 2, -1, -1],
[1, 3, 0, -1],
[-1, 4, 1, -1],
[5, 7, -1, 2],
[6, 8, 5, 3],
[-1, 9, 6, 4],
[10, -1, -1, 7],
[11, -1, 10, 8],
[-1, -1, 11, 9],
],
)
def test_link_dirs_at_node():
"""Test areas of patches."""
y = [0, 1, 3, 0, 1, 3, 0, 1, 3]
x = [3, 3, 3, 4, 4, 4, 6, 6, 6]
graph = StructuredQuadGraph((y, x), shape=(3, 3))
assert_array_equal(
graph.link_dirs_at_node,
[
[-1, -1, 0, 0],
[-1, -1, 1, 0],
[0, -1, 1, 0],
[-1, -1, 0, 1],
[-1, -1, 1, 1],
[0, -1, 1, 1],
[-1, 0, 0, 1],
[-1, 0, 1, 1],
[0, 0, 1, 1],
],
)
| 25.820809
| 88
| 0.411238
|
7636193cebdc7d67ba2e8af280e376d0c4a5c8e8
| 1,080
|
py
|
Python
|
test/nlp/test_autohf_tokenclassification.py
|
Qiaochu-Song/FLAML
|
28511340528dfc9def29862f5076b4516eb7305f
|
[
"MIT"
] | null | null | null |
test/nlp/test_autohf_tokenclassification.py
|
Qiaochu-Song/FLAML
|
28511340528dfc9def29862f5076b4516eb7305f
|
[
"MIT"
] | null | null | null |
test/nlp/test_autohf_tokenclassification.py
|
Qiaochu-Song/FLAML
|
28511340528dfc9def29862f5076b4516eb7305f
|
[
"MIT"
] | null | null | null |
import sys
import pytest
import requests
from utils import get_toy_data_tokenclassification, get_automl_settings
@pytest.mark.skipif(sys.platform == "darwin", reason="do not run on mac os")
def test_tokenclassification():
from flaml import AutoML
X_train, y_train, X_val, y_val = get_toy_data_tokenclassification()
automl = AutoML()
automl_settings = get_automl_settings()
automl_settings["task"] = "token-classification"
automl_settings["metric"] = "seqeval:overall_f1" # evaluating based on the overall_f1 of seqeval
automl_settings["fit_kwargs_by_estimator"]["transformer"]["label_list"] = [
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
"B-MISC",
"I-MISC",
]
try:
automl.fit(
X_train=X_train,
y_train=y_train,
X_val=X_val,
y_val=y_val,
**automl_settings
)
except requests.exceptions.HTTPError:
return
if __name__ == "__main__":
test_tokenclassification()
| 25.116279
| 101
| 0.62037
|
7e6398e6fcf9331083224124eb15efd030b7d54d
| 764
|
py
|
Python
|
MxShop/MxShop/urls.py
|
xgq07/Django
|
a3bfa4fa0ebfc3cdcbc59bcaa810507889050d3b
|
[
"MIT"
] | null | null | null |
MxShop/MxShop/urls.py
|
xgq07/Django
|
a3bfa4fa0ebfc3cdcbc59bcaa810507889050d3b
|
[
"MIT"
] | null | null | null |
MxShop/MxShop/urls.py
|
xgq07/Django
|
a3bfa4fa0ebfc3cdcbc59bcaa810507889050d3b
|
[
"MIT"
] | null | null | null |
"""MxShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import xadmin
urlpatterns = [
    path('xadmin/', xadmin.site.urls),
]
| 33.217391
| 77
| 0.713351
|
17aafa3ced425f735155bc4ed9fcbe366ca25887
| 13,027
|
py
|
Python
|
tests/test_service.py
|
frankwirgit/fhir-flask-rest
|
a8d72b1636a4c441e2e96a21f167fefc232c14e0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
frankwirgit/fhir-flask-rest
|
a8d72b1636a4c441e2e96a21f167fefc232c14e0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_service.py
|
frankwirgit/fhir-flask-rest
|
a8d72b1636a4c441e2e96a21f167fefc232c14e0
|
[
"Apache-2.0"
] | null | null | null |
# Tests for REST API Service
"""
REST API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
codecov --token=$CODECOV_TOKEN
While debugging just these tests it's convenient to use this:
nosetests --stop tests/test_service.py:TestPatServer
"""
import os
import logging
import unittest
import json
import copy
from unittest.mock import MagicMock, patch
from urllib.parse import quote_plus
from flask_api import status # HTTP Status Codes
from service.models import Pprofile, Pname, Paddress, db
from service.service import app, init_db
#from .factories import PatFactory
# DATABASE_URI = os.getenv('DATABASE_URI', 'sqlite:///../db/test.db')
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgres://postgres:postgres1@localhost:5432/postgres"
)
with open('tests/fhir-patient-post.json') as jsonfile:
sample_data = json.load(jsonfile)
######################################################################
# SERVICE TEST CASES
######################################################################
class TestPatServer(unittest.TestCase):
""" REST Server Tests """
@classmethod
def setUpClass(cls):
""" Run once before all tests """
app.config['TESTING'] = True
app.config['DEBUG'] = False
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
app.logger.setLevel(logging.CRITICAL)
init_db()
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
""" Runs before each test """
db.drop_all() # clean up the last tests
db.create_all() # create new tables
self.app = app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
def _create_pats(self, count):
""" create patients based on sample data """
pats = []
#there is only one record in the sample data
for i in range(count):
pat = Pprofile()
pat = pat.deserialize(sample_data)
#call of the POST method to create pat
#note: deserialize()+serialize() will not work, because
#deserialize() breaks the original json in sample_data,
#for example, no "telecom" block with "system","value" and "use"
#but with "phone_home","phone_office"..."email", etc.
#after serialize(), the json is NOT equal to the sample_data
#therefore the following call can not get a patient created
#resp = self.app.post("/pats", json=pat.serialize(), content_type="application/json")
#instead, pass the sample_data json directly to the call of the POST method
resp = self.app.post("/pats", json=sample_data, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_201_CREATED, "Could not create test patient")
new_pat = resp.get_json()
pat.id = new_pat["id"]
#note: pat is not the "created" object
            #no pat.create() has been called. And to verify, the following debug call prints out:
#root: DEBUG: <Pat fname='Nedward' lname='Flanders' id=[1] pprofile_id=[None]>
#id is assigned from new_pat["id"], profile_id is None
logging.debug(pat)
self.assertEqual(new_pat["address"][0]["pat_id"], 1)
self.assertEqual(pat.address[0].postalCode, "90210")
pats.append(pat)
return pats
def test_index(self):
""" Test the Home Page """
resp = self.app.get("/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data["name"], "Patient FHIR REST API Service")
def test_get_pat_list(self):
""" Get a list of patients """
self._create_pats(1)
resp = self.app.get("/pats")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), 1)
def test_get_pat(self):
""" Get a single patient """
test_pat = self._create_pats(1)[0]
# get the id of a patient
resp = self.app.get(
"/pats/{}".format(test_pat.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data["phone_home"], test_pat.phone_home)
def test_get_pat_by_name(self):
""" Get a single patient by name """
test_pat = self._create_pats(1)[0]
# get the id of a patient
#resp = self.app.get(
#"/pats/{}".format(test_pat.id), content_type="application/json")
resp = self.app.get(
"".join(["/pats?given=", test_pat.name[0].given_1, "&family=", test_pat.name[0].family]),
content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data[0]["name"][0]["given"][0], test_pat.name[0].given_1)
self.assertEqual(data[0]["name"][0]["family"], test_pat.name[0].family)
def test_get_pat_by_phone(self):
""" Get a single patient by phone """
test_pat = self._create_pats(1)[0]
# get the id of a patient
resp = self.app.get(
"/pats?phone_home={}".format(test_pat.phone_home), content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data[0]["phone_home"], test_pat.phone_home)
def test_get_pat_not_found(self):
""" Get a patient whos not found """
resp = self.app.get("/pats/0")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_create_pat(self):
""" Create a new patient """
test_pat = self._create_pats(1)[0]
#test_pat = PatFactory()
logging.debug(test_pat)
resp = self.app.post(
"/pats", json=sample_data, content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Make sure location header is set
location = resp.headers.get("Location", None)
self.assertIsNotNone(location)
# Check the data is correct
new_pat = resp.get_json()
self.assertEqual(new_pat["name"][0]["family"], test_pat.name[0].family, "Last name does not match")
self.assertEqual(new_pat["name"][0]["given"][0], test_pat.name[0].given_1, "First name does not match")
self.assertEqual(new_pat["address"][0]["postalCode"], test_pat.address[0].postalCode, "Zip code does not match")
self.assertEqual(new_pat["address"][0]["line"][0], test_pat.address[0].line_1, "Zip code does not match")
self.assertEqual(new_pat["birthDate"], test_pat.DOB.strftime("%Y-%m-%d"), "DOB does not match")
# Check that the location header was correct
resp = self.app.get(location, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# Check the data is correct
new_pat = resp.get_json()
self.assertEqual(new_pat["name"][0]["family"], test_pat.name[0].family, "Last name does not match")
self.assertEqual(new_pat["name"][0]["given"][0], test_pat.name[0].given_1, "First name does not match")
self.assertEqual(new_pat["address"][0]["postalCode"], test_pat.address[0].postalCode, "Zip code does not match")
self.assertEqual(new_pat["address"][0]["line"][0], test_pat.address[0].line_1, "Zip code does not match")
self.assertEqual(new_pat["birthDate"], test_pat.DOB.strftime("%Y-%m-%d"), "DOB does not match")
def test_update_pat(self):
""" Update an existing patient """
# create a patient to update
#test_pat = self._create_pats(1)[0]
#test_pat = PatFactory()
resp = self.app.post(
"/pats", json=sample_data, content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# update the patient
new_pat = resp.get_json()
new_json = copy.deepcopy(sample_data)
#modify the item value
new_json["telecom"][1]["value"] = "daisy.cao@email.com"
new_json["name"][0]["given"][0] = "Daisy"
logging.debug(new_json)
resp = self.app.put(
"/pats/{}".format(new_pat["id"]),
json=new_json,
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_pat = resp.get_json()
logging.debug(updated_pat)
#updated the pprofile entry
self.assertEqual(updated_pat["email"], "daisy.cao@email.com")
#added a new first name
self.assertEqual(updated_pat["name"][1]["given"][0], "Daisy")
self.assertEqual(updated_pat["name"][0]["given"][0], "Nedward")
def test_update_pat_latest_name(self):
""" Update the latest name of an existing patient """
# create a patient to update
resp = self.app.post(
"/pats", json=sample_data, content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
new_pat = resp.get_json()
# update the latest name of patient
modified_name_json = copy.deepcopy(sample_data["name"])
#modify the name
modified_name_json[len(modified_name_json)-1]["family"] = "Doggie"
modified_name_json[len(modified_name_json)-1]["given"][0] = "Daisy"
logging.debug(modified_name_json)
resp = self.app.put("/pats/{}/latest_name".format(new_pat["id"]),
json=modified_name_json[len(modified_name_json)-1], content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_name = resp.get_json()
logging.debug(updated_name)
#updated latest name
self.assertEqual(updated_name["given"][0], "Daisy")
self.assertEqual(updated_name["family"], "Doggie")
def test_delete_pat(self):
""" Delete a patient """
test_pat = self._create_pats(1)[0]
resp = self.app.delete(
"/pats/{}".format(test_pat.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(len(resp.data), 0)
# make sure they are deleted
resp = self.app.get(
"/pats/{}".format(test_pat.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_query_pat_list_by_gender(self):
""" Query patients by gender """
pats = self._create_pats(1)
test_gender = pats[0].gender
gender_pats = [pat for pat in pats if pat.gender.name == test_gender.name]
resp = self.app.get("/pats", query_string="gender={}".format(quote_plus(test_gender.name)))
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), len(gender_pats))
# check the data just to be sure
for _dd in data:
self.assertEqual(_dd["gender"], test_gender.name)
app.logger.info("run a test for testing query patients with the same gender")
def test_bad_request(self):
""" Send wrong media type """
pat = Pprofile()
pat = pat.deserialize(sample_data)
resp = self.app.post("/pats", json=pat.serialize(), content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_unsupported_media_type(self):
""" Send wrong media type """
resp = self.app.post("/pats", json=sample_data,
content_type="test/html")
self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def test_method_not_allowed(self):
""" Make an illegal method call """
resp = self.app.put(
"/pats", json=sample_data,
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# @patch('service.models.Pet.find_by_name')
# def test_bad_request(self, bad_request_mock):
# """ Test a Bad Request error from Find By Name """
# bad_request_mock.side_effect = DataValidationError()
# resp = self.app.get('/pets', query_string='name=fido')
# self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
#
# @patch('service.models.Pet.find_by_name')
# def test_mock_search_data(self, pet_find_mock):
# """ Test showing how to mock data """
# pet_find_mock.return_value = [MagicMock(serialize=lambda: {'name': 'fido'})]
# resp = self.app.get('/pets', query_string='name=fido')
# self.assertEqual(resp.status_code, status.HTTP_200_OK)
| 41.094637
| 120
| 0.624856
|