max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
projects/article_generator/tests/test_pipeline_corner_cases.py | JackonYang/projects-in-one | 3 | 12771751 | <reponame>JackonYang/projects-in-one
from article_generator.tools.file_group import FileGroup
from article_generator.configs import image_pipe_group_data_file
def test_image_out_of_range():
    # Smoke test: FileGroup lookups must never raise, even for an unknown
    # group name, an out-of-range index, a bad data-file path, or a
    # non-integer index argument.
    g = FileGroup(image_pipe_group_data_file)
    # make sure no exception
    g.get_file_group('anything', 1)
    g = FileGroup('error')
    g.get_file_group('dudu-1', 'hhhhhh')
| 2.234375 | 2 |
tests/printmidimessages.py | binokaryg/Piano-LED-Visualizer | 0 | 12771752 | <reponame>binokaryg/Piano-LED-Visualizer
import mido
from mido import MidiFile, Message, tempo2bpm, MidiTrack,MetaMessage
import time
# Open the first "real" MIDI input port, skipping loopback/virtual ports,
# then poll it forever and print each message with a millisecond timestamp.
inport = None
ports = mido.get_input_names()
for port in ports:
    if "Through" not in port and "RPiD" not in port and "RtMidOut" not in port:
        try:
            inport = mido.open_input(port)
            print("Inport set to " + port)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; port-open failures stay best-effort.
            print("Failed to set " + port + " as inport")
if inport is None:
    # Previously this fell through to a NameError in the loop below.
    raise SystemExit("No usable MIDI input port found")
while True:
    for msg in inport.iter_pending():
        millis = int(round(time.time() * 1000))
        # Bug fix: the template and its values were passed to print() as
        # separate arguments instead of being formatted together.
        print("t:{0} | msg:{1}".format(millis, msg))
| 2.515625 | 3 |
test_keysmith.py | dmtucker/cs112 | 3 | 12771753 | """Test Keysmith."""
import subprocess
import sys
import keysmith
def test_script():
    """Test a full run when directly invoking."""
    # Executing the module file as a script must exit with status 0;
    # check_call raises CalledProcessError otherwise.
    subprocess.check_call([sys.executable, keysmith.__file__])
def test_python_m():
    """Test python -m."""
    # `python -m keysmith` must also be a supported entry point.
    command = [sys.executable, '-m', keysmith.__name__]
    assert subprocess.run(command).returncode == 0
def test_main_stats():
    """Test a full run with statistics."""
    # Exit code 0 indicates a successful run with the --stats flag.
    assert keysmith.main(['--stats']) == 0
def test_main_population():
    """Test a population file that does not exist."""
    # A missing population file must be reported via a nonzero exit code.
    assert keysmith.main(['--population', 'nonexistent']) == 1
| 2.609375 | 3 |
src/deepen/propagation.py | petejh/deepen | 0 | 12771754 | import numpy as np
from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward
def initialize_params(layer_dims):
    """Create and initialize the params of an L-layer neural network.

    Weights are drawn from a standard normal and scaled by
    1/sqrt(fan-in); biases start at zero.

    Parameters
    ----------
    layer_dims : list or tuple of int
        The number of neurons in each layer of the network.

    Returns
    -------
    params : dict of {str: ndarray}
        Initialized parameters for each layer, l, of the L-layer network.
        Wl : ndarray
            Weights matrix of shape (`layer_dims[l]`, `layer_dims[l-1]`).
        bl : ndarray
            Biases vector of shape (`layer_dims[l]`, 1).
    """
    params = {}
    # Walk consecutive (fan_in, fan_out) pairs; layer numbering starts at 1.
    for layer, (fan_in, fan_out) in enumerate(
            zip(layer_dims[:-1], layer_dims[1:]), start=1):
        params["W{}".format(layer)] = (
            np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        )
        params["b{}".format(layer)] = np.zeros((fan_out, 1))
    return params
def linear_forward(A, W, b):
    """Calculate the linear part of forward propagation for one layer.

    Computes ``Z = W @ A + b`` (with ``A`` the previous layer's
    activations) and caches the inputs for the backward pass.

    Parameters
    ----------
    A : ndarray
        Activations from the previous layer, of shape (size of previous
        layer, number of examples).
    W : ndarray
        Weights matrix of shape (size of current layer, size of previous layer).
    b : ndarray
        Bias vector of shape (size of current layer, 1).

    Returns
    -------
    Z : ndarray
        Pre-activation values of shape (size of current layer,
        number of examples).
    cache : tuple of ndarray
        The inputs ``(A, W, b)``, kept for backpropagation.
    """
    Z = W @ A + b
    return Z, (A, W, b)
def layer_forward(A_prev, W, b, activation):
    """Compute forward propagation for a single layer.

    Parameters
    ----------
    A_prev : ndarray
        Activations from the previous layer of shape (size of previous layer,
        number of examples).
    W : ndarray
        Weights matrix of shape (size of current layer, size of previous layer).
    b : ndarray
        Bias vector of shape (size of the current layer, 1).
    activation : str {"sigmoid", "relu"}
        Activation function to be used in this layer.

    Returns
    -------
    A : ndarray
        Output of the activation function of shape (size of current layer,
        number of examples).
    cache : tuple of (tuple of ndarray, ndarray)
        ``(linear_cache, activation_cache)`` stored for the backward pass.

    Raises
    ------
    ValueError
        If `activation` is not one of the supported names.
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    else:
        # Previously an unknown name fell through and produced a confusing
        # NameError on `A`; fail fast with an explicit message instead.
        raise ValueError("unknown activation: %r" % (activation,))
    return A, (linear_cache, activation_cache)
def model_forward(X, parameters):
    """Compute forward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].

    Parameters
    ----------
    X : ndarray
        Input data of shape (input size, number of examples)
    parameters : dict of {str: ndarray}
        Initialized weights/biases keyed ``W1..WL`` / ``b1..bL``.

    Returns
    -------
    Y_hat : ndarray
        Vector of prediction probabilities of shape (1, number of
        examples).
    caches : list of (tuple of (tuple of ndarray, ndarray))
        The L `cache` results from `layer_forward()`.
    """
    # Two entries (W, b) per layer.
    num_layers = len(parameters) // 2
    caches = []
    A = X
    # Hidden layers all use relu.
    for layer in range(1, num_layers):
        A, cache = layer_forward(
            A,
            parameters["W{}".format(layer)],
            parameters["b{}".format(layer)],
            "relu"
        )
        caches.append(cache)
    # Output layer uses sigmoid to produce probabilities.
    Y_hat, final_cache = layer_forward(
        A,
        parameters["W{}".format(num_layers)],
        parameters["b{}".format(num_layers)],
        "sigmoid"
    )
    caches.append(final_cache)
    return Y_hat, caches
def compute_cost(Y_hat, Y):
    """Compute the cross-entropy cost.

    cost = -(1/m) * sum_i [ y_i * log(a_i) + (1 - y_i) * log(1 - a_i) ]

    Parameters
    ----------
    Y_hat : ndarray
        Vector of prediction probabilities from `model_forward()` of shape
        (1, number of examples).
    Y : ndarray
        Vector of true values of shape (1, number of examples).

    Returns
    -------
    cost : float
        Scalar cross-entropy cost averaged over the examples.
    """
    m = Y.shape[1]
    # Per-example log-likelihood under the predicted probabilities.
    log_likelihood = Y * np.log(Y_hat) + (1 - Y) * np.log(1 - Y_hat)
    # Negative mean log-likelihood, squeezed from a 0-d array to a scalar.
    return np.squeeze(-np.sum(log_likelihood) / m)
def linear_backward(dZ, cache):
    """Calculate the linear portion of backward propagation for a single layer.

    Parameters
    ----------
    dZ : ndarray
        Gradient of the cost with respect to the linear output of layer l.
    cache : ndarray tuple
        The ``(A, W, b)`` stored by `linear_forward()`.

    Returns
    -------
    dA_prev : ndarray
        Gradient of the cost w.r.t. the previous layer's activations;
        same shape as the cached ``A``.
    dW : ndarray
        Gradient of the cost w.r.t. ``W``; same shape as the cached ``W``.
    db : ndarray
        Gradient of the cost w.r.t. ``b``; same shape as the cached ``b``.
    """
    A_prev, W, _ = cache
    m = A_prev.shape[1]
    # Average the weight/bias gradients over the m examples.
    dW = dZ @ A_prev.T / m
    db = dZ.sum(axis=1, keepdims=True) / m
    dA_prev = W.T @ dZ
    return dA_prev, dW, db
def layer_backward(dA, cache, activation):
    """Compute backward propagation for a single layer.

    Parameters
    ----------
    dA: ndarray
        Post-activation gradient for current layer, l.
    cache : tuple of (tuple of ndarray, ndarray)
        Stored `(linear_cache, activation_cache)` from `layer_forward()`.
    activation : str {"relu", "sigmoid"}
        Activation function used in this layer.

    Returns
    -------
    dA_prev : ndarray
        Gradient of the cost w.r.t. the previous layer's activations.
    dW : ndarray
        Gradient of the cost w.r.t. W for the current layer.
    db : ndarray
        Gradient of the cost w.r.t. b for the current layer.

    Raises
    ------
    ValueError
        If `activation` is not one of the supported names.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    else:
        # Previously an unknown name fell through and produced a confusing
        # NameError on `dZ`; fail fast with an explicit message instead.
        raise ValueError("unknown activation: %r" % (activation,))
    # The linear part of the backward step is identical for both activations,
    # so the duplicated call in each branch is hoisted out.
    return linear_backward(dZ, linear_cache)
def model_backward(Y_hat, Y, caches):
    """Compute backward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].

    Parameters
    ----------
    Y_hat : ndarray
        Vector of prediction probabilities from `model_forward()` of shape
        (1, number of examples).
    Y : ndarray
        Vector of true values of shape (1, number of examples).
    caches : list of (tuple of (tuple of ndarray, ndarray))
        Stored results of `model_forward()`.

    Returns
    -------
    grads : dict of {str: ndarray}
        Gradients for each layer: ``dAl`` (activations), ``dWl`` (weights),
        and ``dbl`` (biases).
    """
    grads = {}
    L = len(caches)
    # (The unused local `m = Y_hat.shape[1]` from the original is removed.)
    Y = Y.reshape(Y_hat.shape)
    # Derivative of the cross-entropy cost w.r.t. the predictions.
    dY_hat = -(np.divide(Y, Y_hat) - np.divide(1-Y, 1-Y_hat))
    # Output (sigmoid) layer.
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = (
        layer_backward(dY_hat, caches[L-1], "sigmoid")
    )
    # Hidden (relu) layers, walked from the last hidden layer to the first.
    for l in reversed(range(L-1)):
        dA_prev_temp, dW_temp, db_temp = (
            layer_backward(grads["dA" + str(l+1)], caches[l], "relu")
        )
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
    return grads
def update_params(params, grads, learning_rate):
    """Update parameters using gradient descent.

    The arrays in `params` are updated in place and the same dict is
    returned for convenience.

    Parameters
    ----------
    params : dict of {str: ndarray}
        Initialized parameters from `intialize_params()`.
    grads : dict of {str: ndarray}
        Gradients from `model_backward()`.
    learning_rate : float in (0, 1)
        Learning rate for the model.

    Returns
    -------
    params : dict of {str: ndarray}
        The updated parameters (`Wl` weights and `bl` biases).
    """
    num_layers = len(params) // 2
    for layer in range(1, num_layers + 1):
        # In-place step down the gradient for each layer's W and b.
        params["W{}".format(layer)] -= learning_rate * grads["dW{}".format(layer)]
        params["b{}".format(layer)] -= learning_rate * grads["db{}".format(layer)]
    return params
| 3.65625 | 4 |
web/templatetags/project.py | HongDaMa/sass_project | 1 | 12771755 | #!E:\py_virtual_env\saas_project\Scripts\python.exe
# -*- coding: utf-8 -*-
from django.template import Library
from web import models
register = Library()
@register.inclusion_tag('inclusion/all_project_list.html')
def all_project_list(request):
    """Build the context for the project-list inclusion template.

    Collects (1) every project created by the current user and
    (2) every project the current user participates in.
    """
    created_projects = models.Project.objects.filter(creator=request.tracer.user)
    joined_projects = models.ProjectUser.objects.filter(user=request.tracer.user)
    return {'my': created_projects, 'join': joined_projects, 'request': request}
python/advent_of_code/y2020/day02.py | stonecharioteer/advent-of-code | 0 | 12771756 | <gh_stars>0
"""--- Day 2: Password Philosophy ---
Your flight departs in a few days from the coastal airport; the easiest way
down to the coast from here is via toboggan.
The shopkeeper at the North Pole Toboggan Rental Shop is having a bad day.
"Something's wrong with our computers; we can't log in!" You ask if you can
take a look.
Their password database seems to be a little corrupted: some of the passwords
wouldn't have been allowed by the Official Toboggan Corporate Policy that was
in effect when they were chosen.
To try to debug the problem, they have created a list (your puzzle input) of
passwords (according to the corrupted database) and the corporate policy when
that password was set.
For example, suppose you have the following list:
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
Each line gives the password policy and then the password. The password policy
indicates the lowest and highest number of times a given letter must appear for
the password to be valid. For example, 1-3 a means that the password must
contain a at least 1 time and at most 3 times.
In the above example, 2 passwords are valid. The middle password, 1-3 b: cdefg, is
not; it contains no instances of b, but needs at least 1. The first and third
passwords are valid: they contain one a or nine c, both within the limits of
their respective policies.
How many passwords are valid according to their policies?
--- Part Two ---
While it appears you validated the passwords correctly, they don't seem to be
what the Official Toboggan Corporate Authentication System is expecting.
The shopkeeper suddenly realizes that he just accidentally explained the
password policy rules from his old job at the sled rental place down the
street! The Official Toboggan Corporate Policy actually works a little
differently.
Each policy actually describes two positions in the password, where 1 means the
first character, 2 means the second character, and so on. (Be careful; Toboggan
Corporate Policies have no concept of "index zero"!) Exactly one of these
positions must contain the given letter. Other occurrences of the letter are
irrelevant for the purposes of policy enforcement.
Given the same example list from above:
1-3 a: abcde is valid: position 1 contains a and position 3 does not.
1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b.
2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c.
How many passwords are valid according to the new interpretation of the
policies?
"""
from typing import Tuple, TextIO
def run(inp: TextIO) -> Tuple[int, int]:
    """Solution for 2020 day 2.

    Reads the puzzle input from `inp` and returns the (part 1, part 2)
    answers.
    """
    records = inp.read().splitlines()
    return password_philosophy(records)
def password_philosophy(password_record):
    """Count passwords valid under both policy interpretations.

    Each record looks like ``"1-3 a: abcde"``.  Part 1 treats ``1-3`` as a
    min/max occurrence count for the letter; part 2 treats the two numbers
    as 1-based positions, exactly one of which must hold the letter.

    Returns the tuple ``(part_1_count, part_2_count)``.
    """
    from collections import Counter
    part_1 = 0
    part_2 = 0
    for entry in password_record:
        policy, _, raw_password = entry.partition(":")
        password = raw_password.strip()
        counts, letter = policy.split()
        lo, hi = (int(piece) for piece in counts.split("-"))
        # Part 1: occurrence count must lie within [lo, hi].
        if lo <= Counter(password)[letter] <= hi:
            part_1 += 1
        # Part 2: exactly one of the two 1-based positions holds the letter
        # (inequality of booleans is XOR).
        if (password[lo - 1] == letter) != (password[hi - 1] == letter):
            part_2 += 1
    return part_1, part_2
| 3.359375 | 3 |
elab.py | RobertBaruch/mz80 | 10 | 12771757 | from nmigen import *
from nmigen.cli import main
from nmigen.asserts import *
from enum import Enum, unique, IntEnum
# Two different types of "enums".
class Thing(object):
    """A fake enumerated value.
    This isn't actually an Enum, but is just a bag of constants,
    and not at all Pythonic.
    """
    # Plain class-level int constants: nothing prevents arithmetic on them
    # or out-of-range assignments, and the signal width below must be kept
    # in sync with them by hand.
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        # range(0, 3) covers exactly the three constants above.
        return Signal.range(0, 3, src_loc_at=1)
@unique
class ConstingEnumThing(Enum):
    """A real Enum.
    In general, we shouldn't care what the actual value of an enum
    is. If you do, you probably don't want an enum but a Const.
    We'll see later that the enum values don't matter. You can auto() them,
    or even use strings.
    """
    # @unique guarantees no two members alias the same value.
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        return Signal.range(0, 3, src_loc_at=1)
    @property
    def const(self):
        """Returns a Const with the right number of bits for the Enum.
        Although we use self.value, we'll see later on that we can
        dispense with that.
        """
        # The throwaway signal() call only serves to derive the shape.
        return Const(self.value, ConstingEnumThing.signal().shape())
class CatDetector(Elaboratable):
    """Uses Thing, which isn't an Enum.
    This issues a warning because width(Thing.CAT) = 1, and
    width(Thing.DOG) = 2.
    """
    def __init__(self):
        self.input = Thing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, Thing.CAT)
        return m
    def detectCat(self, m, comparand):
        # Combinational logic: output is 0 for DOG/SQUIRREL and 1 for
        # anything else (which, for in-range encodings, means CAT).
        with m.Switch(comparand):
            with m.Case(Thing.DOG, Thing.SQUIRREL):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
# This is the way we want things to work:
#
# But, we get an error:
# TypeError: Object '<ConstingEnumThing.CAT: 1>' is not an nMigen value
#
# class EnumCatDetector(Elaboratable):
# def __init__(self):
# self.input = ConstingEnumThing.signal()
# self.output = Signal()
# def elaborate(self, platform):
# m = Module()
# self.detectCat(m, ConstingEnumThing.CAT)
# return m
# def detectCat(self, m, comparand):
# with m.Switch(comparand):
# with m.Case(ConstingEnumThing.DOG,
# ConstingEnumThing.SQUIRREL):
# m.d.comb += self.output.eq(0)
# with m.Default():
# m.d.comb += self.output.eq(1)
class EnumValueCatDetector(Elaboratable):
    """
    We can try to fix the error by using .value on all the enum values. But
    we run into the same width warning.
    """
    def __init__(self):
        self.input = ConstingEnumThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, ConstingEnumThing.CAT.value)
        return m
    def detectCat(self, m, comparand):
        # .value degrades each member to a bare int, so widths are inferred
        # from the ints themselves -- hence the same width warning.
        with m.Switch(comparand):
            with m.Case(ConstingEnumThing.DOG.value,
                        ConstingEnumThing.SQUIRREL.value):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
# We can't use the Const version on all the enum values, because:
#
# File "/home/robertbaruch/.local/lib/python3.6/site-packages/nmigen/hdl/dsl.py", line 286, in Case
# switch_data["cases"][new_values] = self._statements
# TypeError: unhashable type: 'Const'
#
# class EnumConstCatDetector(Elaboratable):
# def __init__(self):
# self.input = ConstingEnumThing.signal()
# self.output = Signal()
# def elaborate(self, platform):
# m = Module()
# self.detectCat(m, ConstingEnumThing.CAT.const)
# return m
# def detectCat(self, m, comparand):
# with m.Switch(comparand):
# with m.Case(ConstingEnumThing.DOG.const, ConstingEnumThing.const):
# m.d.comb += self.output.eq(0)
# with m.Default():
# m.d.comb += self.output.eq(1)
class EnumConstCatDetector(Elaboratable):
    """
    This is how you can use the Enum, but it's not great because
    now you have to remember when to use .value and when to use .const.
    And you still have to care about the actual numeric values of the Enum
    values, which isn't great.
    """
    def __init__(self):
        self.input = ConstingEnumThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        # .const works for Switch ...
        self.detectCat(m, ConstingEnumThing.CAT.const)
        return m
    def detectCat(self, m, comparand):
        with m.Switch(comparand):
            # ... but Case still needs the raw .value, because Const
            # objects are unhashable (see the commented-out attempt above).
            with m.Case(ConstingEnumThing.DOG.value,
                        ConstingEnumThing.SQUIRREL.value):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
def enumToConst(v):
    """Converts an Enum value to a Const of the correct size for the Enum.
    """
    assert (isinstance(v, Enum))
    # Build a throwaway Signal purely to derive the shape (bit width)
    # needed to encode len(type(v)) member indices.
    s = Signal.range(0, len(type(v)))
    return Const(enumToValue(v), s.shape())
def enumToValue(v):
    """Converts an Enum value to a Value of the correct size for the Enum.
    Note that we do not care what the actual numeric values of the Enum
    values are. All we care about is that they are unique, start from 0,
    and monotonically increase by 1.
    O(N), but there's no reason you can't memoize a hashtable.
    """
    assert (isinstance(v, Enum))
    # Walk the members in declaration order and report v's position.
    for position, member in enumerate(type(v)):
        if member is v:
            return position
def enumToSignal(t):
    """Converts an Enum type to a Signal of the correct size for the Enum.
    This could be useful if the Signal constructor accepts an Enum.
    """
    assert (issubclass(t, Enum))
    # len(t) is the member count, so range(0, len(t)) yields exactly
    # enough bits to encode every member index.
    return Signal.range(0, len(t), src_loc_at=1)
class IdealEnumCatDetector(Elaboratable):
    """
    This is the ideal, if we imagine that Switch accepts Enum values and calls
    enumToConst() on them, Case accepts Enum values and calls enumToValue()
    on them, and Signal() accepts Enum classes and calls enumToSignal() on them.
    """
    def __init__(self):
        self.input = enumToSignal(ConstingEnumThing)
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, ConstingEnumThing.CAT)
        return m
    def detectCat(self, m, comparand):
        # The helper functions stand in for the enum support we wish the
        # Switch/Case/Signal APIs had natively.
        with m.Switch(enumToConst(comparand)):
            with m.Case(
                    enumToValue(ConstingEnumThing.DOG),
                    enumToValue(ConstingEnumThing.SQUIRREL)):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
class OtherUses(Elaboratable):
    """Further places where first-class enum support would matter."""
    def __init__(self):
        self.input = enumToSignal(ConstingEnumThing)
        self.output1 = Signal()
        self.output2 = enumToSignal(ConstingEnumThing)
        self.output3 = Signal()
    def elaborate(self, platform):
        m = Module()
        # Equality comparison of a signal against an enum member.
        with m.If(self.input == enumToValue(ConstingEnumThing.CAT)):
            m.d.comb += self.output1.eq(1)
        with m.Else():
            m.d.comb += self.output1.eq(0)
        # These should be disallowed, as should any math on enum values
        # except equality/inequality comparison.
        m.d.comb += self.output2.eq((self.input < 1) | (1 + self.input))
        # matches should allow Enums.
        m.d.comb += self.output3.eq(
            self.input.matches(enumToValue(ConstingEnumThing.CAT)))
        return m
class IntThing(IntEnum):
    """An enumerated value based on IntEnum.

    IntEnum members behave as plain ints, so no explicit ``.value`` is
    needed when they are used in arithmetic or comparisons.
    """
    DOG = 0
    CAT = 1
    SQUIRREL = 2
    @classmethod
    def signal(cls):
        """Returns a Signal with the right number of bits for the Enum.
        src_loc_at tells nMigen which stack frame to get the variable
        name from, so the one above this one.
        """
        return Signal.range(0, 3, src_loc_at=1)
    @property
    def const(self):
        """Returns a Const with the right number of bits for the Enum."""
        # Bug fix: this previously took its shape from
        # ConstingEnumThing.signal() (a copy-paste from the other enum);
        # it only worked because both enums happen to have three members.
        return Const(self.value, IntThing.signal().shape())
class IntEnumCatDetector(Elaboratable):
    """CatDetector using IntEnum.
    You can dispense with .value, but you must still use .const.
    """
    def __init__(self):
        self.input = IntThing.signal()
        self.output = Signal()
    def elaborate(self, platform):
        m = Module()
        self.detectCat(m, IntThing.CAT.const)
        return m
    def detectCat(self, m, comparand):
        # IntEnum members are ints, so Case accepts them directly.
        with m.Switch(comparand):
            with m.Case(IntThing.DOG, IntThing.SQUIRREL):
                m.d.comb += self.output.eq(0)
            with m.Default():
                m.d.comb += self.output.eq(1)
if __name__ == "__main__":
    # Instantiate every demonstration detector; the commented-out entries
    # correspond to the variants that do not compile (see notes above).
    meow = CatDetector()
    # meow2 = EnumCatDetector()
    meow3 = EnumValueCatDetector()
    meow4 = EnumConstCatDetector()
    meow5 = IdealEnumCatDetector()
    meow6 = OtherUses()
    meow7 = IntEnumCatDetector()
    m = Module()
    m.submodules.meow = meow
    # m.submodules.meow2 = meow2
    m.submodules.meow3 = meow3
    m.submodules.meow4 = meow4
    m.submodules.meow5 = meow5
    m.submodules.meow6 = meow6
    m.submodules.meow7 = meow7
    # Hand the assembled design to nMigen's CLI driver, exporting all ports.
    main(
        m,
        ports=[
            meow.input,
            meow.output,
            # meow2.input, meow2.output,
            meow3.input,
            meow3.output,
            meow4.input,
            meow4.output,
            meow5.input,
            meow5.output,
            meow6.input,
            meow6.output1,
            meow6.output2,
            meow6.output3,
            meow7.input,
            meow7.output
        ])
| 3.640625 | 4 |
674. Longest Continuous Increasing Subsequence.py | Muthu2093/Algorithms-practice | 0 | 12771758 | <reponame>Muthu2093/Algorithms-practice
# Given an unsorted array of integers, find the length of longest continuous increasing subsequence (subarray).
# Example 1:
# Input: [1,3,5,4,7]
# Output: 3
# Explanation: The longest continuous increasing subsequence is [1,3,5], its length is 3.
# Even though [1,3,5,7] is also an increasing subsequence, it's not a continuous one where 5 and 7 are separated by 4.
# Example 2:
# Input: [2,2,2,2,2]
# Output: 1
# Explanation: The longest continuous increasing subsequence is [2], its length is 1.
# Note: Length of the array will not exceed 10,000.
class Solution(object):
    def findLengthOfLCIS(self, nums):
        """Return the length of the longest strictly increasing
        contiguous subarray of `nums`.

        :type nums: List[int]
        :rtype: int

        The original version re-assigned the ``for`` loop variable
        (``i += 1``), which is a no-op in Python; that dead statement is
        removed and the running maximum is tracked directly.
        """
        if not nums:
            return 0
        best = 1
        run = 1
        # Compare each adjacent pair; a strict increase extends the run,
        # anything else restarts it.
        for prev, curr in zip(nums, nums[1:]):
            if curr > prev:
                run += 1
                best = max(best, run)
            else:
                run = 1
        return best
| 4.21875 | 4 |
ABC/164/a.py | fumiyanll23/AtCoder | 0 | 12771759 | S, W = map(int, input().split())
# An opponent at least as heavy as Takahashi's weight limit is dangerous.
print("unsafe" if W >= S else "safe")
fasteve/io/mongo/__init__.py | Wytamma/fasteve | 31 | 12771760 | from .mongo import Mongo, MongoClient
| 1.0625 | 1 |
huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/sources.py | NQLoong/huaweicloud-sdk-python-v3 | 1 | 12771761 | <filename>huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/sources.py
# coding: utf-8
import pprint
import re
import six
class Sources:
    """CDN origin-server configuration model.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'ip_or_domain': 'str',
        'origin_type': 'str',
        'active_standby': 'int',
        'enable_obs_web_hosting': 'int'
    }
    attribute_map = {
        'ip_or_domain': 'ip_or_domain',
        'origin_type': 'origin_type',
        'active_standby': 'active_standby',
        'enable_obs_web_hosting': 'enable_obs_web_hosting'
    }
    def __init__(self, ip_or_domain=None, origin_type=None, active_standby=None, enable_obs_web_hosting=None):
        """Sources - a model defined in huaweicloud sdk"""
        self._ip_or_domain = None
        self._origin_type = None
        self._active_standby = None
        self._enable_obs_web_hosting = None
        self.discriminator = None
        self.ip_or_domain = ip_or_domain
        self.origin_type = origin_type
        self.active_standby = active_standby
        # enable_obs_web_hosting is the only optional field; the private
        # attribute stays None when it is not supplied.
        if enable_obs_web_hosting is not None:
            self.enable_obs_web_hosting = enable_obs_web_hosting
    @property
    def ip_or_domain(self):
        """Gets the ip_or_domain of this Sources.

        Origin server IP address or domain name.

        :return: The ip_or_domain of this Sources.
        :rtype: str
        """
        return self._ip_or_domain
    @ip_or_domain.setter
    def ip_or_domain(self, ip_or_domain):
        """Sets the ip_or_domain of this Sources.

        Origin server IP address or domain name.

        :param ip_or_domain: The ip_or_domain of this Sources.
        :type: str
        """
        self._ip_or_domain = ip_or_domain
    @property
    def origin_type(self):
        """Gets the origin_type of this Sources.

        Origin type: one of ``ipaddr``, ``domain`` or ``obs_bucket``,
        meaning origin-server IP, origin-server domain name, or OBS
        bucket access domain name, respectively.

        :return: The origin_type of this Sources.
        :rtype: str
        """
        return self._origin_type
    @origin_type.setter
    def origin_type(self, origin_type):
        """Sets the origin_type of this Sources.

        Origin type: one of ``ipaddr``, ``domain`` or ``obs_bucket``,
        meaning origin-server IP, origin-server domain name, or OBS
        bucket access domain name, respectively.

        :param origin_type: The origin_type of this Sources.
        :type: str
        """
        self._origin_type = origin_type
    @property
    def active_standby(self):
        """Gets the active_standby of this Sources.

        Active/standby flag (1 = active origin, 0 = standby origin). An
        active origin is required, a standby origin is optional, and an
        OBS bucket cannot have a standby origin.

        :return: The active_standby of this Sources.
        :rtype: int
        """
        return self._active_standby
    @active_standby.setter
    def active_standby(self, active_standby):
        """Sets the active_standby of this Sources.

        Active/standby flag (1 = active origin, 0 = standby origin). An
        active origin is required, a standby origin is optional, and an
        OBS bucket cannot have a standby origin.

        :param active_standby: The active_standby of this Sources.
        :type: int
        """
        self._active_standby = active_standby
    @property
    def enable_obs_web_hosting(self):
        """Gets the enable_obs_web_hosting of this Sources.

        Whether OBS static website hosting is enabled (0 = disabled,
        1 = enabled).

        :return: The enable_obs_web_hosting of this Sources.
        :rtype: int
        """
        return self._enable_obs_web_hosting
    @enable_obs_web_hosting.setter
    def enable_obs_web_hosting(self, enable_obs_web_hosting):
        """Sets the enable_obs_web_hosting of this Sources.

        Whether OBS static website hosting is enabled (0 = disabled,
        1 = enabled).

        :param enable_obs_web_hosting: The enable_obs_web_hosting of this Sources.
        :type: int
        """
        self._enable_obs_web_hosting = enable_obs_web_hosting
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes declared sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; confirm that is acceptable for this model.
        if not isinstance(other, Sources):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 2.4375 | 2 |
pymdwizard/gui/MetadataRoot.py | mmfink/fort-pymdwizard | 53 | 12771762 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
The widget for the main metadata root item.
This is the container for an FGDC record without the application wrapper,
menu bar, etc.
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
from lxml import etree
from PyQt5.QtGui import QPainter
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import QTimeLine
from pymdwizard.core import utils
from pymdwizard.core import xml_utils
from pymdwizard.gui.wiz_widget import WizardWidget
from pymdwizard.gui.ui_files import UI_MetadataRoot
from pymdwizard.gui.IDInfo import IdInfo
from pymdwizard.gui.spatial_tab import SpatialTab
from pymdwizard.gui.EA import EA
from pymdwizard.gui.DataQuality import DataQuality
from pymdwizard.gui.metainfo import MetaInfo
from pymdwizard.gui.distinfo import DistInfo
class MetadataRoot(WizardWidget):
    # Label used for drag operations (see WizardWidget).
    drag_label = "Metadata <metadata>"
    # FGDC tags accepted on drop. NOTE(review): only 'abstract' is
    # listed -- confirm that is intentional for the root widget.
    acceptable_tags = ["abstract"]
    # Generated UI class from ui_files; instantiated in build_ui().
    ui_class = UI_MetadataRoot.Ui_metadata_root
def __init__(self, parent=None):
self.schema = "bdp"
super(self.__class__, self).__init__(parent=parent)
self.use_dataqual = True
self.use_spatial = True
self.use_eainfo = True
self.use_distinfo = True
    def build_ui(self):
        """
        Build and modify this widget's GUI

        Returns
        -------
        None
        """
        self.ui = self.ui_class()
        self.ui.setupUi(self)
        # Enable drag-and-drop onto this widget.
        self.setup_dragdrop(self, enable=True)
        # Create each top-level FGDC section widget and add it to its
        # page of the stacked layout (one page per section).
        self.idinfo = IdInfo(root_widget=self, parent=self)
        self.ui.page_idinfo.layout().addWidget(self.idinfo)
        self.dataqual = DataQuality()
        self.ui.page_dataqual.layout().addWidget(self.dataqual)
        self.spatial_tab = SpatialTab(root_widget=self)
        self.ui.page_spatial.layout().addWidget(self.spatial_tab)
        self.eainfo = EA()
        self.ui.page_eainfo.layout().addWidget(self.eainfo)
        self.metainfo = MetaInfo(root_widget=self)
        self.ui.page_metainfo.layout().addWidget(self.metainfo)
        self.distinfo = DistInfo(root_widget=self)
        self.ui.page_distinfo.layout().addWidget(self.distinfo)
    def connect_events(self):
        """
        Connect the appropriate GUI components with the corresponding functions

        Returns
        -------
        None
        """
        # Every section-header button routes through the same handler,
        # which inspects the sender to decide which page to show.
        self.ui.idinfo_button.pressed.connect(self.section_changed)
        self.ui.dataquality_button.pressed.connect(self.section_changed)
        self.ui.spatial_button.pressed.connect(self.section_changed)
        self.ui.eainfo_button.pressed.connect(self.section_changed)
        self.ui.distinfo_button.pressed.connect(self.section_changed)
        self.ui.metainfo_button.pressed.connect(self.section_changed)
def section_changed(self):
"""
The event which switches the currently displayed main section
when a user clicks on one of the top level section header buttons.
Returns
-------
None
"""
button_name = self.sender().objectName()
index_lookup = {
"idinfo_button": 0,
"dataquality_button": 1,
"spatial_button": 2,
"eainfo_button": 3,
"distinfo_button": 4,
"metainfo_button": 5,
}
new_index = index_lookup[button_name]
self.switch_section(which_index=new_index)
def switch_section(self, which_index):
"""
sub funtion that does the actual switching, creating a fader widget,
etc.
Parameters
----------
which_index : int
The index of the section to display
Returns
-------
None
"""
if which_index == 0:
self.ui.idinfo_button.setChecked(True)
elif which_index == 1:
self.ui.dataquality_button.setChecked(True)
elif which_index == 2:
self.ui.spatial_button.setChecked(True)
elif which_index == 3:
self.ui.eainfo_button.setChecked(True)
elif which_index == 4:
self.ui.distinfo_button.setChecked(True)
elif which_index == 5:
self.ui.metainfo_button.setChecked(True)
old_widget = self.ui.fgdc_metadata.currentWidget()
new_widget = self.ui.fgdc_metadata.widget(which_index)
FaderWidget(old_widget, new_widget)
self.ui.fgdc_metadata.setCurrentIndex(which_index)
return new_widget
def switch_schema(self, schema):
"""
Switch the displayed schema between straight FGDC and BDP
Parameters
----------
schema : str
Returns
-------
"""
self.schema = schema
self.idinfo.switch_schema(schema)
self.spatial_tab.switch_schema(schema)
def use_section(self, which, value):
"""
enable or disable top optional top level sections
Parameters
----------
which : str
Which section to change: ['dataqual', 'spatial', 'ea',
'distinfo']
value : bool
Whether to enable (True) or disable (False)
Returns
-------
None
"""
if which == "dataqual":
self.use_dataqual = value
self.dataqual.setVisible(value)
if which == "spatial":
self.use_spatial = value
self.spatial_tab.setVisible(value)
if which == "eainfo":
self.use_eainfo = value
self.eainfo.setVisible(value)
if which == "distinfo":
self.use_distinfo = value
self.distinfo.setVisible(value)
def to_xml(self):
metadata_node = xml_utils.xml_node(tag="metadata")
idinfo = self.idinfo.to_xml()
metadata_node.append(idinfo)
if self.use_dataqual:
dataqual = self.dataqual.to_xml()
metadata_node.append(dataqual)
if self.spatial_tab.spdoinfo.has_content() and self.use_spatial:
spdoinfo = self.spatial_tab.spdoinfo.to_xml()
metadata_node.append(spdoinfo)
if self.spatial_tab.spref.has_content() and self.use_spatial:
spref = self.spatial_tab.spref.to_xml()
metadata_node.append(spref)
if self.eainfo.has_content() and self.use_eainfo:
eainfo = self.eainfo.to_xml()
metadata_node.append(eainfo)
if self.use_distinfo:
distinfo = self.distinfo.to_xml()
metadata_node.append(distinfo)
metainfo = self.metainfo.to_xml()
metadata_node.append(metainfo)
return metadata_node
def from_xml(self, metadata_element):
self.populate_section(metadata_element, "spdoinfo", self.spatial_tab.spdoinfo)
self.populate_section(metadata_element, "spref", self.spatial_tab.spref)
self.populate_section(metadata_element, "idinfo", self.idinfo)
self.populate_section(metadata_element, "dataqual", self.dataqual)
self.populate_section(metadata_element, "eainfo", self.eainfo)
self.populate_section(metadata_element, "distinfo", self.distinfo)
self.populate_section(metadata_element, "metainfo", self.metainfo)
def populate_section(self, metadata_element, section_name, widget):
"""
Since the content of top level sections might contain items that
need to go to separate top level items, this function handles the
divvying up of sub-content.
Parameters
----------
metadata_element : XML Element
section_name : Section tag to populate
widget : The section widget
Returns
-------
"""
just_this_one = type(metadata_element) == etree._Element
if just_this_one and metadata_element.tag == section_name:
section = metadata_element
elif just_this_one:
return True
else:
section = xml_utils.search_xpath(metadata_element, section_name)
if section is not None:
widget.from_xml(section)
elif not just_this_one:
widget.clear_widget()
class FaderWidget(QWidget):
    """Transient overlay that cross-fades between two stacked widgets.

    A snapshot of the outgoing widget is painted over the incoming one at
    steadily decreasing opacity; the overlay closes itself when the
    timeline finishes.
    """
    def __init__(self, old_widget, new_widget):
        QWidget.__init__(self, new_widget)
        # snapshot of the widget being replaced, painted on top while fading
        self.old_pixmap = QPixmap(new_widget.size())
        old_widget.render(self.old_pixmap)
        self.pixmap_opacity = 1.0
        # drive the fade over 450 ms; each tick lowers the snapshot opacity
        self.timeline = QTimeLine()
        self.timeline.setDuration(450)
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.close)
        self.timeline.start()
        self.resize(new_widget.size())
        self.show()
    def paintEvent(self, event):
        """Paint the old widget's snapshot at the current fade opacity."""
        qp = QPainter()
        qp.begin(self)
        qp.setOpacity(self.pixmap_opacity)
        qp.drawPixmap(0, 0, self.old_pixmap)
        qp.end()
    def animate(self, value):
        """Timeline tick: map progress (0..1) onto opacity (1..0), repaint."""
        self.pixmap_opacity = 1.0 - value
        self.repaint()
if __name__ == "__main__":
    # manual smoke test: launch this widget standalone
    utils.launch_widget(MetadataRoot, "MetadataRoot testing")
| 1.5625 | 2 |
skyportal/tests/frontend/test_individual_spectra.py | bparazin/skyportal | 52 | 12771763 | import uuid
import pytest
from selenium.common.exceptions import TimeoutException
from skyportal.tests import api
def enter_comment_text(driver, comment_text):
    """Focus the per-spectrum comment textarea and type *comment_text* into it."""
    comment_xpath = (
        "//div[contains(@data-testid, 'individual-spectrum-id_')]"
        "//textarea[@name='text']"
    )
    text_box = driver.wait_for_xpath(comment_xpath)
    driver.click_xpath(comment_xpath)
    text_box.send_keys(comment_text)
def add_comment(driver, comment_text):
    """Type *comment_text* into the spectrum comment box and submit it."""
    enter_comment_text(driver, comment_text)
    submit_xpath = (
        "//div[contains(@data-testid, 'individual-spectrum-id_')]"
        "//*[@name='submitCommentButton']"
    )
    driver.click_xpath(submit_xpath)
def add_comment_and_wait_for_display(driver, comment_text):
    """Submit a comment and block until it is rendered on the page.

    On a timeout the page is refreshed once and the wait retried.
    """
    add_comment(driver, comment_text)
    rendered_xpath = f'//p[text()="{comment_text}"]'
    try:
        driver.wait_for_xpath(rendered_xpath, timeout=20)
    except TimeoutException:
        # the page may be stale; reload and look again
        driver.refresh()
        driver.wait_for_xpath(rendered_xpath)
@pytest.mark.flaky(reruns=2)
def test_comments(driver, user, public_source):
    """A comment added on the Manage Data page also appears on the Source page."""
    driver.get(f"/become_user/{user.id}")
    # unique text so the assertion cannot match a pre-existing comment
    comment_text = str(uuid.uuid4())
    # now test the Manage Data page
    driver.get(f"/manage_data/{public_source.id}")
    # little triangle you push to expand the table
    driver.click_xpath("//*[@id='expandable-button']")
    add_comment_and_wait_for_display(driver, comment_text)
    # Make sure individual spectra comments appear on the Source page
    driver.get(f"/source/{public_source.id}")
    driver.wait_for_xpath(f'//p[contains(text(), "{comment_text}")]')
def test_annotations(
    driver, user, annotation_token, upload_data_token, public_source, lris
):
    """Spectrum annotations show up on both the Manage Data and Source pages."""
    driver.get(f"/become_user/{user.id}")
    # unique payload so the assertions cannot match pre-existing annotations
    annotation_data = str(uuid.uuid4())
    # create a fresh spectrum to annotate
    status, data = api(
        'POST',
        'spectrum',
        data={
            'obj_id': str(public_source.id),
            'observed_at': '2021-11-02 12:00:00',
            'instrument_id': lris.id,
            'wavelengths': [664, 665, 666],
            'fluxes': [234.2, 232.1, 235.3],
        },
        token=upload_data_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    spectrum_id = data["data"]["id"]
    # attach the annotation to that spectrum via the API
    status, data = api(
        'POST',
        f'spectra/{spectrum_id}/annotations',
        data={
            'origin': 'kowalski',
            'data': {'useful_info': annotation_data},
        },
        token=annotation_token,
    )
    assert status == 200
    # ----> now test the Manage Data page <----
    driver.get(f"/manage_data/{public_source.id}")
    # need to filter out only the new spectrum we've added
    # open the filter menu
    driver.click_xpath(
        "//*[@data-testid='spectrum-div']//button[@data-testid='Filter Table-iconButton']"
    )
    # click the filter on ID button
    driver.click_xpath("//div[@id='mui-component-select-id']", scroll_parent=True)
    # choose the one we've added based on ID
    driver.click_xpath(f"//li[@data-value='{spectrum_id}']", scroll_parent=True)
    # close the filter menu
    driver.click_xpath("//*[contains(@class, 'filterClose')]")
    # push the little triangle to expand the table
    driver.click_xpath("//*[@data-testid='spectrum-div']//*[@id='expandable-button']")
    driver.wait_for_xpath(f'//div[text()="{annotation_data}"]')
    # ----> now go to the source page <----
    driver.get(f"/source/{public_source.id}")
    driver.wait_for_xpath('//div[text()="Spectrum Obs. at"]')
    # filter once more for only this spectrum
    driver.click_xpath(
        "//*[@id='annotations-content']//button[@data-testid='Filter Table-iconButton']"
    )
    # click the filter on ID button
    driver.click_xpath(
        "//div[@id='mui-component-select-observed_at']", scroll_parent=True
    )
    # choose the one we've added based on ID
    # (observed_at 2021-11-02 12:00:00 renders as the MJD-style "2021-11-02.5")
    driver.click_xpath("//li[@data-value='2021-11-02.5']", scroll_parent=True)
    # close the filter menu
    driver.click_xpath("//*[contains(@class, 'filterClose')]")
    driver.wait_for_xpath('//div[text()="2021-11-02.5"]')
    driver.wait_for_xpath(f'//div[text()="{annotation_data}"]')
| 2.53125 | 3 |
优化算法/test.py | shao1chuan/pythonbook | 95 | 12771764 | lr = pow(2,-2)*16
print(lr) | 2.796875 | 3 |
library/library.py | rh01/p2p | 0 | 12771765 | <reponame>rh01/p2p<filename>library/library.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import logging
import json
import socket
def sigint_handler(signal, frame):
    """Signal handler for keyboard interrupts (CTRL-C): log and exit cleanly."""
    # emit a newline so the terminal's "^C" echo doesn't mangle the log line
    print()
    logging.info("CTRL-C received, exiting")
    sys.exit(0)
def send_message(connection, message):
    """Send *message* over *connection*, exiting the process on socket errors."""
    try:
        connection.sendall(message)
    except socket.error:
        logging.error("error, send_message")
        sys.exit(-1)
    else:
        logging.info("message sent: " + message)
def json_load(json_file):
    """Read *json_file* and return the decoded JSON structure."""
    with open(json_file, "rb") as handle:
        return json.load(handle)
def json_save(json_file, json_):
    """Serialize *json_* to *json_file* as sorted, indented, human-readable JSON."""
    # Open in text mode: json.dump emits str, which a binary-mode ("wb+")
    # file rejects under Python 3. Text mode also works under Python 2.
    with open(json_file, "w") as file_:
        json.dump(json_, file_, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
    # this module only provides helpers; warn when executed directly
    print("This file is meant to be imported, not run.")
| 2.421875 | 2 |
python-format-strings/exercise3.py | ATelders/learn-python | 0 | 12771766 | <gh_stars>0
#medicine = 'Coughussin'
#dosage = 5
#duration = 4.5
#instructions = '{} - Take {} ML by mouth every {} hours.'.format(medicine, dosage, duration)
#print(instructions)
#instructions = '{2} - Take {1} ML by mouth every {0} hours'.format(medicine, dosage, duration)
#print(instructions)
#instructions = '{medicine} - Take {dosage} ML by mouth every {duration} hours'.format(medicine = 'Sneezergen', dosage = 10, duration = 6)
#print(instructions)
#name = 'World'
#message = f'Hello, {name}'
#print(message)
#count = 10
#value = 3.14
#message = f'Count to {10}. Multiply by {value}.'
#print(message)
#width = 5
#height = 10
#print(f'The perimeter is {2 * width + 2 * height} and the area is {width * height}.')
# Demonstrate f-string field alignment: left, right, centered, and
# centered with a '-' fill character, all in a width of 25.
value = 'hi'
for align_spec in ('<25', '>25', '^25', '-^25'):
    print(f'.{value:{align_spec}}.')
| 3.3125 | 3 |
graphene_django_pagination/objects_type.py | daniel2101/graphene-django-pagination | 1 | 12771767 | <reponame>daniel2101/graphene-django-pagination
from graphene import ObjectType, Boolean
class PageInfoExtra(ObjectType):
    """Relay-style pagination info exposing only the has-next/has-previous flags."""
    has_next_page = Boolean(
        required=True,
        name="hasNextPage",
        description="When paginating forwards, are there more items?",
    )
    has_previous_page = Boolean(
        required=True,
        name="hasPreviousPage",
        description="When paginating backwards, are there more items?",
    )
| 2.859375 | 3 |
tests/test_Crypto.py | sondreod/pysj | 1 | 12771768 | import string
import pytest
from pysj import md5, sha1, sha256, uuid
from pysj.crypto import ALPHABET
@pytest.mark.parametrize(
    "hash_algorithm, digest",
    [
        ("sha256", "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"),
        ("sha1", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"),
        ("md5", "098f6bcd4621d373cade4e832627b4f6"),
    ],
)
def test_hashing(hash_algorithm, digest):
    """Each pysj hash helper maps 'test' to its known hex digest."""
    # Dispatch by name instead of eval()-ing a constructed code string.
    hash_functions = {"sha256": sha256, "sha1": sha1, "md5": md5}
    assert hash_functions[hash_algorithm]("test") == digest
def test_uuid_generation():
    """uuid() returns a 36-character canonical UUID (hex digits and dashes)."""
    # generate ONE value and check both properties on it; the original
    # called uuid() twice and asserted on two different values
    value = uuid()
    assert len(value) == 36
    assert all(ch in string.hexdigits + "-" for ch in value)
def test_uuid_alpha_generation():
    """uuid('alpha') returns a 21-23 character token drawn from ALPHABET."""
    # generate ONE value and check both properties on it; the original
    # called uuid("alpha") twice and asserted on two different values
    value = uuid("alpha")
    assert 20 < len(value) < 24
    assert all(ch in ALPHABET for ch in value)
def test_uuid_int_generation():
    """uuid('int') returns an integer."""
    value = uuid("int")
    assert isinstance(value, int)
def test_force_mutable():
    """With force_mutable, an unhashable input hashes like its repr string."""
    assert sha256("[1, 2, 3]") == sha256([1, 2, 3], force_mutable=True)
def test_force_mutable_error_when_flag_is_false():
    """Without force_mutable, hashing a list raises AttributeError."""
    with pytest.raises(AttributeError):
        sha256([1, 2, 3])
| 2.484375 | 2 |
wxpy_rofi_config/config/rofi.py | thecjharries/wxpy-rofi-config | 2 | 12771769 | <gh_stars>1-10
# coding=utf8
"""This file provides the Rofi class"""
from collections import OrderedDict
from filecmp import cmp as file_cmp
from os import environ, stat as file_stat
from os.path import exists, expanduser, join
from re import (
compile as re_compile,
DOTALL,
finditer,
IGNORECASE,
MULTILINE,
search,
sub,
VERBOSE
)
from shutil import copyfile
from subprocess import check_output
from wxpy_rofi_config.config import Entry
class Rofi(object):  # pylint: disable=too-many-public-methods
    """Rofi holds all the config for rofi.

    Entries are scraped from three external sources: rofi's own rasi config
    dumps (defaults and current values), `man rofi` (long documentation and
    grouping), and `rofi --help` (short help, types, active config file, and
    available modi). Everything is keyed by option name in self.config.
    """
    # Pre-compiled regexes for scraping rasi dumps, `man rofi` output, and
    # `rofi --help` output
    PATTERNS = {
        'RASI_ENTRY': re_compile(
            r"^.*?(?:\s|\/|\*)(?P<key>[a-z][a-z0-9-]*):\s*(?P<value>.*?);.*?$",
            MULTILINE
        ),
        'RASI_COMMENT': re_compile(r"(\/\*.*?\*\/|\/\/.*?$)", MULTILINE),
        'MAN_CONFIG_BLOCK': re_compile(
            r"\nconfiguration(.*?)\n\w",
            DOTALL | IGNORECASE
        ),
        'MAN_GROUP': re_compile(
            r"\n {3}(?P<group>\w.*?)\n(?P<contents>(.(?!\n {3}\w|$))*)",
            DOTALL
        ),
        'MAN_ITEM': re_compile(
            r"(?:^|\n\n) {7}-(?!no-)(?P<key>[\w-]+).*?\n\n(?P<man>(.(?!\n\n {7}-\w|$))*.?)",
            DOTALL
        ),
        'CLEAN_GROUP': re_compile(r" settings? ?"),
        # [pattern, replacement] pairs applied in order to raw man text:
        # strip the 7-space indent, re-join hyphenated line breaks, and
        # unwrap remaining hard line breaks
        'CLEAN_MAN': [
            [
                re_compile(r"^ {7}", MULTILINE),
                ''
            ],
            [
                re_compile(r" (\w+)(?:‐|-)\n([^\s])"),
                r" \1\2"
            ],
            [
                re_compile(r"(\w+)\n([^\s])"),
                r"\1 \2"
            ]
        ],
        'HELP_BLOCK': re_compile(
            r"\n?global options:(?P<contents>.*?)(?:\n\n)",
            DOTALL | IGNORECASE
        ),
        'HELP_ENTRY': re_compile(
            r"""
            ^\s+-(?:\[no-\])? # Preface stuff; skip it
            (?P<key>[a-z0-9-]+?) # The entry key
            (?:\s+\[(?P<help_type>\w+)\])? # Type info is not always there
            \s+(?P<help_value>.*?)$ # The short help string
            """,
            MULTILINE | VERBOSE
        ),
        'HELP_ACTIVE_FILE': re_compile(
            r"^.*?configuration\s+file:\s+(?P<file_path>.*?)$",
            MULTILINE | IGNORECASE
        ),
        'HELP_AVAILABLE_MODI_BLOCK': re_compile(
            r"(?:detected modi:)(?P<modi>.*?\n)\n",
            DOTALL | IGNORECASE
        ),
        'HELP_MODI': re_compile(
            r"^\s+\*\s+\+?(?P<modi>.*?)$",
            MULTILINE | IGNORECASE
        )
    }
    def __init__(self):
        """Create an empty config model; call build() to populate it."""
        # option key -> Entry, in discovery order
        self.config = OrderedDict()
        # distinct man-page group names seen while parsing
        self.groups = []
        # path to the active config file (from --help, or the default path)
        self.active_file = None
        # mtime of active_file at the last load/save
        self.last_mtime = None
        # path of the backup written by the most recent save/backup
        self.active_backup = None
        self.available_modi = []
    def assign_rasi_entry(self, key_value_match, destination='default'):
        """
        Given a match from a rasi file, attempts to pull values from the input.
        Assigns either default or current.
        """
        key = key_value_match.group('key')
        value = key_value_match.group('value')
        if key in self.config:
            setattr(self.config[key], destination, value)
        else:
            # first sighting of this key: create the Entry with only the
            # requested slot filled
            arg_dict = {'key_name': key}
            arg_dict[destination] = value
            self.config[key] = Entry(**arg_dict)
    def parse_rasi(self, rasi, destination='default'):
        """Parses the provided rasi string for entries"""
        for discovered_entry in finditer(self.PATTERNS['RASI_ENTRY'], rasi):
            self.assign_rasi_entry(discovered_entry, destination)
    def load_default_config(self):
        """
        Loads the default configuration.
        WARNING: This can actually create broken config depending on what you've
        got on your system. For example, the official file_browser example
        creates `display-file_browser`, which kills rofi.
        see: https://gitcrate.org/qtools/rofi-file_browser
        """
        raw = check_output(['rofi', '-no-config', '-dump-config'])
        self.parse_rasi(raw, 'default')
    def load_current_config(self):
        """
        Loads the currently active config (what exists of it)
        """
        raw = check_output(['rofi', '-dump-config'])
        # strip comments so commented-out options are not parsed as current
        raw_cleaned = sub(
            self.PATTERNS['RASI_COMMENT'],
            '',
            raw,
            0
        )
        self.parse_rasi(raw_cleaned, 'current')
    def load_arbitrary_config(self, config_path):
        """
        Loads the provided file
        """
        with open(config_path, 'r') as config_file:
            raw = config_file.read()
        raw_cleaned = sub(
            self.PATTERNS['RASI_COMMENT'],
            '',
            raw,
            0
        )
        self.parse_rasi(raw_cleaned, 'current')
    def process_config(self):
        """Process all entries for useful information"""
        for _, entry in self.config.items():
            entry.process_entry()
            if not entry.group in self.groups:
                self.groups.append(entry.group)
        if not self.active_file:
            self.active_file = self.create_default_path()
        self.update_mtime()
    def clean_entry_man(self, contents):
        """Cleans a single man entry (de-indent, unwrap hard line breaks)"""
        for substitution in self.PATTERNS['CLEAN_MAN']:
            contents = sub(
                substitution[0],
                substitution[1],
                contents,
                0
            )
        # NOTE(review): .decode() assumes a byte string (Python 2 subprocess
        # output) -- confirm behavior if this ever runs under Python 3
        return contents.decode('utf8', 'ignore')
    def parse_man_entry(self, group, man_entry_match):
        """Looks for a single man entry"""
        key = man_entry_match.group('key')
        if key in self.config:
            man = self.clean_entry_man(man_entry_match.group('man'))
            if man:
                setattr(self.config[key], 'group', group)
                setattr(self.config[key], 'man', man)
    def parse_man_group(self, man_group_match):
        """Looks for a group of settings in man"""
        # normalize e.g. "Matching settings" -> "Matching"
        group = sub(
            self.PATTERNS['CLEAN_GROUP'],
            '',
            man_group_match.group('group')
        )
        for discovered_entry in finditer(
                self.PATTERNS['MAN_ITEM'],
                man_group_match.group('contents')
        ):
            self.parse_man_entry(group, discovered_entry)
    def parse_man_config(self, man_config_match):
        """Looks for the config man section"""
        for discovered_group in finditer(
                self.PATTERNS['MAN_GROUP'],
                man_config_match
        ):
            self.parse_man_group(discovered_group)
    def load_man(self):
        """Loads man rofi"""
        raw = check_output(['man', 'rofi'])
        possible_config = search(self.PATTERNS['MAN_CONFIG_BLOCK'], raw)
        if possible_config:
            self.parse_man_config(possible_config.group())
    def parse_help_modi(self, modi_group):
        """Parses out all available modi"""
        for discovered_modi in finditer(
                self.PATTERNS['HELP_MODI'],
                modi_group
        ):
            modi = discovered_modi.group('modi')
            if modi:
                self.available_modi.append(modi)
    def parse_help_modi_block(self, raw_help):
        """Parses help for the modi block"""
        possible_modi = search(
            self.PATTERNS['HELP_AVAILABLE_MODI_BLOCK'],
            raw_help
        )
        if possible_modi:
            self.parse_help_modi(possible_modi.group('modi'))
    def parse_help_active_file(self, raw_help):
        """Parses help for the active config file"""
        possible_file = search(
            self.PATTERNS['HELP_ACTIVE_FILE'],
            raw_help
        )
        if possible_file:
            self.active_file = possible_file.group('file_path')
    def parse_help_entry(self, help_entry_match):
        """Parses a single help entry"""
        key = help_entry_match.group('key')
        if key in self.config:
            help_value = help_entry_match.group('help_value')
            setattr(self.config[key], 'help_value', help_value)
            # the [type] column is optional in --help output
            help_type = help_entry_match.group('help_type')
            if help_type:
                setattr(self.config[key], 'help_type', help_type)
    def parse_help_config(self, help_block_match):
        """Parses the entire help config block"""
        for discovered_entry in finditer(
                self.PATTERNS['HELP_ENTRY'],
                help_block_match.group('contents')
        ):
            self.parse_help_entry(discovered_entry)
    def parse_help_config_block(self, raw_help):
        """Parses help for the config block"""
        possible_config = search(self.PATTERNS['HELP_BLOCK'], raw_help)
        if possible_config:
            self.parse_help_config(possible_config)
    def load_help(self):
        """Loads rofi --help in an attempt to parse it"""
        raw = check_output(['rofi', '--help'])
        self.parse_help_config_block(raw)
        self.parse_help_active_file(raw)
        self.parse_help_modi_block(raw)
    def build(self, config_path=None):
        """
        Loads defaults, adds current values, discovers available documentation,
        and processes all entries
        """
        self.load_default_config()
        if config_path:
            self.load_arbitrary_config(config_path)
        else:
            self.load_current_config()
        self.load_help()
        self.load_man()
        self.process_config()
        # an explicit path overrides whatever --help reported as active
        if config_path:
            self.active_file = config_path
    def to_rasi(self):
        """Returns a rasi string composed of all its entries"""
        output = "configuration {\n"
        for key, entry in self.config.items():
            # only persist values that differ from rofi's defaults
            if entry.current != entry.default:
                output += " %s\n" % self.config[key].to_rasi()
        output += "}\n"
        return output
    def backup(self, source=None, destination=None, restore=False):
        """Backs up the provided config file.

        With restore=True the copy runs the other way (backup -> source).
        Defaults: source is the active file, destination is source + '.bak'.
        """
        if source is None:
            source = self.active_file
        if not exists(source):
            # nothing to back up (or restore over); silently succeed
            return
        if destination is None:
            destination = "%s.bak" % source
        self.active_backup = destination
        if restore:
            copyfile(destination, source)
        else:
            copyfile(source, destination)
        self.update_mtime()
    def write_config(self, path=None):
        """Writes the config to a file"""
        if path is None:
            path = self.active_file
        with open(path, 'w') as rasi_file:
            rasi_file.write(self.to_rasi())
        self.update_mtime()
    def save(self, path=None, backup_path=None, backup=True):
        """Saves the config file (optionally backing up the existing one first)"""
        # reset so active_backup only reflects a backup made by THIS save
        self.active_backup = None
        if backup:
            self.backup(path, backup_path)
        self.write_config(path)
    def can_restore(self):
        """Checks if the config can be restored (a backup exists and differs)"""
        active = self.active_file
        if active is None:
            active = Rofi.create_default_path()
        if not exists(active):
            return False
        backup = self.active_backup
        if backup is None:
            backup = "%s.bak" % active
        if not exists(backup):
            return False
        # restoring an identical file would be a no-op
        return not file_cmp(active, backup)
    def get_mtime(self):
        """Polls the last modification time on the active config file"""
        # index 8 of the os.stat tuple is st_mtime
        return file_stat(self.active_file)[8]
    def update_mtime(self):
        """Updates the stored mtime"""
        self.last_mtime = self.get_mtime()
    def probably_modified(self):
        """Checks modification time as a (cheap, approximate) metric for file changes"""
        return self.last_mtime != self.get_mtime()
    @staticmethod
    def create_default_path():
        """Creates the default save path (~/.config/rofi/config.rasi or XDG dir)"""
        if 'XDG_USER_CONFIG_DIR' in environ:
            lead = environ['XDG_USER_CONFIG_DIR']
        else:
            lead = join('~', '.config')
        return expanduser(join(lead, 'rofi', 'config.rasi'))
| 2.171875 | 2 |
main.py | if1live/rpi-mpd-controller | 1 | 12771770 | <filename>main.py
#!/usr/bin/python
#-*- coding: utf-8 -*-
u'''
http://www.rpiblog.com/2012/09/using-gpio-of-raspberry-pi-to-blink-led.html
'''
import RPi.GPIO as GPIO
import time
import commands
import re
PIN_STATUS = 3
PIN_PREV_BTN = 8
PIN_PLAY_BTN = 10
PIN_NEXT_BTN = 12
class Player(object):
    """Thin wrapper around the `mpc` command line client for MPD (Python 2).

    Caches the playing state in `play_state` so the status LED can blink
    without shelling out to mpc on every poll.
    """
    def __init__(self):
        self.play_state = self.is_playing()
    def is_playing(self):
        """Return True when `mpc` reports a `[playing]` status line.

        Also refreshes the cached `play_state` flag as a side effect.
        """
        re_play = re.compile(r'^\[playing\].+$')
        result = commands.getstatusoutput('mpc')
        line_list = result[1].splitlines()
        playing = False
        for line in line_list:
            if re_play.match(line) is not None:
                playing = True
        self.play_state = playing
        return playing
    def play(self):
        """Toggle playback: pause when playing, otherwise start playing."""
        if self.is_playing():
            retval = commands.getoutput('mpc pause')
            self.play_state = False
        else:
            retval = commands.getoutput('mpc play')
            self.play_state = True
        print retval
    def prev(self):
        """Skip to the previous track."""
        retval = commands.getoutput('mpc prev')
        print retval
    def next(self):
        """Skip to the next track."""
        retval = commands.getoutput('mpc next')
        print retval
class PlayerStatusLED(object):
    """Drives the status LED: blinking while playing, held off while paused."""
    def __init__(self, player):
        self.player = player
    def update(self):
        """Advance the LED by one poll tick (sleeps 100-200 ms)."""
        if self.player.play_state:
            # first half of a blink; the shared tail below turns it off again
            GPIO.output(PIN_STATUS, GPIO.HIGH)
            time.sleep(0.1)
        GPIO.output(PIN_STATUS, GPIO.LOW)
        time.sleep(0.1)
class Button(object):
    """Polled GPIO push button that invokes *func* once per rising edge.

    update() must be called repeatedly; it compares the pin's current level
    with the level seen on the previous poll and fires the callback on each
    0 -> 1 transition.
    """
    def __init__(self, pin, func):
        self.pin = pin
        self.old_state = 0
        self.func = func
        GPIO.setup(pin, GPIO.IN)
    def update(self):
        """Poll the pin once and fire the callback on a rising edge."""
        curr_state = GPIO.input(self.pin)
        # use == (value equality) instead of `is` (identity): comparing ints
        # with `is` only works by accident of CPython's small-int caching
        if self.old_state == 0 and curr_state == 1:
            self.func()
        self.old_state = curr_state
def main():
    """Configure the GPIO pins, start playback, and poll the controls forever."""
    GPIO.cleanup()
    GPIO.setmode(GPIO.BOARD)
    GPIO.setwarnings(False)
    GPIO.setup(PIN_STATUS, GPIO.OUT)
    player = Player()
    player.play()
    status_led = PlayerStatusLED(player)
    # polled in order: prev, play/pause, next
    buttons = [
        Button(PIN_PREV_BTN, player.prev),
        Button(PIN_PLAY_BTN, player.play),
        Button(PIN_NEXT_BTN, player.next),
    ]
    while True:
        for button in buttons:
            button.update()
        status_led.update()
        time.sleep(0.01)
if __name__ == '__main__':
    main()
    # main() normally loops forever; this cleanup only runs if it returns
    GPIO.cleanup()
| 3.28125 | 3 |
ringmaster/snowflake.py | declarativesystems/ringmaster | 1 | 12771771 | <gh_stars>1-10
# Copyright 2020 Declarative Systems Pty Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import snowflake.connector
import os
from loguru import logger
import yaml
import ringmaster.util as util
import ringmaster.constants as constants
SNOWFLAKE_CONFIG_FILE = "~/.ringmaster/snowflake.yaml"
# module-level connection handle (not assigned anywhere visible; presumably
# reserved for connection reuse)
connection = None
# settings dict for the active profile from ~/.ringmaster/snowflake.yaml,
# populated by setup_connection()
profile_data = None
def setup_connection(connection_settings):
    """Resolve the snowflake profile named in *connection_settings*.

    Reads ~/.ringmaster/snowflake.yaml and caches the selected profile in
    the module-level ``profile_data``.

    Raises:
        RuntimeError: when the snowflake settings file does not exist.
    """
    global profile_data
    profile = util.get_connection_profile(connection_settings, "snowflake")
    snowflake_config_file = os.path.expanduser(SNOWFLAKE_CONFIG_FILE)
    if not os.path.exists(snowflake_config_file):
        raise RuntimeError(f"snowflake settings not found at: {snowflake_config_file}")
    config = util.read_yaml_file(snowflake_config_file)
    profile_data = config.get(profile)
def get_cursor(data):
    """Open a snowflake DictCursor and smoke-test it.

    Also copies the account and region out of the cached profile into
    *data* as convenience substitution variables.
    """
    ctx = snowflake.connector.connect(**profile_data["credentials"])
    cursor = ctx.cursor(snowflake.connector.DictCursor)
    # convenience variables other steps substitute into their templates
    data["snowflake_account"] = profile_data["credentials"]["account"]
    data["snowflake_region"] = profile_data["region"]
    test_connection(cursor)
    return cursor
def test_connection(cs):
    """Smoke-test the cursor by selecting the server version.

    Raises whatever the connector raises if the round-trip fails.
    """
    cs.execute("SELECT current_version()")
    # the value is discarded; a successful round-trip is all we need
    cs.fetchone()
def process_file_and_connect(working_dir, filename, verb, data):
    """Substitute placeholders in *filename* and open a snowflake cursor.

    Returns:
        tuple: (cursor, path of the processed file)
    """
    processed_file = util.substitute_placeholders_from_file_to_file(
        working_dir, filename, constants.COMMENT_SQL, verb, data
    )
    logger.debug(f"snowflake processed file: {processed_file}")
    # connect to snowflake; get_cursor raises (bails) if it fails
    cursor = get_cursor(data)
    return cursor, processed_file
def do_snowflake_sql(working_dir, filename, verb, data):
    """Run an SQL script with substitution variables.

    Ordinary scripts run on the UP verb; the special cleanup script
    (SNOWFLAKE_CLEANUP_FILENAME) runs on the DOWN verb instead.
    Statements are accumulated line by line and executed at each
    semicolon-terminated boundary; comment lines are skipped.
    """
    script_name = os.path.basename(filename)
    if (verb == constants.UP_VERB and script_name != constants.SNOWFLAKE_CLEANUP_FILENAME) or \
            (verb == constants.DOWN_VERB and script_name == constants.SNOWFLAKE_CLEANUP_FILENAME):
        logger.info(f"snowflake sql: (unknown)")
        cs, processed_file = process_file_and_connect(working_dir, filename, verb, data)
        # build up stmt line-by-line; when the accumulated statement ends
        # with `;` execute it and reset for the next one
        stmt = ""
        with open(processed_file, "r") as file:
            for line in file:
                if line.startswith(constants.COMMENT_SQL):
                    continue
                # keep the newline so tokens on adjacent flush-left lines
                # stay separated (rstrip-concatenation used to fuse them)
                stmt += line
                if stmt.rstrip().endswith(";"):
                    logger.debug(f"sql: {stmt}")
                    cs.execute(stmt)
                    stmt = ""
    else:
        logger.info(f"skipping snowflake: (unknown)")
def do_snowflake_query(working_dir, filename, verb, data):
    """Query snowflake for a single row of values, add each column to the databag"""
    if verb == constants.UP_VERB:
        logger.info(f"snowflake query: (unknown)")
        cs, processed_file = process_file_and_connect(
            working_dir,
            filename,
            verb,
            data
        )
        extra_data = {}
        # load the entire processed file and run EACH STATEMENT
        # NOTE(review): splitting on ';' breaks on semicolons inside string
        # literals, and fetchone() returning None (no rows) would raise on
        # .items() -- presumably each statement always yields one row
        with open(processed_file, 'r') as file:
            sql = file.read()
        for stmt in sql.split(";"):
            logger.debug(f"snowflake sql query: {stmt}")
            cs.execute(stmt)
            result = cs.fetchone()
            logger.info(f"sql result: {result}")
            # result is a dict so just lowercase each key and add to databag
            for k, v in result.items():
                extra_data[k.lower()] = v
        logger.debug(f"query result - items: {len(extra_data)} values:{extra_data}")
        data.update(extra_data)
def snowflake_connector_version():
    """Return the version string of the installed snowflake connector."""
    return snowflake.connector.__version__
| 1.78125 | 2 |
tutorial/tetrix.py | apetcho/pyside6tut | 0 | 12771772 | #!/usr/bin/env python3
"""Tetrix
This is a Qt6 version of the classic Tetrix game.
The object of the game is to stack pieces dropped from the top of the playing
area so that they fill entire rows at the bottom of the playing area.
When a row is filled, all the blocks on that row are removed, the player earns
a number of points, and the peices above are moved down to occupy that row. If
more that one row is filled, the blocks on each row are removed, and the player
earns extra points.
The LEFT cursor key moves the current piece one space to the left. The
RIGHT cursor key moves it one space to the right, the UP cursor key rotates the
piece counter-clockwise by 90 degrees, and the DOWN cursor key rotates the piece
clockwise by 90 degrees.
To avoid waiting for a piece to fall to the bottom of the board, press D to
immediately move the piece down by one row, or press the SPACE key to drop
it as close to the bottom of the board as possible.
This example shows how a simple game can be created using only three
classes.
- The *TetrixWindow* class is used to display the payer's score, number of
lives, and information about the next piece to appear.
- The *TetrixBoard* class contains the game logic, handles keyboard input,
and displays the pieces on the playing area.
- The *TetrixPiece* class contains information about each piece.
In this approach, the *TetrixBoard* class is the most complex class, since it
handles the game logic and rendering. One benefit of this is that the
*TetrixWindow* and *TetrixPiece* classes are very simple and contain only a
minimum of code.
Credit: Most of the code here is based on official PySide6 example of Tetrix
code.
Note: This code is used for learning Qt6 Programming.
"""
import random
from enum import IntEnum, auto
from unittest import result
from PySide6 import QtCore
from PySide6 import QtGui
from PySide6 import QtWidgets
class ShapeEnum(IntEnum):
    """Identifiers for the empty cell and the seven tetromino shapes.

    NOTE(review): the explicit integer values presumably index per-shape
    coordinate/color tables in TetrixPiece -- confirm before renumbering.
    """
    NO_SHAPE = 0
    Z_SHAPE = 1
    S_SHAPE = 2
    LINE_SHAPE = 3
    T_SHAPE = 4
    SQUARE_SHAPE = 5
    L_SHAPE = 6
    MIRRORED_L_SHAPE = 7
class TetrixWindow(QtWidgets.QWidget):
    """Main window: the board flanked by the next-piece preview, LCD
    read-outs for level/score/removed lines, and the start/pause/quit
    buttons.
    """
    def __init__(self, root):
        super(TetrixWindow, self).__init__()
        self.board = TetrixBoard()
        # preview area the board draws the upcoming piece into
        nxtPieceLabel = QtWidgets.QLabel()
        nxtPieceLabel.setFrameStyle(
            QtWidgets.QFrame.Box | QtWidgets.QFrame.Raised)
        nxtPieceLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.board.setNextPieceLabel(nxtPieceLabel)
        scoreLcd = QtWidgets.QLCDNumber(5)
        scoreLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        levelLcd = QtWidgets.QLCDNumber(2)
        levelLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        linesLcd = QtWidgets.QLCDNumber(5)
        linesLcd.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        # NoFocus keeps keyboard input (arrow keys etc.) on the board
        startBtn = QtWidgets.QPushButton("&Start")
        startBtn.setFocusPolicy(QtCore.Qt.NoFocus)
        quitBtn = QtWidgets.QPushButton("&Quit")
        quitBtn.setFocusPolicy(QtCore.Qt.NoFocus)
        pauseBtn = QtWidgets.QPushButton("&Pause")
        pauseBtn.setFocusPolicy(QtCore.Qt.NoFocus)
        startBtn.clicked.connect(self.board.start)
        pauseBtn.clicked.connect(self.board.pause)
        quitBtn.clicked.connect(root.quit)
        # the board emits these whenever the corresponding value changes
        self.board.scoreChanged.connect(scoreLcd.display)
        self.board.levelChanged.connect(levelLcd.display)
        self.board.linesRemovedChanged.connect(linesLcd.display)
        # left column: preview + level + start; middle: the board;
        # right column: score + lines + quit/pause
        layout = QtWidgets.QGridLayout()
        layout.addWidget(self.createLabel("NEXT"), 0, 0)
        layout.addWidget(nxtPieceLabel, 1, 0)
        layout.addWidget(self.createLabel("LEVEL"), 2, 0)
        layout.addWidget(levelLcd, 3, 0)
        layout.addWidget(startBtn, 4, 0)
        layout.addWidget(self.board, 0, 1, 6, 1)
        layout.addWidget(self.createLabel("SCORE"), 0, 2)
        layout.addWidget(scoreLcd, 1, 2)
        layout.addWidget(self.createLabel("LINES REMOVED"), 2, 2)
        layout.addWidget(linesLcd, 3, 2)
        layout.addWidget(quitBtn, 4, 2)
        layout.addWidget(pauseBtn, 5, 2)
        self.setLayout(layout)
        self.setWindowTitle("Tetrix")
        self.resize(550, 370)
    def createLabel(self, text):
        """Return a bottom-center aligned QLabel for the LCD captions."""
        label = QtWidgets.QLabel(text)
        label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom)
        return label
class TetrixBoard(QtWidgets.QFrame):
    """The Tetrix playing field: owns the grid state, game timing and painting.

    Emits ``scoreChanged``/``levelChanged``/``linesRemovedChanged`` so the
    surrounding window can mirror the game state in its LCD widgets.
    """

    BOARD_WIDTH = 10
    BOARD_HEIGHT = 22

    scoreChanged = QtCore.Signal(int)
    levelChanged = QtCore.Signal(int)
    linesRemovedChanged = QtCore.Signal(int)

    def __init__(self, parent=None):
        super(TetrixBoard, self).__init__(parent)
        self.timer = QtCore.QBasicTimer()
        self.nxtPieceLabel = None
        self.isWaitingAfterLine = False
        self.curPiece = TetrixPiece()
        self.nxtPiece = TetrixPiece()
        # Board position of the current piece's reference block.
        self.curx = 0
        self.cury = 0
        self.numLinesRemoved = 0
        self.numPiecesDropped = 0
        self.score = 0
        self.level = 0
        self.board = None
        self.setFrameStyle(QtWidgets.QFrame.Panel | QtWidgets.QFrame.Sunken)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.isStarted = False
        self.isPaused = False
        self.clearBoard()
        self.nxtPiece.setRandomShape()

    def shapeAt(self, x, y):
        """Return the shape stored at board cell (x, y)."""
        return self.board[(y * TetrixBoard.BOARD_WIDTH) + x]

    def setShapeAt(self, x, y, shape: ShapeEnum):
        """Store ``shape`` at board cell (x, y)."""
        self.board[(y * TetrixBoard.BOARD_WIDTH) + x] = shape

    def timeoutTime(self):
        """Milliseconds between automatic drops; shrinks as the level rises."""
        return 1000 / (1 + self.level)

    def squareWidth(self):
        return self.contentsRect().width() / TetrixBoard.BOARD_WIDTH

    def squareHeight(self):
        return self.contentsRect().height() / TetrixBoard.BOARD_HEIGHT

    def setNextPieceLabel(self, label):
        """Register the label used to preview the upcoming piece."""
        self.nxtPieceLabel = label

    def sizeHint(self):
        return QtCore.QSize(TetrixBoard.BOARD_WIDTH * 15 + self.frameWidth()*2,
                            TetrixBoard.BOARD_HEIGHT*15 + self.frameWidth()*2)

    def minimumSizeHint(self):
        return QtCore.QSize(TetrixBoard.BOARD_WIDTH*15 + self.frameWidth()*2,
                            TetrixBoard.BOARD_WIDTH * 5 + self.frameWidth()*2)

    def start(self):
        """Reset all counters and begin a new game (no-op while paused)."""
        if self.isPaused:
            return
        self.isStarted = True
        self.isWaitingAfterLine = False
        self.numLinesRemoved = 0
        self.numPiecesDropped = 0
        self.score = 0
        self.level = 1
        self.clearBoard()
        self.linesRemovedChanged.emit(self.numLinesRemoved)
        self.scoreChanged.emit(self.score)
        self.levelChanged.emit(self.level)
        self.newPiece()
        self.timer.start(self.timeoutTime(), self)

    def pause(self):
        """Toggle the pause state; the game timer is stopped while paused."""
        if not self.isStarted:
            return
        self.isPaused = not self.isPaused
        if self.isPaused:
            self.timer.stop()
        else:
            self.timer.start(self.timeoutTime(), self)
        self.update()

    def paintEvent(self, event):
        super(TetrixBoard, self).paintEvent(event)
        painter = QtGui.QPainter(self)
        rect = self.contentsRect()
        if self.isPaused:
            painter.drawText(rect, QtCore.Qt.AlignCenter, "Pause")
            return
        boardTop = rect.bottom() - TetrixBoard.BOARD_HEIGHT*self.squareHeight()
        # Draw the already-settled squares.
        for i in range(TetrixBoard.BOARD_HEIGHT):
            for j in range(TetrixBoard.BOARD_WIDTH):
                shape = self.shapeAt(j, TetrixBoard.BOARD_HEIGHT-i-1)
                if shape != ShapeEnum.NO_SHAPE:
                    # Fixed: ``rect`` is the QRect computed above; calling
                    # ``rect()`` raised at runtime.
                    self.drawSquare(
                        painter, rect.left() + j*self.squareWidth(),
                        boardTop+i*self.squareHeight(), shape)
        # Draw the piece currently falling.
        if self.curPiece.shape() != ShapeEnum.NO_SHAPE:
            for i in range(4):
                x = self.curx + self.curPiece.xcoord(i)
                y = self.cury - self.curPiece.ycoord(i)
                self.drawSquare(painter, rect.left() + x *self.squareWidth(),
                                boardTop+(TetrixBoard.BOARD_HEIGHT-y-1)*self.squareHeight(),
                                self.curPiece.shape())

    def keyPressEvent(self, event):
        if (not self.isStarted or self.isPaused or
                self.curPiece.shape() == ShapeEnum.NO_SHAPE):
            super(TetrixBoard, self).keyPressEvent(event)
            return
        key = event.key()
        if key == QtCore.Qt.Key_Left:
            self.tryMove(self.curPiece, self.curx-1, self.cury)
        elif key == QtCore.Qt.Key_Right:
            self.tryMove(self.curPiece, self.curx+1, self.cury)
        elif key == QtCore.Qt.Key_Down:
            self.tryMove(self.curPiece.rotatedRight(), self.curx, self.cury)
        elif key == QtCore.Qt.Key_Up:
            self.tryMove(self.curPiece.rotatedLeft(), self.curx, self.cury)
        elif key == QtCore.Qt.Key_Space:
            self.dropDown()
        elif key == QtCore.Qt.Key_D:
            self.oneLineDown()
        else:
            super(TetrixBoard, self).keyPressEvent(event)

    def timerEvent(self, event):
        if event.timerId() == self.timer.timerId():
            if self.isWaitingAfterLine:
                # The post-line-clear pause is over: spawn the next piece.
                self.isWaitingAfterLine = False
                self.newPiece()
                self.timer.start(self.timeoutTime(), self)
            else:
                self.oneLineDown()
        else:
            super(TetrixBoard, self).timerEvent(event)

    def clearBoard(self):
        """Fill the whole grid with NO_SHAPE."""
        self.board = [ShapeEnum.NO_SHAPE for i in
                      range(TetrixBoard.BOARD_HEIGHT*TetrixBoard.BOARD_WIDTH)]

    def dropDown(self):
        """Drop the current piece straight down as far as it will go."""
        dropHeight = 0
        newy = self.cury
        while newy:
            # Fixed: keep the piece in its own column (``curx``); the second
            # argument was mistakenly ``self.cury``.
            if not self.tryMove(self.curPiece, self.curx, newy - 1):
                break
            newy -= 1
            dropHeight += 1
        self.pieceDropped(dropHeight)

    def oneLineDown(self):
        """Move the piece one row down, settling it when it cannot move."""
        if not self.tryMove(self.curPiece, self.curx, self.cury-1):
            self.pieceDropped(0)

    def pieceDropped(self, dropHeight):
        """Freeze the current piece into the board and update the score."""
        for i in range(4):
            x = self.curx + self.curPiece.xcoord(i)
            y = self.cury - self.curPiece.ycoord(i)
            self.setShapeAt(x, y, self.curPiece.shape())
        self.numPiecesDropped += 1
        if self.numPiecesDropped % 25 == 0:
            self.level += 1
            self.timer.start(self.timeoutTime(), self)
            self.levelChanged.emit(self.level)
        self.score += dropHeight + 7
        self.scoreChanged.emit(self.score)
        self.removeFullLines()
        # Fixed: when no line was cleared nothing scheduled the next piece,
        # so the game stalled re-dropping the same piece forever.
        if not self.isWaitingAfterLine:
            self.newPiece()

    def removeFullLines(self):
        """Remove every completed row, shifting the rows above it down."""
        numFullLines = 0
        for i in range(TetrixBoard.BOARD_HEIGHT -1, -1, -1):
            lineIsFull = True
            for j in range(TetrixBoard.BOARD_WIDTH):
                if self.shapeAt(j, i) == ShapeEnum.NO_SHAPE:
                    lineIsFull = False
                    break
            if lineIsFull:
                numFullLines += 1
                for k in range(TetrixBoard.BOARD_HEIGHT-1):
                    for j in range(TetrixBoard.BOARD_WIDTH):
                        self.setShapeAt(j, k, self.shapeAt(j, k+1))
                for j in range(TetrixBoard.BOARD_WIDTH):
                    self.setShapeAt(j, TetrixBoard.BOARD_HEIGHT-1,
                                    ShapeEnum.NO_SHAPE)
        if numFullLines > 0:
            self.numLinesRemoved += numFullLines
            self.score += 10 * numFullLines
            self.linesRemovedChanged.emit(self.numLinesRemoved)
            self.scoreChanged.emit(self.score)
            # Short pause before the next piece appears (see timerEvent).
            self.timer.start(500, self)
            self.isWaitingAfterLine = True
            self.curPiece.setShape(ShapeEnum.NO_SHAPE)
            self.update()

    def newPiece(self):
        """Promote the preview piece to current and spawn a fresh preview."""
        self.curPiece = self.nxtPiece
        self.nxtPiece.setRandomShape()
        self.showNextPiece()
        self.curx = (TetrixBoard.BOARD_WIDTH // 2) + 1
        self.cury = TetrixBoard.BOARD_HEIGHT - 1 + self.curPiece.ymin()
        if not self.tryMove(self.curPiece, self.curx, self.cury):
            # No room for a new piece: game over.
            self.curPiece.setShape(ShapeEnum.NO_SHAPE)
            self.timer.stop()
            self.isStarted = False

    def showNextPiece(self):
        """Render the upcoming piece into the preview label, if one is set."""
        # Fixed: the check was inverted (``is not None: return``), which both
        # skipped the preview whenever a label existed and crashed on None.
        if self.nxtPieceLabel is None:
            return
        dx = self.nxtPiece.xmax() - self.nxtPiece.xmin() + 1
        dy = self.nxtPiece.ymax() - self.nxtPiece.ymin() + 1
        pixmap = QtGui.QPixmap(
            dx*self.squareWidth(), dy*self.squareHeight())
        painter = QtGui.QPainter(pixmap)
        # ``window()`` is the modern name for the obsolete ``background()``
        # brush accessor and returns the same brush.
        painter.fillRect(
            pixmap.rect(), self.nxtPieceLabel.palette().window())
        for i in range(4):
            x = self.nxtPiece.xcoord(i) - self.nxtPiece.xmin()
            y = self.nxtPiece.ycoord(i) - self.nxtPiece.ymin()
            self.drawSquare(painter, x*self.squareWidth(),
                            y*self.squareHeight(), self.nxtPiece.shape())
        painter.end()
        self.nxtPieceLabel.setPixmap(pixmap)

    def tryMove(self, newPiece, newX, newY):
        """Move/replace the current piece if every block lands on a free cell.

        Returns True (and repaints) on success, False when blocked.
        """
        for i in range(4):
            x = newX + newPiece.xcoord(i)
            y = newY - newPiece.ycoord(i)
            if (x < 0 or x >= TetrixBoard.BOARD_WIDTH or y < 0 or
                    y >= TetrixBoard.BOARD_HEIGHT):
                return False
            if self.shapeAt(x, y) != ShapeEnum.NO_SHAPE:
                return False
        self.curPiece = newPiece
        self.curx = newX
        self.cury = newY
        self.update()
        return True

    def drawSquare(self, painter: QtGui.QPainter, x, y, shape: ShapeEnum):
        """Paint one block of ``shape`` with a simple bevelled 3D look."""
        COLOR_TABLE = [
            0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
            0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00
        ]
        color = QtGui.QColor(COLOR_TABLE[shape.value])
        painter.fillRect(x+1, y+1, self.squareWidth()-2,
                         self.squareHeight()-2, color)
        painter.setPen(color.lighter())
        painter.drawLine(x, y+self.squareHeight()-1, x, y)
        painter.drawLine(x, y, x+self.squareWidth()-1, y)
        painter.setPen(color.darker())
        painter.drawLine(x+1, y+self.squareHeight()-1,
                         x+self.squareWidth()-1, y+self.squareHeight()-1)
        painter.drawLine(x+self.squareWidth()-1, y+self.squareHeight()-1,
                         x+self.squareWidth()-1, y+1)
class TetrixPiece:
    """One tetromino: a shape id plus four (x, y) block offsets.

    ``COORDS_TABLES`` holds the canonical block offsets for each shape,
    indexed by ``ShapeEnum.value``.
    """

    COORDS_TABLES = (
        ((0, 0), (0, 0), (0, 0), ( 0, 0)),
        ((0, -1), (0, 0), (-1, 0), (-1, 1)),
        ((0, -1), (0, 0), ( 1, 0), ( 1, 1)),
        ((0, -1), (0, 0), ( 0, 1), ( 0, 2)),
        ((-1, 0), (0, 0), ( 1, 0), ( 0, 1)),
        (( 0, 0), (1, 0), ( 0, 1), ( 1, 1)),
        ((-1, -1), (0, -1), ( 0, 0), ( 0, 1)),
        (( 1, -1), (0, -1), ( 0, 0), ( 0, 1))
    )

    def __init__(self):
        self.coords = [[0, 0] for _ in range(4)]
        self.pieceShape = ShapeEnum.NO_SHAPE
        self.setShape(ShapeEnum.NO_SHAPE)

    def shape(self):
        """Return this piece's ShapeEnum."""
        return self.pieceShape

    def setShape(self, shape: ShapeEnum):
        """Load the block offsets for ``shape`` and remember the shape id."""
        table = TetrixPiece.COORDS_TABLES[shape.value]
        # Copy into fresh mutable lists so rotations can edit them freely.
        self.coords = [list(pair) for pair in table]
        self.pieceShape = shape

    def setRandomShape(self):
        """Pick one of the seven playable (non-empty) shapes at random."""
        SHAPES = [
            ShapeEnum.L_SHAPE, ShapeEnum.LINE_SHAPE, ShapeEnum.MIRRORED_L_SHAPE,
            ShapeEnum.S_SHAPE, ShapeEnum.SQUARE_SHAPE, ShapeEnum.T_SHAPE,
            ShapeEnum.Z_SHAPE
        ]
        self.setShape(random.choice(SHAPES))

    def xcoord(self, index):
        return self.coords[index][0]

    def ycoord(self, index):
        return self.coords[index][1]

    def setXCoord(self, index, x):
        self.coords[index][0] = x

    def setYCoord(self, index, y):
        self.coords[index][1] = y

    def xmin(self):
        """Smallest x offset of the four blocks."""
        return min(x for x, _ in self.coords)

    def xmax(self):
        """Largest x offset of the four blocks."""
        return max(x for x, _ in self.coords)

    def ymin(self):
        """Smallest y offset of the four blocks."""
        return min(y for _, y in self.coords)

    def ymax(self):
        """Largest y offset of the four blocks."""
        return max(y for _, y in self.coords)

    def rotatedLeft(self):
        """Return a new piece rotated 90 degrees counter-clockwise.

        The square shape is rotation-invariant and is returned unchanged.
        """
        if self.pieceShape == ShapeEnum.SQUARE_SHAPE:
            return self
        result = TetrixPiece()
        result.pieceShape = self.pieceShape
        for i in range(4):
            result.setXCoord(i, self.ycoord(i))
            result.setYCoord(i, -self.xcoord(i))
        return result

    def rotatedRight(self):
        """Return a new piece rotated 90 degrees clockwise (square unchanged)."""
        if self.pieceShape == ShapeEnum.SQUARE_SHAPE:
            return self
        result = TetrixPiece()
        result.pieceShape = self.pieceShape
        for i in range(4):
            result.setXCoord(i, -self.ycoord(i))
            result.setYCoord(i, self.xcoord(i))
        return result
if __name__ == "__main__":
    # Entry point: seed the RNG so each run gets a different piece
    # sequence, then hand control to the Qt event loop.
    import sys
    import time
    app = QtWidgets.QApplication(sys.argv)
    tetrix = TetrixWindow()
    tetrix.show()
    random.seed(time.time())
    sys.exit(app.exec())
| 3.8125 | 4 |
baseline/sota_semantic_hashing/data_formatting_stterror.py | gcunhase/StackedDeBERT | 32 | 12771773 | import csv
from baseline.base_utils import INTENTION_TAGS_WITH_SPACE
''' Convert .tsv to .csv without header for each intent
Format:
example1 intent1
example2 intent1
...
exampleN intentM
'''
# Dataset short names paired with their keys in INTENTION_TAGS_WITH_SPACE.
dataset_arr = ['chatbot', 'snips']
dataset_fullname_arr = ['ChatbotCorpus', 'snips']

for tts_stt in ["gtts_witai", "macsay_witai"]:
    for dataset, dataset_fullname in zip(dataset_arr, dataset_fullname_arr):
        tags = INTENTION_TAGS_WITH_SPACE[dataset_fullname]
        for split in ['test', 'train']:  # renamed from ``type`` (shadowed the builtin)
            # Input .tsv (with header): <sentence>\t<label id>.
            data_dir_path = "../../data/stterror_data/"
            data_dir_path += "{}/{}/{}.tsv".format(dataset.lower(), tts_stt, split)
            # Output .csv (tab separated, no header): <sentence>\t<intent name>.
            results_dir_path = data_dir_path.split('.tsv')[0] + "_semantic_hashing.csv"
            # Context managers guarantee both files are closed (the original
            # leaked them); newline='' stops the csv module from emitting
            # blank lines on Windows.
            with open(data_dir_path) as tsv_file, \
                    open(results_dir_path, 'w', newline='') as file_test:
                reader = csv.reader(tsv_file, delimiter='\t')
                dict_writer = csv.writer(file_test, delimiter='\t')
                next(reader, None)  # skip the header row
                for row in reader:
                    dict_writer.writerow([row[0], tags[row[1]]])
| 2.9375 | 3 |
kindlerWeb/shelf/migrations/0002_auto_20200517_0052.py | ssanjaysubramaniam/OpenKindlerWeb | 1 | 12771774 | # Generated by Django 3.0.6 on 2020-05-17 00:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; alters two fields on the Clipping model:
    # new choice labels for ``kind_of_clipping`` and ``message`` -> TextField.
    dependencies = [
        ('shelf', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='clipping',
            name='kind_of_clipping',
            field=models.CharField(choices=[('Highlight', 'Highlight'), ('Note', 'Note'), ('Bookmark', 'Bookmark')], default='Highlight', max_length=10),
        ),
        migrations.AlterField(
            model_name='clipping',
            name='message',
            field=models.TextField(),
        ),
    ]
| 1.601563 | 2 |
python/testlib/tool_assembler_vasm.py | rmtew/peasauce | 40 | 12771775 | """
Peasauce - interactive disassembler
Copyright (C) 2012-2017 <NAME>
Licensed using the MIT license.
This file is intended to allow compilation of assembler instructions and
from the compiled binary, the ability to isolate the machine code those
instructions produce.
The targeted assembler is vasm, whose home page is located here:
http://sun.hasenbraten.de/vasm/index.php
As the author uses Windows it is the only platform currently supported.
Anyone wanting to add support for their own platform will need to do the
following steps:
1. Compile vasm for a combination of cpu and syntax.
On Windows with Visual Studio 2015 installed, this is done with the
following steps:
a) Download and extract the vasm source code from the link above.
b) Open a developer command prompt for Visual Studio.
c) In the command window, enter the vasm source code directory.
d) Type a variant of 'nmake -f Makefile.Win32 CPU=m68k SYNTAX=mot'.
e) Observe 'vasm<cpu>_<syntax>_win32.exe' now exists, where <cpu>
and <syntax> are whatever you specified to 'nmake'.
2. At this point you will have an executable. As illustrated in the
previous step, on Windows for the m68k cpu and Motorola syntax,
this will be named 'vasmm68k_mot_win32.exe'. This module currently
only knows to look in a particular directory for files with a name
matching 'vasm*_win32.exe'.
To add support for your platform:
a) Modify FILE_NAME_PREFIX and FILE_NAME_SUFFIX so that only vasm
executables matching your file name will be found.
b) Add some code to look for commands in your path matching the
resulting pattern - maybe call some shell command to do this.
c) Process the results the same way Windows does.
3. Commit the code to your github fork of the project and do a pull
request to the author.
"""
import glob
import logging
import os
import subprocess
import sys
import StringIO
import tempfile
from . import constants
logger = logging.getLogger("tool-assembler-vasm")
def get_top_level_path():
    """Return the directory the assembler tooling treats as its root.

    The current working directory is used (rather than the directory of this
    source file) so resources such as ``local_binaries`` resolve relative to
    where the user launched the program.
    """
    return os.getcwd()
class BaseAssembler(object):
    """Common behaviour for driving an external assembler executable.

    Subclasses provide ``_option_names`` (option category -> option id ->
    command-line flag) and ``_supported_cpus`` ((cpu_id, syntax_id) ->
    executable path).
    """

    _cpu_id = constants.CPU_UNKNOWN
    _syntax_id = constants.ASM_SYNTAX_UNKNOWN
    _output_format_id = constants.OUTPUT_FORMAT_UNKNOWN

    _option_names = None
    _supported_cpus = None

    def set_cpu(self, cpu_id):
        self._cpu_id = cpu_id

    def set_syntax(self, syntax_id):
        self._syntax_id = syntax_id

    def set_output_format(self, output_format_id):
        # Fixed: this previously called the unbound name
        # ``_lookup_option_value`` (a NameError at runtime) instead of the
        # method on ``self``.
        self._lookup_option_value(constants.OPTIONS_FILE_OUTPUT, output_format_id, check=True)
        self._output_format_id = output_format_id

    def _lookup_option_value(self, key_id, value_id, check=False):
        """Return the flag string for (key_id, value_id).

        With ``check=True`` a missing entry yields ``None`` instead of
        raising ``KeyError``.
        """
        try:
            return self._option_names[key_id][value_id]
        except KeyError:
            if check:
                return None
            raise

    def compile_text(self, text, cpu_id, syntax_id):
        """Assemble ``text`` and return the produced machine code bytes.

        Returns ``None`` when the cpu/syntax pair is unsupported or the
        assembler reports failure (details are logged).
        """
        assembler_path = self._supported_cpus.get((cpu_id, syntax_id), None)
        if assembler_path is None:
            cpu_name = constants.get_cpu_name_by_id(cpu_id)
            syntax_name = constants.get_syntax_name_by_id(syntax_id)
            logger.error("cpu %s and syntax %s not unsupported", cpu_name, syntax_name)
            return

        # The assembler writes its default output file name into the temp
        # directory we chdir into below.
        output_path = tempfile.gettempdir()
        output_file_name = self._option_names[constants.OPTIONS_STANDARD][constants.OPTION_DEFAULT_FILE_NAME]
        output_file_path = os.path.join(output_path, output_file_name)

        # Write the text to be assembled into a temporary input file.
        input_file_path = tempfile.mktemp()
        input_file = open(input_file_path, "w")
        input_file.write(text)
        input_file.close()

        LOG_STDOUT = True
        LOG_STDERR = True
        stdout = tempfile.NamedTemporaryFile()
        stderr = tempfile.NamedTemporaryFile()
        current_path = os.getcwd()
        try:
            os.chdir(output_path)
            # Remove stale output so a failed run cannot return old bytes.
            if os.path.exists(output_file_path):
                os.remove(output_file_path)

            call_args = [assembler_path, input_file_path]
            option_list = [
                (constants.OPTIONS_STANDARD, constants.OPTION_DISABLE_OPTIMISATIONS),
                (constants.OPTIONS_FILE_OUTPUT, constants.OUTPUT_FORMAT_BINARY),
                (constants.OPTIONS_CPU, cpu_id),
            ]
            for k1, k2 in option_list:
                flag_string = self._lookup_option_value(k1, k2, check=True)
                if flag_string is not None:
                    call_args.append(flag_string)
            logger.debug("command line arguments: %s", " ".join(call_args[2:]))

            result = subprocess.call(call_args, stdout=stdout, stderr=stderr)
            if LOG_STDOUT:
                stdout.flush()
                stdout.seek(0, os.SEEK_SET)
                stdout_text = stdout.read()
                if stdout_text:
                    # Previously read but discarded; surface it for debugging.
                    logger.debug("assembler stdout: %s", stdout_text)
            if result == 0:
                # Close the output handle promptly instead of leaking it.
                output_file = open(output_file_path, "rb")
                try:
                    ret = output_file.read()
                finally:
                    output_file.close()
                logger.debug("success: binary file of size %d bytes", len(ret))
                return ret
            else:
                if LOG_STDERR:
                    stderr.flush()
                    stderr.seek(0, os.SEEK_SET)
                    stderr_text = stderr.read()
                    logger.error("assembler failure: standard error contents follow")
                    lines = [ line for line in stderr_text.split(os.linesep) if len(line) ]
                    for line in lines:
                        logger.error("assembler failure: %s", line)
                else:
                    logger.error("assembler returned failure result")
        finally:
            stdout.close()
            stderr.close()
            os.remove(input_file_path)
            os.chdir(current_path)
# Name of the top-level directory expected to contain the vasm executables.
LOCAL_BINARIES_NAME = "local_binaries"
class Assembler(BaseAssembler):
    """vasm-specific assembler driver.

    Maps the generic option ids onto vasm command-line flags and, on
    construction, scans the top-level ``local_binaries`` directory for
    ``vasm<cpu>_<syntax>_win32.exe`` executables to populate
    ``_supported_cpus``.
    """
    _option_names = {
        constants.OPTIONS_STANDARD: {
            constants.OPTION_DISABLE_OPTIMISATIONS: "-no-opt",
            constants.OPTION_DEFAULT_FILE_NAME: "a.out",
        },
        constants.OPTIONS_CPU: {
            constants.CPU_MC60000: "-m68000",
            constants.CPU_MC60010: "-m68010",
            constants.CPU_MC60020: "-m68020",
            constants.CPU_MC60030: "-m68030",
            constants.CPU_MC60040: "-m68040",
            constants.CPU_MC60060: "-m68060",
        },
        constants.OPTIONS_FILE_OUTPUT: {
            constants.OUTPUT_FORMAT_BINARY: "-Fbin",
            constants.OUTPUT_FORMAT_ATARIST_TOS: "-Ftos",
            constants.OUTPUT_FORMAT_AMIGA_HUNK: "-Fhunk",
        },
    }
    """ These will be populated from the naming of located executables. """
    _supported_cpus = {
    }
    # Executable name pattern: vasm<cpu>_<syntax>_win32.exe (Windows only).
    FILE_NAME_PREFIX = "vasm"
    FILE_NAME_SUFFIX = "_win32.exe"
    def __init__(self):
        if os.name != "nt":
            logger.warning("vasm only supported on Windows (pull requests accepted)")
            return
        # Look for executables under the top-level 'local_binaries' directory.
        path = get_top_level_path()
        local_binaries_path = os.path.join(path, LOCAL_BINARIES_NAME)
        if not os.path.exists(local_binaries_path):
            logger.warning("Top-level '%s' directory missing (place vasm binaries here)", LOCAL_BINARIES_NAME)
            return
        pattern_prefix = self.FILE_NAME_PREFIX
        # TODO: Handle suffixes for other platforms.
        pattern_suffix = self.FILE_NAME_SUFFIX
        match_pattern = os.path.join(local_binaries_path, pattern_prefix +"*"+ pattern_suffix)
        matches = glob.glob(match_pattern)
        if not len(matches):
            logger.warning("Unable to locate vasm executables (place vasm binaries in top-level '%s' directory", LOCAL_BINARIES_NAME)
            return
        for matched_file_path in matches:
            # File names look like vasmm68k_mot_win32.exe; strip the prefix
            # and suffix to recover "<cpu>_<syntax>".
            matched_dir_path, matched_file_name = os.path.split(matched_file_path)
            unique_substring = matched_file_name[len(pattern_prefix):-len(pattern_suffix)]
            cpu_name, syntax_name = unique_substring.split("_")
            syntax_id = None
            if syntax_name == "mot":
                syntax_id = constants.ASM_SYNTAX_MOTOROLA
            else:
                logger.warning("vasm executable '%s' has unknown syntax, skipping..", matched_file_path)
                continue
            if cpu_name == "m68k" and syntax_id is not None:
                # One m68k build covers the whole 680x0 family.
                self._supported_cpus[(constants.CPU_MC60000, syntax_id)] = matched_file_path
                self._supported_cpus[(constants.CPU_MC60010, syntax_id)] = matched_file_path
                self._supported_cpus[(constants.CPU_MC60020, syntax_id)] = matched_file_path
                self._supported_cpus[(constants.CPU_MC60030, syntax_id)] = matched_file_path
                self._supported_cpus[(constants.CPU_MC60040, syntax_id)] = matched_file_path
                self._supported_cpus[(constants.CPU_MC60060, syntax_id)] = matched_file_path
        logger.debug("Detected %d supported cpu(s) for vasm assembler", len(self._supported_cpus))
| 2.859375 | 3 |
__init__.py | billyfranklim1/twitter-python | 0 | 12771776 | # TWITTER PYTHON
# Copyright 2022 Billyfranklim
# See LICENSE for details.
# Package metadata (author name is an anonymised placeholder).
__version__ = '0.1.0'
__author__ = '<NAME>'
__license__ = 'MIT'
submission/Ugo_utils.py | MattiaMolon/Atari-Pong-RL | 2 | 12771777 | import matplotlib.pyplot as plt
import numpy as np
import random
from collections import namedtuple
def plot_winsratio(
    wins: list,
    title: str,
    start_idx: int = 0,
    wsize_mean: int = 100,
    wsize_means_mean: int = 1000,
    opponent_update_idxs=None,
):
    """Plot the running win rate and save the figure to imgs/train_ai.png.

    Draws the win rate over the last ``wsize_mean`` episodes and, when enough
    data exists, a second curve smoothing that win rate over
    ``wsize_means_mean`` points.

    Args:
        wins (list): Wins vector. Contains 0 or 1 for each loss or victory
        title (str): Title to use in the plot
        start_idx (int, optional): Start for the x labels. Defaults to 0.
        wsize_mean (int, optional): Window size to compute the Winrate. Defaults to 100.
        wsize_means_mean (int, optional): Window size to compute the mean over the winrates. Defaults to 1000.
        opponent_update_idxs (list, optional): Indexes where the opponent state dict was updated during self play; drawn as red vertical lines. Defaults to None.
    """
    if len(wins) >= wsize_mean:
        # Sliding-window average over the last ``wsize_mean`` episodes,
        # computed with the cumulative-sum trick (O(n) instead of O(n*w)).
        means = np.cumsum(wins, dtype=float)
        means[wsize_mean:] = means[wsize_mean:] - means[:-wsize_mean]
        means = means[wsize_mean - 1 :] / wsize_mean
        idxs = [i + start_idx + wsize_mean - 1 for i in range(len(means))]
        plt.plot(idxs, means, label=f"Running {wsize_mean} average WR")
        # Second-level smoothing: sliding-window mean of the curve above.
        if len(means) >= wsize_means_mean:
            means_mean = np.cumsum(means)
            means_mean[wsize_means_mean:] = (
                means_mean[wsize_means_mean:] - means_mean[:-wsize_means_mean]
            )
            means_mean = means_mean[wsize_means_mean - 1 :] / wsize_means_mean
            idxs_mean = [
                i + start_idx + wsize_mean + wsize_means_mean - 2
                for i in range(len(means_mean))
            ]
            plt.plot(
                idxs_mean,
                means_mean,
                label=f"Running {wsize_mean} average WR mean",
            )
        # Mark the episodes where the self-play opponent was refreshed.
        if opponent_update_idxs != None:
            for x in opponent_update_idxs:
                if x >= wsize_mean:
                    plt.axvline(x=x, c="red")
        plt.legend()
        plt.title(f"Training {title}")
        plt.savefig("imgs/train_ai.png")
        plt.close()
def rgb2grayscale(rgb: np.ndarray) -> np.ndarray:
    """Collapse an (H, W, 3) RGB image into an (H, W) grayscale image.

    Uses the ITU-R BT.601 luma weights 0.2989*R + 0.5870*G + 0.1140*B,
    identical to the channel-by-channel formulation.

    Args:
        rgb (np.ndarray): RGB image to transform
    Returns:
        np.ndarray: Grayscale image
    """
    luma_weights = np.array([0.2989, 0.5870, 0.1140])
    # Contract the channel axis against the weight vector in one call.
    return np.tensordot(rgb[:, :, :3], luma_weights, axes=([2], [0]))
# A single (observation, action, next observation, reward, done) record.
Transition = namedtuple("Transition", ("ob", "action", "next_ob", "rew", "done"))


class ReplayMemory(object):
    """
    Replay memory used for experience replay.
    It stores transitions.
    """

    def __init__(
        self,
        memory_capacity: int,
        train_buffer_capacity: int,
        test_buffer_capacity: int,
    ) -> None:
        """Create an empty ring-buffer memory plus train/test buffers.

        Args:
            memory_capacity (int): Maximum number of elements to fit in the memory
            train_buffer_capacity (int): Maximum number of elements to fit in the train buffer
            test_buffer_capacity (int): Maximum number of elements to fit in the test buffer
        """
        self.memory_capacity = memory_capacity
        self.train_buffer_capacity = train_buffer_capacity
        self.test_buffer_capacity = test_buffer_capacity
        self.memory = []
        self.train_buffer = []
        self.test_buffer = []
        self.memory_position = 0

    def push_to_memory(self, *args) -> None:
        """Store a transition, overwriting the oldest once memory is full."""
        entry = Transition(*args)
        if len(self.memory) == self.memory_capacity:
            self.memory[self.memory_position] = entry
        else:
            self.memory.append(entry)
        self.memory_position = (self.memory_position + 1) % self.memory_capacity

    def push_to_train_buffer(self, *args) -> None:
        """Append a transition to the train buffer, enforcing its capacity."""
        self.train_buffer.append(Transition(*args))
        if len(self.train_buffer) > self.train_buffer_capacity:
            raise Exception("Error: capacity of the train_buffer exceded")

    def push_to_test_buffer(self, ob: np.ndarray) -> None:
        """Append an observation to the test buffer, enforcing its capacity.

        Args:
            ob (np.ndarray): Observation/state to push into the buffer
        """
        self.test_buffer.append(ob)
        if len(self.test_buffer) > self.test_buffer_capacity:
            raise Exception("Error: capacity of the test_buffer exceded")

    def sample(self, batch_size: int) -> np.ndarray:
        """Draw ``batch_size`` transitions uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self) -> int:
        """Number of transitions currently held in memory."""
        return len(self.memory)
lintcode/586.py | jianershi/algorithm | 1 | 12771778 | <filename>lintcode/586.py
"""
586. Sqrt(x) II
Implement double sqrt(double x) and x >= 0.
Compute and return the square root of x.
You do not care about the accuracy of the result, we will help you to output results.
binary search?
by result?
"""
class Solution:
    """
    @param x: a double
    @return: the square root of x
    """
    def sqrt(self, x):
        """Return sqrt(x) for x >= 0 by binary search to ~1e-10 precision.

        For 0 <= x < 1 the root is *larger* than x, so the search interval
        is [x, 1]; otherwise it is [0, x].
        """
        if x >= 1:
            left, right = 0.0, x * 1.0
        else:
            left, right = x * 1.0, 1.0
        while right - left > 1e-10:
            mid = (left + right) / 2.0
            if mid * mid < x:
                left = mid
            else:
                right = mid
        # Fixed: the original returned ``mid``, which is unbound when the
        # loop never runs (e.g. x just below 1, where right-left <= 1e-10).
        # Returning the bracket midpoint is always defined and at least as
        # accurate.
        return (left + right) / 2.0
# Quick manual check: prints an approximation of sqrt(3) when run directly.
s = Solution()
print(s.sqrt(3))
| 4.1875 | 4 |
Female_Health_Analysis/Female_Health_Analysis/__init__.py | danielschulz/Female_Health_Analysis | 1 | 12771779 | <filename>Female_Health_Analysis/Female_Health_Analysis/__init__.py
"""Top-level package for Female Health Analysis."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 1.070313 | 1 |
src/prg_state_ctrl.py | ccarr66/handwriting_gui | 0 | 12771780 | <reponame>ccarr66/handwriting_gui
import sys, os, platform
import ocr_image_analyzer as OCR
try:
import PIL.Image
import PIL.ImageTk
except ModuleNotFoundError:
print('Required libraries not found, please install PIL')
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
debug = True
#******************************************** Program state independent logic
def isWindowsOS():
    """Return True when running on Windows.

    Fixed: ``platform.system`` must be *called* -- the original compared the
    function object itself to "Windows", which is always False, so the
    Windows-specific paths below were never taken.
    """
    return platform.system() == "Windows"
# Path separator for the host OS ('\\' on Windows, '/' elsewhere).
filepathSlash = '\\' if isWindowsOS() else '/'
def scaleImage(imgObj, width, height):
    """Scale ``imgObj`` to fit inside ``width`` x ``height`` (aspect ratio
    preserved) and centre it on a transparent RGBA canvas of that exact size.
    """
    imgWidth, imgHeight = imgObj.size
    # Scale so the image's largest dimension fits the smallest output
    # dimension, guaranteeing the whole image fits inside the canvas.
    smallestOutDim = min(width, height)
    largestInDim = max(imgObj.size)
    imageScale = smallestOutDim/largestInDim
    newWidth = (int)(imageScale * imgWidth)
    newHeight = (int)(imageScale * imgHeight)
    imgObj = imgObj.resize((newWidth,newHeight), PIL.Image.ANTIALIAS)
    # Offsets that centre the scaled image on the output canvas.
    offsetX = (int)(abs(width - newWidth)/2)
    offsetY = (int)(abs(height - newHeight)/2)
    # Fully transparent background (alpha 0); RGB component is irrelevant.
    background = PIL.Image.new('RGBA', (width, height), (255, 0, 0, 0))
    foreground = imgObj.convert('RGBA')
    # Third argument is the paste mask: use the foreground's own alpha.
    background.paste(foreground, (offsetX, offsetY), foreground)
    return background
#******************************************** Object that contains program state
class PrgStateCtrl:
    """Holds GUI program state: the selected model, the loaded image, and the
    results of the most recent OCR pass (text plus annotated images).

    NOTE(review): the attributes below are *class* attributes, so multiple
    instances would share the mutable lists. Fine for the single-instance
    GUI, but worth confirming before reusing this class elsewhere.
    """

    # Filesystem layout: resources live under <exec dir>/res and res/images.
    _execPath = os.path.dirname(os.path.realpath(__file__))
    _resSubFolderPath = "\\res" if isWindowsOS() else "/res"
    _imgSubFolderPath = "\\images" if isWindowsOS() else "/images"

    _modelName = ""
    _modelPath = _execPath + _resSubFolderPath + filepathSlash
    _imgName = ""
    _imgPath = _execPath + _resSubFolderPath + _imgSubFolderPath
    _outputFileName = "results"
    _outputFilePath = _execPath

    # Transparent placeholder shown before any image is loaded.
    _cachedImage = PIL.Image.new('RGBA', (400, 550), (0, 0, 0, 0))

    _modelSet = False
    _imageLoaded = False
    _outputIsValid = False

    _currentOutputImageIdx = 0
    _currentOutputImages = []
    _currentOutputImageLabels = []
    _currentOutputText = ""

    def __init__(self):
        # In debug mode pre-load a known image/model pair for quicker testing.
        if debug:
            self._imgName = "perry3.png"
            self._modelName = "handwriting_v1.model"
            self.SetModel(self._modelName)
            self.LoadImage(self._imgName)

    def GetImagePath(self):
        """Directory that holds loadable images (res/images/)."""
        return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash

    def GetImageFullPath(self):
        """Full path of the currently selected image file."""
        return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash + self._imgName

    def GetModelPath(self):
        return self._modelPath

    def GetModelFullPath(self):
        return self._modelPath + self._modelName

    def GetCachedImage(self):
        """Return (image, label) for the currently cached preview image."""
        return (self._cachedImage, "Cached Image")

    def GetOutputPath(self):
        return self._outputFilePath

    def GetOutputName(self):
        return self._outputFileName

    def isValidImg(self, name):
        """Return True when ``name`` (in the image folder) opens as an image."""
        try:
            # Context manager releases the file handle immediately.
            with PIL.Image.open(self.GetImagePath() + name):
                return True
        except Exception:  # narrowed from a bare except; still best-effort
            return False

    def SetModel(self, modelName):
        """Select ``modelName`` if OCR accepts it; return success."""
        if OCR.modelIsValid(self.GetModelPath() + modelName):
            self._modelName = modelName
            self._modelSet = True
            return True
        return False

    def LoadImage(self, imageName):
        """Load and cache ``imageName``, invalidating any previous OCR output.

        Returns True on success, False when the file is not a valid image.
        """
        if not self.isValidImg(imageName):
            return False
        self._imgName = imageName
        self._cachedImage = scaleImage(PIL.Image.open(self.GetImagePath() + self._imgName), 400, 550)
        self._imageLoaded = True
        self._outputIsValid = False
        self._currentOutputImages.clear()
        self._currentOutputImageLabels.clear()
        self._currentOutputImageIdx = 0
        return True

    def PerformOCR(self):
        """Run OCR on the loaded image, caching result text and images."""
        self._currentOutputImages.clear()
        self._currentOutputImageLabels.clear()
        if not (self._modelSet and self._imageLoaded):
            return
        try:
            self._currentOutputImageIdx = 0
            # Slot 0 is always the original (cached) image.
            self._currentOutputImages.append(self._cachedImage)
            self._currentOutputImageLabels.append("Original")
            text, images, imageLabels = OCR.analyzeImage(self.GetImageFullPath())
            self._currentOutputText = "".join(text)
            for img in images:
                scaledImg = scaleImage(PIL.Image.fromarray(img), 400, 550)
                self._currentOutputImages.append(scaledImg)
            self._currentOutputImageLabels.extend(imageLabels)
            self._outputIsValid = True
        except Exception:  # narrowed from a bare except; keep the UI alive
            self._outputIsValid = False

    def GetOutputText(self):
        """Recognised text of the last successful OCR pass, else ''."""
        return self._currentOutputText if self._outputIsValid else ""

    def GetNextOutputImage(self):
        """Cycle forward through the OCR result images; return (image, label)."""
        if not self._outputIsValid:
            return (self._cachedImage, "Cached Image")
        # Modular arithmetic wraps from the last image back to the first.
        self._currentOutputImageIdx = (self._currentOutputImageIdx + 1) % len(self._currentOutputImages)
        return (self._currentOutputImages[self._currentOutputImageIdx],
                self._currentOutputImageLabels[self._currentOutputImageIdx])

    def GetPrevOutputImage(self):
        """Cycle backward through the OCR result images; return (image, label)."""
        if not self._outputIsValid:
            return (self._cachedImage, "Cached Image")
        self._currentOutputImageIdx = (self._currentOutputImageIdx - 1) % len(self._currentOutputImages)
        return (self._currentOutputImages[self._currentOutputImageIdx],
                self._currentOutputImageLabels[self._currentOutputImageIdx])
| 2.71875 | 3 |
Python/shortest-distance-to-target-color.py | RideGreg/LeetCode | 1 | 12771781 | <gh_stars>1-10
# Time: O(n)
# Space: O(n)
# 1182 biweekly contest 8 9/7/2019
# You are given an array colors, in which there are three colors: 1, 2 and 3.
#
# You are also given some queries. Each query consists of two integers i and c, return the shortest distance
# between the given index i and the target color c. If there is no solution return -1.
# 1 <= colors.length, queries.length <= 5*10^4
# Input: colors = [1,1,2,1,3,2,2,3,3], queries = [[1,3],[2,2],[6,1]]
# Output: [3,0,3]
# Python 2/3 compatibility: alias xrange to range when running on Python 3.
try:
    xrange
except NameError:
    xrange = range
class Solution(object):
    """Shortest distance from each query index to the nearest cell of a colour."""

    def shortestDistanceColor(self, colors, queries):
        """
        :type colors: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]
        """
        # dist[c][i]: distance from index i to the nearest cell of colour c.
        n = len(colors)
        INF = float('inf')
        dist = [[INF] * n for _ in range(4)]
        # Left-to-right sweep: nearest occurrence at or before each index.
        dist[colors[0]][0] = 0
        for i in range(1, n):
            for c in (1, 2, 3):
                dist[c][i] = dist[c][i - 1] + 1
            dist[colors[i]][i] = 0
        # Right-to-left sweep folds in occurrences to the right.
        for i in range(n - 2, -1, -1):
            for c in (1, 2, 3):
                if dist[c][i + 1] + 1 < dist[c][i]:
                    dist[c][i] = dist[c][i + 1] + 1
        return [dist[c][i] if dist[c][i] < INF else -1 for i, c in queries]

    def shortestDistanceColor_kamyu(self, colors, queries):
        """
        :type colors: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]
        """
        # nearest[c][i]: index of the closest occurrence of colour c, or -1.
        n = len(colors)
        nearest = [[-1] * n for _ in range(4)]
        nearest[colors[0]][0] = 0
        for i in range(1, n):
            for c in (1, 2, 3):
                nearest[c][i] = nearest[c][i - 1]
            nearest[colors[i]][i] = i
        for i in range(n - 2, -1, -1):
            for c in (1, 2, 3):
                right = nearest[c][i + 1]
                if right == -1:
                    continue
                cur = nearest[c][i]
                if cur == -1 or abs(right - i) < abs(cur - i):
                    nearest[c][i] = right
        return [abs(nearest[c][i] - i) if nearest[c][i] != -1 else -1
                for i, c in queries]
# Quick sanity checks against the examples from the problem statement.
print(Solution().shortestDistanceColor([1,1,2,1,3,2,2,3,3], [[1,3],[2,2],[6,1]])) # [3,0,3]
print(Solution().shortestDistanceColor([1,2], [[0,3]])) # [-1]
Castessoft_Python/Castessoft_Fund01.py | JuanDiegoCastellanos/All-in-one-Python-Full | 1 | 12771782 | <filename>Castessoft_Python/Castessoft_Fund01.py
# Leftover PyCharm template code (unused):
# def print_hi(name):
#     print(f'Hi, {name}')
# if __name__ == '__main__':
#     print_hi('PyCharm')
print("Hello World Python xd")
# To get the memory address of a variable, use the id() function.
name = "<NAME>"
print(name)
print(id(name))
print("------")
print("tipo de dato")
print(type(name))
# Add type hints indicating what type the variables should be.
last_name: str = "<NAME>"
print(last_name)
print("---------Day Qualifier--------")
dia = int(input("How was your day? (between 1 to 10 ): "))
print(f"Your day was: {dia}")
print("----------------Book info----------------")
author = input("Please provide the author's name: ")
title = input("Please provide the book's title: ")
print("thanks for your information! ")
print(f"The {title} it was written by {author}")
print("--------------Rectangle Area and Perimeter-------------")
height = int(input("Please provide the height: "))
width = int(input("Please provide the width: "))
print(f"The Area is {(height*width)}")
print(f"The Perimeter is {((height+width)*2)}")
kanmail/server/mail/oauth_settings.py | frznvm0/Kanmail | 1,118 | 12771783 | <reponame>frznvm0/Kanmail
from kanmail.settings.hidden import get_hidden_value
OAUTH_PROVIDERS = {
'gmail': {
'auth_endpoint': 'https://accounts.google.com/o/oauth2/auth',
'token_endpoint': 'https://accounts.google.com/o/oauth2/token',
'profile_endpoint': 'https://www.googleapis.com/userinfo/v2/me',
'scope': 'https://mail.google.com https://www.googleapis.com/auth/userinfo.email',
'client_id': get_hidden_value('GOOGLE_OAUTH_CLIENT_ID'),
'client_secret': get_hidden_value('GOOGLE_OAUTH_CLIENT_SECRET'),
},
}
| 1.476563 | 1 |
foxtail/clinics/models.py | scottbarnes/foxtail | 5 | 12771784 | <reponame>scottbarnes/foxtail<filename>foxtail/clinics/models.py
""" foxtail/clinics/models.py """
from datetime import datetime
from django.contrib.auth import get_user_model
from django.db import models
from model_utils.models import TimeStampedModel
from guardian.shortcuts import get_objects_for_group
from foxtail.organizations.models import Organization
from foxtail.users.models import User
from .utilities import get_time_slot_choices
# Start and end times, in 24 hour format, for clinic time slot generation.
# Note, if the delta is changed here, it must be changed in appointments/models.py too.
start = datetime(1, 1, 1, 7, 0)
end = datetime(1, 1, 1, 21, 0)
delta = 30
TIME_CHOICES = get_time_slot_choices(start, end, delta)
class Clinic(TimeStampedModel):
""" Make clinic instances. """
organization = models.ForeignKey(Organization, on_delete=models.SET_NULL, null=True, related_name='clinics')
date = models.DateField('Date')
start_time = models.CharField('Start time', max_length=255, choices=TIME_CHOICES)
end_time = models.CharField('End time', max_length=255, choices=TIME_CHOICES)
created_by = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, blank=True, null=True,
related_name='clinics_created') # null=True necessary because of SET_NULL.
class Meta:
verbose_name = ('Tabular clinic view')
verbose_name_plural = ('Tabular clinic views')
def get_organization(self):
# This should return the organization name for the clinic. Needs testing.
return self.organization.name
def __str__(self):
try:
return f'{self.organization.abbreviation}: {self.date}'
except:
return 'ERROR'
class ClinicProxy(Clinic):
"""
:((((((((((((
Source: https://books.agiliq.com/projects/django-admin-cookbook/en/latest/add_model_twice.html
"""
class Meta:
proxy = True
verbose_name = ('Stacked clinic view')
verbose_name_plural = ('Stacked clinic views')
| 2.46875 | 2 |
mp3treesim/__main__.py | michelezoncheddu/mp3treesim | 1 | 12771785 | import mp3treesim as mp3
import argparse
def main():
parser = argparse.ArgumentParser(
description='mp3treesim', # TODO: fix this
add_help=True)
parser.add_argument('trees', metavar='TREE', nargs=2,
help='Paths to the trees')
group_mode = parser.add_mutually_exclusive_group()
group_mode.add_argument('-i', action='store_true', default=False,
help='Run MP3-treesim in Intersection mode.')
group_mode.add_argument('-u', action='store_true', default=False,
help='Run MP3-treesim in Union mode.')
group_mode.add_argument('-g', action='store_true', default=False,
help='Run MP3-treesim in Geometric mode.')
args = parser.parse_args()
if args.i:
mode = 'intersection'
elif args.u:
mode = 'union'
elif args.g:
mode = 'geometric'
else:
mode = 'sigmoid'
tree1 = mp3.read_dotfile(args.trees[0])
tree2 = mp3.read_dotfile(args.trees[1])
score = mp3.similarity(tree1, tree2, mode=mode)
print(score)
if __name__ == "__main__":
main()
| 3.09375 | 3 |
RestAdm/apps/system/migrations/0019_auto_20200704_0022.py | xeroCBW/testmodel | 0 | 12771786 | # Generated by Django 2.2.5 on 2020-07-04 00:22
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('system', '0018_auto_20200703_2354'),
]
operations = [
migrations.AddField(
model_name='role',
name='code',
field=models.CharField(default='', max_length=20),
),
migrations.AlterField(
model_name='userprofile',
name='user_secret',
field=models.CharField(default=uuid.UUID('f51c894d-a532-4ddc-b3e9-2847d4b2e7ae'), max_length=500, verbose_name='用户JWT秘钥'),
),
]
| 1.703125 | 2 |
modules.py | thu-spmi/LABES | 14 | 12771787 | <reponame>thu-spmi/LABES
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from config import global_config as cfg
def cuda_(var):
return var.cuda() if cfg.cuda else var
def get_one_hot_input(input_t, v_dim=None):
"""
word index sequence -> one hot sparse input
:param x_input_np: [B, Tenc]
:return: tensor: [B,Tenc, V]
"""
def to_one_hot(y, n_dims=None):
""" Take integer y (tensor or variable) with n dims and convert it to 1-hot representation with n+1 dims. """
y_tensor = y.data if isinstance(y, Variable) else y
y_tensor = y_tensor.type(torch.LongTensor).contiguous().view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).fill_(1e-10).scatter_(1, y_tensor, 1) #1e-10
return y_one_hot.view(*y.shape, -1)
# input_t = torch.from_numpy(x_input_np).long() #[B, T]
input_t_onehot = to_one_hot(input_t, n_dims=v_dim) #[B,T,V]
input_t_onehot[:, :, 0] = 1e-10 #<pad> to zero
# input_t_onehot = [cuda_(t.to_sparse()) for t in input_t_onehot]
# return input_t_onehot
return cuda_(input_t_onehot)
def get_sparse_input_efficient(x_input_np):
ignore_index = [0]
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size), dtype=np.float32)
result.fill(1e-10)
for b in range(x_input_np.shape[0]):
for t in range(x_input_np.shape[1]):
if x_input_np[b][t] not in ignore_index:
result[b][t][x_input_np[b][t]] = 1.0
result = torch.from_numpy(result).float()
return result
def shift(pz_proba):
"""[summary]
:param pz_proba: [B,T,V]
:returns: shifted pz_proba
"""
first_input = cuda_(torch.zeros((pz_proba.size(0), 1, pz_proba.size(2)))).fill_(1e-12)
pz_proba = torch.cat([first_input, pz_proba], dim=1)
return pz_proba[:, :-1].detach()
def gumbel_softmax(logits, temperature):
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).cuda()
return -Variable(torch.log(-torch.log(U + eps) + eps))
y = logits + sample_gumbel(logits.size())
return F.softmax(y / temperature, dim=-1)
def ST_gumbel_softmax_sample(y):
"""
ST-gumbel-softmax
input: [*, n_class]
return: flatten --> [*, n_class] an one-hot vector
"""
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
return y_hard
class Attn(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, hidden, encoder_outputs, mask=None):
"""
:param hidden: tensor of size [n_layer, B, H]
:param encoder_outputs: tensor of size [B,T, H]
"""
attn_energies = self.score(hidden, encoder_outputs) # [B,T,H]
if mask is None:
normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]
else:
attn_energies.masked_fill_(mask, -1e20)
normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]
context = torch.bmm(normalized_energy, encoder_outputs) # [B,1,H]
return context # [B,1, H]
def score(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1) # [B,T,H]
energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2))) # [B,T,2H]->[B,T,H]
energy = self.v(energy).transpose(1,2) # [B,1,T]
return energy
class Encoder(nn.Module):
def __init__(self, embedding, input_size, embed_size, hidden_size, n_layers, dropout):
super(Encoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layers = n_layers
self.dropout_rate = dropout
self.dropout = nn.Dropout(self.dropout_rate)
self.embedding = embedding
self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout_rate, bidirectional=True, batch_first=True)
def forward(self, input_seqs, hidden=None, input_type='index'):
if input_type == 'index':
embedded = self.embedding(input_seqs)
elif input_type == 'embedding':
embedded = input_seqs
outputs, hidden = self.gru(embedded, hidden)
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs
return outputs, hidden
class DynamicEncoder(nn.Module):
def __init__(self, embedding, input_size, embed_size, hidden_size, n_layers, dropout):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layers = n_layers
self.dropout_rate = dropout
self.dropout = nn.Dropout(self.dropout_rate)
self.embedding = embedding
self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout_rate, bidirectional=True, batch_first=True)
def forward(self, input_seqs, input_lens, hidden=None, input_type='index'):
"""
forward procedure. No need for inputs to be sorted
:param input_seqs: Variable of [B, T] (input_type=index) or
[B,T,E] (input_type=embedding)
:param hidden:
:param input_lens: *numpy array* of len for each input sequence
:return:
"""
batch_size = input_seqs.size(0)
if input_type == 'index':
embedded = self.embedding(input_seqs)
elif input_type == 'embedding':
embedded = input_seqs
sort_idx = np.argsort(-input_lens)
# print(sort_idx)
# print('tensor:', torch.LongTensor(np.argsort(sort_idx)))
unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
input_lens = input_lens[sort_idx]
sort_idx = cuda_(torch.LongTensor(sort_idx))
embedded = embedded[sort_idx]
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens, batch_first=True)
outputs, hidden = self.gru(packed, hidden)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
outputs = outputs[:,:,:self.hidden_size] + outputs[:,:,self.hidden_size:]
outputs = outputs[unsort_idx].contiguous()
hidden = hidden[unsort_idx].contiguous()
return outputs, hidden | 2.5 | 2 |
tp2/src/grid.py | ha2398/robotica-movel | 2 | 12771788 | <reponame>ha2398/robotica-movel<filename>tp2/src/grid.py
#!/usr/bin/env python2
'''
@author: <NAME>
Class Grid models the robot environment as a grid of cells.
'''
from nav_msgs.msg import OccupancyGrid
from scipy import ndimage
import numpy as np
import rospy
class Grid:
# Constants
OUTPUT_NAME = 'map.pgm'
PGM_MAGIC_NUM = 'P2'
MAX_GRAY_VALUE = 100
OCC_THRESHOLD = 0.7
INITIAL_P = 0.5
ALPHA = 0.5 # Thickness of obstacles
def __init__(self, height, width, resolution, max_laser_range, num_ranges):
'''
@height: (int) Number of cells that form the grid's height.
@width: (int) Number of cells that form the grid's width.
@resolution: (float) Size of the cells side.
@max_laser_range: (float) Max distance of laser ranges.
@num_ranges: (int) Number of laser beams in robot's sensor.
'''
self.height = height
self.width = width
self.cells = np.full((height, width),
self.log_odds_ratio(self.INITIAL_P))
self.resolution = resolution
# Width of each laser beam.
self.beta = (max_laser_range * np.pi) / num_ranges
self.l_occ = self.log_odds_ratio(self.OCC_THRESHOLD)
self.l_free = self.log_odds_ratio(1 - self.OCC_THRESHOLD)
def log_odds_ratio(self, p):
'''
Calculate the log odds ratio for a give probability.
@p: (float) Probability.
@return: (float) Log odds ratio for @p.
'''
return np.log(p / (1 - p))
def get_prob_from_log_odds(self, l):
'''
Retrieve probability from log odds ratio.
@l: (float) Log odds ratio.
@return: (float) Probability.
'''
return 1 - (1 / (1 + np.exp(l)))
def position_to_index(self, x, y):
'''
Get the cell index of a given position in the world.
@x: (float) x coordinate of position in the world frame.
@y: (float) y coordinate of position in the world frame.
@return: (int, int) Index of the corresponding grid cell of the
given position.
'''
# Get grid cell index.
i = int(y / self.resolution + self.width / 2)
j = int(x / self.resolution + self.height / 2)
return (i, j)
def center_of_mass_from_index(self, i, j):
'''
Get the coordinates of the center of mass of the cell with the
given index.
@i: (int) Grid row.
@j: (int) Grid column.
@return: (float, float) Coordinates of the cell's center of mass.
'''
x = self.resolution * (j - (self.height + 1) / 2.)
y = self.resolution * (i - (self.width + 1) / 2.)
return x, y
def is_valid_index(self, i, j):
'''
Check if a given index is valid for the grid.
@i: (int) Grid row.
@j: (int) Grid column.
@return: (bool) True iff the index is valid for the grid.
'''
return i >= 0 and j >= 0 and i < self.height and j < self.width
def get_neighbours(self, array, index):
'''
Get neighbour cells of the cell with given index in an array.
@array: (numpy array) Array.
@index: (int, int) Index of cell.
@return: (numpy array) Array with the indices of neighbour cells.
'''
matrix = np.array(array)
indices = tuple(np.transpose(np.atleast_2d(index)))
arr_shape = np.shape(matrix)
dist = np.ones(arr_shape)
dist[indices] = 0
dist = ndimage.distance_transform_cdt(dist, metric='chessboard')
nb_indices = np.transpose(np.nonzero(dist == 1))
return [tuple(x) for x in nb_indices]
def get_random_frontier_cell(self):
'''
Get random frontier cell coordinates.
@return: (float, float) Closest frontier cell coordinates.
'''
probs = np.flipud(self.get_prob_from_log_odds(self.cells))
visited = np.full((self.height, self.width), False)
# Create random search starting point.
rand_cell_i = np.random.randint(0, self.height)
rand_cell_j = np.random.randint(0, self.width)
random_starting_point = self.center_of_mass_from_index(
rand_cell_i, rand_cell_j)
queue = [self.position_to_index(*random_starting_point)]
occ_prob = self.OCC_THRESHOLD
free_prob = 1 - occ_prob
while len(queue) > 0: # BFS
cur_cell = queue.pop(0)
if visited[cur_cell]:
continue
visited[cur_cell] = True
neighbours = self.get_neighbours(probs, cur_cell)
queue += neighbours
if probs[cur_cell] <= free_prob: # Free cell
while len(neighbours) > 0:
n = neighbours.pop(0)
p = probs[n]
if p > free_prob and p < occ_prob: # Unknown cell
return self.center_of_mass_from_index(*cur_cell)
return None
def get_occupancy_msg(self):
'''
Create an Occupancy Grid message from grid data.
@return: (OccupancyGrid) Occupancy Grid message.
'''
msg = OccupancyGrid()
msg.header.frame_id = 'map'
msg.info.resolution = self.resolution
msg.info.width = self.width
msg.info.height = self.height
msg.info.origin.position.x = - self.width // 2 * self.resolution
msg.info.origin.position.y = - self.height // 2 * self.resolution
msg.info.origin.orientation.x = 0
msg.info.origin.orientation.y = 0
msg.info.origin.orientation.z = 0
msg.info.origin.orientation.w = 1
msg.header.stamp = rospy.Time.now()
cells = np.flipud(self.cells)
msg.data = [int(self.get_prob_from_log_odds(
x) * self.MAX_GRAY_VALUE) for x in cells.flat[::-1]]
return msg
def dump_pgm(self):
'''
Dump the grid probabilities to a PGM image file.
'''
header_str = '{}\n{} {}\n{}'.format(
self.PGM_MAGIC_NUM, self.width, self.height, self.MAX_GRAY_VALUE)
cells = np.rot90(self.cells)
cells = np.vectorize(lambda x: int(
(1 - self.get_prob_from_log_odds(x)) * self.MAX_GRAY_VALUE))(cells)
np.savetxt(self.OUTPUT_NAME, cells, delimiter=' ',
header=header_str, comments='', fmt='%d')
| 2.828125 | 3 |
eventstore_grpc/constants.py | jmshnds/eventstore_grpc | 6 | 12771789 | """
Shared constant values.
"""
"""
## Handling Concurrency.
When appending events to a stream, you can supply a
*stream state* or *stream revision*. Your client can
use this to tell EventStoreDB what state or version
you expect the stream to be in when you append. If the
stream isn't in that state the an exception will be thrown.
The are three available stream states:
* Any
* NoStream
* StreamExists
This check can be used to implement optimistic concurrency. When you retrieve a stream
from EventStoreDB, you take note of the current version number, then when you save it
back you can determine if somebody else has modified the record in the meantime.
"""
NO_STREAM = "NO_STREAM"
ANY = "ANY"
STREAM_EXISTS = "STREAM_EXISTS"
"""
## Directions
"""
FORWARDS = "FORWARDS"
BACKWARDS = "BACKWARDS"
"""
## Revision positions.
"""
START = "START"
END = "END"
"""
## OTHERS
"""
WRONG_EXPECTED_VERSION = "wrong_expected_version" | 1.914063 | 2 |
buildVars.py | bingen13/unspokenPy3 | 2 | 12771790 | from pathlib import Path
_ = lambda x : x
addon_info = {
"addon_name" : "unspokenPy3",
"addon_summary" : _("Unspoken"),
"addon_description" : _("""Removes names label from object like, link, button. It plays different sounds instead of labels."""),
"addon_version" : "0.4",
"addon_author" : u", Sean (ported python3): <EMAIL>, Camlorn (main developer): <EMAIL>, Bryan Smart: NoMail",
"addon_url" : "https://github.com/SeanTolstoyevski/unspokenPy3/releases",
"addon_docFileName" : "readme.html",
"addon_minimumNVDAVersion" : 2019.3,
"addon_lastTestedNVDAVersion" : 2022.3,
"addon_updateChannel" : None,
}
pythonSources = list(Path.cwd().joinpath("addon", "globalPlugins").rglob("*.py"))
i18nSources = pythonSources + ["buildVars.py", "addon\\installTasks.py"]
excludedFiles = []
| 1.742188 | 2 |
applications/zcomx/private/bin/process_activity_logs.py | zcomx/zco.mx | 0 | 12771791 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
process_activity_logs.py
Script to process activity_log records.
* Create activity_log records from tentative_activity_log records.
* Delete tentative_activity_log records converted thus.
"""
import sys
import traceback
from optparse import OptionParser
from applications.zcomx.modules.activity_logs import \
ActivityLog, \
CompletedTentativeLogSet, \
MINIMUM_AGE_TO_LOG_IN_SECONDS, \
PageAddedTentativeLogSet, \
TentativeLogSet
from applications.zcomx.modules.logger import set_cli_logging
VERSION = 'Version 0.1'
def man_page():
"""Print manual page-like help"""
print("""
USAGE
process_activity_logs.py [OPTIONS]
OPTIONS
-h, --help
Print a brief help.
--man
Print man page-like help.
-m, --minimum-age
Tentative activity log records must have this minimum age in order
to be processed. Age is in seconds. Default: {m}
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""".format(m=MINIMUM_AGE_TO_LOG_IN_SECONDS))
def main():
"""Main processing."""
usage = '%prog [options]'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-m', '--minimum-age', type='int',
dest='minimum_age', default=MINIMUM_AGE_TO_LOG_IN_SECONDS,
help='Minimum age of tentative log to process.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, unused_args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
LOG.debug('Starting')
logs = db(db.tentative_activity_log).select(
db.tentative_activity_log.book_id,
groupby=db.tentative_activity_log.book_id,
)
for log in logs:
LOG.debug('Checking book id: %s', log.book_id)
filters = {'book_id': log.book_id}
tentative_log_set = TentativeLogSet.load(filters=filters)
youngest_log = tentative_log_set.youngest()
age = youngest_log.age()
if age.total_seconds() < options.minimum_age:
LOG.debug(
'Tentative log records too young, book_id: %s', log.book_id)
continue
LOG.debug('Logging book id: %s', log.book_id)
log_set_classes = [
PageAddedTentativeLogSet,
CompletedTentativeLogSet,
]
for log_set_class in log_set_classes:
log_set = log_set_class.load(filters=filters)
activity_log_data = log_set.as_activity_log()
if activity_log_data:
activity_log = ActivityLog.from_add(activity_log_data)
LOG.debug(
'Created activity_log action: %s',
activity_log.action
)
for tentative_activity_log in tentative_log_set.tentative_records:
tentative_activity_log.delete()
LOG.debug('Done')
if __name__ == '__main__':
# pylint: disable=broad-except
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
exit(1)
| 2.703125 | 3 |
Command/exercise.py | NachoCP/python-design-patterns | 0 | 12771792 | <filename>Command/exercise.py
import unittest
from enum import Enum
class Command:
class Action(Enum):
DEPOSIT = 0
WITHDRAW = 1
def __init__(self, action, amount):
self.action = action
self.amount = amount
self.success = False
class Account:
def __init__(self, balance=0):
self.balance = balance
def process(self, command):
if command.action == Command.Action.DEPOSIT:
self.balance += command.amount
command.success = True
elif command.action == Command.Action.WITHDRAW:
command.success = self.balance >= command.amount
if command.success:
self.balance -= command.amount
class Evaluate(unittest.TestCase):
def test(self):
a = Account()
cmd = Command(Command.Action.DEPOSIT, 100)
a.process(cmd)
self.assertEqual(100, a.balance)
self.assertTrue(cmd.success)
cmd = Command(Command.Action.WITHDRAW, 50)
a.process(cmd)
self.assertEqual(50, a.balance)
self.assertTrue(cmd.success)
cmd.amount = 150
a.process(cmd)
self.assertEqual(50, a.balance)
self.assertFalse(cmd.success)
| 3.640625 | 4 |
app.py | vidyadharmestry/magnet_chatbot | 0 | 12771793 | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
import cx_Oracle
install_aliases()
import json
import os
import sys
import pandas as pd
sys.path.append('cognitiveSQL')
from flask import Flask,session
from flask import request
from flask import make_response
from flask import url_for, redirect
from flask_socketio import SocketIO, send, emit
import subprocess
#import cognitiveSQL.Database as Database
#import cognitiveSQL.LangConfig as LangConfig
#import cognitiveSQL.Parser as Parser
#import cognitiveSQL.Thesaurus as Thesaurus
#import cognitiveSQL.StopwordFilter as StopwordFilter
#from cognitiveSQL.HashMap import hashMap_columns
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import datetime
import math
import psycopg2
import apiai
import requests
# Flask app should start in global layout
app = Flask(__name__, static_url_path='')
socketio = SocketIO(app)
app.secret_key = 'my unobvious secret key'
parser = ""
baseUrl = "https://api.dialogflow.com/v1/query?v=20170712"
accessToken = "66ad5ee869a34d3593181c0f9ff0922c"
# @app.route('/')
# def index():
# return redirect(url_for('static_url', filename='index.html'))
def select_inquiry_response(prod_name, columnName,indication):
try:
url = urlparse(
"postgres://caedtehsggslri:4679ba0abec57484a1d7ed261b74e80b08391993433c77c838c58415087a9c34@ec2-107-20-255-96.compute-1.amazonaws.com:5432/d5tmi1ihm5f6hv")
print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
sql = "select " + columnName + " from public.inquiry_response where product_name = '%s' and indication = '%s' limit %s" %(prod_name,indication,1)
print(sql)
cur.execute(sql)
row = cur.fetchone()
#print(row[1])
#print("The number of parts: ", cur.rowcount)
cur.close()
return row
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def select_temp_data():
try:
url = urlparse(
"postgres://caedtehsggslri:4679ba0abec57484a1d7ed261b74e80b08391993433c77c838c58415087a9c34@ec2-107-20-255-96.compute-1.amazonaws.com:5432/d5tmi1ihm5f6hv")
print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=<PASSWORD>,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
sql = "select header,header_value from public.mia_temp_param"
print(sql)
cur.execute(sql)
row = cur.fetchall()
#print(row[1])
#print("The number of parts: ", cur.rowcount)
cur.close()
return row
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def truncate_temp_table():
try:
url = urlparse(
"postgres://caedtehsggslri:4679ba0abec57484a1d7ed261b74e80b08391993433c77c838c58415087a9c34@ec2-107-20-255-96.compute-1.amazonaws.com:5432/d5tmi1ihm5f6hv")
print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
sql = "DELETE FROM public.mia_temp_param"
print(sql)
cur.execute(sql)
#print(row[1])
print("The number of parts: ", cur.rowcount)
conn.commit()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def insert_into_temp(header,header_value):
sql = "INSERT INTO public.mia_temp_param (header,header_value) VALUES(%s, %s)";
try:
# read database configuration
# params = config()
# connect to the PostgreSQL database
url = urlparse(
"postgres://caedtehsggslri:4679ba0abec57484a1d7ed261b74e80b08391993433c77c838c58415087a9c34@ec2-107-20-255-96.compute-1.amazonaws.com:5432/d5tmi1ihm5f6hv")
# print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=<PASSWORD>.password,
host=url.hostname,
port=url.port
)
# create a new cursor
cur = conn.cursor()
# execute the INSERT statement
cur.execute(sql, (header, header_value))
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def insert_into_temp(list):
sql = "INSERT INTO public.mia_temp_param (header,header_value) VALUES(%s, %s)";
try:
# read database configuration
# params = config()
# connect to the PostgreSQL database
url = urlparse(
"postgres://caedtehsggslri:4<EMAIL>ba<EMAIL>:5432/d5tmi1ihm5f6hv")
# print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=<PASSWORD>.password,
host=url.hostname,
port=url.port
)
# create a new cursor
cur = conn.cursor()
for x in list:
# execute the INSERT statement
cur.execute(sql, (x[0],x[1]))
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
#Cur.execute("truncate mytable;")
def insert_inquiry_details(division,country,master_prod,inquiry,customer_type,customer_channel,facilitated_unfacilitated,case_create_dt,case_clsd_dt,resp_id,response,user_mail_id,query_category):
sql = "INSERT INTO public.inquiry_data (Division,Country,Master_Prod ,Inquiry,Customer_Type,Customer_channel,Facilitated_Unfacilitated,Case_Create_Date,Case_Closed_Date,Resp_Id,Response,user_mail_id,query_category) VALUES(%s, %s, %s, %s, %s, %s,%s,%s, %s, %s, %s, %s, %s)";
try:
# read database configuration
#params = config()
# connect to the PostgreSQL database
url = urlparse(
"postgres://caedtehsggslri:<EMAIL>:5432/d5tmi1ihm5f6hv")
#print(url.path[1:])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=<PASSWORD>,
host=url.hostname,
port=url.port
)
# create a new cursor
cur = conn.cursor()
# execute the INSERT statement
cur.execute(sql,(division,country,master_prod,inquiry,customer_type,customer_channel,facilitated_unfacilitated,case_create_dt,case_clsd_dt,resp_id,response,user_mail_id,query_category))
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
@app.route('/speech')
def speech():
return redirect(url_for('static', filename='speech.html'))
@app.route('/visualization')
def visualization():
return redirect(url_for('static', filename='visualization.html'))
# @app.route('/inventory')
# def inventory():d
# return redirect(url_for('static_url', filename='index.html'))
@app.route('/emailAffair', methods=['POST'])
def emailAffair():
req = request.get_json(silent=True, force=True)
print("Request")
action = ""
speech = ""
productName = ""
print(json.dumps(req, indent=4))
if (req.get("inquiryQuestion") is not None or req.get("inquiryQuestion") != ""):
print(req.get("age"))
if req.get("age") == "":
age = "0"
else:
age = req.get("age")
if (req.get("location") == ""):
location = "India"
else:
location = req.get("location")
if (req.get("profession") == ""):
profession = "Doc"
else:
profession = req.get("profession")
values = json.dumps({
"lang": "en",
"query": req.get("inquiryQuestion")+" "+age+" "+profession+" "+location,
"sessionId": "12345",
"timezone": "America/New_York"
})
headers ={
'Content-Type': 'application/json',
'Authorization': 'Bearer '+accessToken
}
res = json.loads(requests.post(url=baseUrl, data=values,headers=headers).text)
print(res.get("result").get("fulfillment").get("speech"))
action = res.get("result").get("action")
speech = res.get("result").get("fulfillment").get("speech")
productName = res.get("result").get("parameters").get("ProductName")
res = json.dumps({
"category": action,
"response": speech,
"ProductName": productName
}, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
@app.route('/bookMeeting', methods=['POST'])
def medicalAffair():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if (req.get("queryResult").get("parameters").get("userid") != "") and (
req.get("queryResult").get("parameters").get("date") != "") and (
req.get("queryResult").get("parameters").get("geo-city") != "") and (
req.get("queryResult").get("parameters").get("number") != "") and (
req.get("queryResult").get("parameters").get("time-period") != ""):
if (req.get("queryResult").get("parameters").get("time-period").get("startTime") != "") and (
req.get("queryResult").get("parameters").get("time-period").get("endTime") != "") :
return {
"fulfillmentText": "Thank you for your time. We are processing your request, you will hear from us shortly",
"source": "agent"
}
if __name__ == '__main__':
#database = Database.Database()
#database.load("cognitiveSQL/database/HCM.sql")
#database.print_me()
#config = LangConfig.LangConfig()
#config.load("cognitiveSQL/lang/english.csv")
#parser = Parser.Parser(database, config)
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
port = int(os.getenv('PORT', 5001))
print("Starting app on port %d" % port)
#app.run(debug=True, port=port, host='0.0.0.0')
socketio.run(app, debug=True, port=port, host='0.0.0.0') | 2.140625 | 2 |
src/skallel_tensor/test/test_variants_dataframe.py | alimanfoo/skallel-tensor | 2 | 12771794 | <gh_stars>1-10
import zarr
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import dask.dataframe as dd
import pytest
from skallel_tensor.api import variants_to_dataframe
def setup_variants_numpy():
    """Build the variant fixture as a plain dict of NumPy arrays.

    Four variants with both 1-D fields (POS, ID, REF, QUAL, FILTER_PASS,
    DP) and 2-D, two-allele fields (ALT, AC).
    """
    return {
        "ID": np.array(["RS1", "RS3", "RS9", "RS11"]),
        "POS": np.array([3, 6, 18, 42]),
        "REF": np.array(["A", "C", "T", "G"]),
        "ALT": np.array([["C", ""], ["T", "G"], ["G", ""], ["CAA", "T"]]),
        "QUAL": np.array([3.4, 6.7, 18.1, 42.0]),
        "FILTER_PASS": np.array([True, True, False, True]),
        "DP": np.array([12, 23, 34, 45]),
        "AC": np.array([[12, 23], [34, 45], [56, 67], [78, 89]]),
    }
def setup_variants_zarr():
    """Return the NumPy variant fixture copied into an in-memory Zarr group."""
    root = zarr.group()
    for name, values in setup_variants_numpy().items():
        root.create_dataset(name, data=values)
    return root
def test_variants_to_dataframe():
    # Fixtures: the same data as numpy arrays and as a zarr group.
    variants = setup_variants_numpy()
    pdf = variants_to_dataframe(variants)
    ddf = variants_to_dataframe(setup_variants_zarr())
    # Numpy input yields a pandas frame; zarr input yields a dask frame.
    assert isinstance(pdf, pd.DataFrame)
    assert isinstance(ddf, dd.DataFrame)
    for frame in (pdf, ddf):
        # Multi-allele fields (ALT, AC) are expanded into numbered columns.
        assert frame.columns.tolist() == [
            "POS",
            "ID",
            "REF",
            "ALT_1",
            "ALT_2",
            "QUAL",
            "FILTER_PASS",
            "AC_1",
            "AC_2",
            "DP",
        ]
        # POS stays a regular column; it is not promoted to the index.
        assert frame.index.name is None
        # Column contents round-trip from the source arrays.
        checks = [
            ("POS", variants["POS"]),
            ("ID", variants["ID"]),
            ("REF", variants["REF"]),
            ("ALT_1", variants["ALT"][:, 0]),
            ("ALT_2", variants["ALT"][:, 1]),
            ("QUAL", variants["QUAL"]),
            ("FILTER_PASS", variants["FILTER_PASS"]),
            ("AC_1", variants["AC"][:, 0]),
            ("AC_2", variants["AC"][:, 1]),
            ("DP", variants["DP"]),
        ]
        for column, expected in checks:
            assert_array_equal(expected, np.asarray(frame[column]))
def test_variants_to_dataframe_columns():
    # Fixtures.
    variants = setup_variants_numpy()
    requested = ["REF", "POS", "DP", "AC"]
    pdf = variants_to_dataframe(variants, columns=requested)
    ddf = variants_to_dataframe(setup_variants_zarr(), columns=requested)
    assert isinstance(pdf, pd.DataFrame)
    assert isinstance(ddf, dd.DataFrame)
    for frame in (pdf, ddf):
        # Columns come back in the requested order, with AC expanded.
        assert frame.columns.tolist() == ["REF", "POS", "DP", "AC_1", "AC_2"]
        # POS is not used as the index here.
        assert frame.index.name is None
        # Data round-trips from the source arrays.
        for column, expected in [
            ("POS", variants["POS"]),
            ("REF", variants["REF"]),
            ("AC_1", variants["AC"][:, 0]),
            ("AC_2", variants["AC"][:, 1]),
            ("DP", variants["DP"]),
        ]:
            assert_array_equal(expected, np.asarray(frame[column]))
def test_variants_to_dataframe_exceptions():
    # Fixtures.
    variants_numpy = setup_variants_numpy()
    variants_zarr = setup_variants_zarr()
    # Non-mapping input and badly-typed arguments are rejected.
    with pytest.raises(TypeError):
        variants_to_dataframe("foo")
    with pytest.raises(TypeError):
        variants_to_dataframe(variants_numpy, columns="foo")
    with pytest.raises(TypeError):
        variants_to_dataframe(variants_numpy, columns=[42])
    with pytest.raises(TypeError):
        variants_to_dataframe({0: np.arange(10)})
    # Values of an unsupported container type cannot be dispatched.
    with pytest.raises(NotImplementedError):
        variants_to_dataframe({"bar": [1, 2, 3, 4]})
    # Requesting a field that is absent raises for both backends.
    for source in (variants_numpy, variants_zarr):
        with pytest.raises(ValueError):
            variants_to_dataframe(source, columns=["foo"])
    # Arrays with more than two dimensions trigger a warning.
    cube = np.arange(1000).reshape((10, 10, 10))
    variants_numpy["bar"] = cube
    variants_zarr.create_dataset("bar", data=cube)
    for source in (variants_numpy, variants_zarr):
        with pytest.warns(UserWarning):
            variants_to_dataframe(source)
| 2.140625 | 2 |
Python/geocoding/leaflet map/geocod_script.py | fugrusha/blackhole | 0 | 12771795 | <filename>Python/geocoding/leaflet map/geocod_script.py
from arcgis.gis import GIS
from arcgis.geocoding import batch_geocode
import pandas as pd
import csv
from time import sleep
# Log in to ArcGIS Online.
# SECURITY(review): credentials are hard-coded in source. Move them to
# environment variables or a config file and rotate this password.
gis = GIS("http://www.arcgis.com", "fugrusha", "NiWhuBTtRrFqk3X")
# Input CSV is prompted for; the output path is currently fixed.
input_file = input('Enter the name of the import csv file to geocode: ')
output_file = 'C://Users/andre/Anaconda3/Scripts/my_scripts/Script-geocoding/export.csv'
# Load the rows to geocode; the CSV must have an "Address" column.
data = pd.read_csv(input_file, encoding = 'utf8')
addresses = data["Address"].tolist()
# Batch-geocode all addresses in one request; returns one result dict per
# input address.
results = batch_geocode(addresses)
latcoords = []
longcoords = []
time_to_sleep_when_captcha = 5
# Extract coordinates, formatted to 4 decimal places.
for coordinates in results:
    try:
        longitude = "{:.4f}".format(float(coordinates['location']['x']))
        latitude = "{:.4f}".format(float(coordinates['location']['y']))
    except (KeyError, TypeError, ValueError):
        # Result had no usable location (failed match, throttling, ...).
        # Record a missing value so the coordinate lists stay aligned with
        # the input rows -- previously a failure here left the lists short
        # and the DataFrame column assignment below raised a ValueError.
        longitude = None
        latitude = None
        sleep(time_to_sleep_when_captcha)
        time_to_sleep_when_captcha += 1
    longcoords.append(longitude)
    latcoords.append(latitude)
# Attach the coordinates and write the enriched CSV.
data['Lat'] = latcoords
data['Long'] = longcoords
data.to_csv(output_file, encoding='utf-16')
rest_auth/__init__.py | lordpeara/django-rest-auth | 1 | 12771796 | <filename>rest_auth/__init__.py
"""
Django Rest Framework Auth provides very simple & quick way to adopt
authentication APIs' to your django project.
Rationale
---------
django-rest-framework's `Serializer` is nice idea for detaching
business logic from view functions. It's very similar to django's
``Form``, but serializer is not obligible for rendering response data,
and should not. - django forms also do this, seriously!!!
some expert beginners just know form is ONLY FOR `html form rendering` :(
Unluckily, even though django already provides forms and views
for authentication, We cannot use these for REST-APIs. It uses forms!!
(rest_framework does not use forms.)
We think there should be some serializers & views (or viewsets)
to use ``rest_framework``'s full features.
(such as throttling, pagination, versioning or content-negotiations)
Let's have a good taste of these elegant implementations.
API Endpoints
-------------
Below API endpoints can be re-configured if you write your urls.py
* POST /login/
* username
* password
authenticate user and persist him/her to website
* POST /logout/
let a user logged out.
.. NOTE::
Logout from HTTP GET is not implemented.
* POST /forgot/
* email
send a link for resetting password to user
* GET /reset/{uid64}/{token}/
* uid64, token - automatically generated tokens (when email is sent)
* new_password
* new_password (confirm)
reset a password for user
* GET /reset/d/
a view seen by user after resetting password
* POST /change-password/
* old_password
* new_password
* new_password (confirm)
change a password for user
* GET /api-root/
* see api lists
* POST /signup/
* username
* email
* password
* confirm_password
Create a user.
verification e-mail is sent when you set
``REST_AUTH_SIGNUP_REQUIRE_EMAIL_CONFIRMATION``
* GET /signup/v/{uid64}/{token}/
Verify user. After verification, user can use full features of websites.
"""
__version__ = '1.0.0'
default_app_config = 'rest_auth.apps.AppConfig'
| 2.5 | 2 |
run.py | capralifecycle/office-games-client | 0 | 12771797 | <gh_stars>0
#!/usr/bin/python
"""CLI dispatcher for the office-games maintenance commands."""
import sys
import os
from clint.arguments import Args
from commands.backup import backup
from commands.check_player_statistics import check_player_statistics
from commands.start import start
# Commands accepted on the command line. Every entry should have a matching
# branch in the dispatch below.
AVAILABLE_COMMANDS = [
    'backup',
    'check_player_statistics',
    'recalculate_player_rating',
    'start'
]
if __name__ == '__main__':
    # TODO: Get from ENV variable
    game_slug = 'ping-pong'
    # Make sibling packages importable when run from the project directory.
    sys.path.insert(0, os.path.abspath('..'))
    args = Args()
    # Normalize the command first so the membership check and the dispatch
    # agree on case (previously 'Backup' passed .lower() but failed the
    # membership test).
    command = args.all[0].lower() if len(args) > 0 else None
    if command not in AVAILABLE_COMMANDS:
        print(f'Unknown command. Available commands: {", ".join(AVAILABLE_COMMANDS)}')
        sys.exit(1)
    if command == 'backup':
        backup()
    elif command == 'check_player_statistics':
        check_player_statistics(game_slug)
    elif command == 'start':
        start(game_slug)
    else:
        # BUGFIX: 'recalculate_player_rating' was advertised but had no
        # handler, so it silently exited with success. Fail loudly instead.
        print(f'Command "{command}" is not implemented yet.')
        sys.exit(1)
    sys.exit(0)
| 2.46875 | 2 |
import_handler.py | LucidumInc/update-manager | 0 | 12771798 | import sys
import os
from loguru import logger
from config_handler import get_mongo_config
from docker_service import create_archive, get_docker_container
from exceptions import AppError
class MongoImportRunner:
    """Stages a local export file inside the ``mongo`` container and runs
    ``mongoimport`` against it.

    Instances are callables: ``runner(source, destination)`` imports the
    local file *source* into the Mongo collection *destination*.
    """
    # Directory inside the container where the source file is staged.
    container_dest_dir = "/home"
    def __call__(self, source, destination):
        """Import *source* (local file path) into collection *destination*.

        Raises:
            AppError: if staging the file or running mongoimport fails.
        """
        container = get_docker_container("mongo")
        # Docker's put_archive only accepts tar streams, so wrap the file.
        tar_stream = create_archive(source)
        success = container.put_archive(self.container_dest_dir, tar_stream)
        if not success:
            raise AppError(f"Putting '{source}' file to 'mongo' container was failed")
        container_filepath = f"{self.container_dest_dir}/{os.path.basename(source)}"
        # Double braces survive the f-string so the connection fields are
        # filled in later from get_mongo_config() via str.format.
        # NOTE(review): "{{<PASSWORD>}}" looks like a redacted placeholder;
        # str.format() will fail on the "<PASSWORD>" field name. Confirm the
        # real field name (e.g. "{{mongo_password}}") before shipping.
        import_cmd = f"mongoimport --username={{mongo_user}} --password={{<PASSWORD>}} --authenticationDatabase=test_database --host={{mongo_host}} --port={{mongo_port}} --db={{mongo_db}} --collection={destination} --file={container_filepath}"
        try:
            result = container.exec_run(import_cmd.format(**get_mongo_config()))
            if result.exit_code:
                raise AppError(result.output.decode('utf-8'))
        finally:
            # Always clean up the staged file, even if the import failed.
            rm_result = container.exec_run(f"rm {container_filepath}", user='root')
            if rm_result.exit_code:
                logger.warning(rm_result.output.decode('utf-8'))
def _get_import_runner(db):
if db == "mongo":
return MongoImportRunner()
@logger.catch(onerror=lambda _: sys.exit(1))
def run(db, source, destination):
    """CLI entry point: import file *source* into *destination* on engine *db*.

    Any exception is logged by loguru and converted into exit code 1 by the
    ``logger.catch`` decorator.
    """
    import_runner = _get_import_runner(db)
    import_runner(source, destination)
| 2.234375 | 2 |
src/graph.py | manucordova/ProbAsn | 6 | 12771799 | ####################################################################################################
### ###
### Functions for graph handling (creation, ...) ###
### Author: <NAME> (EPFL) ###
### Last modified: 19.08.2021 ###
### ###
####################################################################################################
# Import libraries
import networkx as nx
from openbabel import pybel as pb
import matplotlib.pyplot as plt
def make_mol(mol_in, in_type="smi", out_type="mol", from_file=False, name="", make_3d=False, save=None):
    """
    Build an OBMol from a molecule description and optionally write it out.

    Inputs:     - mol_in        Input molecule (string or file path)
                - in_type       Format of the input (e.g. "smi")
                - out_type      Format used for the optional saved file
                - from_file     Whether mol_in is a file path (True) or a
                                    literal string (False)
                - name          Name assigned to the molecule
                - make_3d       Generate 3D coordinates instead of 2D
                - save          If set, path to which the converted molecule
                                    is written

    Output:     - molecule      OBMol object of the molecule
    """
    # Parse either from a file or directly from the string.
    if from_file:
        molecule = next(pb.readfile(in_type, mol_in))
    else:
        molecule = pb.readstring(in_type, mol_in)
    molecule.title = name
    # Add implicit hydrogens explicitly.
    molecule.addh()
    # 3D coordinates for geometry work, 2D for drawing.
    (molecule.make3D if make_3d else molecule.make2D)()
    # Serialize to the requested output format.
    serialized = molecule.write(out_type)
    if save is not None:
        with open(save, "w") as out_handle:
            out_handle.write(serialized)
    return molecule
def get_bonds(mol):
    """
    Extract the element list and bond adjacency lists from a molecule.

    Input:      - mol           OBMol object of the molecule

    Outputs:    - atoms         List of element symbols, one per atom
                - bonds         For each atom, the indices of its bonded
                                    neighbours
    """
    # Work from the molfile (V2000) text representation.
    molblock_lines = mol.write("mol").split("\n")
    n_atoms = len(mol.atoms)
    # The atom block starts after the 4 header lines; the element symbol is
    # the 4th whitespace-separated field of each atom line.
    atoms = [line.split()[3] for line in molblock_lines[4:n_atoms + 4]]
    bonds = [[] for _ in range(n_atoms)]
    # The bond block follows the atom block.
    for line in molblock_lines[n_atoms + 4:]:
        fields = line.split()
        # Stop at "M  END" or at any line that is not a 7-field bond record.
        if "END" in line or len(fields) != 7:
            break
        # Molfile indices are 1-based; convert to 0-based and record the
        # bond symmetrically.
        a, b = (int(field) - 1 for field in fields[:2])
        bonds[a].append(b)
        bonds[b].append(a)
    return atoms, bonds
def identify_env(G):
    """
    Identify the environment of the central node (index 0) of a graph.

    Input:      - G             Input graph

    Output:     - env           Elements bonded to the central node, sorted
                                    alphabetically and joined with dashes
    """
    # Collect the element of the far endpoint of every edge touching node 0.
    neighbour_elems = [
        G.nodes[a if b == 0 else b]["elem"]
        for a, b in G.edges
        if a == 0 or b == 0
    ]
    return "-".join(sorted(neighbour_elems))
def generate_graph(atoms, bonds, i0, max_w, elems=["H", "C", "N", "O", "S"], hetatm="error", hetatm_rep=None):
    """
    Generate a graph from atom i0 using the list of atoms and bonds in the molecule

    Inputs: - atoms         List of atoms in the molecule
            - bonds         Bonded atoms for each atom in the molecule (by index)
            - i0            Index of the central atom in the graph
            - max_w         Maximum graph depth
            - elems         Allowed elements in the molecule
            - hetatm        Behaviour for handling unknown elements:
                                "error": raise an error
                                "ignore": ignore the atom
                                "replace": replace the atom with another element
                                "replace_and_terminate": replace the atom with another element and
                                    cut all bonds from this atom
            - hetatm_rep    Dictionary of replacements for unknown elements
                                (used only with hetatm set to "replace" or "replace_and_terminate")

    Outputs:    - G             Graph generated
                - env           Environment of the central node
    """
    # NOTE: the mutable default ``elems`` is only read, never mutated, so it
    # is safe as written.
    # The maximum depth should be at least one
    if max_w < 1:
        raise ValueError("max_weight should be at least 1, not {}".format(max_w))
    # Initialize graph object
    G = nx.Graph()
    # Add central node (graph node ids are 0..N-1; "ind" holds the atom index)
    G.add_node(0, elem=atoms[i0], ind=i0)
    # Initialize number of nodes in the graph and atom index of each node
    N = G.number_of_nodes()
    node_inds = [G.nodes[i]["ind"] for i in range(N)]
    # Breadth-first expansion: loop over all nodes already in the graph
    i = 0
    while i < N:
        # Identify the atoms bonded to that node
        for j in bonds[node_inds[i]]:
            at = atoms[j]
            # Handle invalid elements
            if at not in elems:
                # Raise an error
                if hetatm == "error":
                    raise ValueError("Invalid element found: {}".format(at))
                # Ignore the atom
                elif hetatm == "ignore":
                    continue
                # Replace the atom with another element
                elif hetatm == "replace":
                    at = hetatm_rep[at]
                # Replace the atom with another element and cut all bonds from this atom
                elif hetatm == "replace_and_terminate":
                    at = hetatm_rep[at]
                    # NOTE(review): this mutates the caller's ``bonds`` list
                    # in place, so later calls (e.g. from generate_graphs)
                    # see the truncated bonds -- confirm this is intended.
                    bonds[j] = []
                else:
                    raise ValueError("Invalid behaviour for unknown elements: {}".format(hetatm))
            # If a new node is found, add it to the graph
            if j not in node_inds:
                G.add_node(N, elem=at, ind=j)
                G.add_edge(i, N, w="1")
                # If the new node is too far away, remove it
                if nx.shortest_path_length(G, source=0, target=N) > max_w:
                    G.remove_node(N)
                # Otherwise, keep it and update the total number of nodes and the atom index of each node
                else:
                    N += 1
                    node_inds = [G.nodes[i]["ind"] for i in range(N)]
            # If the bonded node is already in the graph, just add the edge
            else:
                G.add_edge(i, node_inds.index(j), w="1")
        # Proceed to the next node
        i += 1
    # Get the environment of the central node
    env = identify_env(G)
    return G, env
def generate_graphs(atoms, bonds, elem, max_w, elems=["H", "C", "N", "O", "S"], hetatm="error", hetatm_rep=None):
    """
    Generate one graph per atom of element *elem* in the molecule.

    Inputs:     - atoms         List of atoms in the molecule
                - bonds         Bonded atoms for each atom (by index)
                - elem          Element for which to construct the graphs
                - max_w         Maximum graph depth
                - elems         Allowed elements in the molecule
                - hetatm        Behaviour for unknown elements (see
                                    generate_graph)
                - hetatm_rep    Replacement mapping for unknown elements

    Outputs:    - graphs        List of generated graphs
                - environments  Environment string of each central node
    """
    graphs = []
    environments = []
    # One graph per atom whose element matches the requested one.
    for center, symbol in enumerate(atoms):
        if symbol != elem:
            continue
        graph, environment = generate_graph(
            atoms, bonds, center, max_w,
            elems=elems, hetatm=hetatm, hetatm_rep=hetatm_rep,
        )
        graphs.append(graph)
        environments.append(environment)
    return graphs, environments
def cut_graph(G, w):
    """
    Return a copy of *G* keeping only nodes within depth *w* of node 0.

    Inputs:     - G             Input graph (nodes labelled 0..N-1)
                - w             Depth to cut to

    Output:     - trimmed       Cut graph
    """
    trimmed = G.copy()
    # Remove every node whose shortest path from the central node exceeds w.
    for node in range(len(G.nodes)):
        if nx.shortest_path_length(G, source=0, target=node) > w:
            trimmed.remove_node(node)
    return trimmed
def generate_hash(G):
    """
    Return the Weisfeiler-Lehman hash of *G* with the central node marked.

    The central node's element is replaced by the sentinel "Y" on a copy so
    that the hash distinguishes which node is central.

    Input:      - G             Input graph

    Output:     - H             Hash string corresponding to graph G
    """
    marked = G.copy()
    marked.nodes[0]["elem"] = "Y"
    return nx.algorithms.graph_hashing.weisfeiler_lehman_graph_hash(
        marked, edge_attr="w", node_attr="elem", iterations=5
    )
def print_graph(G, w, layout="kamada_kawai", base_color="C0", center_color="r", out_color="g", show=True, save=None):
    """
    Plot a graph at a given depth

    Inputs: - G             Networkx graph object
            - w             Maximum depth to display
            - layout        Node layout for plotting
            - base_color    Base color of nodes
            - center_color  Color of the central node
            - out_color     Color of nodes at the maximum graph depth
            - show          Whether the plot should be shown or not
            - save          If set, defines the file to save the plot to
                                (.png files are saved at 150 dpi)
    """
    # Cut the graph to the maximum depth
    cut_G = cut_graph(G, w)
    # Get the label (element) of each node
    labs = nx.get_node_attributes(cut_G, "elem")
    # Set node layout
    if layout == "kamada_kawai":
        # Get the position of each node on the plot
        pos = nx.kamada_kawai_layout(cut_G)
    elif layout == "circular":
        pos = nx.circular_layout(cut_G)
    elif layout == "planar":
        pos = nx.planar_layout(cut_G)
    elif layout == "random":
        pos = nx.random_layout(cut_G)
    elif layout == "shell":
        pos = nx.shell_layout(cut_G)
    elif layout == "spring":
        pos = nx.spring_layout(cut_G)
    elif layout == "spectral":
        pos = nx.spectral_layout(cut_G)
    elif layout == "spiral":
        pos = nx.spiral_layout(cut_G)
    else:
        raise ValueError("Unknown layout: {}".format(layout))
    # Initialize figure handle
    f = plt.figure(figsize=(6,5))
    ax = f.add_subplot(1,1,1)
    # Draw the nodes
    nx.draw_networkx_nodes(cut_G, pos, ax=ax, node_color=base_color)
    # Draw the central node in its own color (red by default)
    nx.draw_networkx_nodes(cut_G, pos, nodelist=[0], node_color=center_color, ax=ax)
    # Draw the nodes at exactly the maximum depth in their own color
    edge_nodes = []
    for i in range(cut_G.number_of_nodes()):
        if nx.shortest_path_length(cut_G, source=0, target=i) == w:
            edge_nodes.append(i)
    nx.draw_networkx_nodes(cut_G, pos, nodelist=edge_nodes, node_color=out_color, ax=ax)
    # Get the covalent bonds and H-bonds
    # NOTE(review): generate_graph stores covalent edge weights as the
    # string "1", so only edges explicitly added elsewhere with the integer
    # weight 0 (presumably H-bonds) match this test -- confirm the mixed
    # int/str weight convention is intended.
    H_e = []
    es = []
    for i, e in enumerate(cut_G.edges):
        if cut_G.edges[e]["w"] == 0:
            H_e.append(e)
        else:
            es.append(e)
    # Draw the edges (H-bonds dashed, covalent bonds solid)
    nx.draw_networkx_edges(cut_G, pos, edgelist=es, ax=ax)
    nx.draw_networkx_edges(cut_G, pos, edgelist=H_e, style="dashed", ax=ax)
    # Draw the labels on the nodes
    nx.draw_networkx_labels(cut_G, pos, labs, ax=ax)
    f.tight_layout()
    # Show the plot
    if show:
        plt.show()
    # Save the plot
    if save:
        if save.endswith(".png"):
            f.savefig(save, dpi=150)
        else:
            f.savefig(save)
    plt.close()
    return
| 2.84375 | 3 |
example.py | arjd/aio-peewee | 9 | 12771800 | <gh_stars>1-10
"""Example application
Requirements:
asgi-tools
uvicorn
Run the example with uvicorn:
$ uvicorn --port 5000 example:app
"""
import datetime as dt
from asgi_tools import App
from aiopeewee import PeeweeASGIPlugin
import peewee as pw
# Async-aware peewee plugin; the ``sqlite+async`` scheme enables aiopeewee's
# async SQLite driver.
db = PeeweeASGIPlugin(url='sqlite+async:///db.sqlite')
@db.register
class Visit(pw.Model):
    """A single page visit: client address plus creation timestamp."""
    # BUGFIX: pass the *callable* so the default is evaluated per row.
    # The original used ``dt.datetime.utcnow()``, which is evaluated once at
    # class-definition time, stamping every visit with the process start time.
    created = pw.DateTimeField(default=dt.datetime.utcnow)
    address = pw.CharField()
db.create_tables()
app = App()
@app.route('/')
async def visits_json(request):
    """Store the visit and load latest 10 visits."""
    # Record the caller's IP; ``request.client`` is a (host, port) pair.
    Visit.create(address=request.client[0])
    # Return the ten most recent visits, newest first.
    return [{
        'id': v.id, 'address': v.address, 'timestamp': round(v.created.timestamp()),
    } for v in Visit.select().order_by(Visit.id.desc()).limit(10)]
# Wrap the ASGI app so each request runs with a managed database connection.
app = db.middleware(app)
| 2.734375 | 3 |
portals/wwits/groups/access/function_authorize_list/models.py | jalanb/portals | 0 | 12771801 | from dataclasses import dataclass
from typing import List
from portals.wwits.mixins import WWITSBaseMixin
@dataclass
class ParmModel:
    """Common request/response envelope parameters for WWITS calls."""
    Version: str    # API version identifier
    Env: str        # target environment
    UserID: str     # caller's user id
    Session: int    # session number
    Source: str     # originating system
    RC: int         # return code (0 presumably means success -- confirm)
    ResultMsg: str  # human-readable result message
@dataclass
class FunctionModel(WWITSBaseMixin):
    """A single authorizable function entry."""
    FunctionName: str  # name of the function the user may invoke
@dataclass
class FunctionAuthorizeListModel:
    """Response payload: envelope parameters plus the authorized functions."""
    Parms: ParmModel               # response envelope/metadata
    DataList: List[FunctionModel]  # functions the user is authorized to use
| 2.109375 | 2 |
ocs_ci/utility/workloads/fio.py | zmc/ocs-ci | 0 | 12771802 | """
This module implements all the functionalities required for setting up and
running Fio workloads on the pods.
This module implements few functions
setup(): for setting up fio utility on the pod and any necessary
environmental params.
run(): for running fio on pod on specified mount point
Note: The above mentioned functions will be invoked from Workload.setup()
and Workload.run() methods along with user provided parameters.
"""
import logging
from ocs_ci.ocs import exceptions
log = logging.getLogger(__name__)
DISTROS = {"Debian": "apt-get", "RHEL": "yum"}
def find_distro(io_pod):
    """
    Find what the OS distro on the pod is by probing for its package manager.

    Args:
        io_pod (Pod): app pod object

    Returns:
        distro (str): representing 'Debian' or 'RHEL' as of now.
            NOTE(review): falls through and implicitly returns None when no
            known package manager is found; setup() would then fail with
            ``KeyError: None`` on DISTROS[distro] -- confirm that is
            acceptable.
    """
    for distro, pkg_mgr in DISTROS.items():
        try:
            # ``which <pkg_mgr>`` succeeds only on the matching distro family.
            io_pod.exec_cmd_on_pod(f"which {pkg_mgr}", out_yaml_format=False)
        except exceptions.CommandFailed:
            log.debug(f"Distro is not {distro}")
        else:
            return distro
def setup(**kwargs):
    """
    setup fio workload

    Args:
        **kwargs (dict): fio setup configuration.
            At this point in time only argument present in kwargs will be
            'pod' on which we want to setup. In future if we move to
            containerized fio then pod.yaml will be presented in kwargs.

    Returns:
        The output of the fio install command.
        NOTE(review): the original docstring claimed a bool; the value
        returned is whatever exec_cmd_on_pod yields for the install command.
    """
    io_pod = kwargs['pod']
    # For first cut doing simple fio install
    distro = find_distro(io_pod)
    # NOTE(review): raises KeyError if find_distro() returned None.
    pkg_mgr = DISTROS[distro]
    if distro == 'Debian':
        # Debian package lists must be refreshed before installing.
        cmd = f'{pkg_mgr} update'
        io_pod.exec_cmd_on_pod(cmd, out_yaml_format=False)
    cmd = f"{pkg_mgr} -y install fio"
    return io_pod.exec_cmd_on_pod(cmd, out_yaml_format=False)
def run(**kwargs):
    """
    Run fio on a pod with the parameters supplied in kwargs.

    The default parameter list can be found in
    templates/workloads/fio/workload_io.yaml and can be updated by the user
    as required.

    Args:
        kwargs (dict): 'pod', 'type' ('fs' or raw block), 'path' (mount
            point or device path) plus arbitrary fio options.

    Returns:
        The output of the fio command (JSON text).
    """
    pod = kwargs.pop('pod')
    storage_type = kwargs.pop('type')
    mount_path = kwargs.pop('path')
    options = []
    for key, value in kwargs.items():
        if key == 'filename':
            # On a filesystem the target file lives under the mount point;
            # for a raw block device fio targets the device path directly.
            target = f"{mount_path}/{value}" if storage_type == 'fs' else mount_path
            options.append(f" --{key}={target}")
        else:
            options.append(f" --{key}={value}")
    fio_cmd = "fio" + "".join(options) + " --output-format=json"
    log.info(f"Running cmd: {fio_cmd}")
    return pod.exec_cmd_on_pod(fio_cmd, out_yaml_format=False)
| 2.390625 | 2 |
alembic/versions/6458bd3b46dc_add_pending_flag.py | evz/budget | 1 | 12771803 | """Add pending flag
Revision ID: 6458bd3b46dc
Revises: <PASSWORD>
Create Date: 2017-11-26 07:32:09.814699
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``pending`` boolean flag to the ``iou`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('iou', sa.Column('pending', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Remove the ``pending`` flag added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('iou', 'pending')
    # ### end Alembic commands ###
| 1.335938 | 1 |
extra_models.py | vincnt/tcn-audio-fx | 0 | 12771804 | <reponame>vincnt/tcn-audio-fx
import torch
import torch.nn as nn
from models import BaseTCN, TCN, CausalConv1d, _conv_stack
from model_utilities import GatedActivation
class OutputcoderTCN(TCN):
    """TCN whose output projection is a small gated-conv decoder
    (num_channels -> 64 -> 32 -> 16 -> 1) instead of a single 1x1 conv.

    NOTE(review): the decoder assumes each GatedActivation halves the
    channel count (64 -> 32, 32 -> 16, 16 -> 8) -- confirm against the
    GatedActivation implementation.
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False,conditioning_structure='shallow'):
        super(OutputcoderTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual,conditioning_structure)
        # Replace the base class's output layer with a gated 1x1-conv stack.
        self.output_layer = torch.nn.Sequential(
            CausalConv1d(in_channels=num_channels,out_channels=64,kernel_size=1,bias=bias),
            GatedActivation(num_channels=32),
            CausalConv1d(in_channels=32,out_channels=32,kernel_size=1,bias=bias),
            GatedActivation(num_channels=16),
            CausalConv1d(in_channels=16,out_channels=16,kernel_size=1,bias=bias),
            GatedActivation(num_channels=8),
            CausalConv1d(in_channels=8,out_channels=1,kernel_size=1,bias=bias)
        )
class DeepOutputcoderTCN(TCN):
    """TCN whose output projection is a deep ReLU 1x1-conv stack
    (num_channels -> 64 -> 128 -> 64 -> 32 -> 8 -> 4 -> num_channels -> 1).
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False,conditioning_structure='shallow'):
        super(DeepOutputcoderTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual,conditioning_structure)
        # Replace the base class's output layer with a deeper MLP-like stack
        # of pointwise (kernel_size=1) causal convolutions.
        self.output_layer = torch.nn.Sequential(
            CausalConv1d(in_channels=num_channels,out_channels=64,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=64,out_channels=128,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=128,out_channels=64,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=64,out_channels=32,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=32,out_channels=8,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=8,out_channels=4,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=4,out_channels=num_channels,kernel_size=1,bias=bias),
            torch.nn.ReLU(),
            CausalConv1d(in_channels=num_channels,out_channels=1,kernel_size=1,bias=bias)
        )
class NormedTCN(BaseTCN):
    """TCN variant that L2-normalizes activations along the last (time)
    dimension after each stage of every residual block and at the output.

    Unlike most siblings, forward() applies no tanh and returns
    ``(output, None)`` (no auxiliary data dict).
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False,conditioning_structure='shallow'):
        super(NormedTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual,conditioning_structure)
    def forward(self, x, cond_params):
        out = self.input_layer(x)
        for hidden, residual in zip(self.hidden, self.residuals):
            skip_x = out
            out_hidden = hidden(skip_x)
            # BUGFIX: FiLM conditioning was applied to the stale pre-block
            # tensor and its result immediately overwritten by activ();
            # condition the hidden activations instead, matching the sibling
            # variants in this file. TODO confirm against training intent.
            if self.conditioning:
                out_hidden = self.film(out_hidden, cond_params)
            out = self.activ(out_hidden)
            out = torch.nn.functional.normalize(out, dim=-1)
            # BUGFIX: the normalized residual branch was computed and then
            # discarded in favor of a second, un-normalized residual() call;
            # use the normalized branch (and avoid the duplicate compute).
            res = torch.nn.functional.normalize(residual(skip_x), dim=-1)
            out = out + res
            out = torch.nn.functional.normalize(out, dim=-1)
        output = self.output_layer(out)
        output = torch.nn.functional.normalize(output, dim=-1)
        return output, None
class ParallelTCN(BaseTCN):
    """TCN with a second, residual-free dilated-conv stack running in
    parallel with the base residual stack; the two branch outputs are
    concatenated before the final 1x1 projection.
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False,conditioning_structure='shallow'):
        super(ParallelTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual,conditioning_structure)
        self.num_channels = num_channels
        self.conditioning = conditioning
        # Dilation schedule repeated num_repeat times (e.g. 1,2,4,...).
        dilations = [dilation_factor ** d for d in range(dilation_depth)] * num_repeat
        # Gated activations consume twice the channels and halve them.
        internal_channels = int(num_channels * 2) if 'gated' in activation else int(num_channels)
        if grouping == 'all':
            groups = 1
        elif grouping == 'local_out':
            groups = internal_channels
        else:
            groups = num_channels
        # NOTE(review): the line below overwrites the if/elif above, making
        # the 'local_out' branch dead code -- confirm which grouping rule is
        # intended before removing either.
        groups = 1 if grouping == 'all' else num_channels
        self.hidden_parallel = _conv_stack(dilations, num_channels, internal_channels, kernel_size, groups=groups, bias=bias)
        # Final projection takes the concatenation of both branches.
        self.output_layer = CausalConv1d(in_channels=num_channels*2,out_channels=1,kernel_size=1,bias=bias)
    def forward(self, x, cond_params):
        out = x
        out = self.input_layer(out) # this wasnt there before I think
        # Branch 1: standard residual TCN stack.
        for hidden, residual in zip(self.hidden, self.residuals):
            skip_x = out
            out_hidden = hidden(skip_x)
            if self.conditioning:
                out_hidden = self.film(out_hidden, cond_params)
            out = self.activ(out_hidden)
            out = out + residual(skip_x)
        # Branch 2: plain stacked dilated convs (no residual connections).
        parallel_out = self.input_layer(x)
        for hidden in self.hidden_parallel:
            parallel_out = hidden(parallel_out)
            if self.conditioning:
                parallel_out = self.film(parallel_out, cond_params)
            parallel_out = self.activ(parallel_out)
        # Concatenate branch outputs along the channel dimension.
        combined_out = torch.cat([out, parallel_out],dim=1)
        data = {'pre_out':combined_out}
        output = self.output_layer(combined_out)
        return torch.tanh(output), data
# input into each layer rather than cumulative residual
class Parallel2TCN(BaseTCN):
    """TCN variant where every hidden layer receives the *original* encoded
    input (skip_x is fixed before the loop) rather than the accumulated
    output of the previous block.
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False):
        super(Parallel2TCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual)
    def forward(self, x, cond_params):
        out = x
        out = self.input_layer(out) # this wasnt there before I think
        # Fixed: every layer sees the same encoded input.
        skip_x = out
        for hidden, residual in zip(self.hidden, self.residuals):
            out_hidden = hidden(skip_x)
            out = self.activ(out_hidden)
            # Conditioning applied post-activation in this variant.
            if self.conditioning:
                out = self.film(out, cond_params)
            out = out + residual(skip_x)
        pre_out_data = out
        output = self.output_layer(out)
        data = {'pre_out':pre_out_data}
        return torch.tanh(output), data
class NotanhTCN(BaseTCN):
    """TCN variant that replaces the final tanh with an L2 normalization of
    the output along the last (time) dimension.
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film',force_local_residual=False):
        super(NotanhTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual)
    def forward(self, x, cond_params):
        out = x
        out = self.input_layer(out) # this wasnt there before I think
        for hidden, residual in zip(self.hidden, self.residuals):
            skip_x = out
            out_hidden = hidden(skip_x)
            out = self.activ(out_hidden)
            # Conditioning applied post-activation in this variant.
            if self.conditioning:
                out = self.film(out, cond_params)
            out = out + residual(skip_x)
        pre_out_data = out
        output = self.output_layer(out)
        data = {'pre_out':pre_out_data}
        # Normalize instead of squashing with tanh.
        output = torch.nn.functional.normalize(output, dim=-1)
        return output, data
class OctopusTCN(BaseTCN):
    """TCN variant that feeds the same encoded input to every block,
    concatenates all per-block outputs along the channel dimension, and
    projects the concatenation through a mixer conv then a 1x1 output conv.

    No tanh is applied and no auxiliary data dict is returned.
    """
    def __init__(self, num_channels, dilation_depth, num_repeat, kernel_size=2, dilation_factor=2, conditioning=False, num_conditioning=3, activation='gated', grouping='all', bias=True, conditioning_type='basic_film', force_local_residual=False):
        super(OctopusTCN, self).__init__(num_channels, dilation_depth, num_repeat, kernel_size, dilation_factor, conditioning, num_conditioning, activation, grouping, bias, conditioning_type,force_local_residual)
        # One num_channels-wide output per block is concatenated below.
        mix_channels = num_channels * dilation_depth * num_repeat
        groups = 1 if grouping == 'all' else num_channels
        # Mixes the concatenated per-block outputs channel-wise.
        self.output_mixer = nn.Conv1d(
            in_channels= mix_channels,
            out_channels=mix_channels,
            kernel_size=1,
            groups=groups
        )
        self.output_layer = nn.Conv1d(
            in_channels= mix_channels,
            out_channels=1,
            kernel_size=1,
        )
    def forward(self, x, cond_params):
        out = x
        out = self.input_layer(out) # this wasnt there before I think
        # Every block sees the same encoded input (no cumulative residual).
        skip_x = out
        layer_outs = []
        for hidden, residual in zip(self.hidden, self.residuals):
            out_hidden = hidden(skip_x)
            if self.conditioning:
                out_hidden = self.film(out_hidden, cond_params)
            out = self.activ(out_hidden)
            layer_out = out + residual(skip_x)
            layer_outs.append(layer_out)
        # Concatenate all per-block outputs along the channel dimension.
        out = torch.cat(layer_outs, dim=1)
        out = self.output_mixer(out)
        out = self.output_layer(out)
        return out, None
bumblebee_status/modules/contrib/http_status.py | rosalogia/bumblebee-status | 1,089 | 12771805 | # pylint: disable=C0111,R0903
"""Display HTTP status code
Parameters:
* http__status.label: Prefix label (optional)
* http__status.target: Target to retrieve the HTTP status from
* http__status.expect: Expected HTTP status
contributed by `valkheim <https://github.com/valkheim>`_ - many thanks!
"""
from requests import head
import psutil
import core.module
import core.widget
import core.decorators
class Module(core.module.Module):
    """bumblebee-status widget: polls a URL with an HTTP HEAD request and
    displays the returned status code, flagging mismatches with the
    configured expectation.
    """
    UNK = "UNK"  # sentinel status used when the request itself fails
    @core.decorators.every(seconds=30)  # re-poll the target every 30 s
    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.output))
        self.__label = self.parameter("label")
        self.__target = self.parameter("target")
        self.__expect = self.parameter("expect", "200")
    def labelize(self, s):
        """Prefix *s* with the configured label, if one was given."""
        if self.__label is None:
            return s
        return "{}: {}".format(self.__label, s)
    def getStatus(self):
        """HEAD the target and return its status code as a string (UNK on any error)."""
        try:
            res = head(self.__target)
        except Exception as e:
            print(e)
            return self.UNK
        else:
            status = str(res.status_code)
            return status
    def getOutput(self):
        """Build the widget text; appends '!= expected' when the check fails."""
        if self.__status == self.__expect:
            return self.labelize(self.__status)
        else:
            reason = " != {}".format(self.__expect)
            return self.labelize("{}{}".format(self.__status, reason))
    def output(self, widget):
        return self.__output
    def update(self):
        # invoked periodically (see decorator on __init__); caches both the
        # raw status and the rendered text for output()/state()
        self.__status = self.getStatus()
        self.__output = self.getOutput()
    def state(self, widget):
        if self.__status == self.UNK:
            return "warning"
        if self.__status != self.__expect:
            return "critical"
        # NOTE(review): returning the rendered output text as the "all good"
        # state looks odd for a state() hook; confirm against the
        # core.module.Module.state contract (other modules return None/"good").
        return self.__output
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 2.6875 | 3 |
aristotle/__main__.py | secureworks/aristotle | 12 | 12771806 | from .aristotle import main as aristotle_main
aristotle_main()
| 1.085938 | 1 |
spike/backends/x86_32/__init__.py | spikeio/lang | 1 | 12771807 | <filename>spike/backends/x86_32/__init__.py<gh_stars>1-10
from CodeGenerator import CodeGenerator
| 1.234375 | 1 |
{{cookiecutter.project_name}}/{{cookiecutter.project_slug}}/service_layer.py | gbozee/api-scaffold | 0 | 12771808 | import jwt
from {{cookiecutter.project_slug}} import types, settings
async def verify_access_token(bearer_token, **kwargs) -> types.AuthorizedUser:
    """Validate a bearer JWT and map it onto an authenticated user.

    The token is first decoded *without* verification only to peek at its
    ``aud`` claim, then decoded again (and actually verified) through
    decode_access_token, passing the audience along when present.
    """
    # unverified peek at the claims (PyJWT 1.x style API;
    # NOTE(review): PyJWT 2.x replaced verify=False with
    # options={"verify_signature": False} -- confirm the pinned version)
    init = jwt.decode(bearer_token, verify=False)
    if "aud" in init:
        # NOTE(review): assumes "aud" is a list; RFC 7519 also allows a plain
        # string, in which case init["aud"][0] would be its first character.
        decoded_value = decode_access_token(bearer_token, audience=init["aud"][0])
    else:
        decoded_value = decode_access_token(bearer_token)
    return types.AuthorizedUser(
        user=decoded_value["email"], auth_roles=["authenticated"]
    )
def decode_access_token(access_token, **kwargs):
    """Decode *access_token*, verifying its HS256 signature with the app secret.

    Extra keyword arguments (e.g. ``audience=...``) are forwarded to
    ``jwt.decode``; PyJWT raises on invalid/expired tokens.
    """
    ALGORITHM = "HS256"
    # (the unused local ``access_token_jwt_subject`` was dead code and is gone)
    return jwt.decode(
        access_token, str(settings.SECRET_KEY), algorithms=ALGORITHM, **kwargs
    )
async def demo(post_data, query_params=None, headers=None) -> types.SampleResult:
    """Demo handler: ignores every input and answers with a canned payload."""
    payload = {'hello': 'world'}
    return types.SampleResult(data=payload)
# Route table consumed by the service framework: URL path -> handler + methods.
# (The original closing brace had stray table residue fused onto it, which
# made the file unparsable.)
service = {
    "/demo": {"func": demo, "methods": ["GET"]},
}
examples/commandline.py | eronde/vim_suggest | 0 | 12771809 | """commandline.
Usage:
commandline.py lookup --lang=<lang> --word=<preword> [--ip=<redis-ip>] [--port=<redis-port>]
commandline.py -h | --help
Options:
-h --help Show this screen.
--lang=<lang> Language of suggested word.
--word=<preword> Pre-word of suggested word.
  --ip=<redis-ip>       IP of redis server (Default: 172.17.0.3)
  --port=<redis-port>   Port of redis server (Default: 6379)
"""
import redis
import py_word_suggest
# from config import REDIS_IP
from docopt import docopt
def main():
    """CLI entry point: parse arguments, connect to redis, and look up the
    suggested words for the requested language/pre-word."""
    arguments = docopt(__doc__, version='commandline 0.0.1')
    # fall back to the documented defaults when the options are omitted
    port = arguments['--port'] or 6379
    host = arguments['--ip'] or '172.17.0.3'
    client = redis.StrictRedis(host=host, port=port, db=0)
    try:
        selector = py_word_suggest.Selector_redis(client)
    except Exception as e:
        print("{e} Fail to connect to: {ip}:{port}".format(e=e, ip=host, port=port))
        exit(1)
    if not arguments['lookup']:
        return
    key = 'lang:{l}:gram:2:{w}'.format(l=arguments['--lang'], w=arguments['--word'])
    try:
        fetched = selector.gen_fetchWords(key)
    except Exception as e:
        print("{e}".format(e=e))
        exit(1)
    print("'{w}' has the following suggested words:\n".format(w=arguments['--word']))
    print(list(selector.gen_suggestWord(*fetched)))


if __name__ == "__main__":
    main()
| 3.171875 | 3 |
service/tests/__init__.py | tuub/jper | 2 | 12771810 | <reponame>tuub/jper<filename>service/tests/__init__.py<gh_stars>1-10
"""
This module contains all of the system tests and associated fixtures
"""
| 1.21875 | 1 |
safetylinespackage/__init__.py | MCasteras/safetylinespackage | 0 | 12771811 |
import json
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.metrics import confusion_matrix
from safetylinespackage.safetylinespackage import generate,get_metric,learn,learn,predict,target_statistics,features_statistics,correlation,statistics,print_correlations
| 1.632813 | 2 |
SEIP/Segmentation.py | seip-tool/seip | 0 | 12771812 | <reponame>seip-tool/seip
import numpy as np
import pandas as pd
from multiprocessing import Process, Queue, current_process, freeze_support, cpu_count
from NW import NW
from DTW import DTW
from utils import *
class Segmentation(object):
    """Segments one byte sequence against another using a pairwise alignment
    executor (NW or DTW), optionally fanning the per-suffix alignments out
    over a pool of worker processes.
    """
    def __init__(self, executor, cpu = 0):
        # mode is derived from the executor's class name ('NW' or 'DTW')
        self.mode = type(executor).__name__
        self.executor = executor
        self.cpu = cpu  # 0 -> single process; anything else -> process pool
    def execute(self, onestr, twostr):
        """Segment ``onestr`` by aligning each of its suffixes to ``twostr``.

        Returns ``(one_seqs, onepos, final_score)``: the extracted segments of
        ``onestr``, the cut positions, and the accumulated alignment score.
        """
        # print('twostrseg:',twostr)
        # print('twostrtype:',type(twostr[0]))
        str_one = strpreprocess(onestr,'bytelist')
        str_two = strpreprocess(twostr,'bytelist')
        # print('seg: ',str_two)
        # final_matrix[i, j]: score of aligning str_one[i:j+1] against str_two
        f_index = [i for i in range(len(str_one))]
        f_columns = [i for i in range(len(str_one))]
        final_matrix=pd.DataFrame(np.zeros([len(str_one),len(str_one)]),index=f_index,columns=f_columns)
        ##################
        if self.cpu == 0:
            # single-process path: one full alignment per suffix of str_one
            if self.mode == 'NW':
                for i in range(len(str_one)):
                    score_matrix = self.executor.execute(str_one[i:], str_two, 0)
                    scores_temp = list(score_matrix.iloc[1:,len(str_two)].values[:])
                    final_matrix.iloc[i,:] = [np.nan for k in range(i)] + scores_temp
            else:
                for i in range(len(str_one)):
                    score_matrix = self.executor.execute(str_one[i:], str_two, 0)
                    scores_temp = list(score_matrix.iloc[:,len(str_two)-1].values[:])
                    # apply a length compensation to every value (square the score)
                    for j in range(len(scores_temp)):
                        scores_temp[j] = scores_temp[j]**2
                    final_matrix.iloc[i,:] = [np.nan for k in range(i)] + scores_temp
        ##################
        else:
            # multi-process path: same alignments, dispatched through queues
            PROCESSES = cpu_count()
            if PROCESSES > 20:
                PROCESSES = 20
            #NW for every line
            TASKS1 = [(self.executor.execute, (str_one[i:], str_two, 0), i) for i in range(len(str_one))]
            task_queue = Queue()
            done_queue = Queue()
            # Submit tasks
            for task in TASKS1:
                task_queue.put(task)
            # Start worker processes
            for i in range(PROCESSES):
                Process(target=worker, args=(task_queue, done_queue)).start()
            # Get and print results
            # print('Unordered results:')
            for i in range(len(TASKS1)):
                # temp = (score_matrix, row_index) -- results arrive unordered
                temp = done_queue.get()
                # print('temp: ', temp)
                if self.mode == 'NW':
                    scores_temp = list(temp[0].iloc[1:,len(str_two)].values[:])
                elif self.mode == 'DTW':
                    scores_temp = list(temp[0].iloc[:,len(str_two)-1].values[:])
                    for j in range(len(scores_temp)):
                        scores_temp[j] = scores_temp[j]**2
                else:
                    # NOTE(review): on an unknown mode this only prints and then
                    # reuses (or fails to define) scores_temp -- confirm modes
                    # are restricted to NW/DTW upstream.
                    print('mode error!')
                final_matrix.iloc[temp[1],:] = [np.nan for k in range(temp[1])] + scores_temp
            for i in range(PROCESSES):
                task_queue.put('STOP')
            ###################
        # print('final_matrix:',final_matrix)
        ################ update
        # for i in range(1,len(str_one)):
        #     for j in range(i,len(str_one)):
        #         if i == j:
        #             temp = final_matrix.iloc[i,j] + final_matrix.iloc[i-1,j-1]
        #             temp_list = list(final_matrix.iloc[:i-1,j].values) + [temp]
        #             if self.mode == 'NW':
        #                 final_matrix.iloc[i,j] = max(temp_list)
        #             elif self.mode == 'DTW':
        #                 final_matrix.iloc[i,j] = min(temp_list)
        #             else:
        #                 print('mode error!')
        #         else:
        #             final_matrix.iloc[i,j] += final_matrix.iloc[i-1,i-1]
        ###add the max last column
        # dynamic-programming accumulation: each cell absorbs the best score of
        # any segmentation ending just before its row
        for i in range(1,len(str_one)):
            for j in range(i,len(str_one)):
                final_matrix.iloc[i,j] += max(list(final_matrix.iloc[:i,i-1]))
        # print("final_matrix_update:",final_matrix)
        #####################
        final_score = final_matrix.iloc[len(str_one)-1,len(str_one)-1]
        # print("final_score:",final_score)
        i = len(str_one)-1
        final_pos = []
        final_pos.append(i+1)
        #########
        # while(i>0):
        #     temp = list(final_matrix.iloc[:,i].values)[:i]
        #     # temp_index = temp.index(max(temp))
        #     if self.mode == 'NW':
        #         temp_index = temp.index(max(temp))
        #     elif self.mode == 'DTW':
        #         temp_index = temp.index(min(temp))
        #     else:
        #         print('mode error!')
        #     # print("temp:",temp)
        #     # print("temp_index:",temp_index)
        #     final_pos.append(temp_index)
        #     if temp_index == 0:
        #         break
        #     i = temp_index - 1
        ###
        # backtracking: walk from the last column to the first, picking the
        # best-scoring predecessor row as the previous cut position
        while(i>0):
            temp = list(final_matrix.iloc[:,i].values)[:i+1]
            # temp_index = temp.index(max(temp))
            if self.mode == 'NW':
                temp_index = temp.index(max(temp))
            elif self.mode == 'DTW':
                temp_index = temp.index(min(temp))
            else:
                print('mode error!')
            # print("temp:",temp)
            # print("temp_index:",temp_index)
            final_pos.append(temp_index)
            if temp_index == 0:
                break
            i = temp_index - 1
        #######
        final_pos = final_pos[::-1]
        # print("final_pos_t:",final_pos)
        onepos = final_pos
        # make sure the last cut reaches the end of the sequence
        if len(str_one) > onepos[-1]:
            onepos[-1] = len(str_one)
            # print('xiuzheng')
        one_seqs = []
        # one_seqs.append(str_one_ogl[:onepos[0]])
        for i in range(len(onepos)-1):
            one_seqs.append(str_one[onepos[i]:onepos[i+1]])
        # print('one_seqs:', one_seqs)
        return one_seqs,onepos,final_score
| 2.4375 | 2 |
src_old/tests/scripts/inprogress/itertools/ex3.py | toddrme2178/pyccel | 0 | 12771813 | <gh_stars>0
# coding: utf-8
from pyccel.stdlib.parallel.openmp import omp_get_thread_num
#$ header class StopIteration(public, hide)
#$ header method __init__(StopIteration)
#$ header method __del__(StopIteration)
class StopIteration(object):
    """Placeholder that deliberately shadows the builtin StopIteration so the
    pyccel translator (see the '#$ header' pragmas in this file) has a
    concrete class definition to lower."""
    def __init__(self):
        pass
    def __del__(self):
        pass
#$ header class Range(public, iterable, openmp)
#$ header method __init__(Range, int, int, int, bool, int, str [:], str [:], str [:], str [:], str [:], int, str [:])
#$ header method __del__(Range)
#$ header method __iter__(Range)
#$ header method __next__(Range)
class Range(object):
    """OpenMP-annotated iterable consumed by the pyccel translator; the extra
    keyword arguments map onto OpenMP loop clauses (private, reduction,
    schedule, ...).  Plain-Python execution is not the goal here."""
    def __init__(self, start, stop, step, nowait=None, collapse=None,
                 private=None, firstprivate=None, lastprivate=None,
                 reduction=None, schedule=None, ordered=None, linear=None):
        # loop bounds
        self.start = start
        self.stop = stop
        self.step = step
        # OpenMP clause parameters, stashed on private attributes
        self._ordered = ordered
        self._private = private
        self._firstprivate = firstprivate
        self._lastprivate = lastprivate
        self._linear = linear
        self._reduction = reduction
        self._schedule = schedule
        self._collapse = collapse
        self._nowait = nowait
        self.i = start
    def __del__(self):
        print('> free')
    def __iter__(self):
        # NOTE(review): resets the counter to 0 rather than self.start, and
        # does not return self -- incomplete as a plain-Python iterator;
        # presumably only the pyccel-translated form is executed.  Confirm.
        self.i = 0
    def __next__(self):
        # NOTE(review): advances the counter but never returns the value --
        # again only meaningful once translated by pyccel.
        if (self.i < self.stop):
            i = self.i
            self.i = self.i + 1
        else:
            raise StopIteration()
# Parallel reduction demo: x accumulates 2*i over i in [-2, 5).  The '#$ omp'
# pragmas are consumed by pyccel; expected x = 14 if Range iterates from
# `start` (see the NOTE on Range.__iter__, which resets to 0 instead).
x = 0.0
#$ omp parallel
for i in Range(-2, 5, 1, nowait=True, private=['i', 'idx'], reduction=['+', 'x'], schedule='static', ordered=True):
    idx = omp_get_thread_num()
    x += 2 * i
    # print("> thread id : ", idx, " working on ", i)
#$ omp end parallel
print('x = ', x)
| 2.65625 | 3 |
language/syntax/_syntax/tests/test_expr.py | jedhsu/language | 0 | 12771814 | from ...symbol.symbol import Symbol
from ..expr import Atom, ForwardIs, Option, Seq, ZeroOrMore, _Expr, _Expr_, _Op_
class Test_Op_:
    def test_sequence(self):
        # TODO(review): empty stub -- add assertions exercising _Op_ sequencing.
        ...
class TestAtom:
    def test_init(self):
        # smoke test: construction must not raise (no value assertion made)
        expr = Atom(Symbol("A"))
    def test_mro(self):
        # an Atom must sit inside the full expression-type hierarchy
        expr = Atom(Symbol("A"))
        assert isinstance(expr, Atom)
        assert isinstance(expr, _Expr_)
        assert isinstance(expr, _Op_)
        assert isinstance(expr, _Expr)
class TestSeq:
    # shared fixtures for all Seq tests
    expr1 = Atom(Symbol("A"))
    expr2 = Atom(Symbol("B"))
    seq = Seq(expr1, expr2)
    def test_init(self):
        assert self.seq.expr1 == Atom(Symbol("A"))
        assert self.seq.expr2 == Atom(Symbol("B"))
    def test_class_getitem(self):
        # Seq[a, b] subscription must behave like the constructor
        seq = Seq[self.expr1, self.expr2]
        assert seq.expr1 == Atom(Symbol("A"))
        assert seq.expr2 == Atom(Symbol("B"))
        assert isinstance(seq, Seq)
    def test_compose(self):
        seq = Seq.__compose__(self.seq.expr1, self.seq.expr2)
        assert seq.expr1 == Atom(Symbol("A"))
        assert seq.expr2 == Atom(Symbol("B"))
class TestOption:
    # shared fixtures for all Option tests
    expr = Atom(Symbol("A"))
    option = Option(expr)
    def test_init(self):
        assert self.option.expr == Atom(Symbol("A"))
    def test_class_getitem(self):
        # Option[x] subscription must behave like the constructor.
        # Fix: the original asserted on the class attribute self.option instead
        # of the locally constructed `option`, so the subscription result was
        # never actually checked; also add the isinstance check that the
        # sibling TestSeq/TestForwardIs tests perform.
        option = Option[self.expr]
        assert option.expr == Atom(Symbol("A"))
        assert isinstance(option, Option)
class TestForwardIs:
    # shared fixtures for all ForwardIs tests
    expr1 = Atom(Symbol("A"))
    expr2 = Atom(Symbol("B"))
    forward_is = ForwardIs(expr1, expr2)
    def test_init(self):
        assert self.forward_is.expr == Atom(Symbol("A"))
        assert self.forward_is.forward_expr == Atom(Symbol("B"))
    def test_class_getitem(self):
        # ForwardIs[a, b] subscription must behave like the constructor
        forward_is = ForwardIs[self.expr1, self.expr2]
        assert forward_is.expr == Atom(Symbol("A"))
        assert forward_is.forward_expr == Atom(Symbol("B"))
        assert isinstance(forward_is, ForwardIs)
class TestOp:
    # operand fixtures for the operator-method tests
    expr = Atom(Symbol("A"))
    expr2 = Atom(Symbol("B"))
    def test_sequence(self):
        assert self.expr.sequence(self.expr2) == Seq[self.expr, self.expr2]
    def test_zero_or_more(self):
        assert self.expr.zero_or_more() == ZeroOrMore(self.expr)
    def test_zero_or_more_op(self):
        # the slice syntax expr[0:] is sugar for zero_or_more()
        assert self.expr[0:] == ZeroOrMore(self.expr)
    def test_option(self):
        # TODO(review): empty stub -- add assertions for the option() operator.
        ...
| 2.796875 | 3 |
tests/test_background_loader.py | pjeanjean/dakara-player | 0 | 12771815 | <filename>tests/test_background_loader.py
from unittest import TestCase
from unittest.mock import call, patch
from path import Path
from dakara_player_vlc.background_loader import (
BackgroundLoader,
BackgroundNotFoundError,
)
class BackgroundLoaderTestCase(TestCase):
    """Test the loader for backgrounds.

    Resolution order under test: custom name in custom dir, then default name
    in custom dir, then default name in default dir (each probed via the
    mocked ``exists``).
    """
    @patch(
        "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
    )
    def test_load_default_name_default_directory(self, mocked_exists):
        """Test to load one default background from default directory
        """
        # create the instance
        loader = BackgroundLoader(
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds,
            {"background": Path("default/background.png").normpath()},
        )
        # assert the call of the mocked method
        mocked_exists.assert_called_with(Path("default/background.png").normpath())
    @patch(
        "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
    )
    def test_load_default_name_custom_directory(self, mocked_exists):
        """Test to load one default background from custom directory
        """
        # create the instance
        loader = BackgroundLoader(
            directory=Path("custom"),
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds, {"background": Path("custom/background.png").normpath()}
        )
        # assert the call of the mocked method
        mocked_exists.assert_called_with(Path("custom/background.png").normpath())
    @patch(
        "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
    )
    def test_load_custom_name_custom_directory(self, mocked_exists):
        """Test to load one custom background from custom directory
        """
        # create the instance
        loader = BackgroundLoader(
            directory=Path("custom"),
            background_filenames={"background": "custom.png"},
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds, {"background": Path("custom/custom.png").normpath()}
        )
        # assert the call of the mocked method
        mocked_exists.assert_called_with(Path("custom/custom.png").normpath())
    @patch(
        "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
    )
    def test_load_custom_name_default_directory(self, mocked_exists):
        """Test to load one custom background from default directory

        Should load default background from default directory.
        """
        # create the instance (custom filename given, but no custom directory)
        loader = BackgroundLoader(
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
            background_filenames={"background": "other.png"},
        )
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds,
            {"background": Path("default/background.png").normpath()},
        )
        # assert the call of the mocked method
        mocked_exists.assert_called_with(Path("default/background.png").normpath())
    @patch("dakara_player_vlc.background_loader.exists", autospec=True)
    def test_load_fallback_default_name_custom_directory(self, mocked_exists):
        """Test to fallback to load one default background from custom directory

        Was initially trying to load one custom background from custom directory.
        """
        # create the instance
        loader = BackgroundLoader(
            directory=Path("custom"),
            background_filenames={"background": "custom.png"},
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # setup mock: custom name misses, default name in custom dir hits
        mocked_exists.side_effect = [False, True]
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds, {"background": Path("custom/background.png").normpath()}
        )
        # assert the call of the mocked method
        mocked_exists.assert_has_calls(
            [
                call(Path("custom/custom.png").normpath()),
                call(Path("custom/background.png").normpath()),
            ]
        )
    @patch("dakara_player_vlc.background_loader.exists", autospec=True)
    def test_load_fallback_default_name_default_directory(self, mocked_exists):
        """Test to fallback to load one default background from default directory

        Was initially trying to load one custom background from custom directory.
        """
        # create the instance
        loader = BackgroundLoader(
            directory=Path("custom"),
            background_filenames={"background": "custom.png"},
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # setup mock: both candidates in the custom dir miss, default dir hits
        mocked_exists.side_effect = [False, False, True]
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds,
            {"background": Path("default/background.png").normpath()},
        )
        # assert the call of the mocked method
        mocked_exists.assert_has_calls(
            [
                call(Path("custom/custom.png").normpath()),
                call(Path("custom/background.png").normpath()),
                call(Path("default/background.png").normpath()),
            ]
        )
    @patch("dakara_player_vlc.background_loader.exists", autospec=True)
    def test_load_error(self, mocked_exists):
        """Test to load one unexisting background

        Was initially trying to load one custom background from custom directory.
        """
        # create the instance
        loader = BackgroundLoader(
            directory=Path("custom"),
            background_filenames={"background": "custom.png"},
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # setup mock: every candidate path is missing
        mocked_exists.side_effect = [False, False, False]
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        with self.assertRaises(BackgroundNotFoundError) as error:
            loader.load()
        # assert the error
        self.assertEqual(
            str(error.exception), "Unable to find a background file for background"
        )
        # assert the backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # assert the call of the mocked method
        mocked_exists.assert_has_calls(
            [
                call(Path("custom/custom.png").normpath()),
                call(Path("custom/background.png").normpath()),
                call(Path("default/background.png").normpath()),
            ]
        )
    @patch(
        "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
    )
    def test_load_none_filename(self, mocked_exists):
        """Test to load a None custom filename
        """
        # a None custom filename must fall back to the default filename
        loader = BackgroundLoader(
            directory=Path("custom"),
            background_filenames={"background": None},
            default_directory=Path("default"),
            default_background_filenames={"background": "background.png"},
        )
        # pre assert that there are no backgrounds
        self.assertDictEqual(loader.backgrounds, {})
        # load the backgrounds
        loader.load()
        # assert the backgrounds
        self.assertDictEqual(
            loader.backgrounds, {"background": Path("custom/background.png").normpath()}
        )
        # assert the call of the mocked method
        mocked_exists.assert_called_with(Path("custom/background.png").normpath())
| 2.8125 | 3 |
model.py | EST09/cross-modal-autoencoders | 20 | 12771816 | import torch
import torch.nn as nn
from torch.autograd import Variable
# adapted from pytorch/examples/vae and ethanluoyc/pytorch-vae
class ImageClassifier(nn.Module):
    """Linear probe on top of a (pre-trained) encoder.

    The encoder must expose ``encode(x) -> (latent, aux)``; only the latent
    part is flattened and fed to the final linear classifier.
    """
    def __init__(self, latent_variable_size, pretrained, nout=2):
        super().__init__()
        self.latent_variable_size = latent_variable_size
        self.feature_extractor = pretrained
        self.classifier = nn.Linear(latent_variable_size, nout)
    def forward(self, x):
        latent, _ = self.feature_extractor.encode(x)
        flat = latent.view(-1, self.latent_variable_size)
        return self.classifier(flat)
class VAE(nn.Module):
    """Convolutional VAE for square images (imsize x imsize, nc channels).

    Encoder: five stride-2 4x4 convolutions (64 -> 2 spatially), flattened
    into mu / logvar heads of size ``latent_variable_size``.  The decoder
    mirrors it with transposed convolutions and a final Sigmoid.
    """
    def __init__(self, nc=1, ngf=128, ndf=128, latent_variable_size=128, imsize=64, batchnorm=False):
        super(VAE, self).__init__()
        self.nc = nc
        self.ngf = ngf
        self.ndf = ndf
        self.imsize = imsize
        self.latent_variable_size = latent_variable_size
        self.batchnorm = batchnorm  # if True, batch-normalize the mu head
        self.encoder = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 2 x 2
        )
        self.fc1 = nn.Linear(ndf*8*2*2, latent_variable_size)  # mu head
        self.fc2 = nn.Linear(ndf*8*2*2, latent_variable_size)  # logvar head
        # decoder
        self.decoder = nn.Sequential(
            # input is Z, going into a convolution
            # state size. (ngf*8) x 2 x 2
            nn.ConvTranspose2d(ngf * 8, ngf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
            nn.Sigmoid(),
            # state size. (nc) x 64 x 64
        )
        self.d1 = nn.Sequential(
            nn.Linear(latent_variable_size, ngf*8*2*2),
            nn.ReLU(inplace=True),
        )
        self.bn_mean = nn.BatchNorm1d(latent_variable_size)
    def encode(self, x):
        """Return (mu, logvar) for a batch of images."""
        h = self.encoder(x)
        h = h.view(-1, self.ndf*8*2*2)
        if self.batchnorm:
            return self.bn_mean(self.fc1(h)), self.fc2(h)
        else:
            return self.fc1(h), self.fc2(h)
    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        std = logvar.mul(0.5).exp_()
        # Fix: draw eps on the same device/dtype as std.  The previous code
        # allocated a CUDA tensor whenever CUDA was *available*, which crashed
        # for CPU inputs on CUDA machines (and wrapped it in the long-
        # deprecated Variable).
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def decode(self, z):
        h = self.d1(z)
        h = h.view(-1, self.ngf*8, 2, 2)
        return self.decoder(h)
    def get_latent_var(self, x):
        """Encode x and return one sampled latent vector per input."""
        mu, logvar = self.encode(x.view(-1, self.nc, self.imsize, self.imsize))
        z = self.reparametrize(mu, logvar)
        return z
    def generate(self, z):
        """Decode latent codes z into images."""
        res = self.decode(z)
        return res
    def forward(self, x):
        """Return (reconstruction, z, mu, logvar)."""
        mu, logvar = self.encode(x.view(-1, self.nc, self.imsize, self.imsize))
        z = self.reparametrize(mu, logvar)
        res = self.decode(z)
        return res, z, mu, logvar
class FC_VAE(nn.Module):
    """Fully connected variational autoencoder.

    Five Linear layers on each side (with BatchNorm/ReLU between), plus
    mu/logvar heads of size ``nz``.
    """
    def __init__(self, n_input, nz, n_hidden=1024):
        super(FC_VAE, self).__init__()
        self.nz = nz
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.encoder = nn.Sequential(
            nn.Linear(n_input, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
        )
        self.fc1 = nn.Linear(n_hidden, nz)  # mu head
        self.fc2 = nn.Linear(n_hidden, nz)  # logvar head
        self.decoder = nn.Sequential(
            nn.Linear(nz, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_hidden),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden, n_input),
        )
    def forward(self, x):
        """Return (reconstruction, z, mu, logvar)."""
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        res = self.decode(z)
        return res, z, mu, logvar
    def encode(self, x):
        """Return (mu, logvar) for a batch of flat inputs."""
        h = self.encoder(x)
        return self.fc1(h), self.fc2(h)
    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        std = logvar.mul(0.5).exp_()
        # Fix: draw eps on the same device/dtype as std.  The previous code
        # allocated a CUDA tensor whenever CUDA was *available*, which crashed
        # for CPU inputs on CUDA machines (and used the deprecated Variable).
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def decode(self, z):
        return self.decoder(z)
    def get_latent_var(self, x):
        """Encode x and return one sampled latent vector per input."""
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return z
    def generate(self, z):
        """Decode latent codes z into reconstructions."""
        res = self.decode(z)
        return res
class FC_Autoencoder(nn.Module):
    """Deterministic fully connected autoencoder (non-variational counterpart
    of FC_VAE).  ``forward`` returns ``(latent code, reconstruction)``.
    """
    def __init__(self, n_input, nz, n_hidden=512):
        super().__init__()
        self.nz = nz
        self.n_input = n_input
        self.n_hidden = n_hidden
        # first block uses Linear -> ReLU -> BatchNorm, the next three use
        # Linear -> BatchNorm -> ReLU (module indices match the original)
        enc_layers = [
            nn.Linear(n_input, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
        ]
        for _ in range(3):
            enc_layers += [
                nn.Linear(n_hidden, n_hidden),
                nn.BatchNorm1d(n_hidden),
                nn.ReLU(inplace=True),
            ]
        enc_layers.append(nn.Linear(n_hidden, nz))
        self.encoder = nn.Sequential(*enc_layers)
        dec_layers = [
            nn.Linear(nz, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
        ]
        for _ in range(3):
            dec_layers += [
                nn.Linear(n_hidden, n_hidden),
                nn.BatchNorm1d(n_hidden),
                nn.ReLU(inplace=True),
            ]
        dec_layers.append(nn.Linear(n_hidden, n_input))
        self.decoder = nn.Sequential(*dec_layers)
    def forward(self, x):
        code = self.encoder(x)
        return code, self.decoder(code)
class FC_Classifier(nn.Module):
    """Latent-space discriminator: three ReLU hidden layers followed by a
    linear logit head of size ``n_out``."""
    def __init__(self, nz, n_hidden=1024, n_out=2):
        super().__init__()
        self.nz = nz
        self.n_hidden = n_hidden
        self.n_out = n_out
        layers = []
        for width_in in (nz, n_hidden, n_hidden):
            layers += [nn.Linear(width_in, n_hidden), nn.ReLU(inplace=True)]
        layers.append(nn.Linear(n_hidden, n_out))
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        return self.net(x)
class Simple_Classifier(nn.Module):
    """Purely linear latent-space discriminator (logistic-regression style)."""
    def __init__(self, nz, n_out=2):
        super().__init__()
        self.nz = nz
        self.n_out = n_out
        # the single Linear stays wrapped in a Sequential so checkpoints keep
        # the 'net.0.*' parameter key layout
        self.net = nn.Sequential(nn.Linear(nz, n_out))
    def forward(self, x):
        return self.net(x)
| 2.734375 | 3 |
op/dataDraw/demo_data_funnel.py | RustFisher/python-playground | 2 | 12771817 | <gh_stars>1-10
from openpyxl import load_workbook
from pyecharts.charts import Funnel
import pyecharts.options as opts
def read_in_data(file_path):
    """Read [event_name, event_value] rows from worksheet 'data3' of the
    workbook at *file_path* (data rows start at row 2)."""
    print('获取原始数据 >>>')
    workbook = load_workbook(file_path)
    sheet = workbook['data3']
    records = []
    for row in range(2, sheet.max_row + 1):
        name = sheet.cell(row=row, column=1).value
        value = sheet.cell(row=row, column=2).value
        records.append([name, value])
    print(records)
    return records
def draw_funnel_default(input_data):
    """Render *input_data* as a funnel chart with default styling and write
    it to funnel_chart_1.html."""
    print('绘制漏斗图 >>> https://gallery.pyecharts.org/#/Funnel/funnel_base')
    chart = Funnel(init_opts=opts.InitOpts(width="800px", height="400px"))
    chart.add(series_name='漏斗1', data_pair=input_data, )
    chart.set_global_opts(title_opts=opts.TitleOpts(title="漏斗图1", subtitle="默认样式"))
    chart.render("funnel_chart_1.html")
def draw_funnel_2(input_data):
    """Funnel chart with inside labels and custom tooltips, written to
    funnel_chart_2.html."""
    print('绘制漏斗图1 >>> https://gallery.pyecharts.org/#/Funnel/funnel_base')
    chart = Funnel(init_opts=opts.InitOpts(width="1000px", height="600px"))
    chart.add(
        series_name='漏斗1',
        data_pair=input_data,
        label_opts=opts.LabelOpts(position="inside", formatter='{b} 数量: {c}'),
        gap=2,
        tooltip_opts=opts.TooltipOpts(trigger="item", formatter="{a} <br/>{b} 数量: {c}"),
    )
    chart.set_global_opts(title_opts=opts.TitleOpts(title="漏斗图1", subtitle="改变了label"))
    chart.render("funnel_chart_2.html")
def draw_funnel_3(input_data):
    """Funnel chart whose stage titles carry overall/relative conversion
    rates; written to funnel_chart_3.html.

    Fix: the original used ``input_data.copy()``, a *shallow* copy, so
    rewriting the stage titles mutated the caller's inner [name, value]
    lists in place -- contradicting the copy's stated intent.
    """
    print('绘制漏斗图2 >>> https://gallery.pyecharts.org/#/Funnel/funnel_base')
    # copy each row as well, so the caller's data stays untouched
    data = [list(item) for item in input_data]
    base_value = data[0][1]  # count of the first (widest) funnel stage
    for x in range(0, len(data)):
        event_name = data[x][0]
        value = data[x][1]  # stage count
        ratio = value / base_value
        event_name += '\n总体转化率: {:.2f}%'.format(ratio * 100)
        if x > 0:
            event_name += '\n相对转化率: {:.2f}%'.format(100 * value / data[x - 1][1])
        data[x][0] = event_name
        print(event_name)
    (
        Funnel(init_opts=opts.InitOpts(width="1000px", height="800px"))
        .add(series_name='漏斗1', data_pair=data,
             label_opts=opts.LabelOpts(position="inside", formatter='{b}\n数量: {c}'),
             gap=2,
             tooltip_opts=opts.TooltipOpts(trigger="item", formatter="{a} <br/>{b} 数量: {c}"),
             )
        .set_global_opts(title_opts=opts.TitleOpts(title="漏斗图1", subtitle="修改了各项的标题"))
        .render("funnel_chart_3.html")
    )
if __name__ == '__main__':
    # load the demo workbook once, then render all three chart variants
    event_data = read_in_data('res/事件demo.xlsx')
    draw_funnel_default(event_data)
    draw_funnel_2(event_data)
    draw_funnel_3(event_data)
| 2.90625 | 3 |
python_fishc/48.0.py | iisdd/Courses | 1 | 12771818 | <reponame>iisdd/Courses
'''
0. 用 while 语句实现与以下 for 语句相同的功能:
for each in range(5):
print(each)
'''
# while-based re-implementation of: for each in range(5): print(each)
it = iter(range(5))
while True:
    try:
        each = next(it)
    except StopIteration:
        # iterator exhausted -- this is exactly how `for` terminates
        break
    print(each)
| 3.421875 | 3 |
MultiCal.py | ZiadHatab/multiline-trl-calibration | 0 | 12771819 | <filename>MultiCal.py
# -*- coding: utf-8 -*-
"""
@author: <NAME> (<EMAIL>)
This is an implementation of the MultiCal multiline TRL calibration algorithm
discussed in [1]. The original algorithm is from [2].
Some concerns regarding [1]:
1. on page 139, the paragraph after Eq. (11). It says that "MultiCal sets
phieff to 0-deg if the argument of the arcsine happens to exceed 1 due
to measurement noise". This sounds counter-intuitive to me, because you
would think that phieff should be set to 90-deg if the argument of arcsine
exceeds 1 (i.e., saturation). And when you look at [2], on page 1209,
the paragraph after Eq. (53) reads "We stipulate that phieff = 90-deg if
the argument of the arcsin is greater than 1". This statement makes more
sense to me than that in [1]. The question now: is the statement about
phieff in [1] a typo? I implemented both, see the function commonLine().
2. On page 148, at Eq. (48). In the numerator of the first sum term
it says: exp(-g*(lm-lc))*exp(-g*(lm-lc)).conj(). This is definitely a typo.
The correct term should be exp(-g*(lm-lc))*exp(-g*(ln-lc)).conj().
The typo is the line length in the second term, it should be ln not lm.
The original equation can be found in [2] at page 1211, Eq. (76). There it
is written correctly.
3. On page 150, Eq. (57), the equation has a sign error. The second
fraction should have a plus sign in both numerator and denominator.
4. On page 151, Eqs. (62) and (63) are used to separate the 7-th error
term into two parts for each port. I tried using Eqs. (62) and (63) multiple
times and it never worked properly. When I take their product to compute R1R2
and use it in the calibration, the result of the calibration is wrong.
Am I missing something here? I ended up deriving an equation for R1R2
directly from the Thru measurements (which works quite good).
[1] <NAME>, <NAME> and <NAME>, "Multiline TRL revealed,"
60th ARFTG Conference Digest, Fall 2002, pp. 131-155
[2] <NAME>, "A multiline method of network analyzer calibration",
IEEE Transactions on Microwave Theory and Techniques,
vol. 39, no. 7, pp. 1205-1215, July 1991.
##########-NOTE-##########
This script is written to process only one frequency point. Therefore, you need
to call this script in your main script and iterate through all frequency points.
##########-END-##########
"""
import numpy as np
c0 = 299792458 # speed of light in vacuum (m/s)
def S2T(S):
    """Convert a 2x2 scattering (S) matrix to transfer (T) parameters."""
    s11, s12 = S[0, 0], S[0, 1]
    s21, s22 = S[1, 0], S[1, 1]
    T = S.copy()  # preserves the caller's dtype/shape
    T[0, 0] = -(s11 * s22 - s12 * s21)  # minus the determinant of S
    T[0, 1] = s11
    T[1, 0] = -s22
    T[1, 1] = 1
    return T / s21
def T2S(T):
    """Convert 2x2 transfer (T) parameters back to a scattering (S) matrix."""
    t11, t12 = T[0, 0], T[0, 1]
    t21, t22 = T[1, 0], T[1, 1]
    S = T.copy()
    S[0, 0] = t12
    S[0, 1] = t11 * t22 - t12 * t21  # determinant of T
    S[1, 0] = 1
    S[1, 1] = -t21
    return S / t22
def deleteDiag(A):
    """Return square matrix A with its diagonal removed, reshaped to n x (n-1)."""
    n = A.shape[0]
    off_diagonal = ~np.eye(n, dtype=bool)  # mask: everything except the diagonal
    return A[off_diagonal].reshape(n, -1)
def commonLine(gamma, line_lengths):
    """Select the index of the common (reference) line: the line whose
    worst-case effective phase difference to all other lines is largest
    (see [1] and [2])."""
    e = np.exp(gamma*line_lengths)
    # pairwise |sinh(gamma*(li-lj))|-style terms between every two lines
    diffs = abs(np.outer(e, 1/e) - np.outer(1/e, e))/2
    n = diffs.shape[0]
    diffs = diffs[~np.eye(n, dtype=bool)].reshape(n, -1)  # drop self-pairs
    # for this, please read comment #1 in the header of this file!
    # diffs[diffs > 1] = 0  # anything above 1 set to 0 (see [1]): arcsin(0)=0
    diffs[diffs > 1] = 1  # anything above 1 set to 1 (see [2]): arcsin(1)=pi/2
    phieff = np.arcsin(diffs)
    worst_case = np.min(phieff, axis=1)  # minimum phase in every group
    # the group that has the maximum minimum phase is chosen
    return np.argmax(worst_case)
def BLUE(x, y, Vinv):
    """Gauss-Markov best linear unbiased estimate of the scalar b in y = b*x,
    weighted by the inverse covariance matrix Vinv."""
    # np.vdot conjugates its first argument, matching sum(conj(x) * z)
    numerator = np.vdot(x, Vinv @ y)
    denominator = np.vdot(x, Vinv @ x)
    return numerator / denominator
def VGamma(N):
    """Inverse covariance matrix used for the BLUE estimate of gamma,
    for N measured lines (N-1 line differences)."""
    m = N - 1
    return np.eye(m, dtype=complex) - np.full((m, m), 1.0 / N, dtype=complex)
def VCA(gamma, cline, lines):
    """Inverse covariance matrix for the C/A coefficient (see [2], Eq. (77)).

    gamma : propagation constant
    cline : length of the common (reference) line
    lines : lengths of the remaining lines
    """
    g = gamma
    l = lines
    lc = cline
    dl = l-lc
    lm, ln = np.meshgrid(lines, lines, sparse=False, indexing='ij')
    # off-diagonal entries of the covariance matrix
    V = ( 1/(np.exp(-g*(lm-lc))*np.exp(-g*(ln-lc)).conj())
        + 1/(abs(np.exp(-g*lc))**2*np.exp(-g*lm)*np.exp(-g*ln).conj()) )/( np.exp(-g*(lm-lc)) - np.exp(g*(lm-lc)) )/( np.exp(-g*(ln-lc)) - np.exp(g*(ln-lc)) ).conj()
    # Account for the terms with kronecker-delta (see [2], Eq.(77))
    diag = (abs(np.exp(-g*dl))**2 + 1/(abs(np.exp(-g*l))*abs(np.exp(-g*lc)))**2)/abs(np.exp(-g*dl) - np.exp(g*dl))**2
    V = V + np.diag(diag)
    return np.linalg.pinv(V)
def VB(gamma, cline, lines):
    """Inverse covariance matrix for the B coefficient (see [2], Eq. (76)).

    gamma : propagation constant
    cline : length of the common (reference) line
    lines : lengths of the remaining lines
    """
    g = gamma
    l = lines
    lc = cline
    dl = l-lc
    lm, ln = np.meshgrid(lines, lines, sparse=False, indexing='ij')
    # off-diagonal entries of the covariance matrix
    V = (np.exp(-g*(lm-lc))*np.exp(-g*(ln-lc)).conj()
        + abs(np.exp(-g*lc))**2*np.exp(-g*lm)*np.exp(-g*ln).conj())/( np.exp(-g*(lm-lc)) - np.exp(g*(lm-lc)) )/( np.exp(-g*(ln-lc)) - np.exp(g*(ln-lc)) ).conj()
    # Account for the terms with kronecker-delta (see [2], Eq.(76))
    diag = (1/abs(np.exp(-g*dl))**2 + (abs(np.exp(-g*l))*abs(np.exp(-g*lc)))**2)/abs(np.exp(-g*dl) - np.exp(g*dl))**2
    V = V + np.diag(diag)
    return np.linalg.pinv(V)
def computeGL(Mc, Mlines, cline, lines, g_est):
    """Root-choice procedure as described in [1]: build the vectors G and L
    used to estimate gamma via the BLUE estimator (see [1], Eq. (24)).

    Mc     : measured T-matrix of the common (reference) line
    Mlines : measured T-matrices of the remaining lines
    cline  : length of the common line
    lines  : lengths of the remaining lines
    g_est  : current estimate of gamma (resolves eigenvalue/branch ambiguity)

    Returns (G, L): the per-line gamma*dl values and the matching dl values.
    The work at https://github.com/simonary/MultilineTRL was very helpful.
    """
    gdl = []
    dls = []
    for inx, (linej, Mj) in enumerate(zip(lines, Mlines)): # For every other line
        Mjc = Mj@np.linalg.pinv(Mc)
        Eij = np.linalg.eigvals(Mjc)
        dl = linej - cline
        # case 1 Eij[0] = exp(-gamma*l), Eij[1] = exp(gamma*l)
        Ea1 = (Eij[0]+1/Eij[1])/2 # exp(-gamma*l)
        Eb1 = (Eij[1]+1/Eij[0])/2 # exp(gamma*l)
        # Pa/Pb: integer count of 2*pi phase wraps, resolved using g_est
        Pa1 = np.round( (g_est*dl + np.log(Ea1)).imag/2/np.pi )
        g_a1 = (-np.log(Ea1) + 1j*2*np.pi*Pa1)/dl
        Da1 = abs(g_a1/g_est - 1)
        Pb1 = np.round( -(g_est*dl - np.log(Eb1)).imag/2/np.pi )
        g_b1 = (-np.log(Eb1) + 1j*2*np.pi*Pb1)/dl
        Db1 = abs(g_b1/g_est + 1)
        # case 2 Eij[1] = exp(-gamma*l), Eij[0] = exp(gamma*l)
        Ea2 = (Eij[1] + 1/Eij[0])/2 # exp(-gamma*l)
        Eb2 = (Eij[0] + 1/Eij[1])/2 # exp(gamma*l)
        Pa2 = np.round( (g_est*dl + np.log(Ea2)).imag/2/np.pi )
        g_a2 = (-np.log(Ea2) + 1j*2*np.pi*Pa2)/dl
        Da2 = abs(g_a2/g_est - 1)
        Pb2 = np.round( -(g_est*dl - np.log(Eb2)).imag/2/np.pi )
        g_b2 = (-np.log(Eb2) + 1j*2*np.pi*Pb2)/dl
        Db2 = abs(g_b2/g_est + 1)
        dls.append(dl)
        # Determine the assignment of eigenvalues (which one is exp(-gamma*l))
        if (Da1 + Db1) <= 0.1*(Da2 + Db2):
            gdl.append(-np.log(Ea1) + 1j*2*np.pi*Pa1)
        elif (Da2 + Db2) <= 0.1*(Da1 + Db1):
            gdl.append(-np.log(Ea2) + 1j*2*np.pi*Pa2)
        else:
            # ambiguous: fall back to the sign of the real part, and finally
            # to the smaller total deviation from the estimate
            if (g_a1 + g_b1).real >= 0 and (g_a2 + g_b2).real < 0:
                gdl.append(-np.log(Ea1) + 1j*2*np.pi*Pa1)
            elif (g_a1 + g_b1).real < 0 and (g_a2 + g_b2).real >= 0:
                gdl.append(-np.log(Ea2) + 1j*2*np.pi*Pa2)
            else: # sign of real part not the same
                if (Da1 + Db1) <= (Da2 + Db2):
                    gdl.append(-np.log(Ea1) + 1j*2*np.pi*Pa1)
                else:
                    gdl.append(-np.log(Ea2) + 1j*2*np.pi*Pa2)
    return np.array(gdl), np.array(dls) # vectors G and L
def compute_B_CA(Mc, Mlines, cline, lines, g_est, direction='forward'):
    """Estimate the error-box coefficients C/A and B from every line pair.

    The eigenvectors of Mj@inv(Mc) (forward) or (inv(Mc)@Mj).T (backward)
    carry the error-box ratios: the eigenvector of the eigenvalue closest to
    exp(-gamma*dl) yields C/A, the other eigenvector yields B.

    Returns (CAs, Bs): one estimate per line.
    """
    Mcinv = np.linalg.pinv(Mc)
    Bs = []
    CAs = []
    for inx, Mj in enumerate(Mlines): # For every other line
        if direction == 'forward':
            Eij, V = np.linalg.eig(Mj@Mcinv)
        elif direction == 'backward':
            Eij, V = np.linalg.eig((Mcinv@Mj).T)
        dl = lines[inx] - cline
        # index of the eigenvalue closest to exp(-gamma*dl)
        mininx = np.argmin( abs(Eij-np.exp(-g_est*dl)) )
        v1 = V[:,mininx]
        # NOTE: ~mininx relies on two's complement (~0 == -1, ~1 == -2) to
        # pick the *other* of the two columns; valid only for 2x2 matrices.
        v2 = V[:,~mininx]
        CA = v1[1]/v1[0]
        B = v2[0]/v2[1]
        CAs.append(CA)
        Bs.append(B)
    return np.array(CAs), np.array(Bs) # vectors B and C/A
def mTRL(line_meas_T, line_lengths, meas_reflect_S, gamma_est, reflect_est, reflect_offset, override_gamma=-1):
    """Multiline TRL calibration for a single frequency point (see [1], [2]).

    line_meas_T : 3D array of 2x2 T-parameters of line measurements
                  (the first entry is taken as the Thru)
    line_lengths: 1D array containing line lengths in same order of measurements
    meas_reflect_S: 3D array of 2x2 S-parameters of the measured reflects
    gamma_est: scalar estimate of gamma (the propagation constant)
    reflect_est: 1D array of reference reflection coefficients
                 (e.g., short=-1, open=1)
    reflect_offset: 1D array of offset lengths of the reflect standards
                    (referenced to the Thru)
    override_gamma: if not -1, use this value instead of estimating gamma

    Returns (X, K, gamma): the 4x4 calibration matrix X = (B.T kron A)
    holding the 6 error terms, the 7th error term K, and gamma.
    """
    # make sure all inputs have proper shape
    line_meas_T = np.atleast_3d(line_meas_T).reshape((-1,2,2))
    line_lengths = np.atleast_1d(line_lengths)
    meas_reflect_S = np.atleast_3d(meas_reflect_S).reshape((-1,2,2))
    reflect_est = np.atleast_1d(reflect_est)
    reflect_offset = np.atleast_1d(reflect_offset)
    line_lengths = line_lengths - line_lengths[0] # setting the first line Thru
    meas_Thru_S = T2S(line_meas_T[0]) # S-parameters of the Thru standard
    thru_T = line_meas_T[0]
    N = len(line_lengths)
    gamma = abs(gamma_est.real) + 1j*abs(gamma_est.imag)
    cline_inx = commonLine(gamma, line_lengths)
    # extract the common line out from the list
    line_length_com = line_lengths[cline_inx]
    line_meas_com = line_meas_T[cline_inx]
    # delete the common line from the list
    line_meas_T = np.delete(line_meas_T, cline_inx, axis=0)
    line_lengths = np.delete(line_lengths, cline_inx, axis=0)
    # estimate gamma (unless a value is forced by the caller)
    if override_gamma != -1:
        gamma = override_gamma
    else:
        G,L = computeGL(line_meas_com, line_meas_T, line_length_com, line_lengths, gamma)
        gamma = BLUE(L, G, VGamma(N))
        # gamma = abs(gamma.real) + 1j*abs(gamma.imag)
    # estimate B and C/A for forward direction
    CAs, Bs = compute_B_CA(line_meas_com, line_meas_T, line_length_com,
                           line_lengths, gamma, direction='forward')
    CA1 = BLUE(np.ones(N-1), CAs, VCA(gamma, line_length_com, line_lengths))
    B1 = BLUE(np.ones(N-1), Bs, VB(gamma, line_length_com, line_lengths))
    # estimate B and C/A for backward direction
    CAs, Bs = compute_B_CA(line_meas_com, line_meas_T, line_length_com,
                           line_lengths, gamma, direction='backward')
    CA2 = BLUE(np.ones(N-1), CAs, VCA(gamma, line_length_com, line_lengths))
    B2 = BLUE(np.ones(N-1), Bs, VB(gamma, line_length_com, line_lengths))
    '''
    # The original method MultiCal uses to compute A1A2 and R1R2... not recommended!
    # solve for A1A2 and R1R2 from Thru measurements
    S11,S12,S21,S22 = meas_Thru_S.flatten()
    A1A2 = -(B1*B2-B1*S22-B2*S11+(S11*S22-S21*S12))/(1-CA1*S11-CA2*S22+CA1*CA2*(S11*S22-S21*S12))
    R1R2 = 1/S21/(CA1*CA2*A1A2 + 1)
    '''
    '''
    Below is a mathematical rewriting of the error-box model using
    Kronecker product formulation. This has no influence on MultiCal procedures,
    this is just for my convenience (see the NOTE at the end of this file!).
    You can write the problem in terms of the conventional 2 error-boxes
    matrices, if you wish...
    '''
    # normalized calibration matrix
    X_ = np.array([[1, B1, B2, B1*B2],
                   [CA1, 1, B2*CA1, B2 ],
                   [CA2, B1*CA2, 1, B1 ],
                   [CA1*CA2, CA2, CA1, 1 ]])
    # solve for A1A2 and K, simultaneously (this gives much better results)
    KA1A2,_,_,K = np.linalg.pinv(X_)@thru_T.T.flatten()
    A1A2 = KA1A2/K
    # solve for A1/A2, A1 and A2 using the reflect measurements
    A1 = []
    A2 = []
    for reflect_S, reflect_ref, offset in zip(meas_reflect_S,reflect_est,reflect_offset):
        G1 = reflect_S[0,0]
        G2 = reflect_S[1,1]
        A1_A2 = (G1 - B1)/(1 - G1*CA1)*(1 + G2*CA2)/(G2 + B2)
        a1 = np.sqrt(A1_A2*A1A2)
        # choose correct answer for A1 and A2 (resolve the square-root sign
        # against the expected, offset-corrected reflection coefficient)
        ref_est = reflect_ref*np.exp(-2*gamma*offset)
        G1 = (G1 - B1)/(1 - G1*CA1)/a1
        if abs(G1 - ref_est) > abs(-G1 - ref_est):
            a1 = -a1
        A1.append(a1)
        A2.append(A1A2/a1)
    A1 = np.array(A1).mean()
    A2 = np.array(A2).mean()
    # de-normalize
    X = X_@np.diag([A1A2, A2, A1, 1])
    '''
    X: is 4x4 matrix and is defined as X = (B.T kron A), where A is the
    forward error-box and B is the reverse error-box (T-parameters).
    This matrix holds the 6 error terms.
    K: is the 7-th term of the error-box model
    ############-NOTE-############
    Background on Kronecker product:
    Given three matrices X, Y, and Z, the vectorization of their triplet
    product is given as:
        vec(XZY) = (Y.T kron X)*vec(Z)
    where vec() flatten a matrix into a vector.
    In our case if the error-box model is given as:
        M = K*A*T*B
    where K is scalar, A and B are forward and backward error-boxes, and
    T is the DUT. Then using Kronecker product description, we have:
        vec(M) = K*(B.T kron A)*vec(T)
    Therefore, the calibrated DUT is solved as:
        vec(T) = (1/K)*(B.T kron A)^(-1)*vec(M)
    https://en.wikipedia.org/wiki/Kronecker_product
    https://en.wikipedia.org/wiki/Vectorization_(mathematics)
    ############-END-############
    '''
    return X, K, gamma
# EOF | 2.3125 | 2 |
test.py | linzhiming0826/ADSL | 6 | 12771820 | import urllib2
proxies = {"http": "192.168.3.11:6666"}
proxy_s = urllib2.ProxyHandler(proxies)
opener = urllib2.build_opener(proxy_s)
urllib2.install_opener(opener)
content = urllib2.urlopen('http://wwww.baidu.com').read()
print content
| 2.59375 | 3 |
flixed_django/flixedREST/admin.py | nilesh1168/flixed-movie-tracker | 0 | 12771821 | <reponame>nilesh1168/flixed-movie-tracker
from django.contrib import admin

# Register the movie-tracker models so they can be managed through the
# Django admin interface.
from .models import WatchList
from .models import WatchedMovie

admin.site.register(WatchList)
admin.site.register(WatchedMovie)
| 1.320313 | 1 |
ECE650-system-programming/HW4_db/hw4_django_orm/app_homepage/apps.py | yo1995/cht_Duke_courses | 8 | 12771822 | <filename>ECE650-system-programming/HW4_db/hw4_django_orm/app_homepage/apps.py<gh_stars>1-10
from django.apps import AppConfig
class AppHomepageConfig(AppConfig):
    """Django application configuration for the ``app_homepage`` app."""
    name = 'app_homepage'
| 1.195313 | 1 |
Python32/alice.py | andersonsilvade/python_C | 0 | 12771823 | import string
texto = open('alice.txt').read().lower()
for c in string.punctuation:
texto = texto.replace(c, ' ')
palavras = texto.split()
wc = {}
for p in palavras:
if p in wc:
wc[p] += 1
else:
wc[p] = 1
def contador(dupla):
return dupla[1]
duplas = sorted(wc.items(), key=contador, reverse=True)
for dupla in duplas[:10]:
print (dupla[0], dupla[1])
| 3.140625 | 3 |
utils/drift_diffusion_analysis.py | ClaraKuper/VVSS2021 | 0 | 12771824 | <filename>utils/drift_diffusion_analysis.py
import pandas as pd
import numpy as np
import pickle
import os
from ddm import Fittable, Model, Sample, Bound
from ddm.models import LossRobustBIC, DriftConstant, NoiseConstant, BoundConstant, OverlayNonDecision, Drift
from ddm.functions import fit_adjust_model, display_model
path_models = './models/'
def get_ddm_sample(data, subject, correct_name):
    """
    Reduce the data frame to one subject / the relevant columns and wrap it
    in a PyDDM ``Sample``.

    :param data: the full experiment data frame (pandas DataFrame)
    :param subject: the subject to keep (matched against ``data.subject``)
    :param correct_name: the name of the column that holds the answer values
    :return: a ``ddm.Sample`` built from the reduced data frame
    """
    # define the sample for the models
    ddm_df = data
    # reduce to one subject
    ddm_df = ddm_df[ddm_df.subject == subject]
    # reduce my data file to the necessary columns (reaction time, response,
    # answer, and the six per-sample hit probabilities used as conditions)
    ddm_df = ddm_df.loc[:, ['rea_time', 'goResp', 'answer', 'sampleProbHit_01', 'sampleProbHit_02', 'sampleProbHit_03',
                            'sampleProbHit_04', 'sampleProbHit_05', 'sampleProbHit_06']]
    # drop all rows that contain nans and reset the index
    ddm_df.dropna(axis=0, inplace=True)
    ddm_df.reset_index(drop=True, inplace=True)
    # turn my datafile into a pyDDM sample
    sample = Sample.from_pandas_dataframe(ddm_df, rt_column_name="rea_time", correct_column_name=correct_name)
    return sample
# step 1: define a collapsing boundary class.
# this class definition was taken from https://pyddm.readthedocs.io/en/latest/cookbook/bounds.html#bound-exp-delay
class BoundCollapsingExponentialDelay(Bound):
    """Bound collapses exponentially over time.
    Takes three parameters:
    `B` - the bound at time t = 0.
    `tau` - the time constant for the collapse, should be greater than
    zero.
    `t1` - the time at which the collapse begins, in seconds
    """
    name = "Delayed exponential collapsing bound"
    required_parameters = ["B", "tau", "t1"]
    def get_bound(self, t, conditions, **kwargs):
        # constant bound before the delay t1, exponential decay afterwards
        if t <= self.t1:
            return self.B
        if t > self.t1:
            return self.B * np.exp(-self.tau * (t - self.t1))
# step 2 define the drift classes
# the drift for the first model
class FirstValDrift(Drift):
    """ returns the first evidence value multiplied by the scale
    Takes one parameter:
    'scale' - scales the evidence up or down to the final drift rate
    """
    name = "returns drift of first value"
    required_conditions = ["sampleProbHit_01"]
    required_parameters = ["scale"]
    def get_drift(self, t, conditions, **kwargs):
        # drift is constant over time: the first sample's hit probability
        # scaled by the fitted 'scale' parameter
        return conditions['sampleProbHit_01'] * self.scale
# the drift for the second model
class ThreshDrift(Drift):
    """ returns 0 till a threshold is crossed. Then, returns the first evidence that was above the threshold
    Takes two parameters:
    'scale' - scales the evidence up or down to the final drift rate
    'thresh' - the threshold up to which the model returns "0"
    """
    name = "drifts with the first value above threshold"
    required_conditions = ["sampleProbHit_01", "sampleProbHit_02", "sampleProbHit_03", "sampleProbHit_04",
                           "sampleProbHit_05", "sampleProbHit_06"]
    required_parameters = ["scale", "thresh"]
    # set a drift value here to access it later
    # NOTE(review): this is a *class* attribute that is mutated below via
    # `ThreshDrift.drift_value = ...`, so once latched it persists across
    # trials/solves of this process -- confirm that it is reset between
    # trials somewhere, otherwise this leaks state between conditions.
    drift_value = 0
    # this schema defines the temporal structure how samples appeared
    time_schema = np.linspace(0, 0.88, 6)
    def get_drift(self, t, conditions, **kwargs):
        # get all samples that were already shown
        passed = self.time_schema[(self.time_schema - t) <= 0]
        # get the most recent of these (argmax over the monotonically
        # increasing schedule returns the last shown sample's index)
        prob = self.required_conditions[np.argmax(passed)]
        # check if we have to set a drift value
        if (conditions[prob] > self.thresh) and (self.drift_value == 0):
            # set the drift value (latched at class level, see NOTE above)
            ThreshDrift.drift_value = conditions[prob] * self.scale
        # return the drift value
        return self.drift_value
# Define the Drift for the Third Model
class ContinuousUpdate(Drift):
    """ always returns the current evidence * scale
    Takes one parameter:
    'scale' - scales the evidence up or down to the final drift rate
    """
    name = "continuously updating drifts"
    required_conditions = ["sampleProbHit_01", "sampleProbHit_02", "sampleProbHit_03", "sampleProbHit_04",
                           "sampleProbHit_05", "sampleProbHit_06"]
    required_parameters = ["scale"]
    # this schema defines the temporal structure how samples appeared
    time_schema = np.linspace(0, 0.88, 6)
    def get_drift(self, t, conditions, **kwargs):
        # get all samples that were already shown
        passed = self.time_schema[(self.time_schema - t) <= 0]
        # get the most recent (the last sample displayed before time t)
        prob = self.required_conditions[np.argmax(passed)]
        # multiply with scale and return
        return conditions[prob] * self.scale
# define all 3 models (shared solver grid: dx=.001, dt=.01, T_dur=1 second)
# model one: immediate constant drift
ddm1 = Model(name='drift rate depends on first tw (fitted)',
             # custom, fittable drift rate
             drift=FirstValDrift(scale=Fittable(minval=0.1, maxval=1)),
             # constant, fittable noise
             noise=NoiseConstant(noise=Fittable(minval=.5, maxval=4)),
             # custom, fittable boundary
             bound=BoundCollapsingExponentialDelay(B=1,
                                                   tau=Fittable(minval=3, maxval=7),
                                                   t1=Fittable(minval=0.5, maxval=1.5)),
             # constant, fittable non-decision time
             overlay=OverlayNonDecision(nondectime=Fittable(minval=0, maxval=1)),
             dx=.001, dt=.01, T_dur=1)
# model two: delayed drift with threshold
ddm2 = Model(name='drift starts after threshold was crossed',
             # custom, fittable drift rate
             drift=ThreshDrift(scale=Fittable(minval=1, maxval=10), thresh=Fittable(minval=0.1, maxval=1)),
             noise=NoiseConstant(noise=Fittable(minval=.5, maxval=4)),
             # custom, fittable boundary
             bound=BoundCollapsingExponentialDelay(B=1,
                                                   tau=Fittable(minval=0.1, maxval=5),
                                                   t1=Fittable(minval=0, maxval=1)),
             # constant, fittable non-decision time
             overlay=OverlayNonDecision(nondectime=Fittable(minval=0, maxval=1)),
             dx=.001, dt=.01, T_dur=1)
# model three: drift according to the latest sample
ddm3 = Model(name='drift changes with every new sample',
             # custom, fittable drift rate
             drift=ContinuousUpdate(scale=Fittable(minval=1, maxval=10)),
             # constant, fittable noise
             noise=NoiseConstant(noise=Fittable(minval=.5, maxval=4)),
             # custom, fittable boundary
             bound=BoundCollapsingExponentialDelay(B=1,
                                                   tau=Fittable(minval=0.1, maxval=5),
                                                   t1=Fittable(minval=0, maxval=1)),
             # constant, fittable non-decision time
             overlay=OverlayNonDecision(nondectime=Fittable(minval=0, maxval=1)),
             dx=.001, dt=.01, T_dur=1)
def fit_save_ddm(sample, ddm, name):
    """
    Load a previously fitted DDM from disk if present; otherwise fit the
    requested template model to `sample` and cache the result under `name`.

    :param sample: the sample generated with get_ddm_sample
    :param ddm: which model to fit: 'ddm1', 'ddm2' or 'ddm3'
    :param name: the filename (relative to `path_models`)
    :return: the fitted model
    """
    model_file = path_models + name
    if os.path.exists(model_file):
        print('model was fitted, returning fitted model')
        with open(model_file, 'rb') as file:
            return pickle.load(file)

    # dispatch table instead of an if/elif chain
    templates = {'ddm1': ddm1, 'ddm2': ddm2, 'ddm3': ddm3}
    if ddm not in templates:
        raise ValueError('only ddm1, ddm2 and ddm3 are valid')
    model = templates[ddm]

    print('Fitting a new model. this will take some time')
    fit_adjust_model(sample, model, lossfunction=LossRobustBIC, verbose=False)
    with open(model_file, 'wb') as output:
        pickle.dump(model, output, pickle.HIGHEST_PROTOCOL)
    return model
def get_ddm_responses(ddm, data, subject):
    """
    Generate model-predicted responses for every trial of one subject.

    :param ddm: the fitted ddm (PyDDM Model)
    :param data: the data to which we want to create responses
    :param subject: the subject for which we want to subset data
    :return: a copy of the subject's data with predicted ``rea_time``,
        ``probAnswer``, ``answer`` and ``goResp`` columns added/overwritten
    """
    obs_file = data[data.subject == subject]
    model_pred = obs_file.copy()
    # solve the models for every trial, get an estimate of the response time, and of a true/false answer
    # finally, get the response by comparing true/false to the condition
    for r in obs_file.index:
        # per-trial conditions: the six hit probabilities shown in this trial
        conditions = {
            'sampleProbHit_01': obs_file.loc[r, 'sampleProbHit_01'],
            'sampleProbHit_02': obs_file.loc[r, 'sampleProbHit_02'],
            'sampleProbHit_03': obs_file.loc[r, 'sampleProbHit_03'],
            'sampleProbHit_04': obs_file.loc[r, 'sampleProbHit_04'],
            'sampleProbHit_05': obs_file.loc[r, 'sampleProbHit_05'],
            'sampleProbHit_06': obs_file.loc[r, 'sampleProbHit_06'],
        }
        Solution = ddm.solve(conditions=conditions)
        # one reaction-time draw from the solved distribution per trial
        model_pred.loc[r, 'rea_time'] = Solution.resample(1)
        model_pred.loc[r, 'probAnswer'] = Solution.prob_correct()
    # binarise the answer probability, then derive the go/no-go response by
    # comparing the binarised answer against the trial's ground truth
    model_pred.loc[:, 'answer'] = np.round(model_pred.probAnswer)
    model_pred.loc[:, 'goResp'] = 1 - (abs(model_pred.answer - model_pred.hitGoal))
    return model_pred
def match_response_types(model_data):
    """
    Label each trial with its signal-detection category.

    Compares the (model-generated) go/no-go response against the trial's
    ground-truth condition and writes the category to a new
    ``response_cat`` column, in place.

    :param model_data: DataFrame with numeric ``goResp`` (1 = go response)
        and ``hitGoal`` (1 = target trial) columns
    :return: the same DataFrame with ``response_cat`` in
        {'HIT', 'FALSE ALARM', 'MISS', 'CORRECT REJECTION'}
    """
    go = model_data['goResp'] == 1
    hit = model_data['hitGoal'] == 1
    # Vectorised replacement for the original row-by-row loop with nested ifs.
    model_data['response_cat'] = np.select(
        [go & hit, go & ~hit, ~go & hit],
        ['HIT', 'FALSE ALARM', 'MISS'],
        default='CORRECT REJECTION',
    )
    return model_data
| 2.703125 | 3 |
MT_2018_PPGIA/AULA_05_06/LE_04/resolutions/k-nn/pre-processing-001.py | MWTA/Text-Mining | 0 | 12771825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Software: Pre-Processing-01
Description:
- Processa todos arquivos, dos diretórios neg_ e pos_ do dataset que contem os registros
já separados em arquivos .TXT;
- Converte todos para caracteres minusculos;
- Remove caracteres numéricos;
- Remove caracteres especiais;
- Remove stopwords usando uma lista de stopwords;
- Stemmer reduz a raiz das palavras;
- Remove palavras que contenha menos de 4 catacteres.
- Adicionar rótulos: "1" positivos ou "0" negativo para cada arquivo.
- Remove lixo gerado;
- Juntar todos os dados em um único arquivo. Sendo assim, um novo dataser é gerado com o
dados para treinamento contendo n linhas "documentos" e 2 colunas, sendo que na 1ª coluna
as palavras e na 2ª coluna o rótulo;
- Pega o dataset de treinamento gerado e com base nele gera um novo dataset para teste
"sem rótulos".
Register: 21/05/2018
Authors:
RodriguesFAS;
"coloquem nome de vocês :)"
Email:
<<EMAIL>>
"""
import re
import os
import csv
import sys
import nltk
reload(sys)
sys.setdefaultencoding('utf-8')
# --- configuration: paths, labels and debug flag ---
path_root = '/home/rodriguesfas/Workspace/k-nn/data/'
path_dataset = 'polarity-detection-200-reviews/'
path_neg = 'neg_100/'
path_pos = 'pos_100/'
path_out = 'out/'
file_stopwords = 'stopwords/stopwords.txt'
path_list_stopwords = path_root + file_stopwords
# input directories with one .txt review per file
files_neg = path_root + path_dataset + path_neg
files_pos = path_root + path_dataset + path_pos
# generated datasets: labelled training data and unlabelled test data
path_train = path_root + path_out + 'generated-polarity-train.csv'
path_test = path_root + path_out + 'generated-polarity-test.csv'
# class labels appended to each document
label_pos = '1'
label_neg = '0'
# when True, LOG() prints progress messages
TEST = True
def LOG(text):
    """Print a progress message when the global TEST flag is enabled."""
    if TEST:  # idiomatic truth test (was the anti-pattern `TEST is True`)
        print(">> " + text)
def remove_numbers(document):
    """Replace every digit, '-' and '|' character with a space and trim.

    (Note the character class deliberately matches '-' and '|' as well as
    the digits 0-9.)
    """
    LOG('Remove numbers..')
    cleaned = re.sub('[-|0-9]', ' ', document)
    return cleaned.strip()
def remove_special_characters(document):
    """Strip punctuation-like symbols, join all lines into one, and collapse
    runs of whitespace into single spaces."""
    LOG('Remove characters special..')
    stripped = re.sub(r'[-_./?!,`":;=+()<>|@#$%&*^~\']', '', document)
    one_line = "".join(stripped.splitlines())
    return ' '.join(one_line.split()).strip()
def clean_document(document):
    """Normalise the stringified (text, label) tuple: drop tuple punctuation
    and quotes, and prefix each label digit with ', '."""
    LOG('Cleaning content..')
    # applied in order: commas are removed *before* the label digits insert
    # new ones, exactly like the original chained replace() calls
    replacements = [('(', ''), (')', ''), ("'", ''), (',', ''), ('\\n', ''),
                    ('0', ', 0'), ('1', ', 1'), ('[', ''), (']', '')]
    for old, new in replacements:
        document = document.replace(old, new)
    return document
def remove_very_small_words(document):
    """Drop every word of one to three characters and trim the result."""
    short_word = re.compile(r'\b\w{1,3}\b')
    return short_word.sub('', document).strip()
def remove_stopwords(document):
    """Remove stopwords from `document` using the word list on disk.

    Bug fix: previously the stopword file's whole content was used as one
    big string, so `word in list_stopwords` performed a *substring* test and
    dropped any word contained inside any stopword. The file is now split
    into a set of whole words for exact, O(1) membership tests. Also closes
    the file handle (it used to leak) and drops the dead
    `words_filtered = document[:]` assignment.
    """
    LOG('Removing stopwords..')
    with open(path_list_stopwords) as handle:
        stopwords = set(handle.read().split())
    kept = [word for word in document.split() if word not in stopwords]
    return " ".join(kept)
def stemmer(document):
    """Reduce every word of `document` to its stem using NLTK's RSLP stemmer
    and rejoin the stems with single spaces."""
    LOG('Stemming..')
    # NOTE: the local name shadows the function's own name; harmless here
    # because the function never calls itself.
    stemmer = nltk.stem.RSLPStemmer()
    words = []
    for word in document.split():
        words.append(stemmer.stem(word))
    return (" ".join(words))
def load_document(path, label):
    """Pre-process every .txt review in `path` and append it, labelled, to
    the training CSV at `path_train`.

    Pipeline: lowercase -> remove numbers -> remove special characters ->
    remove stopwords -> stem -> drop very short words -> attach `label`.

    :param path: directory containing one review per .txt file
    :param label: class label string ('1' positive, '0' negative)
    """
    dataset_train = open(path_train, 'a+')
    for file in os.listdir(path):
        LOG('Processing data file: ' + file)
        if file.endswith('.txt'):
            document = open(path + file).read().lower()
            document = remove_numbers(document)
            document = remove_special_characters(str(document))
            document = remove_stopwords(document)
            document = stemmer(document)
            document = remove_very_small_words(str(document))
            LOG('Labeling content..')
            # build a (text, label) tuple, then flatten its repr to
            # "text, label" via clean_document
            document = str(document), label
            document = clean_document(str(document))
            dataset_train.write(document + '\n')
    dataset_train.close()
    LOG('Dataset Train generated!')
def generater_test(path):
    """Create the unlabelled test CSV by stripping the ', 0' / ', 1' labels
    from the generated training file at `path`.

    (The function name keeps its original spelling to avoid breaking callers.)
    """
    LOG('Generating dataset from test..')
    dataset_test = open(path_test, 'a+')
    with open(path) as documents:
        for document in documents:
            document = document.replace(', 0', '').replace(', 1', '')
            dataset_test.write(document)
    dataset_test.close()
    LOG('Dataset Test generated!')
def main():
    """Run the full pre-processing pipeline: negative reviews, positive
    reviews, then derive the unlabelled test set from the training set."""
    LOG('Started!')
    LOG('Processing directory neg_ ...')
    load_document(files_neg, label_neg)
    LOG('Processing directory pos_ ...')
    load_document(files_pos, label_pos)
    generater_test(path_train)
    LOG('Finalized!')
# Run
if __name__ == '__main__':
main()
| 2.78125 | 3 |
circuitpython/sparkfun_alphanumeric_ex1.py | thess/qwiic_alphanumeric_py | 4 | 12771826 | <reponame>thess/qwiic_alphanumeric_py<filename>circuitpython/sparkfun_alphanumeric_ex1.py
#!/usr/bin/python3
# CircuitPython version
# Basic example of setting digits on a LED segment display.
# This example and library is meant to work with the Sparkfun qwiic_alphanumeric display.
# Author: <NAME>
# License: Public Domain
import time
# Import all board pins.
import board
import busio
# Import SparkX HT16K33 14x4 segment module
from sparkfun_alphanumeric import QwiicAlphanumeric
i2c = busio.I2C(board.SCL, board.SDA)
# Init and clear the display.
display = QwiicAlphanumeric(i2c)
display.clear()
# Can just print a number
display.print(42)
time.sleep(2)
display.print(53.2)
time.sleep(2)
# Or, can print a hexadecimal value
display.print_hex(0xFF23)
time.sleep(2)
# Or, print the time
display.print("12:30")
time.sleep(2)
display.colon = False
# Or, can set individual digits / characters
# Set the first character to '1':
display[0] = "1"
# Set the second character to '2':
display[1] = "2"
# Set the third character to 'A':
display[2] = "A"
# Set the forth character to 'B':
display[3] = "B"
time.sleep(2)
# Or, can even set the segments to make up characters
# 14-segment raw digits (same bit pattern expressed four equivalent ways:
# int, binary literal, tuple of bytes, list of bytes)
display.set_digit_raw(0, 0x2D3F)
display.set_digit_raw(1, 0b0010110100111111)
display.set_digit_raw(2, (0b00101101, 0b00111111))
display.set_digit_raw(3, [0x2D, 0x3F])
time.sleep(2)
# Cylon shift left/right testing: bounce a single '*' between the outermost
# digit positions, reversing the scroll direction at each end.
# NOTE: `dir` shadows the builtin of the same name.
display.print('*')
dir = True
pos = 3
for i in range(10):
    display.scroll(1 if dir else -1)
    display.show()
    if dir:
        pos -= 1
        if pos == 0:
            dir = not dir
    else:
        pos += 1
        if pos == 3:
            dir = not dir
    time.sleep(1)
time.sleep(2)
display.print("Fini")
| 3.46875 | 3 |
grok/knowledge_transfer.py | arpitvaghela/grok | 0 | 12771827 | import torch as th
from copy import deepcopy
def knowledge_transfer(net2: th.nn.Module, old_state_path: str):
    """Warm-start a transformer `net2` from a smaller saved checkpoint.

    Old weights are copied into the matching sub-slices of the new model's
    parameters; attention-head weights are re-partitioned when the per-head
    dimension changed; weights without an old counterpart get small random
    values (~N(0, 0.001^2)).

    :param net2: the new (target) model, modified in place via load_state_dict
    :param old_state_path: path to the pickled old model (loaded with th.load)
    """
    print(f"Copied weights from {old_state_path}")
    net1 = th.load(old_state_path)
    old_state = net1.state_dict()
    # geometry of the old/new models: layer count, head count, per-head dims
    n_layers_old = net1.transformer.n_layers
    n_head_old = net1.transformer.n_heads
    dk_old = net1.transformer.d_model // net1.transformer.n_heads
    dk_new = net2.transformer.d_model // net2.transformer.n_heads
    new_state = net2.state_dict()
    updated_state = deepcopy(new_state)
    for k in new_state:
        if k == "position_encoding" or k == "self_attn_mask":
            # fixed buffers: keep the new model's own versions
            continue
        elif "self_attn_norm" in k.split(".") or "ffn_norm" in k.split("."):
            # layer norms keep their fresh initialisation
            continue
        elif "attn_heads" in k.split("."):
            # attention-head weights: start from small noise, then overwrite
            # the region that the old checkpoint covers
            updated_state[k] = th.randn_like(new_state[k]) * 0.001
            weight_name = k.split(".")
            layer_idx = int(weight_name[3])
            if layer_idx < n_layers_old:
                head_idx = int(weight_name[6])
                # map the new head's row span onto (old head, row offset) pairs
                lst = [
                    (i // dk_old, i % dk_old)
                    for i in (head_idx * dk_new, head_idx * dk_new + dk_new)
                ]
                w = []
                if lst[0][0] == lst[1][0]:
                    # the new head's rows lie inside a single old head
                    if not lst[0][0] < n_head_old:
                        continue
                    weight_name_old = weight_name.copy()
                    weight_name_old[6] = str(lst[0][0])
                    k_old = ".".join(weight_name_old)
                    w.append(old_state[k_old][lst[0][1] : lst[1][1], :])
                else:
                    # the new head's rows span several old heads: stitch the
                    # partial first/last slices and the full heads in between
                    for prev_head_idx in range(lst[0][0], lst[1][0] + 1):
                        if not prev_head_idx < n_head_old:
                            continue
                        weight_name_old = weight_name.copy()
                        weight_name_old[6] = str(prev_head_idx)
                        k_old = ".".join(weight_name_old)
                        if prev_head_idx == lst[0][0]:
                            w_dash = old_state[k_old][lst[0][1] :, :]
                            # print(rng,w_dash.shape)
                            w.append(w_dash)
                        elif prev_head_idx == lst[1][0]:
                            w_dash = old_state[k_old][: lst[1][1], :]
                            # print(rng, w_dash.shape)
                            w.append(w_dash)
                        else:
                            w.append(old_state[k_old])
                if w:
                    final_old_w = th.cat(w)
                    # NOTE(review): indexing with a *list* of slices; a tuple
                    # is the canonical form -- verify the targeted torch
                    # version accepts this without deprecation warnings.
                    dice = [slice(dim) for dim in final_old_w.shape]
                    updated_state[k][dice] = final_old_w
        else:
            updated_state[k] = th.randn_like(new_state[k]) * 0.001
            if k in old_state:
                # copy the old tensor into the top-left corner of the new one
                dice = [slice(dim) for dim in old_state[k].shape]
                updated_state[k][dice] = old_state[k]
    net2.load_state_dict(updated_state)
| 2.1875 | 2 |
widget.py | wangzqzero/calculator | 0 | 12771828 | ## This Python file uses the following encoding: utf-8
from PyQt5.QtWidgets import *
from mainwindow import MainWindow
from call_standard import StandardPageWindow
import sys
if __name__ == '__main__':
    # Build the Qt application, show the main window and hand control to the
    # Qt event loop; exec_() returns the app's exit status when it quits.
    app = QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
| 1.671875 | 2 |
9 - statistical thinking in python - part 2/case study/4 - Hypothesis test: Are beaks deeper in 2012?.py | Baidaly/datacamp-samples | 0 | 12771829 | '''
Your plot of the ECDF and determination of the confidence interval make it pretty clear that the beaks of G. scandens on Daphne Major have gotten deeper. But is it possible that this effect is just due to random chance? In other words, what is the probability that we would get the observed difference in mean beak depth if the means were the same?
Be careful! The hypothesis we are testing is not that the beak depths come from the same distribution. For that we could use a permutation test. The hypothesis is that the means are equal. To perform this hypothesis test, we need to shift the two data sets so that they have the same mean and then use bootstrap sampling to compute the difference of means.
'''
# NOTE: bd_1975, bd_2012, draw_bs_reps and mean_diff are assumed to be
# provided by the surrounding (DataCamp) exercise environment.
# Compute mean of combined data set: combined_mean
combined_mean = np.mean(np.concatenate((bd_1975, bd_2012)))
# Shift the samples so both share the combined mean (the null hypothesis of
# equal means) while keeping each sample's own spread
bd_1975_shifted = bd_1975 - np.mean(bd_1975) + combined_mean
bd_2012_shifted = bd_2012 - np.mean(bd_2012) + combined_mean
# Get bootstrap replicates of shifted data sets
bs_replicates_1975 = draw_bs_reps(bd_1975_shifted, np.mean, 10000)
bs_replicates_2012 = draw_bs_reps(bd_2012_shifted, np.mean, 10000)
# Compute replicates of difference of means: bs_diff_replicates
bs_diff_replicates = bs_replicates_2012 - bs_replicates_1975
# Compute the p-value: fraction of null replicates at least as extreme as
# the observed difference of means
p = np.sum(bs_diff_replicates >= mean_diff) / len(bs_diff_replicates)
# Print p-value
print('p =', p)
| 2.78125 | 3 |
moi/menu/migrations/0001_initial.py | Ecotrust/F2S-MOI | 0 | 12771830 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 06:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the ``NavMenu`` model.

    Auto-generated by Django's makemigrations; edit with care.
    """
    initial = True
    dependencies = [
        ('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
    ]
    operations = [
        migrations.CreateModel(
            name='NavMenu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('footer', models.BooleanField(default=False, help_text='Select to display this menu in the footer rather than in the nav bar.')),
                ('order', models.PositiveSmallIntegerField(default=1, help_text='The order that this menu appears. Lower numbers appear first.')),
                ('url', models.ForeignKey(blank=True, help_text='Internal path to specific page', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
            ],
            options={
                'ordering': ('footer', 'order'),
            },
        ),
    ]
| 1.75 | 2 |
test/integration/src/py/test_ebrelayer_restart.py | salahgapr5/snif | 0 | 12771831 | import copy
import json
import logging
import time
import pytest
import test_utilities
from pytest_utilities import create_new_sifaddr
from pytest_utilities import generate_test_account, generate_minimal_test_account
from test_utilities import EthereumToSifchainTransferRequest
def test_ebrelayer_restart(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        source_ethereum_address: str,
        integration_dir,
):
    """Restarting ebrelayer while keeping its last-block db must not replay
    or drop events: the account's ceth balance is unchanged after new
    ethereum blocks are mined."""
    basic_transfer_request.ethereum_address = source_ethereum_address
    request, credentials = generate_minimal_test_account(
        base_transfer_request=basic_transfer_request,
        target_ceth_balance=10 ** 15
    )
    # balance snapshot taken before the relayer restart
    balance = test_utilities.get_sifchain_addr_balance(request.sifchain_address, request.sifnodecli_node, "ceth")
    logging.info("restart ebrelayer normally, leaving the last block db in place")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    time.sleep(5)
    # the balance must be exactly what it was before the restart
    assert balance == test_utilities.get_sifchain_addr_balance(request.sifchain_address, request.sifnodecli_node,
                                                               "ceth")
@pytest.mark.usefixtures("ensure_relayer_restart")
def test_ethereum_transactions_with_offline_relayer(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        smart_contracts_dir,
        source_ethereum_address,
        bridgebank_address,
        integration_dir,
):
    """Lock transactions sent while ebrelayer is down must be picked up and
    credited on sifchain once the relayer is restarted."""
    logging.info("shut down ebrelayer")
    test_utilities.get_shell_output(f"pkill -9 ebrelayer || true")
    logging.info("prepare transactions to be sent while ebrelayer is offline")
    amount = 9000
    # three fresh sifchain addresses, one per offline transfer
    new_addresses = list(map(lambda x: create_new_sifaddr(), range(3)))
    logging.debug(f"new_addresses: {new_addresses}")
    request: EthereumToSifchainTransferRequest = copy.deepcopy(basic_transfer_request)
    requests = list(map(lambda addr: {
        "amount": amount,
        "symbol": test_utilities.NULL_ADDRESS,
        "sifchain_address": addr
    }, new_addresses))
    json_requests = json.dumps(requests)
    logging.info("send ethereum transactions while ebrelayer is offline")
    yarn_result = test_utilities.run_yarn_command(
        " ".join([
            f"yarn --cwd {smart_contracts_dir}",
            "integrationtest:sendBulkLockTx",
            f"--amount {amount}",
            f"--symbol eth",
            f"--json_path {request.solidity_json_path}",
            f"--sifchain_address {new_addresses[0]}",
            f"--transactions \'{json_requests}\'",
            f"--ethereum_address {source_ethereum_address}",
            f"--bridgebank_address {bridgebank_address}"
        ])
    )
    logging.info(f"bulk result: {yarn_result}")
    logging.info("restart ebrelayer")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    # every offline transfer must now appear on chain with the right balance
    for a in new_addresses:
        test_utilities.wait_for_sif_account(a, basic_transfer_request.sifnodecli_node, 90)
        test_utilities.wait_for_sifchain_addr_balance(a, "ceth", amount, basic_transfer_request.sifnodecli_node, 90)
@pytest.mark.usefixtures("ensure_relayer_restart")
def test_sifchain_transactions_with_offline_relayer(
        basic_transfer_request: EthereumToSifchainTransferRequest,
        rowan_source_integrationtest_env_credentials: test_utilities.SifchaincliCredentials,
        rowan_source_integrationtest_env_transfer_request: EthereumToSifchainTransferRequest,
        rowan_source,
        smart_contracts_dir,
        source_ethereum_address,
        integration_dir,
):
    """Sifchain->Ethereum burns issued while ebrelayer is down must be relayed on restart.

    Sends ceth from a funded sifchain account to fresh Ethereum addresses
    while the relayer is stopped, then restarts it and waits for each
    Ethereum address to receive the amount.
    """
    basic_transfer_request.ethereum_address = source_ethereum_address
    # Well-funded test account that will send ceth to the new eth addresses.
    request, credentials = generate_test_account(
        basic_transfer_request,
        rowan_source_integrationtest_env_transfer_request,
        rowan_source_integrationtest_env_credentials,
        target_ceth_balance=10 ** 19,
        target_rowan_balance=10 ** 19,
    )
    logging.info("shut down ebrelayer")
    test_utilities.get_shell_output(f"pkill -9 ebrelayer || true")
    logging.info("prepare transactions to be sent while ebrelayer is offline")
    amount = 9000
    new_eth_addrs = test_utilities.create_ethereum_addresses(
        smart_contracts_dir,
        basic_transfer_request.ethereum_network,
        2
    )
    request.amount = amount
    request.sifchain_symbol = "ceth"
    request.ethereum_symbol = "eth"
    # Burn ceth to each fresh Ethereum address while no relayer is running.
    for a in new_eth_addrs:
        request.ethereum_address = a["address"]
        sifchain_balance = test_utilities.get_sifchain_addr_balance(request.sifchain_address, request.sifnodecli_node,
                                                                    "ceth")
        logging.info(f"sifchain balance is {sifchain_balance}, request is {request}")
        test_utilities.send_from_sifchain_to_ethereum(
            transfer_request=request,
            credentials=credentials
        )
        time.sleep(5)
    logging.info("restart ebrelayer")
    test_utilities.get_shell_output(f"{integration_dir}/sifchain_start_ebrelayer.sh")
    test_utilities.advance_n_ethereum_blocks(test_utilities.n_wait_blocks, request.smart_contracts_dir)
    for a in new_eth_addrs:
        request.ethereum_address = a["address"]
        test_utilities.wait_for_eth_balance(request, amount, 600)
| 1.90625 | 2 |
iwg_blog/taskapp/__init__.py | razortheory/who-iwg-webapp | 0 | 12771832 | <gh_stars>0
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# Fall back to the dev settings module when the environment has not already
# configured Django (e.g. when the worker is started standalone).
if not settings.configured:
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.dev')
# Project-wide Celery application.
app = Celery('iwg_blog')
# Read Celery configuration from Django settings and register tasks from
# every installed app (each app's tasks module is auto-discovered).
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 1.710938 | 2 |
setup.py | stuart-nolan/mpdb | 0 | 12771833 | import setuptools
import os, sys
sys.path.append(os.getcwd())
import mpdb
with open("README.md", "r") as fh:
long_description = fh.read()
try:
from Cython.Build import cythonize
cem = cythonize("mpdb/eq/*.pyx")
except:
cem = []
setuptools.setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
],
description="material property data base",
include_package_data=True,
packages=setuptools.find_packages(),
#packages = ['.'],
install_requires=[
'appdirs',
'tabulate',],
extras_require={
"libreOffice":['uno',],
"coolprop":['CoolProp',],
"yawsImport":["requests",],
"scipy": ["scipy",],
"cython": ["Cython",],
},
ext_modules=cem,
long_description=long_description,
long_description_content_type="text/markdown",
name="mpdb",
url="https://github.com/stuart-nolan/mpdb.git",
version=mpdb.__version__,
zip_safe = False
)
"""
https://acp.copernicus.org/articles/15/4399/2015/acp-15-4399-2015-supplement.zip
"""
| 1.609375 | 2 |
resources/Wireshark/WiresharkDissectorFoo/tools/generate-nl80211-fields.py | joshis1/C_Programming | 2 | 12771834 | #!/usr/bin/env python
# Parses the nl80211.h interface and generate appropriate enums and fields
# (value_string) for packet-netlink-nl80211.c
#
# Copyright (c) 2017, <NAME> <<EMAIL>>
# Copyright (c) 2018, <NAME> <<EMAIL>>
#
# Wireshark - Network traffic analyzer
# By Ger<NAME> <<EMAIL>>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
#
# To update the dissector source file, run this from the source directory:
#
# python tools/generate-nl80211-fields.py --update
#
import argparse
import re
import requests
import sys
# Begin of comment, followed by the actual array definition
HEADER = "/* Definitions from linux/nl80211.h {{{ */\n"
FOOTER = "/* }}} */\n"

# Enums to extract from the header file.  Entries with (None, None, None)
# fall back to the defaults chosen in make_hfi().
# Note: a duplicate 'nl80211_txq_stats' entry was removed; Python dicts keep
# only the last occurrence, so behaviour is unchanged.
EXPORT_ENUMS = {
    # 'enum_name': ('field_name', field_type', 'field_blurb')
    'nl80211_commands': ('Command', 'FT_UINT8', '"Generic Netlink Command"'),
    'nl80211_attrs': (None, None, None),
    'nl80211_iftype': (None, None, None),
    'nl80211_sta_flags': (None, None, None),
    'nl80211_rate_info': (None, None, None),
    'nl80211_sta_bss_param': (None, None, None),
    'nl80211_sta_info': (None, None, None),
    'nl80211_tid_stats': (None, None, None),
    'nl80211_mpath_info': (None, None, None),
    'nl80211_mntr_flags': (None, None, None),
    'nl80211_bss': (None, None, None),
    'nl80211_key_attributes': (None, None, None),
    'nl80211_survey_info': (None, None, None),
    'nl80211_frequency_attr': (None, None, None),
    'nl80211_tx_rate_attributes': (None, None, None),
    'nl80211_attr_cqm': (None, None, None),
    'nl80211_key_default_types': (None, None, None),
    'nl80211_mesh_setup_params': (None, None, None),
    'nl80211_meshconf_params': (None, None, None),
    'nl80211_if_combination_attrs': (None, None, None),
    'nl80211_rekey_data': (None, None, None),
    'nl80211_sta_wme_attr': (None, None, None),
    'nl80211_pmksa_candidate_attr': (None, None, None),
    'nl80211_sched_scan_plan': (None, None, None),
    'nl80211_bss_select_attr': (None, None, None),
    'nl80211_nan_func_attributes': (None, None, None),
    'nl80211_nan_match_attributes': (None, None, None),
    'nl80211_txq_stats': (None, None, None),
    'nl80211_band_attr': (None, None, None),
    'nl80211_bitrate_attr': (None, None, None),
    'nl80211_reg_rule_attr': (None, None, None),
    'nl80211_txq_attr': (None, None, None),
    'nl80211_band_iftype_attr': (None, None, None),
    'nl80211_dfs_state': (None, None, None),
    'nl80211_wmm_rule': (None, None, None),
    'nl80211_sched_scan_match_attr': (None, None, None),
    'nl80211_chan_width': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_channel_type': ('Attribute Value', 'FT_UINT32', None),
    'plink_actions': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_reg_initiator': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_reg_type': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_auth_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_key_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_bss_status': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_bss_scan_width': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_mfp': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_ps_state': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_tx_power_setting': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_plink_state': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_tdls_operation': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_user_reg_hint_type': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_connect_failed_reason': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_mesh_power_mode': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_acl_policy': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_radar_event': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_crit_proto_id': ('Attribute Value', 'FT_UINT16', None),
    'nl80211_smps_mode': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_sta_p2p_ps_status': ('Attribute Value', 'FT_UINT8', None),
    'nl80211_timeout_reason': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_external_auth_action': ('Attribute Value', 'FT_UINT32', None),
    'nl80211_dfs_regions': ('Attribute Value', 'FT_UINT8', None),
}
# File to be patched
SOURCE_FILE = "epan/dissectors/packet-netlink-nl80211.c"
# URL where the latest version can be found
URL = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/linux/nl80211.h"
def make_enum(name, values, expressions, indent):
    """Render a C enum declaration mirroring a kernel nl80211 enum.

    Every enumerator is prefixed with ``WS_``; explicit initializers that
    mention ``NL80211`` are rewritten to reference the ``WS_`` copies.
    """
    lines = ['enum ws_%s {' % name]
    for member, init in zip(values, expressions):
        if init and 'NL80211' in init:
            init = 'WS_%s' % init
        if init:
            lines.append('%sWS_%s = %s,' % (indent, member, init))
        else:
            lines.append('%sWS_%s,' % (indent, member))
    lines.append('};')
    return '\n'.join(lines) + '\n'
def make_value_string(name, values, indent):
    """Render a value_string array plus its _ext wrapper for one enum."""
    align = 40
    parts = ['static const value_string ws_%s_vals[] = {\n' % name]
    for member in values:
        # Pad the "{ WS_NAME," column so the string labels line up.
        entry = ('{ WS_%s,' % member).ljust(align - 1)
        parts.append('%s%s "%s" },\n' % (indent, entry, member))
    parts.append('%s{ 0, NULL }\n' % indent)
    parts.append('};\n')
    parts.append('static value_string_ext ws_%s_vals_ext =' % name)
    parts.append(' VALUE_STRING_EXT_INIT(ws_%s_vals);\n' % name)
    return ''.join(parts)
def make_hfi(name, indent):
    """Render the header_field_info (hfi) declaration for one enum.

    Display name, field type and blurb come from EXPORT_ENUMS (with
    defaults); the filter abbreviation is the enum name without its
    ``nl80211_`` prefix.
    """
    (field_name, field_type, field_blurb) = EXPORT_ENUMS.get(name)
    field_abbrev = name
    # Fill in default values
    if not field_name:
        field_name = 'Attribute Type'
    if not field_type:
        field_type = 'FT_UINT16'
    if not field_blurb:
        field_blurb = 'NULL'
    # Special treatment of already existing field names
    rename_fields = {
        'nl80211_attrs': 'nl80211_attr_type',
        'nl80211_commands': 'nl80211_cmd'
    }
    if rename_fields.get(name):
        field_abbrev = rename_fields[name]
    # Strip the literal "nl80211_" prefix.  The previous str.lstrip() call
    # removed *characters* from the set {n, l, 8, 0, 1, 2, _}, which mangled
    # names such as nl80211_nan_func_attributes -> "an_func_attributes".
    if field_abbrev.startswith('nl80211_'):
        field_abbrev = field_abbrev[len('nl80211_'):]
    code = 'static header_field_info hfi_%s NETLINK_NL80211_HFI_INIT =\n' % name
    code += indent + '{ "%s", "nl80211.%s", %s, BASE_DEC | BASE_EXT_STRING,\n' % \
            (field_name, field_abbrev, field_type)
    code += indent + '  VALS_EXT_PTR(&ws_%s_vals_ext), 0x00, %s, HFILL };\n' % (name, field_blurb)
    return code
def make_ett_defs(name, indent):
    """Render the ett (subtree expansion state) variable for one enum.

    The indent argument is accepted for symmetry with the other make_*
    helpers but is not used here.
    """
    return 'static gint ett_%s = -1;' % name
def make_hfi_init(name, indent):
    """Render the hfi registration entry (a doubly indented array member)."""
    return '%s%s&hfi_%s,' % (indent, indent, name)
def make_ett(name, indent):
    """Render the ett registration entry (a doubly indented array member)."""
    return '%s%s&ett_%s,' % (indent, indent, name)
class EnumStore(object):
    """Accumulates the members of one parsed C enum.

    Feeding the raw body text of an ``enum { ... }`` block yields parallel
    lists of enumerator names and their (optional) initializer expressions.
    Sentinel members (NUM_*, *_LAST, *_AFTER_LAST, __*_NUM) terminate the
    scan, and aliases of earlier members are dropped.
    """

    __RE_ENUM_VALUE = re.compile(
        r'\s+?(?P<value>\w+)(?:\ /\*.*?\*\/)?(?:\s*=\s*(?P<expression>.*?))?(?:\s*,|$)',
        re.MULTILINE | re.DOTALL)

    def __init__(self, name, values):
        self.name = name
        self.values = []
        self.expressions = []
        self.active = True
        self.parse_values(values)

    def parse_values(self, values):
        """Scan the enum body text, recording member names and initializers."""
        for match in self.__RE_ENUM_VALUE.finditer(values):
            member, initializer = match.groups()
            # Stop at the counting/terminator sentinels the kernel appends.
            if member.startswith('NUM_'):
                break
            if member.endswith('_AFTER_LAST') or member.endswith('_LAST'):
                break
            if member.startswith('__') and member.endswith('_NUM'):
                break
            # Skip aliases that merely rename an earlier member.
            if initializer and initializer in self.values:
                continue
            self.values.append(member)
            self.expressions.append(initializer)

    def finish(self):
        """Return the collected (name, member names, initializer expressions)."""
        return self.name, self.values, self.expressions
# Matches an entire "enum NAME { ... };" block; the body is captured for
# member-level parsing by EnumStore.
RE_ENUM = re.compile(
    r'enum\s+?(?P<enum>\w+)\s+?\{(?P<values>.*?)\}\;',
    re.MULTILINE | re.DOTALL)
# Matches C block comments so they can be stripped before enum parsing.
RE_COMMENT = re.compile(r'/\*.*?\*/', re.MULTILINE | re.DOTALL)
def parse_header(content):
    """Extract the enums of interest from the nl80211.h source text.

    Returns a list of (name, values, expressions) tuples, one per enum
    listed in EXPORT_ENUMS, in the order they appear in the header.
    """
    # Drop all C comments first so they cannot confuse the enum regex.
    stripped = re.sub(RE_COMMENT, '', content)
    collected = []
    for match in RE_ENUM.finditer(stripped):
        enum_name = match.group('enum')
        body = match.group('values')
        if enum_name in EXPORT_ENUMS:
            collected.append(EnumStore(enum_name, body).finish())
    return collected
def parse_source():
    """
    Reads the source file and tries to split it in the parts before, inside and
    after the block.

    Returns three (begin, block, end) tuples, one per HEADER..FOOTER region
    found in SOURCE_FILE.  Raises RuntimeError when the file does not contain
    exactly three generated regions.
    """
    begin, block, end = '', '', ''
    parts = []
    # Stages: 1 (before block), 2 (in block, skip), 3 (after block)
    stage = 1
    with open(SOURCE_FILE) as f:
        for line in f:
            if line == FOOTER and stage == 2:
                stage = 3  # End of block
            if stage == 1:
                begin += line
            elif stage == 2:
                block += line
            elif stage == 3:
                end += line
            if line == HEADER and stage == 1:
                stage = 2  # Begin of block
            if line == HEADER and stage == 3:
                stage = 2  # Begin of next code block
                # Each HEADER..FOOTER region yields its own triple; reset the
                # accumulators for the next region.
                parts.append((begin, block, end))
                begin, block, end = '', '', ''
    parts.append((begin, block, end))
    if stage != 3 or len(parts) != 3:
        raise RuntimeError("Could not parse file (in stage %d) (parts %d)" % (stage, len(parts)))
    return parts
# Command-line interface; the header can come from a local file, stdin ("-"),
# or an HTTP(S) URL (default: the latest kernel header).
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
                    help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
parser.add_argument("--indent", default=" " * 4,
                    help="indentation (use \\t for tabs, default 4 spaces)")
parser.add_argument("header_file", nargs="?", default=URL,
                    help="nl80211.h header file (use - for stdin or a HTTP(S) URL, "
                         "default %(default)s)")
def main():
    """Fetch/parse the header, regenerate the code blocks, then either print
    them to stdout or patch SOURCE_FILE in place (``--update``)."""
    args = parser.parse_args()
    indent = args.indent.replace("\\t", "\t")
    # Load the header text from a URL, stdin, or a local path.
    if any(args.header_file.startswith(proto) for proto in ('http:', 'https')):
        r = requests.get(args.header_file)
        r.raise_for_status()
        enums = parse_header(r.text)
    elif args.header_file == "-":
        enums = parse_header(sys.stdin.read())
    else:
        with open(args.header_file) as f:
            enums = parse_header(f.read())
    # Every enum listed in EXPORT_ENUMS must have been found in the header.
    assert len(enums) == len(EXPORT_ENUMS), \
        "Could not parse data, found %d/%d results" % \
        (len(enums), len(EXPORT_ENUMS))
    # Render the three generated regions of the dissector source.
    code_enums, code_vals, code_hfi, code_ett_defs, code_hfi_init, code_ett = '', '', '', '', '', ''
    for enum_name, enum_values, expressions in enums:
        code_enums += make_enum(enum_name, enum_values, expressions, indent) + '\n'
        code_vals += make_value_string(enum_name, enum_values, indent) + '\n'
        code_hfi += make_hfi(enum_name, indent) + '\n'
        code_ett_defs += make_ett_defs(enum_name, indent) + '\n'
        code_hfi_init += make_hfi_init(enum_name, indent) + '\n'
        code_ett += make_ett(enum_name, indent) + '\n'
    code_top = code_enums + code_vals + code_hfi + code_ett_defs
    code_top = code_top.rstrip("\n") + "\n"
    code = [code_top, code_hfi_init, code_ett]
    update = False
    if args.update:
        parts = parse_source()
        # Check if file needs update
        for (begin, old_code, end), new_code in zip(parts, code):
            if old_code != new_code:
                update = True
                break
        if not update:
            print("File is up-to-date")
            return
        # Update file
        with open(SOURCE_FILE, "w") as f:
            for (begin, old_code, end), new_code in zip(parts, code):
                f.write(begin)
                f.write(new_code)
                f.write(end)
        print("Updated %s" % SOURCE_FILE)
    else:
        for new_code in code:
            print(new_code)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
#
| 2.203125 | 2 |
project/number.py | rainbowhuanguw/electionguard-SDK | 0 | 12771835 | <gh_stars>0
import random
import hashlib
from typing import (
Sequence
)
# p: a 4096-bit prime modulus, stored as a wrapped decimal string literal;
# the embedded newlines are stripped before conversion to int.
LARGE_PRIME = (int(('''104438888141315250669175271071662438257996424904738378038423348328
3953907971553643537729993126875883902173634017777416360502926082946377942955704498
5420976148418252467735806893983863204397479111608977315510749039672438834271329188
1374801626975452234350528589881677721176191239277291448552115552164104927344620757
8961939840619466145806859275053476560973295158703823395710210329314709715239251736
5523840808458360487786673189314183384224438910259118847234330847012077719019445932
8662497991739135056466263272370300796422984915475619689061525228653308964318490270
6926081744149289517418249153634178342075381874131646013444796894582106870531535803
6662545796026324531037414525697939055519015418561732513850474148403927535855819099
5015804625681054267836812127850996052095762473794291460031064660979266501285839738
1435755902851312071248102599442308951327039250818892493767423329663783709190716162
0235296692173009397831714158082331468230007669177892861540060422814237337064629052
4377485454312723950024587358201266366643058386277816736954760301634424272959224454
4608279405999759391099775667746401633668308698186721172238255007962658564443858927
6348504157753488390520266757856948263869301753031434500465754608438799417919463132
99322976993405829119''').replace('\n', '')))
# q = 2^256 - 189: the small prime used for the exponent group Z_q.
SMALL_PRIME = pow(2, 256) - 189
def is_prime(num: int, k=5) -> bool:
    """
    implements Miller-Rabin algorithm to test the primality of a number
    :param num: a positive integer
    :param k: the number of random rounds; more rounds give higher accuracy.
        set default as 5
    :return: True if it's (probably) a prime, False if certainly composite
    """
    # Small inputs the random-witness loop below cannot handle.
    if num <= 1 or num == 4:
        return False
    if num <= 3:
        return True
    # Write num - 1 as d * 2^r with d odd.
    d = num - 1
    while d % 2 == 0:
        d //= 2
    # Every round must pass; one failed witness proves compositeness.
    return all(__miller_test(d, num) for _ in range(k))


def __miller_test(d: int, num: int) -> bool:
    """
    run one Miller-Rabin round with a random witness
    :param d: an odd number such that num - 1 == d * 2^r for some r >= 1
    :param num: the candidate being tested (num > 4)
    :return: True if the witness is consistent with num being prime
    """
    # Pick a random witness in [3..num-2].
    witness = 2 + random.randint(1, num - 4)
    x = pow(witness, d, num)
    if x == 1 or x == num - 1:
        return True
    # Keep squaring x: reaching num-1 passes the round; reaching 1 first
    # exposes a nontrivial square root of unity, so num is composite.
    while d != num - 1:
        x = (x * x) % num
        d *= 2
        if x == 1:
            return False
        if x == num - 1:
            return True
    return False
def equals(a, b) -> bool:
    """
    compares two values and check if their integer values are equal
    @input: two values a, b that are convertible with int()
    @output: True if int(a) == int(b), False otherwise
    """
    return int(a) == int(b)
def is_divisor(a, b) -> bool:
    """
    check if a is divisible by b (i.e. b is a divisor of a)

    Note: the docstring previously claimed "a is a divisor of b", but the
    implementation tests a % b == 0.  The documentation is corrected here
    rather than the behaviour so existing callers keep working.
    :param a: a positive integer (the dividend)
    :param b: a positive integer (the divisor); must be nonzero
    :return: True if b divides a evenly, False otherwise
    """
    a, b = int(a), int(b)
    return a % b == 0
def is_within_range(num, lower_bound: int, upper_bound: int) -> bool:
    """
    check if a number lies strictly between two bounds
    :param num: target number, coerced with int() before comparison
    :param lower_bound: exclusive lower bound
    :param upper_bound: exclusive upper bound
    :raises ValueError: if upper_bound < lower_bound
    :return: True when lower_bound < num < upper_bound, False otherwise
    """
    value = int(num)
    if upper_bound < lower_bound:
        raise ValueError("bounds are incorrect")
    return lower_bound < value < upper_bound
def is_within_set_zq(num) -> bool:
    """
    check if a number is within the set Z_q
    :param num: target number needs to be checked against
    :return: True if 0 <= num < q, False otherwise
    """
    # is_within_range uses exclusive bounds, so -1 makes 0 itself acceptable.
    return is_within_range(int(num), -1, SMALL_PRIME)
def is_within_set_zrp(num) -> bool:
    """
    check if a number is within the set Z*_r,p: 0 < num < p and num^q mod p == 1
    :param num: target number needs to be checked against
    :return: True if both conditions hold, False otherwise
    """
    value = int(num)
    # Short-circuit so the expensive modular exponentiation only runs for
    # values that are already inside (0, p).
    if not is_within_range(value, 0, LARGE_PRIME):
        return False
    return equals(pow(value, SMALL_PRIME, LARGE_PRIME), 1)
def mod_p(dividend) -> int:
    """
    reduce a value modulo p (the large prime)
    :param dividend: value to reduce, coerced with int()
    :return: dividend mod p
    """
    return int(dividend) % int(LARGE_PRIME)
def mod_q(dividend) -> int:
    """
    reduce a value modulo q (the small prime)
    :param dividend: value to reduce, coerced with int()
    :return: dividend mod q
    """
    return int(dividend) % int(SMALL_PRIME)
def multiply(*args, mod_num=1) -> int:
    """
    multiply all the arguments together, reducing modulo mod_num throughout
    :param mod_num: the modulus; each factor and the running product are
        reduced before every multiplication to keep intermediates small
    :return: the product of args modulo mod_num
    """
    running = 1
    for factor in args:
        if factor >= mod_num:
            factor = factor % mod_num
        if running >= mod_num:
            running = running % mod_num
        running *= factor
    return running % mod_num
def hash_elems(*a):
    """
    SHA-256 based hash of arbitrary elements, reduced into [0, q - 1).

    Elements are serialised to strings and fed to the digest separated by
    "|"; falsy values hash as "null" and nested sequences are hashed
    recursively.
    """
    digest = hashlib.sha256()
    digest.update("|".encode("utf-8"))
    for element in a:
        if not element:
            # Covers None, empty strings and empty sequences uniformly,
            # avoiding a recursive call for empty lists.  "null" is used
            # (rather than the Python-specific "None") to stay JSON-like.
            serialized = "null"
        elif isinstance(element, str):
            # Strings are also Sequences, so test them before the next branch.
            serialized = element
        elif isinstance(element, Sequence):
            # Lists, tuples and the like are crunched recursively.
            serialized = str(hash_elems(*element))
        else:
            serialized = str(element)
        digest.update((serialized + "|").encode("utf-8"))
    # Reduce into [0, q - 1).  (The upstream ElectionGuard code maps into
    # [1, q) instead, because zero-valued nonces are unhelpful; this
    # implementation deliberately keeps the plain modulo form.)
    return int.from_bytes(digest.digest(), byteorder="big") % (SMALL_PRIME - 1)
app/core/migrations/0002_auto_20200610_1443.py | Amirh-zahmatkesh/acc-back | 0 | 12771836 | # Generated by Django 3.0.6 on 2020-06-10 14:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 3.0.6): relaxes
    # Certificate.user to allow NULL and introduces the Category model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
    operations = [
        # Allow certificates without an owner; deleting the user still
        # cascades to its certificates.
        migrations.AlterField(
            model_name='certificate',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='certificates', to=settings.AUTH_USER_MODEL),
        ),
        # Self-referential category tree; (slug, parent) must be unique, so
        # the same slug may appear under different parents.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField()),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='core.Category')),
            ],
            options={
                'verbose_name_plural': 'categories',
                'unique_together': {('slug', 'parent')},
            },
        ),
    ]
| 1.6875 | 2 |
server.py | theSage21/evo-site | 2 | 12771837 | import bottle
from genetic import GA
# Single shared GA instance backing all requests, served by a Bottle app.
ga = GA()
app = bottle.Bottle()
@app.get('/')
def home():
    """Render the home page populated with the GA's current data."""
    data = ga.get_data()
    return bottle.template('home.html', **data)
@app.get('/mark-hit/<ident>')
def mark_hit(ident):
    """Record a hit (fitness signal) for the individual with this id.

    Returns None, so Bottle sends an empty response body.
    """
    ga.mark_hit(ident)
if __name__ == '__main__':
    app.run(debug=True)
| 1.96875 | 2 |
flod_sak/documents/epost/tilskudd_emails.py | Trondheim-kommune/Tilskuddsbasen | 0 | 12771838 | <gh_stars>0
# -*- coding: utf-8 -*-
import urllib
from flask import render_template, current_app, request
from celery import Celery
from flask.ext.restful.representations import json
from api import SoknaderFilterResource
from api.auth import get_user_by_id, get_organisation, get_person
from statemachine.soknad_sm import SoknadStateMachine
from utils.facets import AnyOfFacet
from utils.lz_string import LZString
# Email template paths.  Prefixes: SK = soker (applicant), SB = saksbehandler
# (caseworker), ORG = organisation, GK = godkjenner (approver).
SK_SOKNAD_ON_PURRING_TEMPLATE = u"epost/soker_email_soknad_purring.txt"
SK_SOKNAD_SENDT_INN_TEMPLATE = u"epost/soker_email_soknad_sendt_inn.txt"
SB_SOKNAD_SENDT_INN_TEMPLATE = u"epost/saksbehandler_email_soknad_sendt_inn.txt"
ORG_SOKNAD_SENDT_INN_TEMPLATE = u"epost/organisasjon_email_soknad_sendt_inn.txt"
SK_VEDTAK_FATTET_TEMPLATE = u"epost/soker_email_vedtak_fattet.txt"
SK_KLAGEVEDTAK_FATTET_TEMPLATE = u"epost/soker_email_klagevedtak_fattet.txt"
SK_RAPPORT_UNDERKJENT_TEMPLATE = u"epost/soker_email_rapport_underkjent.txt"
SK_RAPPORT_LEVERT_TEMPLATE = u"epost/soker_email_rapport_levert.txt"
SK_RAPPORT_PURRING_TEMPLATE = u"epost/soker_email_rapport_purring.txt"
SB_RAPPORT_LEVERT_TEMPLATE = u"epost/saksbehandler_email_rapport_levert.txt"
GK_VARSEL_FOR_TILSKUDDSORDNING_TEMPLATE = u'epost/godkjenner_email_varsel_for_tilskuddsordning.txt'
# Celery producer used to hand messages off to the email worker queue.
celery_app = Celery(broker='amqp://guest:guest@localhost:5672//')
def get_ansvarlige_saksbehandlere(soknad):
    """Resolve the user profiles of the caseworkers responsible for the
    application's grant scheme; unknown user ids are silently skipped."""
    resolved = []
    for ansvarlig in soknad.tilskuddsordning.saksbehandlere:
        profile = get_user_by_id(ansvarlig.saksbehandler_id, request.cookies)
        if profile is not None:
            resolved.append(profile)
    return resolved
def send_email_to_saksbehandlere_on_action_send_inn(soknad):
    """Notify every responsible caseworker that a new application was
    submitted; caseworkers without an email address are logged and skipped."""
    saksbehandlere = get_ansvarlige_saksbehandlere(soknad)
    message = render_template(SB_SOKNAD_SENDT_INN_TEMPLATE,
                              soknad_id=soknad.id,
                              tilskuddsordning_navn=soknad.tilskuddsordning.navn)
    if saksbehandlere is None or len(saksbehandlere) == 0:
        current_app.logger.warn(u"Kunne ikke sende epost til saksbehandlere ved innsendelse av søknad %s, "
                                u"ingen saksbehandlere funnet for denne tilskuddsordningen '%s'!"
                                % (soknad.id, soknad.tilskuddsordning.navn))
    else:
        # One task per recipient, dispatched asynchronously via Celery.
        for saksbehandler in saksbehandlere:
            if saksbehandler['profile']['email'] is not None:
                celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                                     queue='email',
                                     kwargs={'subject': u'Ny søknad om tilskudd mottatt',
                                             'sender': u'<EMAIL>',
                                             'recipients': [saksbehandler['profile']['email']],
                                             'body': message})
            else:
                current_app.logger.warn(u"Kunne ikke sende epost til saksbehandler %s ved innsendelse av søknad %s, "
                                        u"igen epost er satt!"
                                        % (saksbehandler['id'], soknad.id))
def send_email_to_soker_on_action_lever_rapport(soknad):
    """Confirm to the applicant that their report was received; logs a
    warning and sends nothing when the application has no email address."""
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kunne ikke sende epost til personen %s ved levering av rapport %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (soknad.person_id, soknad.id))
    else:
        message = render_template(SK_RAPPORT_LEVERT_TEMPLATE, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Bekreftelse på mottatt rapport',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
def send_email_to_saksbehandler_on_action_lever_rapport(soknad):
    """Notify the application's assigned caseworker that a report was
    delivered.  Links to the first report on the application
    (soknad.rapport[0]); missing caseworker or email is logged and skipped."""
    saksbehandler = get_user_by_id(soknad.saksbehandler_id, request.cookies)
    message = render_template(SB_RAPPORT_LEVERT_TEMPLATE, soknad_id=soknad.id, rapport_id=soknad.rapport[0].id)
    if saksbehandler is None:
        current_app.logger.warn(u"Kunne ikke sende epost til saksbehandler %s ved levering av rapport, "
                                u"ingen saksbehandler satt for denne søknaden '%s'!"
                                % (soknad.id, soknad.id))
    else:
        if saksbehandler['profile']['email'] is not None:
            celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                                 queue='email',
                                 kwargs={'subject': u'Ny rapport mottatt',
                                         'sender': u'<EMAIL>',
                                         'recipients': [saksbehandler['profile']['email']],
                                         'body': message})
        else:
            current_app.logger.warn(u"Kunne ikke sende epost til saksbehandler %s ved levering av rapport til søknad %s, "
                                    u"igen epost er satt!"
                                    % (saksbehandler['id'], soknad.id))
def send_email_to_soker_on_action_send_inn(soknad):
    """Confirm to the applicant that their application was received; logs a
    warning and sends nothing when the application has no email address."""
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kunne ikke sende epost til personen %s ved innsendelse av søknad %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (soknad.person_id, soknad.id))
    else:
        message = render_template(SK_SOKNAD_SENDT_INN_TEMPLATE, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Bekreftelse på mottatt søknad',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
def send_email_to_organisasjon_on_action_send_inn(soknad):
    """Confirm to the applicant's organisation that an application was
    submitted.

    Sends to whichever of the organisation's two registered addresses are
    set; logs a warning and sends nothing when neither is set.
    """
    organisation = get_organisation(soknad.organisation_id, request.cookies)
    # Collect only the addresses that are actually set.  Previously a None
    # placeholder could end up in the recipient list whenever exactly one of
    # the two addresses was registered.
    recipients = [addr for addr in (organisation.get('email_address'),
                                    organisation.get('local_email_address')) if addr]
    if not recipients:
        current_app.logger.warn(u"Kunne ikke sende epost til organisasjon %s ved innsendelse av søknad %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (organisation.get("id"), soknad.id))
    else:
        person = get_person(soknad.person_id, request.cookies)
        name = person.get('first_name', '') + ' ' + person.get('last_name', '')
        message = render_template(ORG_SOKNAD_SENDT_INN_TEMPLATE, aktor=organisation.get('name'), soker=name,
                                  prosjektnavn=soknad.prosjektnavn, levert_dato=soknad.levert_dato, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Bekreftelse på mottatt søknad',
                                     'sender': u'<EMAIL>',
                                     'recipients': recipients,
                                     'body': message})
def send_email_to_soker_on_action_fatt_vedtak(soknad):
    """Notify the applicant that a decision has been made on their
    application; logs a warning and sends nothing without an email address."""
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kunne ikke sende epost til personen %s om fattet vedtak for søknad %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (soknad.person_id, soknad.id))
    else:
        message = render_template(SK_VEDTAK_FATTET_TEMPLATE, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Søknaden din er behandlet',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
def send_email_to_soker_on_action_fatt_klagevedtak(soknad):
    """Notify the applicant that a decision on their appeal has been made;
    logs a warning and sends nothing without an email address."""
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kunne ikke sende epost til personen %s om fattet klagevedtak for søknad %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (soknad.person_id, soknad.id))
    else:
        message = render_template(SK_KLAGEVEDTAK_FATTET_TEMPLATE, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Klagen din er behandlet',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
def send_email_to_soker_on_action_underkjenn(soknad, saksbehandler_kommentar):
    """Notify the applicant that their report was rejected, including the
    caseworker's comment; logs and skips without an email address."""
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kunne ikke sende epost til personen %s ved underkjennelse av søknad %s, "
                                u"ingen epost adresse registrert på søknaden!"
                                % (soknad.person_id, soknad.id))
    else:
        message = render_template(SK_RAPPORT_UNDERKJENT_TEMPLATE, soknad_id=soknad.id, saksbehandler_kommentar=saksbehandler_kommentar)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Din rapport ble underkjent',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
def send_email_to_soker_on_soknad_purring(soknad):
    """Send the applicant a reminder to respond to the decision.

    Returns True when a reminder was queued, False when the application has
    no email address (callers use this to record whether a reminder went out).
    """
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kan ikke sende epost ved purring på svar på søknad %s, ingen epost-adresse registrert på søknaden!" % soknad.id)
        return False
    else:
        message = render_template(SK_SOKNAD_ON_PURRING_TEMPLATE, soknad_id=soknad.id)
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Purring på svar på vedtak',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
        return True
def send_email_to_soker_on_rapport_purring(soknad):
    """Send the applicant a reminder to deliver their report.

    Includes the report deadline from the most recent adopted decision.
    Returns True when queued, False when no email address is registered.
    """
    if soknad.epost is None or soknad.epost == '':
        current_app.logger.warn(u"Kan ikke sende epost ved purring på rapport for søknad %s, ingen epost-adresse registrert på søknaden!" % soknad.id)
        return False
    else:
        message = render_template(SK_RAPPORT_PURRING_TEMPLATE, soknad_id=soknad.id, rapportfrist=soknad.nyeste_fattet_vedtak().rapportfrist.strftime('%d-%m-%Y'))
        celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                             queue='email',
                             kwargs={'subject': u'Purring på rapport',
                                     'sender': u'<EMAIL>',
                                     'recipients': [soknad.epost],
                                     'body': message})
        return True
def send_notification_email_to_godkjenner_for_tilskuddsordning(tilskuddsordning, saksbehandler):
    """Notify the approver ("godkjenner") of a grant scheme that applications
    await their decision.

    Returns a (success, reason) tuple: (True, None) when the email task was
    queued, otherwise (False, <human-readable reason>) when the scheme has no
    approver or the approver has no email address.
    """
    godkjenner = get_user_by_id(tilskuddsordning.godkjenner_id, request.cookies)

    def get_godkjenner_tilskuddsordning_filter():
        # Search filter (this scheme + the two "awaiting decision" statuses),
        # serialized so it can be embedded in the email via the template.
        facets = [
            AnyOfFacet(name="Tilskuddsordning",
                       values=SoknaderFilterResource.transfrom_to_facet_values(
                           [tilskuddsordning],
                           "navn"
                       )),
            AnyOfFacet(name="Status",
                       values=SoknaderFilterResource.transfrom_to_facet_values(
                           [SoknadStateMachine.s_til_vedtak.id, SoknadStateMachine.s_til_klagevedtak.id]
                       ))
        ]
        return [facet.to_json() for facet in facets]

    if godkjenner is None:
        current_app.logger.warn(u"Kunne ikke sende epost til godkjenner %s, "
                                u"ingen godkjenner satt for denne tilskuddsordningen '%s'!"
                                % (tilskuddsordning.godkjenner_id, tilskuddsordning.id))
        return False, 'Tilskuddsordningen mangler godkjenner.'
    else:
        filter_str = json.dumps(get_godkjenner_tilskuddsordning_filter())
        # LZString-compress and URL-encode the filter so it survives as a
        # query parameter in the link rendered into the email body.
        encoded_filter = LZString().compressToBase64(filter_str)
        encoded_filter = urllib.quote_plus(encoded_filter)
        message = render_template(GK_VARSEL_FOR_TILSKUDDSORDNING_TEMPLATE,
                                  filter=encoded_filter,
                                  saksbehandler=saksbehandler['profile']['full_name'],
                                  tilskuddsordning_navn=tilskuddsordning.navn)
        if godkjenner['profile']['email'] is not None:
            celery_app.send_task('celery_tasks.email_tasks.send_email_task',
                                 queue='email',
                                 kwargs={'subject': u'Tilskuddsordning "%s"' % tilskuddsordning.navn,
                                         'sender': u'<EMAIL>',
                                         'recipients': [godkjenner['profile']['email']],
                                         'body': message})
            return True, None
        else:
            # Typo fixed in the log message: "igen" -> "ingen".
            current_app.logger.warn(u"Kunne ikke sende epost til godkjenner %s, "
                                    u"ingen epost er satt!" % godkjenner['id'])
            return False, 'Godkjenner mangler epostadresse.'
lab_2/start.py | shmaginadi/2020-2-level-labs | 0 | 12771839 | <gh_stars>0
"""
Longest common subsequence implementation starter
"""
from lab_2 import main
if __name__ == "__main__":
    # Two short demo texts.  NOTE: the second line of each literal is
    # intentionally at column 0 -- it is inside the triple-quoted string.
    original_text = '''The horse is running.
It is fast.'''

    second_text = '''The cow is eating.
It is slow.'''

    # Tokenize each text line-by-line (see lab_2.main.tokenize_by_lines).
    original_tokens = main.tokenize_by_lines(original_text)
    print('Original tokens: ', original_tokens)

    second_tokens = main.tokenize_by_lines(second_text)
    print('Second tokens: ', second_tokens)

    plagiarism_threshold = 0.3

    # NOTE(review): zero_matrix is created but never used below -- presumably
    # a scaffolding step of the course template; confirm before removing.
    zero_matrix = main.create_zero_matrix(len(original_tokens), len(second_tokens))

    # The LCS is computed only for the first sentence of each text.
    lcs_matrix = main.fill_lcs_matrix(original_tokens[0], second_tokens[0])
    print('LCS matrix: ', lcs_matrix)

    lcs_length = main.find_lcs_length(original_tokens[0], second_tokens[0], plagiarism_threshold)
    print('LCS is', lcs_length)

    # Course-template convention: expose RESULT and assert on it.
    RESULT = lcs_length
    assert RESULT == lcs_length, "Not working"
| 2.984375 | 3 |
controller/front_controller.py | jvpersuhn/Certo | 0 | 12771840 | <gh_stars>0
from dao.front_dao import FrontDao, FrontEnd
class FrontController:
    """Thin controller that maps rows returned by FrontDao onto FrontEnd objects."""

    def __init__(self):
        # Data-access object that performs the actual database operations.
        self.dao = FrontDao()

    def select_all(self):
        """Return every record as a list of FrontEnd objects."""
        # Each DAO row is indexed positionally; only the first four columns
        # are mapped (same as the original loop-based implementation).
        return [FrontEnd(row[0], row[1], row[2], row[3])
                for row in self.dao.select_all()]

    def select_byId(self, id):
        """Return the single FrontEnd record with the given id."""
        row = self.dao.select_byId(id)
        return FrontEnd(row[0], row[1], row[2], row[3])

    def update(self, front : FrontEnd):
        """Persist changes to an existing record."""
        self.dao.update(front)

    def insert(self, front : FrontEnd):
        """Insert a new record; returns whatever the DAO returns."""
        return self.dao.insert(front)

    def delete(self, id):
        """Delete the record with the given id."""
        self.dao.delete(id)
| 2.4375 | 2 |
tests/test_doctests.py | doranid/pottery | 0 | 12771841 | <filename>tests/test_doctests.py
# --------------------------------------------------------------------------- #
# test_doctests.py #
# #
# Copyright © 2015-2021, <NAME>, original author. #
# All rights reserved. #
# --------------------------------------------------------------------------- #
import doctest
import importlib
import os
import unittest
from tests.base import TestCase # type: ignore
class DoctestTests(TestCase):  # pragma: no cover
    """Run the doctests of every module in the pottery package."""

    def _modules(self):
        """Yield each module imported from pottery/*.py."""
        tests_dir = os.path.dirname(__file__)
        source_dir = os.path.join(os.path.dirname(tests_dir), 'pottery')
        for filename in os.listdir(source_dir):
            if not filename.endswith('.py'):
                continue
            stem = os.path.splitext(filename)[0]
            yield importlib.import_module(f'pottery.{stem}')

    @unittest.skip('our doctests run too slowly')
    def test_doctests(self):
        """Assert that no module's doctests fail."""
        for module in self._modules():
            with self.subTest(module=module):
                assert not doctest.testmod(m=module).failed
| 2.5 | 2 |
netbox/extras/management/commands/runscript.py | markkuleinio/netbox | 0 | 12771842 | <filename>netbox/extras/management/commands/runscript.py<gh_stars>0
import json
import logging
import sys
import traceback
import uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from extras.api.serializers import ScriptOutputSerializer
from extras.choices import JobResultStatusChoices
from extras.context_managers import change_logging
from extras.models import JobResult
from extras.scripts import get_script
from utilities.exceptions import AbortTransaction
from utilities.utils import NetBoxFakeRequest
class Command(BaseCommand):
    """Management command that runs a custom NetBox script from the CLI,
    mirroring what the UI/API do (form validation, change logging, JobResult
    bookkeeping) without a real web request.
    """
    help = "Run a script in Netbox"

    def add_arguments(self, parser):
        """Register the CLI options accepted by this command."""
        parser.add_argument(
            '--loglevel',
            help="Logging Level (default: info)",
            dest='loglevel',
            default='info',
            choices=['debug', 'info', 'warning', 'error', 'critical'])
        parser.add_argument('--commit', help="Commit this script to database", action='store_true')
        parser.add_argument('--user', help="User script is running as")
        parser.add_argument('--data', help="Data as a string encapsulated JSON blob")
        parser.add_argument('script', help="Script to run")

    def handle(self, *args, **options):
        """Resolve the script and user, validate the input data, then execute
        the script inside a transaction while recording status on a JobResult.
        """

        def _run_script():
            """
            Core script execution task. We capture this within a subfunction to allow for conditionally wrapping it with
            the change_logging context manager (which is bypassed if commit == False).
            """
            try:
                with transaction.atomic():
                    script.output = script.run(data=data, commit=commit)
                    job_result.set_status(JobResultStatusChoices.STATUS_COMPLETED)

                    if not commit:
                        # Dry run: force a rollback of everything the script changed.
                        raise AbortTransaction()

            except AbortTransaction:
                script.log_info("Database changes have been reverted automatically.")

            except Exception as e:
                stacktrace = traceback.format_exc()
                script.log_failure(
                    f"An exception occurred: `{type(e).__name__}: {e}`\n```\n{stacktrace}\n```"
                )
                script.log_info("Database changes have been reverted due to error.")
                logger.error(f"Exception raised during script execution: {e}")
                job_result.set_status(JobResultStatusChoices.STATUS_ERRORED)

            finally:
                # Persist the script's captured log output regardless of outcome.
                job_result.data = ScriptOutputSerializer(script).data
                job_result.save()

            logger.info(f"Script completed in {job_result.duration}")

        # Params
        script = options['script']
        loglevel = options['loglevel']
        commit = options['commit']

        try:
            data = json.loads(options['data'])
        except TypeError:
            # --data was not supplied (json.loads(None) raises TypeError).
            data = {}

        module, name = script.split('.', 1)

        # Take user from command line if provided and exists, otherwise fall
        # back to the first superuser.
        if options['user']:
            try:
                user = User.objects.get(username=options['user'])
            except User.DoesNotExist:
                user = User.objects.filter(is_superuser=True).order_by('pk')[0]
        else:
            user = User.objects.filter(is_superuser=True).order_by('pk')[0]

        # Setup logging to Stdout
        # Fix: this is a plain %-style logging format string; the stray f
        # prefix was misleading (there are no interpolation fields).
        formatter = logging.Formatter('[%(asctime)s][%(levelname)s] - %(message)s')
        stdouthandler = logging.StreamHandler(sys.stdout)
        stdouthandler.setLevel(logging.DEBUG)
        stdouthandler.setFormatter(formatter)

        logger = logging.getLogger(f"netbox.scripts.{module}.{name}")
        logger.addHandler(stdouthandler)

        try:
            logger.setLevel({
                'critical': logging.CRITICAL,
                'debug': logging.DEBUG,
                'error': logging.ERROR,
                'fatal': logging.FATAL,
                'info': logging.INFO,
                'warning': logging.WARNING,
            }[loglevel])
        except KeyError:
            raise CommandError(f"Invalid log level: {loglevel}")

        # Get the script
        script = get_script(module, name)()

        # Parse the parameters
        form = script.as_form(data, None)

        script_content_type = ContentType.objects.get(app_label='extras', model='script')

        # Create the job result.
        # Fix: attribute the job to the user resolved above (previously this
        # always used the first superuser, ignoring --user, while the fake
        # request below already used the resolved user).
        job_result = JobResult.objects.create(
            name=script.full_name,
            obj_type=script_content_type,
            user=user,
            job_id=uuid.uuid4()
        )

        request = NetBoxFakeRequest({
            'META': {},
            'POST': data,
            'GET': {},
            'FILES': {},
            'user': user,
            'path': '',
            'id': job_result.job_id
        })

        if form.is_valid():
            job_result.status = JobResultStatusChoices.STATUS_RUNNING
            job_result.save()

            logger.info(f"Running script (commit={commit})")
            script.request = request

            # Execute the script. If commit is True, wrap it with the change_logging context manager to ensure we process
            # change logging, webhooks, etc.
            with change_logging(request):
                _run_script()
        else:
            logger.error('Data is not valid:')
            for field, errors in form.errors.get_json_data().items():
                for error in errors:
                    logger.error(f'\t{field}: {error.get("message")}')
            job_result.status = JobResultStatusChoices.STATUS_ERRORED
            job_result.save()
| 2.03125 | 2 |
cabby/evals/utils_test.py | googleinterns/cabby | 2 | 12771843 | <filename>cabby/evals/utils_test.py<gh_stars>1-10
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for RVS model evaluation utils.py'''
import unittest
from cabby.evals import utils
class UtilsTest(unittest.TestCase):
  """Unit tests for cabby.evals.utils.Evaluator."""

  def setUp(self):
    self.evaluator = utils.Evaluator()
    # Sample TSV fixture with five evaluation examples.
    self.test_eval_input = "cabby/evals/testdata/sample_test_evals.tsv"

  def assertListAlmostEqual(self, list1, list2, places=2):
    """Element-wise assertAlmostEqual over two equal-length lists.

    `places` is the number of decimal places compared (it is forwarded to
    unittest's assertAlmostEqual); the previous name `tol` wrongly
    suggested an absolute tolerance.
    """
    self.assertEqual(len(list1), len(list2))
    for a, b in zip(list1, list2):
      self.assertAlmostEqual(a, b, places)

  def testErrorDistances(self):
    """Error distances computed from the checked-in sample file."""
    actual_errors = self.evaluator.get_error_distances(
      self.test_eval_input)
    self.assertListAlmostEqual(
      actual_errors, [62259.67, 58923.88, 0.0, 1854892.25, 69314.61])

  def testComputeMetrics(self):
    """Aggregate metrics over a hand-made list of error distances."""
    actual_metrics = self.evaluator.compute_metrics([10, 50, 20, 0, 30])
    self.assertAlmostEqual(actual_metrics.accuracy, 0.2, 2)
    self.assertAlmostEqual(actual_metrics.mean_distance, 22.0, 2)
    self.assertAlmostEqual(actual_metrics.median_distance, 20.0, 2)
    self.assertAlmostEqual(actual_metrics.max_error, 50.0, 2)
    self.assertAlmostEqual(actual_metrics.norm_auc, 0.07, 2)
if __name__ == "__main__":
unittest.main()
| 2.53125 | 3 |
python/slack_types/rtm_api/reaction_removed_event.py | warrenseine/slack-types | 0 | 12771844 | <filename>python/slack_types/rtm_api/reaction_removed_event.py<gh_stars>0
# To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = reaction_removed_event_from_dict(json.loads(json_string))
from dataclasses import dataclass
from typing import Optional, Any, TypeVar, Type, cast
T = TypeVar("T")
def from_str(x: Any) -> str:
    """Assert that *x* is a str and return it unchanged (quicktype helper)."""
    # Improvement: a message makes a failed assertion diagnosable.
    assert isinstance(x, str), f"expected str, got {type(x).__name__}"
    return x
def from_none(x: Any) -> Any:
    """Assert that *x* is None and return it (used for nullable JSON fields)."""
    # Improvement: a message makes a failed assertion diagnosable.
    assert x is None, f"expected None, got {x!r}"
    return x
def from_union(fs, x):
    """Return the result of the first converter in *fs* that accepts *x*.

    Raises AssertionError when no converter matches (same contract as the
    generated original, which used a bare `assert False`).
    """
    for f in fs:
        try:
            return f(x)
        # Fix: catch Exception instead of a bare except so KeyboardInterrupt
        # and SystemExit are not swallowed.
        except Exception:
            pass
    # Fix: raise explicitly -- `assert False` is stripped under `python -O`,
    # which would make this function silently return None.
    raise AssertionError(f"no converter matched {x!r}")
def to_class(c: Type[T], x: Any) -> dict:
    """Assert *x* is an instance of *c* and serialize it via its to_dict()."""
    # Improvement: a message makes a failed assertion diagnosable.
    assert isinstance(x, c), f"expected {c.__name__}, got {type(x).__name__}"
    return cast(Any, x).to_dict()
@dataclass
class Item:
    """The message/file a reaction was removed from (quicktype-generated schema)."""
    type: Optional[str] = None
    channel: Optional[str] = None
    ts: Optional[str] = None
    file: Optional[str] = None
    file_comment: Optional[str] = None

    @staticmethod
    def from_dict(obj: Any) -> 'Item':
        """Build an Item from a decoded-JSON dict; missing keys become None."""
        assert isinstance(obj, dict)

        def text(key):
            return from_union([from_str, from_none], obj.get(key))

        return Item(text("type"), text("channel"), text("ts"),
                    text("file"), text("file_comment"))

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (all keys always present)."""
        def text(value):
            return from_union([from_str, from_none], value)

        return {
            "type": text(self.type),
            "channel": text(self.channel),
            "ts": text(self.ts),
            "file": text(self.file),
            "file_comment": text(self.file_comment),
        }
@dataclass
class ReactionRemovedEvent:
    """Slack RTM `reaction_removed` event (quicktype-generated schema)."""
    type: Optional[str] = None
    user: Optional[str] = None
    reaction: Optional[str] = None
    item_user: Optional[str] = None
    item: Optional[Item] = None
    event_ts: Optional[str] = None

    @staticmethod
    def from_dict(obj: Any) -> 'ReactionRemovedEvent':
        """Build an event from a decoded-JSON dict; missing keys become None."""
        assert isinstance(obj, dict)

        def text(key):
            return from_union([from_str, from_none], obj.get(key))

        item = from_union([Item.from_dict, from_none], obj.get("item"))
        return ReactionRemovedEvent(text("type"), text("user"), text("reaction"),
                                    text("item_user"), item, text("event_ts"))

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (all keys always present)."""
        def text(value):
            return from_union([from_str, from_none], value)

        return {
            "type": text(self.type),
            "user": text(self.user),
            "reaction": text(self.reaction),
            "item_user": text(self.item_user),
            "item": from_union([lambda x: to_class(Item, x), from_none], self.item),
            "event_ts": text(self.event_ts),
        }
def reaction_removed_event_from_dict(s: Any) -> ReactionRemovedEvent:
    """Top-level entry point: decode a parsed-JSON dict into a ReactionRemovedEvent."""
    return ReactionRemovedEvent.from_dict(s)
def reaction_removed_event_to_dict(x: ReactionRemovedEvent) -> Any:
    """Top-level entry point: serialize a ReactionRemovedEvent to a JSON-ready dict."""
    return to_class(ReactionRemovedEvent, x)
| 2.515625 | 3 |
files/p2_metodo_nuevo.py | ce-box/CE3102-CALC_FX | 0 | 12771845 | from sympy import sympify, Symbol
x = Symbol('x')  # symbolic variable that all helpers below substitute into f
# Main driver of the method.
def metodo_nuevo(x_0, f, tol=1e-6, iter_max=100):
    """Iterative root finder for f(x) = 0 starting from x_0.

    f may be a sympy expression or a string (it is passed through sympify).
    Returns [x_k, error, k, D, A] where D is the list of iteration indices
    and A the error |f(x_k)| at each iteration.

    Fix: `tol` and `iter_max` now have defaults -- the demo calls in
    `__main__` pass only `iter_max`, which previously raised a TypeError
    for the missing `tol` argument.  Explicit callers are unaffected.
    """
    if tol <= 0:
        raise ValueError('Tolerancia no debe ser cero.')  # invalid tolerance

    f = sympify(f)
    k = 0
    x_k = x_0
    error = tol + 1  # force at least one iteration
    D = []  # iteration indices
    A = []  # error per iteration
    # Iterate until the residual drops below tol or the budget is exhausted.
    while error > tol and k < iter_max:
        x_k = calc_sgte(x_k, f)
        error = calc_error(x_k, f)
        D.append(k)
        A.append(error)
        k += 1
    return [x_k, error, k, D, A]
# Compute the residual used as the stopping criterion.
def calc_error(x_k, f):
    """Return |f(x_k)|, substituting x_k into the module-level symbol x."""
    return abs(f.subs(x, x_k))
# Compute the intermediate point z of the scheme (kept separate because the
# expression is long).
def calc_z(x_k, f):
    """Return z = x_k + 2*f(x_k)^2 / (f(x_k + f(x_k)) - f(x_k - f(x_k)))."""
    fx = f.subs(x, x_k)
    denominator = f.subs(x, x_k + fx) - f.subs(x, x_k - fx)
    return x_k + 2 * fx ** 2 / denominator
# Compute the next iterate x_{k+1}.
def calc_sgte(x_k, f):
    """Return the next iterate of the method, rounded to 6 decimal places.

    x_{k+1} = x_k - 2*f(x_k)*(f(z) - f(x_k)) / (f(x_k + f(x_k)) - f(x_k - f(x_k)))
    where z comes from calc_z.
    """
    fx = f.subs(x, x_k)
    z = calc_z(x_k, f)  # intermediate point of the scheme
    denominator = f.subs(x, x_k + fx) - f.subs(x, x_k - fx)
    next_x = x_k - 2 * fx * (f.subs(x, z) - fx) / denominator
    # Round to 6 decimals (the original comment said 5, the code uses 6).
    return round(next_x, 6)
if __name__ == '__main__':
    # Each test case supplies: initial value X0, the equation, and the
    # maximum number of iterations.
    # NOTE(review): `tol` is not passed in these calls -- verify the function
    # defines a default for it, otherwise they raise TypeError.
    # Test 1. Example (b)
    result = metodo_nuevo(1.5, 'x^3+4*x^2-10', iter_max=7)
    print(result)
    # Test 2. Example (h)
    result = metodo_nuevo(0.7, 'x^2-exp(x)-3*x+2', iter_max=7)
    print(result)
    # Test 3. Example (e)
    result = metodo_nuevo(2, 'x^3-10', iter_max=7)
    print(result)
    # Test 4. Example (c)
    result = metodo_nuevo(1, 'cos(x)-x', iter_max=6)
    print(result)
| 3.515625 | 4 |
dcd_traj_SaltDistribution.py | naotohori/cafysis | 2 | 12771846 | <reponame>naotohori/cafysis
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015/06/03
@author: <NAME>
'''
import sys
import math
import numpy as np
from cafysis.lib_f2py import py_distance2_hist
from cafysis.file_io.dcd import DcdFile
from cafysis.file_io.psf import PsfFile
# Expect exactly three arguments: input DCD trajectory, input PSF topology,
# and the output file path.
if len(sys.argv) != 4:
    print ('\n Usage: SCRIPT [input DCD] [input PSF] [output]\n')
    sys.exit(2)

# Read the full PSF topology up front.
psffile = PsfFile(sys.argv[2])
psffile.open_to_read()
psf = psffile.read_all()

dcd = DcdFile(sys.argv[1])

# Collect 0-based atom indices for phosphates (P) and each ion species.
id0_Mg = []
id0_P = []
id0_K = []
id0_Cl = []
for i,a in enumerate(psf.atoms):
    if a.atom_name.strip() == 'P':
        id0_P.append(i)
    elif a.atom_name.strip() == 'Mg':
        id0_Mg.append(i)
    elif a.atom_name.strip() == 'K':
        id0_K.append(i)
    elif a.atom_name.strip() == 'Cl':
        id0_Cl.append(i)

#print id0_Mg
#print id0_P

f_out = open(sys.argv[-1],'w')

# Histogram P-ion distances out to 20 (in the trajectory's length unit).
# Bin edges are stored as SQUARED distances (0.0, 0.1, ..., 20.0, squared)
# so each frame can be binned without taking square roots.
r_hist = 20.0
r2_hist = r_hist * r_hist
bins = [(x*0.1)**2 for x in range(200+1)]
nbin = len(bins) - 1
hist_Mg = np.zeros( (nbin,))
hist_K = np.zeros( (nbin,))
hist_Cl = np.zeros( (nbin,))
hist_P = np.zeros( (nbin,))
dcd.open_to_read()
dcd.read_header()

nstep = 0
# Accumulate P-ion squared-distance histograms over every frame.
# distance2_hist returns an (over-allocated) array of squared distances plus
# the count of valid entries -- see the reference docstring at the bottom of
# this file; only the first idx_* entries are histogrammed.
while dcd.has_more_data() :
    nstep += 1
    data = dcd.read_onestep_npF()

    if len(id0_Mg) > 0:
        dist2_Mg, idx_Mg, = py_distance2_hist.distance2_hist( data, id0_P, id0_Mg, r2_hist)
        H, B = np.histogram(dist2_Mg[:idx_Mg], bins=bins)
        hist_Mg = hist_Mg + H
    if len(id0_K) > 0:
        dist2_K, idx_K = py_distance2_hist.distance2_hist( data, id0_P, id0_K, r2_hist)
        H, B = np.histogram(dist2_K[:idx_K], bins=bins)
        hist_K = hist_K + H
    if len(id0_Cl) > 0:
        dist2_Cl, idx_Cl = py_distance2_hist.distance2_hist( data, id0_P, id0_Cl, r2_hist)
        H, B = np.histogram(dist2_Cl[:idx_Cl], bins=bins)
        hist_Cl = hist_Cl + H
    if len(id0_P) > 0:
        dist2_P, idx_P = py_distance2_hist.distance2_hist( data, id0_P, id0_P, r2_hist)
        H, B = np.histogram(dist2_P[:idx_P], bins=bins)
        hist_P = hist_P + H

dcd.close()

# Normalization: rho(r1<r<r2) = counts / (N_P * nstep * 4/3*pi * (r2^3 - r1^3)),
# i.e. counts per P atom per frame per spherical-shell volume.
# NOTE(review): factor is zero when the PSF contains no P atoms; the divisions
# below would then raise ZeroDivisionError -- confirm inputs always contain P.
factor = float(len(id0_P)) * float(nstep) * 4.0 / 3.0 * math.pi

f_out.write('#nstep: %i\n#\n' % (nstep,))
f_out.write('#r1 r2 hist_Mg rho(r)_Mg hist_K rho(r)_K hist_Cl rho(r)_Cl hist_P rho(r)_P\n')
f_out.write('## rho(r1<r<r2) = hist / (float(#P) * float(nstep) * 4.0 / 3.0 * math.pi * (r2**3 - r1**3))\n')

# NOTE(review): when K ions exist but Mg does not, the Mg columns are written
# as zeros; when neither Mg nor K is present, no rows are written at all --
# verify that is the intended behaviour.
if len(id0_Mg) > 0 and len(id0_K) > 0:
    for i in range(nbin):
        r1 = math.sqrt(bins[i])
        r2 = math.sqrt(bins[i+1])
        div_factor = factor * (r2**3 - r1**3)
        f_out.write('%f %f %i %g %i %g %i %g %i %g\n' % (r1,r2,
                     hist_Mg[i], hist_Mg[i] / div_factor,
                     hist_K[i], hist_K[i] / div_factor,
                     hist_Cl[i], hist_Cl[i] / div_factor,
                     hist_P[i], hist_P[i] / div_factor,) )
elif len(id0_K) > 0:
    for i in range(nbin):
        r1 = math.sqrt(bins[i])
        r2 = math.sqrt(bins[i+1])
        div_factor = factor * (r2**3 - r1**3)
        f_out.write('%f %f %i %g %i %g %i %g %i %g\n' % (r1,r2,
                     0, 0.0,
                     hist_K[i], hist_K[i] / div_factor,
                     hist_Cl[i], hist_Cl[i] / div_factor,
                     hist_P[i], hist_P[i] / div_factor,) )

f_out.close()
'''
dist2_m,idx_m = distance2_hist(xyz,id0_p,id0_m,r2_hist,[nmp,np,nm])
Wrapper for ``distance2_hist``.
Parameters
----------
xyz : input rank-2 array('d') with bounds (3,nmp)
id0_p : input rank-1 array('i') with bounds (np)
id0_m : input rank-1 array('i') with bounds (nm)
r2_hist : input float
Other Parameters
----------------
nmp : input int, optional
Default: shape(xyz,1)
np : input int, optional
Default: len(id0_p)
nm : input int, optional
Default: len(id0_m)
Returns
-------
dist2_m : rank-1 array('d') with bounds (np*nm)
idx_m : int
'''
| 2.125 | 2 |
nlp-scripts/word-counts.py | aJns/CS574_NLP | 0 | 12771847 | <filename>nlp-scripts/word-counts.py
import nltk
import csv
import os
data_dir = "../project-data"
def get_word_count(file_path):
    """Return the number of word tokens (regex \\w+) in a text file.

    Files that cannot be decoded are reported and counted as 0 words.
    """
    words = []
    try:
        with open(file_path, 'r') as handle:
            contents = handle.read()
        words = nltk.tokenize.RegexpTokenizer(r'\w+').tokenize(contents)
    except UnicodeDecodeError:
        print("Couldn't read ", file_path)
    return len(words)
def compile_path_list(path_list, rootdir):
    """Append the path of every *.txt file under rootdir (recursively) to path_list."""
    path_list.extend(
        os.path.join(dirpath, filename)
        for dirpath, _, filenames in os.walk(rootdir)
        for filename in filenames
        if filename.endswith('.txt')
    )
# Gather every .txt file under the data directory.
text_paths = []
compile_path_list(text_paths, data_dir)

# Map each file path to its word-token count.
wc_dict = {path: get_word_count(path) for path in text_paths}

# Write a single-row CSV: header = file paths, row = their word counts.
with open("word_counts.csv", "w") as f:
    writer = csv.DictWriter(f, wc_dict.keys())
    writer.writeheader()
    writer.writerow(wc_dict)
| 3.296875 | 3 |
looking_for_group/gamer_profiles/migrations/0005_auto_20180705_1559.py | andrlik/looking-for-group | 0 | 12771848 | <filename>looking_for_group/gamer_profiles/migrations/0005_auto_20180705_1559.py
# Generated by Django 2.0.7 on 2018-07-05 19:59
from django.db import migrations, models
# Auto-generated by Django (2.0.7) -- generated migrations should normally
# not be edited by hand.
class Migration(migrations.Migration):
    """Alter GamerProfile.private to default True, with updated help text."""

    dependencies = [
        ('gamer_profiles', '0004_gamerprofile_private'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gamerprofile',
            name='private',
            field=models.BooleanField(default=True, help_text='Only allow GMs and communities I belong/apply to see profile.'),
        ),
    ]
| 1.601563 | 2 |
test/python/TestRFileWrites.py | BuildJet/sharkbite | 30 | 12771849 | from testmodule import *
class TestRFileWrites(TestRunner):
    """Integration test: write 1000 rows to an Accumulo RFile via pysharkbite,
    then read them back sequentially and verify the keys."""

    def __init__(self):
        # Parse connection/test parameters from the command line.
        parser = ArgumentParser(description="This is an Apache Accummulo Python connector")

        parser.add_argument("-i", "--instance", dest="instance",
                            help="Apache Accumulo Instance Name", required=True)
        parser.add_argument("-z", "--zookeepers", dest="zookeepers",
                            help="Comma Separated Zookeeper List", required=True)
        parser.add_argument("-u", "--username", dest="username",
                            help="User to access Apache Accumulo", required=True)
        parser.add_argument("-p", "--password", dest="password",
                            help="Password to access Apache Accumulo. May also be supplied at the command line")
        parser.add_argument("-t", "--table", dest="table",
                            help="Table to create/update")
        parser.add_argument("-s", "--solocation", dest="shardobject",
                            help="Shared object Location")
        args = parser.parse_args()

        instance = args.instance
        zookeepers = args.zookeepers
        # NOTE(review): the right-hand side below was redacted by the dataset
        # ("<PASSWORD>") and is not valid Python -- it was presumably
        # `args.password`; restore before running.
        password = <PASSWORD>.password
        table = args.table
        dll = args.shardobject
        print ("Opening ",dll)
        # Load the sharkbite native shared object before importing its module.
        py = cdll.LoadLibrary(dll)
        import pysharkbite

    def writerfile(self):
        """Write keys row0..row999 (empty values) into a local RFile 'blah.rf'."""
        import pysharkbite
        rfile = pysharkbite.RFileOperations.openForWrite("blah.rf")
        for x in range(1000):
            key = pysharkbite.Key(row=str("row" + str(x)))
            value = pysharkbite.Value()
            kv = pysharkbite.KeyValue(key,value)
            rfile.append(kv)
        rfile.close()

    def mthd(self):
        """Write the RFile, then read it back and check row keys in order."""
        import pysharkbite
        pysharkbite.LoggingConfiguration.enableTraceLogger()
        self.writerfile()
        readrfile = pysharkbite.RFileOperations.sequentialRead("blah.rf")
        accrange = pysharkbite.Range()
        seekable = pysharkbite.Seekable(accrange)
        readrfile.seek( seekable)
        i = 0
        # NOTE(review): this assumes rows come back in insertion order
        # (row0, row1, row2, ...).  If the sequential read returns keys in
        # lexicographic order, "row10" sorts before "row2" and this assert
        # would fail -- confirm the reader's ordering guarantee.
        while readrfile.hasNext():
            assert( str("row" + str(i)) == readrfile.getTopKey().getRow() )
            i=i+1
            readrfile.next()
        assert(i==1000)
# Build the test (parses CLI args and loads the shared object) and run it.
runner = TestRFileWrites()
runner.mthd()
| 2.6875 | 3 |
utils.py | satyenrajpal/speed-prediction-challenge | 5 | 12771850 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
def movingAverage(x, window):
    """Centered moving average of x with the given window size.

    The window shrinks near the edges so every position is averaged over the
    samples actually available.  Always returns a float ndarray.

    Fix: the original used np.zeros_like(x), which inherits an integer dtype
    from integer input and silently truncated every mean on assignment.
    """
    values = np.asarray(x, dtype=float)
    ret = np.zeros_like(values)
    half = (window - 1) // 2
    for i in range(len(values)):
        lo = max(0, i - half)
        # Same right edge as the original: half+1 for odd windows, half+2 for
        # even windows (even windows extend one extra sample to the right).
        hi = min(len(values), i + half + (2 - (window % 2)))
        ret[i] = np.mean(values[lo:hi])
    return ret
def computeAverage(x, window, idx):
    """Mean of the trailing samples of x ending just before index idx.

    NOTE(review): the slice starts at idx - window - 1, so it covers up to
    window + 1 samples, not window -- confirm whether the extra `- 1` is
    intentional.
    """
    start = max(0, idx - window - 1)
    return np.mean(x[start:idx])
def plot(predict_values, gt):
    """Plot predicted speeds against the ground-truth series.

    Shows an interactive window (plt.show() blocks until it is closed).
    Fix: removed the unused `start, end = ax.get_xlim()` assignment.
    """
    fig, ax = plt.subplots()
    ax.plot(np.arange(len(gt)), gt, label='ground truth')
    ax.plot(np.arange(len(predict_values)), np.array(predict_values), label='predict')
    # Y ticks every 5 mph up to just past the ground-truth maximum,
    # formatted with one decimal place.
    ax.yaxis.set_ticks(np.arange(0, max(gt) +10, 5.0))
    ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    ax.legend(loc='upper left')
    plt.xlabel('Frame num.')
    plt.ylabel('Speed [mph]')
    # ax.figure.savefig('result.png', bbox_inches='tight')
    plt.show()
plt.show() | 3.296875 | 3 |