Dataset schema (one row per source file; ranges give observed min to max, ⌀ marks nullable columns):
hexsha: string (length 40)
size: int64 (4 to 1.02M)
ext: string (8 distinct values)
lang: string (1 distinct value)
max_stars_repo_path: string (length 4 to 209)
max_stars_repo_name: string (length 5 to 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4 to 209)
max_issues_repo_name: string (length 5 to 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4 to 209)
max_forks_repo_name: string (length 5 to 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 4 to 1.02M)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
Row 1
hexsha: 16fd232c0205bc961d584aa0dc8a764d33f272ac | size: 4,441 | ext: py | lang: Python
max_stars:  path: midap_software/min_mem_access.py | repo: cap-lab/MidapSim | head: 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | licenses: ["MIT"] | count: 2 | events: 2021-03-28T16:19:06.000Z to 2022-02-26T08:58:33.000Z
max_issues: path: midap_software/min_mem_access.py | repo: cap-lab/MidapSim | head: 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | licenses: ["MIT"] | count: null | events: null to null
max_forks:  path: midap_software/min_mem_access.py | repo: cap-lab/MidapSim | head: 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | licenses: ["MIT"] | count: 1 | events: 2021-02-22T08:44:20.000Z to 2021-02-22T08:44:20.000Z
content:
from __future__ import print_function
from functools import reduce
from config import cfg
from midap_software.layer_compiler import LayerCompiler
class MinMemAccess(LayerCompiler):
def _preprocess(self):
from generic_op import ConvOp
super(MinMemAccess, self)._preprocess()
self.num_remain_banks = 1
layer = self.layer
op = layer.main_op
if isinstance(op, ConvOp):
self._set_outbank_num()
def _calc_dram_access_by_weight(self):
layer = self.layer
op = layer.main_op
action = layer.control_info.action
process_num = 1 if layer.is_weight_in_wmem else reduce(lambda x, y: x + y, [0] + [1 if a[0] == 'PROCESS' else 0 for a in action])
return (op.weight.size * process_num)
def _calc_dram_access_by_outfeature(self):
import numpy as np
layer = self.layer
out_shape = layer.get_output_shape()
mapping = layer.control_info.output_mapping
num_out_banks = len(mapping)
reduced_width = layer.num_planes_per_fmem * num_out_banks
return (max(out_shape[0] - reduced_width, 0)) * np.prod(out_shape[1:]) * 2
def _flip(self, num_output, min_bank_num):
layer = self.layer
control_info = layer.control_info
fmem_info = self.fmem_info
num_available_bank = fmem_info.get_num_unreserved_bank()
if num_output < layer.require_fmem and num_output < num_available_bank - min_bank_num:
min_bank_num = num_available_bank - num_output
self.num_remain_banks = min_bank_num
# TODO clean code
if control_info.output_stationary < 0:
reverse_write = control_info.reverse_write = layer.require_fmem > num_available_bank - min_bank_num
input_layer = layer.input[0]
input_flip = control_info.input_flip = input_layer.control_info.flip
control_info.flip = not input_flip if reverse_write else input_flip
def _set_outbank_num(self):
import sys
layer = self.layer
min_bank_num = 1
min_access = sys.maxsize
self.fmem_info.backup()
layer.control_info.backup()
num_output = layer.require_fmem
control_info = layer.control_info
for n in range(min_bank_num, cfg.MIDAP.FMEM.NUM - len(control_info.output_mapping)):
end = False
num_available_bank = self.fmem_info.get_num_unreserved_bank()
self._flip(min(layer.require_fmem, num_available_bank - n), n)
while not end:
end = self._do_step()
w = self._calc_dram_access_by_weight()
of = self._calc_dram_access_by_outfeature()
if w + of < min_access:
min_access = w + of
min_bank_num = n
num_output = len(control_info.output_mapping)
self.fmem_info.restore()
layer.control_info.restore()
self._flip(num_output, min_bank_num)
def _do_operation(self):
layer = self.layer
control_info = layer.control_info
fragments = control_info.remain_inputs # Remain input mappings
if not control_info.fixed_output:
output_fragments = layer.get_output_fragments(self.num_out_banks, self._next_layer)
self._set_out_mappings(output_fragments)
if control_info.num_output_mapping < len(output_fragments):
fragments = control_info.limit_processing_fragments(output_fragments, fragments)
if not fragments:
self._fix_exception()
else:
control_info.fixed_output = True # no more change occurs
# post process
if not fragments:
return
self._generate_process_op(fragments)
def _do_load(self, fragments):
fmem_info = self.fmem_info
control_info = self.layer.control_info
if not control_info.fixed_output:
num_available_banks = fmem_info.get_num_available_bank()
num_unreserved_bank = fmem_info.get_num_unreserved_bank()
num_output_bank = num_unreserved_bank - self.num_remain_banks
fragments = control_info.limit_load_fragments(num_available_banks, num_output_bank, fragments, self._next_layer)
if not fragments:
return False
self._generate_load_op(fragments)
return True
avg_line_length: 37.008333 | max_line_length: 137 | alphanum_fraction: 0.654357
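The bank-allocation search in _set_outbank_num above is an exhaustive scan over candidate bank counts that keeps the one minimizing the sum of weight-reload and output-feature DRAM traffic. A minimal self-contained sketch of that pattern follows; the cost functions are hypothetical stand-ins, not MidapSim's actual cost model.

import sys

def pick_output_banks(total_banks, weight_traffic, output_traffic):
    """Return the bank count whose combined DRAM traffic is smallest.

    weight_traffic(n) and output_traffic(n) are hypothetical callables that
    report bytes moved when n banks are reserved.
    """
    best_n, best_cost = 1, sys.maxsize
    for n in range(1, total_banks):
        cost = weight_traffic(n) + output_traffic(n)
        if cost < best_cost:
            best_n, best_cost = n, cost
    return best_n, best_cost

# Toy cost curves: fewer reserved banks means more weight reloads,
# more reserved banks means more output-feature spilling.
print(pick_output_banks(8, lambda n: 1000 // n, lambda n: 200 * n))  # -> (2, 900)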
Row 2
hexsha: 8f1b95ea0c47ad6f208731f9d91c0c210fdd71f9 | size: 2,306 | ext: py | lang: Python
max_stars:  path: rupo/stress/word.py | repo: dagrigorev/rupo | head: 3b1bb5873f3d0f8ef4fe662221d5f7a3573909cc | licenses: ["Apache-2.0"] | count: 171 | events: 2017-06-06T17:01:32.000Z to 2022-02-03T10:32:54.000Z
max_issues: path: rupo/stress/word.py | repo: GraphGrailAi/rupo | head: 8e2fbcdb3e948dd5e8f007d471171c730be3ad3d | licenses: ["Apache-2.0"] | count: 12 | events: 2017-03-20T18:09:54.000Z to 2021-07-28T17:39:38.000Z
max_forks:  path: rupo/stress/word.py | repo: GraphGrailAi/rupo | head: 8e2fbcdb3e948dd5e8f007d471171c730be3ad3d | licenses: ["Apache-2.0"] | count: 33 | events: 2017-03-29T13:27:56.000Z to 2022-02-21T18:36:38.000Z
content:
# -*- coding: utf-8 -*-
# Author: Гусев Илья (Ilya Gusev)
# Description: A word class with stress marks.
from enum import Enum
from typing import List, Set
from russ.syllables import get_syllables
class Stress:
"""
    Word stress (ударение).
"""
class Type(Enum):
ANY = -1
PRIMARY = 0
SECONDARY = 1
def __init__(self, position: int, stress_type: Type=Type.PRIMARY) -> None:
self.position = position
self.type = stress_type
def __hash__(self):
return hash(self.position)
def __eq__(self, other: 'Stress'):
return self.position == other.position and self.type == other.type
def __str__(self):
return str(self.position) + "\t" + str(self.type)
def __repr__(self):
return self.__str__()
class StressedWord:
"""
    A word and its stresses.
"""
def __init__(self, text: str, stresses: Set[Stress]) -> None:
self.stresses = stresses
self.text = text
self.syllables = get_syllables(text)
self.__accent_syllables()
def get_primary_stresses(self) -> List[int]:
return [stress.position for stress in self.stresses if stress.type == Stress.Type.PRIMARY]
def get_secondary_stresses(self) -> List[int]:
return [stress.position for stress in self.stresses if stress.type == Stress.Type.SECONDARY]
def add_stress(self, position: int, stress_type: Stress.Type=Stress.Type.PRIMARY) -> None:
self.stresses.add(Stress(position, stress_type))
self.__accent_syllables()
def add_stresses(self, stresses: List[Stress]) -> None:
self.stresses = set(self.stresses).union(set(stresses))
self.__accent_syllables()
def __accent_syllables(self):
for syllable in self.syllables:
if Stress(syllable.vowel()) in self.stresses:
syllable.stress = syllable.vowel()
else:
syllable.stress = -1
def __str__(self):
return self.text + "\t" + ",".join([str(i) for i in self.get_primary_stresses()])+ \
"\t" + ",".join([str(i) for i in self.get_secondary_stresses()])
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash(self.text)
def __eq__(self, other: 'StressedWord'):
return self.text == other.text
avg_line_length: 28.469136 | max_line_length: 100 | alphanum_fraction: 0.625325
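A small usage sketch for the classes above; it assumes the russ package (which provides get_syllables) and this module are importable, and the example word and positions are only illustrative.

from rupo.stress.word import Stress, StressedWord

# "корова" carries its primary stress on the second "о" (character index 3).
word = StressedWord("корова", {Stress(3)})
print(word.get_primary_stresses())   # [3]

# Adding a stress re-annotates the syllables automatically.
word.add_stress(5, Stress.Type.SECONDARY)
print(word)                          # text, primary positions, secondary positions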
Row 3
hexsha: eafb3a44b3532350d1a9bf9d1201622262e7cdab | size: 3,139 | ext: py | lang: Python
max_stars:  path: PythonAPI/quickstart/26-npc-trigger-waypoints.py | repo: MaheshM99/PolyVerif | head: 7894bdd46796b059dc856e6058935eb294ed299a | licenses: ["Apache-2.0"] | count: 1 | events: 2022-03-07T05:56:21.000Z to 2022-03-07T05:56:21.000Z
max_issues: path: PythonAPI/quickstart/26-npc-trigger-waypoints.py | repo: MaheshM99/PolyVerif | head: 7894bdd46796b059dc856e6058935eb294ed299a | licenses: ["Apache-2.0"] | count: null | events: null to null
max_forks:  path: PythonAPI/quickstart/26-npc-trigger-waypoints.py | repo: MaheshM99/PolyVerif | head: 7894bdd46796b059dc856e6058935eb294ed299a | licenses: ["Apache-2.0"] | count: 1 | events: 2021-12-31T09:35:59.000Z to 2021-12-31T09:35:59.000Z
content:
#!/usr/bin/env python3
#
# Copyright (c) 2019-2021 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
from environs import Env
import lgsvl
print("Python API Quickstart #26: NPC triggering the waypoints callbacks")
env = Env()
sim = lgsvl.Simulator(env.str("LGSVL__SIMULATOR_HOST", lgsvl.wise.SimulatorSettings.simulator_host), env.int("LGSVL__SIMULATOR_PORT", lgsvl.wise.SimulatorSettings.simulator_port))
if sim.current_scene == lgsvl.wise.DefaultAssets.map_borregasave:
sim.reset()
else:
sim.load(lgsvl.wise.DefaultAssets.map_borregasave)
spawns = sim.get_spawn()
# EGO
state = lgsvl.AgentState()
forward = lgsvl.utils.transform_to_forward(spawns[0])
right = lgsvl.utils.transform_to_right(spawns[0])
state.transform = spawns[0]
state.velocity = 12 * forward
ego = sim.add_agent(env.str("LGSVL__VEHICLE_0", lgsvl.wise.DefaultAssets.ego_lincoln2017mkz_apollo5), lgsvl.AgentType.EGO, state)
# NPC
state = lgsvl.AgentState()
state.transform.position = spawns[0].position + 10 * forward
state.transform.rotation = spawns[0].rotation
npc = sim.add_agent("Sedan", lgsvl.AgentType.NPC, state)
vehicles = {
ego: "EGO",
npc: "Sedan",
}
# Executed upon receiving collision callback -- NPC is expected to drive through colliding objects
def on_collision(agent1, agent2, contact):
name1 = vehicles[agent1]
name2 = vehicles[agent2] if agent2 is not None else "OBSTACLE"
print("{} collided with {}".format(name1, name2))
ego.on_collision(on_collision)
npc.on_collision(on_collision)
# This block creates the list of waypoints that the NPC will follow
# Each waypoint is a position vector paired with the speed at which the NPC will drive to it
waypoints = []
z_delta = 12
layer_mask = 0
layer_mask |= 1 << 0 # 0 is the layer for the road (default)
for i in range(20):
speed = 24 # if i % 2 == 0 else 12
px = 0
pz = (i + 1) * z_delta
# Waypoint angles are input as Euler angles (roll, pitch, yaw)
angle = spawns[0].rotation
# Raycast the points onto the ground because BorregasAve is not flat
hit = sim.raycast(
spawns[0].position + px * right + pz * forward,
lgsvl.Vector(0, -1, 0),
layer_mask,
)
# Trigger is set to 10 meters for every other waypoint (0 means no trigger)
tr = 0
if i % 2:
tr = 10
wp = lgsvl.DriveWaypoint(
position=hit.point, speed=speed, angle=angle, idle=0, trigger_distance=tr
)
waypoints.append(wp)
# When the NPC is within 0.5m of the waypoint, this will be called
def on_waypoint(agent, index):
print("waypoint {} reached, waiting for ego to get closer".format(index))
# The above function needs to be added to the list of callbacks for the NPC
npc.on_waypoint_reached(on_waypoint)
# The NPC needs to be given the list of waypoints.
# A bool can be passed as the 2nd argument that controls whether or not the NPC loops over the waypoints (default false)
npc.follow(waypoints)
input("Press Enter to run simulation for 22 seconds")
sim.run(22)
avg_line_length: 32.030612 | max_line_length: 180 | alphanum_fraction: 0.69863
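The comment before npc.follow(waypoints) above notes that a second boolean argument controls looping over the waypoint list; a one-line variant of that call (everything else in the script unchanged) would be:

npc.follow(waypoints, True)  # loop over the waypoints instead of stopping after the last one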
Row 4
hexsha: 74c875711f6d38b58f09e4045876d69ca544fb7a | size: 3,003 | ext: py | lang: Python
max_stars:  path: util/undirected_graph_sage.py | repo: rburing/gcaops | head: 3866e11584d42354c65643c70cd2b6982866c129 | licenses: ["MIT"] | count: null | events: null to null
max_issues: path: util/undirected_graph_sage.py | repo: rburing/gcaops | head: 3866e11584d42354c65643c70cd2b6982866c129 | licenses: ["MIT"] | count: null | events: null to null
max_forks:  path: util/undirected_graph_sage.py | repo: rburing/gcaops | head: 3866e11584d42354c65643c70cd2b6982866c129 | licenses: ["MIT"] | count: null | events: null to null
content:
from graph.undirected_graph import UndirectedGraph
from util.permutation import selection_sort
import sage.all # make SageMath work when called from Python
from sage.graphs.graph import Graph
import subprocess
import os
NAUTY_PREFIX = '' # e.g. '/home/rburing/src/nauty27r1/'
def nauty_generate_undirected(num_vertices, num_edges, connected=None, biconnected=None, min_degree=0):
args = [str(num_vertices), "{}:{}".format(num_edges, num_edges)]
if connected:
args.append("-c")
if biconnected:
args.append("-C")
if min_degree != 0:
args.append("-d{}".format(min_degree))
FNULL = open(os.devnull, 'w')
geng = subprocess.Popen((NAUTY_PREFIX + 'geng', *args), stdout=subprocess.PIPE, stderr=FNULL)
showg = subprocess.Popen((NAUTY_PREFIX + 'showg', '-e', '-l0'), stdin=geng.stdout, stderr=FNULL, stdout=subprocess.PIPE)
line_count = -1
for line in showg.stdout:
if line_count % 4 == 2:
graph_encoding = line.decode('ascii').rstrip()
edges = [tuple(map(int,e.split(' '))) for e in graph_encoding.split(' ')]
yield Graph([list(range(num_vertices)), edges])
line_count += 1
def undirected_graph_canonicalize(g):
n = len(g)
edges = g.edges()
G, sigma = Graph([list(range(n)), edges]).canonical_label(certificate=True)
new_edges = list(G.edges(labels=False))
edge_permutation = [tuple(sorted([sigma[edge[0]],sigma[edge[1]]])) for edge in edges]
index_permutation = [new_edges.index(e) for e in edge_permutation]
undo_canonicalize = [0]*n
for k, v in sigma.items():
undo_canonicalize[v] = k
return UndirectedGraph(n, list(new_edges)), undo_canonicalize, selection_sort(index_permutation)
def undirected_graph_has_odd_automorphism(g):
n = len(g)
edges = g.edges()
G = Graph([list(range(n)), edges])
for sigma in G.automorphism_group().gens(): # NOTE: it suffices to check generators
edge_permutation = [tuple(sorted([sigma(edge[0]),sigma(edge[1])])) for edge in edges]
index_permutation = [edges.index(e) for e in edge_permutation]
if selection_sort(index_permutation) == -1:
return True
return False
def undirected_graph_generate(num_vertices, num_edges, connected=None, biconnected=None, min_degree=0, has_odd_automorphism=None):
for G in nauty_generate_undirected(num_vertices, num_edges, connected=connected, biconnected=biconnected, min_degree=min_degree):
G = G.canonical_label()
g = UndirectedGraph(num_vertices, list(G.edges(labels=False)))
if has_odd_automorphism is None or undirected_graph_has_odd_automorphism(g) == has_odd_automorphism:
yield g
def undirected_graph_to_encoding(g):
n = len(g)
edges = g.edges()
G = Graph([list(range(n)), edges])
return G.graph6_string()
def undirected_graph_from_encoding(graph6_string):
G = Graph(graph6_string)
return UndirectedGraph(len(G.vertices()), list(G.edges(labels=False)))
avg_line_length: 44.161765 | max_line_length: 133 | alphanum_fraction: 0.692308
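A usage sketch for the helpers above; it assumes a SageMath environment with nauty's geng/showg reachable (on PATH or via NAUTY_PREFIX) and the gcaops graph/util packages importable.

from util.undirected_graph_sage import (
    undirected_graph_generate,
    undirected_graph_to_encoding,
    undirected_graph_from_encoding,
)

# Enumerate connected graphs on 4 vertices with 4 edges that admit no odd automorphism.
for g in undirected_graph_generate(4, 4, connected=True, has_odd_automorphism=False):
    enc = undirected_graph_to_encoding(g)     # graph6 string
    g2 = undirected_graph_from_encoding(enc)  # round-trip back to an UndirectedGraph
    print(enc, g.edges(), g2.edges())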
Row 5
hexsha: e30892581ec843b73d6f10668b9e1c4bbfa70788 | size: 540 | ext: py | lang: Python
max_stars:  path: problems/exercism/luhn/luhn.py | repo: JayMonari/py-personal | head: ef16d030cc7fe2266d661e1378d95f588229b746 | licenses: ["MIT"] | count: null | events: null to null
max_issues: path: problems/exercism/luhn/luhn.py | repo: JayMonari/py-personal | head: ef16d030cc7fe2266d661e1378d95f588229b746 | licenses: ["MIT"] | count: null | events: null to null
max_forks:  path: problems/exercism/luhn/luhn.py | repo: JayMonari/py-personal | head: ef16d030cc7fe2266d661e1378d95f588229b746 | licenses: ["MIT"] | count: null | events: null to null
content:
import re
class Luhn:
def __init__(self, cardNo: str) -> None:
self.cardNo = cardNo.strip()
def valid(self) -> bool:
if len(self.cardNo) == 1 or re.search("[^0-9 ]", self.cardNo):
return False
sum, sanitized = 0, re.sub("[^0-9]", "", self.cardNo)
for i, digit in enumerate(reversed(sanitized), start=1):
d = int(digit)
if i % 2 == 0:
d *= 2
if d > 9:
d -= 9
sum += d
return sum % 10 == 0
avg_line_length: 25.714286 | max_line_length: 70 | alphanum_fraction: 0.448148
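A quick check of the validator above: the first number passes the Luhn checksum, the second fails it, and single digits are rejected.

print(Luhn("4539 3195 0343 6467").valid())  # True
print(Luhn("8273 1232 7352 0569").valid())  # False (checksum fails)
print(Luhn("0").valid())                    # False (too short)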
Row 6
hexsha: cdd010448843b6cb7c6faf2bdbc0a903c3f339aa | size: 8,085 | ext: py | lang: Python
max_stars:  path: avalanche/benchmarks/classic/cinaturalist.py | repo: coreylowman/avalanche | head: 9c1e7765f1577c400ec0c57260221bcffd9566a2 | licenses: ["MIT"] | count: 1 | events: 2021-09-15T13:57:27.000Z to 2021-09-15T13:57:27.000Z
max_issues: path: avalanche/benchmarks/classic/cinaturalist.py | repo: coreylowman/avalanche | head: 9c1e7765f1577c400ec0c57260221bcffd9566a2 | licenses: ["MIT"] | count: null | events: null to null
max_forks:  path: avalanche/benchmarks/classic/cinaturalist.py | repo: coreylowman/avalanche | head: 9c1e7765f1577c400ec0c57260221bcffd9566a2 | licenses: ["MIT"] | count: null | events: null to null
content:
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 20-05-2020 #
# Author: Matthias De Lange #
# E-mail: contact@continualai.org #
# Website: continualai.org #
################################################################################
from pathlib import Path
from typing import Union, Any, Optional
from avalanche.benchmarks.classic.classic_benchmarks_utils import (
check_vision_benchmark,
)
from avalanche.benchmarks.datasets import (
INATURALIST2018,
default_dataset_location,
)
from avalanche.benchmarks import nc_benchmark
from torchvision import transforms
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
_default_train_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
_default_eval_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
def SplitInaturalist(
*,
super_categories=None,
return_task_id=False,
download=False,
seed=0,
train_transform: Optional[Any] = _default_train_transform,
eval_transform: Optional[Any] = _default_eval_transform,
dataset_root: Union[str, Path] = None
):
"""
Creates a CL benchmark using the iNaturalist2018 dataset.
A selection of supercategories (by default 10) define the experiences.
Note that the supercategories are highly imbalanced in the number of classes
and the amount of data available.
    If the dataset is not present on the machine, **this method will
    automatically download** and store it if `download=True`
    (120G train/val).
To parse the dataset jsons you need to install an additional dependency:
"pycocotools". You can install it like this:
"conda install -c conda-forge pycocotools"
Implementation is based on the CL survey
(https://ieeexplore.ieee.org/document/9349197) but differs slightly.
The survey uses only the original iNaturalist2018 training dataset split
into 70/10/20 for train/val/test streams. This method instead uses the full
iNaturalist2018 training set to make the `train_stream`, whereas the
`test_stream` is defined by the original iNaturalist2018 validation data.
The returned benchmark will return experiences containing all patterns of a
subset of classes, which means that each class is only seen "once".
This is one of the most common scenarios in the Continual Learning
literature. Common names used in literature to describe this kind of
scenario are "Class Incremental", "New Classes", etc. By default,
an equal amount of classes will be assigned to each experience.
This generator doesn't force a choice on the availability of task labels,
a choice that is left to the user (see the `return_task_id` parameter for
more info on task labels).
The benchmark instance returned by this method will have two fields,
`train_stream` and `test_stream`, which can be iterated to obtain
training and test :class:`Experience`. Each Experience contains the
`dataset` and the associated task label.
The benchmark API is quite simple and is uniform across all benchmark
generators. It is recommended to check the tutorial of the "benchmark" API,
which contains usage examples ranging from "basic" to "advanced".
:param super_categories: The list of supercategories which define the
tasks, i.e. each task consists of all classes in a super-category.
:param download: If true and the dataset is not present in the computer,
this method will automatically download and store it. This will take 120G
for the train/val set.
:param return_task_id: if True, a progressive task id is returned for every
experience. If False, all experiences will have a task ID of 0.
:param seed: A valid int used to initialize the random number generator.
Can be None.
:param train_transform: The transformation to apply to the training data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations).
If no transformation is passed, the default train transformation
will be used.
:param eval_transform: The transformation to apply to the test data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations).
If no transformation is passed, the default test transformation
will be used.
:param dataset_root: The root path of the dataset.
Defaults to None, which means that the default location for
'inatuarlist2018' will be used.
:returns: A properly initialized :class:`NCScenario` instance.
"""
# Categories with > 100 datapoints
if super_categories is None:
super_categories = [
"Amphibia",
"Animalia",
"Arachnida",
"Aves",
"Fungi",
"Insecta",
"Mammalia",
"Mollusca",
"Plantae",
"Reptilia",
]
train_set, test_set = _get_inaturalist_dataset(
dataset_root, super_categories, download=download
)
per_exp_classes, fixed_class_order = _get_split(super_categories, train_set)
if return_task_id:
return nc_benchmark(
fixed_class_order=fixed_class_order,
per_exp_classes=per_exp_classes,
train_dataset=train_set,
test_dataset=test_set,
n_experiences=len(super_categories),
task_labels=True,
seed=seed,
class_ids_from_zero_in_each_exp=True,
train_transform=train_transform,
eval_transform=eval_transform,
)
else:
return nc_benchmark(
fixed_class_order=fixed_class_order,
per_exp_classes=per_exp_classes,
train_dataset=train_set,
test_dataset=test_set,
n_experiences=len(super_categories),
task_labels=False,
seed=seed,
train_transform=train_transform,
eval_transform=eval_transform,
)
def _get_inaturalist_dataset(dataset_root, super_categories, download):
if dataset_root is None:
dataset_root = default_dataset_location("inatuarlist2018")
train_set = INATURALIST2018(
dataset_root, split="train", supcats=super_categories, download=download
)
test_set = INATURALIST2018(
dataset_root, split="val", supcats=super_categories, download=download
)
return train_set, test_set
def _get_split(super_categories, train_set):
"""Get number of classes per experience, and
the total order of the classes."""
per_exp_classes, fixed_class_order = {}, []
for idx, supcat in enumerate(super_categories):
new_cats = list(train_set.cats_per_supcat[supcat])
fixed_class_order += new_cats
per_exp_classes[idx] = len(new_cats)
return per_exp_classes, fixed_class_order
__all__ = ["SplitInaturalist"]
if __name__ == "__main__":
import sys
benchmark_instance = SplitInaturalist()
check_vision_benchmark(benchmark_instance, show_without_transforms=False)
sys.exit(0)
avg_line_length: 38.684211 | max_line_length: 80 | alphanum_fraction: 0.653309
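A minimal usage sketch following the docstring above; it assumes the iNaturalist2018 data (about 120G) is already available at the default dataset location and that pycocotools is installed.

from avalanche.benchmarks.classic.cinaturalist import SplitInaturalist

benchmark = SplitInaturalist(download=False)
# One experience per supercategory; each experience exposes its dataset.
for experience in benchmark.train_stream:
    print(len(experience.dataset))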
Row 7
hexsha: 45720093fa7234f0c2cc4000abc1fe880619a385 | size: 15,901 | ext: py | lang: Python
max_stars:  path: pysnmp/CXAtmDxi-MIB.py | repo: agustinhenze/mibs.snmplabs.com | head: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | licenses: ["Apache-2.0"] | count: 11 | events: 2021-02-02T16:27:16.000Z to 2021-08-31T06:22:49.000Z
max_issues: path: pysnmp/CXAtmDxi-MIB.py | repo: agustinhenze/mibs.snmplabs.com | head: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | licenses: ["Apache-2.0"] | count: 75 | events: 2021-02-24T17:30:31.000Z to 2021-12-08T00:01:18.000Z
max_forks:  path: pysnmp/CXAtmDxi-MIB.py | repo: agustinhenze/mibs.snmplabs.com | head: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | licenses: ["Apache-2.0"] | count: 10 | events: 2019-04-30T05:51:36.000Z to 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module CXAtmDxi-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CXAtmDxi-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:16:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
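# NOTE: like other pysmi-generated MIB modules, this file expects `mibBuilder`
# to be supplied in its namespace by pysnmp's MIB loader
# (pysnmp.smi.builder.MibBuilder) at load time; it is not meant to be imported directly.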
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint")
Alias, cxAtmDxi = mibBuilder.importSymbols("CXProduct-SMI", "Alias", "cxAtmDxi")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, ObjectIdentity, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, TimeTicks, NotificationType, MibIdentifier, Integer32, Gauge32, ModuleIdentity, Bits, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "TimeTicks", "NotificationType", "MibIdentifier", "Integer32", "Gauge32", "ModuleIdentity", "Bits", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class PSapIndex(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 0)
class SubRef(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
class Dfa(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 1023)
class DfaX(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 16777216)
class Vpi(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
class Vci(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
atmDxiMibLevel = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiMibLevel.setStatus('mandatory')
atmDxiTranslationDfa = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 2), DfaX()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiTranslationDfa.setStatus('mandatory')
atmDxiTranslationVpi = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 3), Vpi()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiTranslationVpi.setStatus('mandatory')
atmDxiTranslationVci = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 4), Vci()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiTranslationVci.setStatus('mandatory')
atmDxiTranslationMode = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("twobytes", 1), ("fourbytes", 2))).clone('twobytes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiTranslationMode.setStatus('mandatory')
atmDxiPSapTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10), )
if mibBuilder.loadTexts: atmDxiPSapTable.setStatus('mandatory')
atmDxiPSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1), ).setIndexNames((0, "CXAtmDxi-MIB", "atmDxiPSapNumber"))
if mibBuilder.loadTexts: atmDxiPSapEntry.setStatus('mandatory')
atmDxiPSapNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 1), PSapIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapNumber.setStatus('mandatory')
atmDxiPSapConnectTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 600)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiPSapConnectTimer.setStatus('mandatory')
atmDxiPSapControl = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clearStats", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: atmDxiPSapControl.setStatus('mandatory')
atmDxiPSapState = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 4))).clone(namedValues=NamedValues(("offline", 1), ("connected", 4))).clone('offline')).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapState.setStatus('mandatory')
atmDxiPSapTxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapTxFrames.setStatus('mandatory')
atmDxiPSapRxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapRxFrames.setStatus('mandatory')
atmDxiPSapTxOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapTxOctets.setStatus('mandatory')
atmDxiPSapRxOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapRxOctets.setStatus('mandatory')
atmDxiPSapOutSuccessfullConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapOutSuccessfullConnects.setStatus('mandatory')
atmDxiPSapOutUnsuccessfullConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapOutUnsuccessfullConnects.setStatus('mandatory')
atmDxiPSapInConnectsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapInConnectsReceived.setStatus('mandatory')
atmDxiPSapTxResets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapTxResets.setStatus('mandatory')
atmDxiPSapRxResets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapRxResets.setStatus('mandatory')
atmDxiPSapNoServiceDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapNoServiceDiscards.setStatus('mandatory')
atmDxiPSapCongestionDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 10, 1, 36), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiPSapCongestionDiscards.setStatus('mandatory')
atmDxiSapTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11), )
if mibBuilder.loadTexts: atmDxiSapTable.setStatus('mandatory')
atmDxiSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: atmDxiSapEntry.setStatus('mandatory')
atmDxiSapMode = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("mode-1a", 1), ("mode-1b", 2), ("mode-2", 3), ("modeTransparent", 4), ("modeLoopback", 5))).clone('mode-1a')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiSapMode.setStatus('mandatory')
atmDxiSapTransparentDfa = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 6), Dfa()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiSapTransparentDfa.setStatus('mandatory')
atmDxiSapControl = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clearStats", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: atmDxiSapControl.setStatus('mandatory')
atmDxiSapRxLmiFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSapRxLmiFrames.setStatus('mandatory')
atmDxiSapNoRouteDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSapNoRouteDiscards.setStatus('mandatory')
atmDxiSapRxInvalidDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSapRxInvalidDiscards.setStatus('mandatory')
atmDxiSapCongestionDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSapCongestionDiscards.setStatus('mandatory')
atmDxiSapFlowControlDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 11, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSapFlowControlDiscards.setStatus('mandatory')
atmDxiSysRouteTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12), )
if mibBuilder.loadTexts: atmDxiSysRouteTable.setStatus('mandatory')
atmDxiSysRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CXAtmDxi-MIB", "atmDxiSRDxiFrameAddress"))
if mibBuilder.loadTexts: atmDxiSysRouteEntry.setStatus('mandatory')
atmDxiSRDxiFrameAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 1), Dfa()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRDxiFrameAddress.setStatus('mandatory')
atmDxiSRRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiSRRowStatus.setStatus('mandatory')
atmDxiSRDestAlias = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 3), Alias()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiSRDestAlias.setStatus('mandatory')
atmDxiSRSubRef = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 4), SubRef()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmDxiSRSubRef.setStatus('mandatory')
atmDxiSRRouteState = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("offLine", 1), ("notConnected", 2), ("inProgress", 3), ("connected", 4), ("connectedFlowOff", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRRouteState.setStatus('mandatory')
atmDxiSRFailureReason = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 12, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 15, 16, 17, 18))).clone(namedValues=NamedValues(("noFailure", 1), ("internalError", 2), ("localAllocFailure", 3), ("remoteAllocFailure", 4), ("localNoAccess", 5), ("remoteNoAccess", 6), ("remotePvcDown", 8), ("remotePvcBusy", 10), ("localFcnFailure", 11), ("remoteFcnFailure", 12), ("localDsnFailure", 13), ("remoteAliasNotFound", 15), ("remoteNoPvcService", 16), ("mpeInvalidSubref", 17), ("routeStalled", 18)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRFailureReason.setStatus('mandatory')
atmDxiSysRouteStatsTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13), )
if mibBuilder.loadTexts: atmDxiSysRouteStatsTable.setStatus('mandatory')
atmDxiSysRouteStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CXAtmDxi-MIB", "atmDxiSRStatsDxiFrameAddress"))
if mibBuilder.loadTexts: atmDxiSysRouteStatsEntry.setStatus('mandatory')
atmDxiSRStatsDxiFrameAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 1), Dfa()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsDxiFrameAddress.setStatus('mandatory')
atmDxiSRStatsCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsCreationTime.setStatus('mandatory')
atmDxiSRStatsNegotiatedDataSize = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsNegotiatedDataSize.setStatus('mandatory')
atmDxiSRStatsTxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsTxFrames.setStatus('mandatory')
atmDxiSRStatsRxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsRxFrames.setStatus('mandatory')
atmDxiSRStatsTxOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsTxOctets.setStatus('mandatory')
atmDxiSRStatsRxOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsRxOctets.setStatus('mandatory')
atmDxiSRStatsFlowControlDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 6, 58, 13, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmDxiSRStatsFlowControlDiscards.setStatus('mandatory')
mibBuilder.exportSymbols("CXAtmDxi-MIB", atmDxiSapNoRouteDiscards=atmDxiSapNoRouteDiscards, atmDxiPSapCongestionDiscards=atmDxiPSapCongestionDiscards, DfaX=DfaX, atmDxiPSapTxFrames=atmDxiPSapTxFrames, atmDxiSapCongestionDiscards=atmDxiSapCongestionDiscards, atmDxiSysRouteEntry=atmDxiSysRouteEntry, atmDxiSRStatsTxFrames=atmDxiSRStatsTxFrames, atmDxiSysRouteTable=atmDxiSysRouteTable, atmDxiPSapRxResets=atmDxiPSapRxResets, atmDxiSRStatsNegotiatedDataSize=atmDxiSRStatsNegotiatedDataSize, SubRef=SubRef, atmDxiPSapState=atmDxiPSapState, atmDxiPSapTxResets=atmDxiPSapTxResets, atmDxiPSapTxOctets=atmDxiPSapTxOctets, atmDxiTranslationMode=atmDxiTranslationMode, atmDxiTranslationVpi=atmDxiTranslationVpi, Vpi=Vpi, Vci=Vci, atmDxiPSapOutUnsuccessfullConnects=atmDxiPSapOutUnsuccessfullConnects, atmDxiSapEntry=atmDxiSapEntry, atmDxiPSapNumber=atmDxiPSapNumber, atmDxiPSapOutSuccessfullConnects=atmDxiPSapOutSuccessfullConnects, atmDxiSRStatsRxFrames=atmDxiSRStatsRxFrames, PSapIndex=PSapIndex, atmDxiMibLevel=atmDxiMibLevel, atmDxiSapTransparentDfa=atmDxiSapTransparentDfa, atmDxiPSapConnectTimer=atmDxiPSapConnectTimer, atmDxiSapRxLmiFrames=atmDxiSapRxLmiFrames, atmDxiSRDxiFrameAddress=atmDxiSRDxiFrameAddress, atmDxiPSapTable=atmDxiPSapTable, atmDxiPSapRxFrames=atmDxiPSapRxFrames, atmDxiSRRowStatus=atmDxiSRRowStatus, atmDxiSysRouteStatsEntry=atmDxiSysRouteStatsEntry, atmDxiSapControl=atmDxiSapControl, atmDxiTranslationVci=atmDxiTranslationVci, atmDxiSapTable=atmDxiSapTable, atmDxiSRStatsFlowControlDiscards=atmDxiSRStatsFlowControlDiscards, atmDxiPSapRxOctets=atmDxiPSapRxOctets, atmDxiSRSubRef=atmDxiSRSubRef, atmDxiSRStatsCreationTime=atmDxiSRStatsCreationTime, atmDxiSapFlowControlDiscards=atmDxiSapFlowControlDiscards, atmDxiSysRouteStatsTable=atmDxiSysRouteStatsTable, atmDxiSRRouteState=atmDxiSRRouteState, atmDxiPSapNoServiceDiscards=atmDxiPSapNoServiceDiscards, atmDxiSRStatsDxiFrameAddress=atmDxiSRStatsDxiFrameAddress, atmDxiTranslationDfa=atmDxiTranslationDfa, atmDxiPSapControl=atmDxiPSapControl, atmDxiSapMode=atmDxiSapMode, atmDxiSRStatsRxOctets=atmDxiSRStatsRxOctets, atmDxiSRFailureReason=atmDxiSRFailureReason, atmDxiSRStatsTxOctets=atmDxiSRStatsTxOctets, atmDxiSRDestAlias=atmDxiSRDestAlias, atmDxiPSapEntry=atmDxiPSapEntry, atmDxiSapRxInvalidDiscards=atmDxiSapRxInvalidDiscards, atmDxiPSapInConnectsReceived=atmDxiPSapInConnectsReceived, Dfa=Dfa)
avg_line_length: 117.785185 | max_line_length: 2,370 | alphanum_fraction: 0.764858
Row 8
hexsha: 135cc94285abc24dd77630cb1da42d9dc58269cc | size: 12 | ext: py | lang: Python
max_stars:  path: tests/syntax/space_between_operators_1.py | repo: matan-h/friendly | head: 3ab0fc6541c837271e8865e247750007acdd18fb | licenses: ["MIT"] | count: 287 | events: 2019-04-08T13:18:29.000Z to 2021-03-14T19:10:21.000Z
max_issues: path: tests/syntax/space_between_operators_1.py | repo: matan-h/friendly | head: 3ab0fc6541c837271e8865e247750007acdd18fb | licenses: ["MIT"] | count: 191 | events: 2019-04-08T14:39:18.000Z to 2021-03-14T22:14:56.000Z
max_forks:  path: tests/syntax/space_between_operators_1.py | repo: matan-h/friendly | head: 3ab0fc6541c837271e8865e247750007acdd18fb | licenses: ["MIT"] | count: 9 | events: 2019-04-08T12:54:08.000Z to 2020-11-20T02:26:27.000Z
content:
a = 2 * * 5
avg_line_length: 6 | max_line_length: 11 | alphanum_fraction: 0.25
Row 9
hexsha: 3c34999bf63c571ad2302399829abfd18e1f7629 | size: 10,647 | ext: py | lang: Python
max_stars:  path: sdk/python/pulumi_azure/network/get_gateway_connection.py | repo: kenny-wealth/pulumi-azure | head: e57e3a81f95bf622e7429c53f0bff93e33372aa1 | licenses: ["ECL-2.0", "Apache-2.0"] | count: null | events: null to null
max_issues: path: sdk/python/pulumi_azure/network/get_gateway_connection.py | repo: kenny-wealth/pulumi-azure | head: e57e3a81f95bf622e7429c53f0bff93e33372aa1 | licenses: ["ECL-2.0", "Apache-2.0"] | count: null | events: null to null
max_forks:  path: sdk/python/pulumi_azure/network/get_gateway_connection.py | repo: kenny-wealth/pulumi-azure | head: e57e3a81f95bf622e7429c53f0bff93e33372aa1 | licenses: ["ECL-2.0", "Apache-2.0"] | count: null | events: null to null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetGatewayConnectionResult:
"""
A collection of values returned by getGatewayConnection.
"""
def __init__(__self__, authorization_key=None, connection_protocol=None, egress_bytes_transferred=None, enable_bgp=None, express_route_circuit_id=None, express_route_gateway_bypass=None, ingress_bytes_transferred=None, ipsec_policies=None, local_network_gateway_id=None, location=None, name=None, peer_virtual_network_gateway_id=None, resource_group_name=None, resource_guid=None, routing_weight=None, shared_key=None, tags=None, type=None, use_policy_based_traffic_selectors=None, virtual_network_gateway_id=None, id=None):
if authorization_key and not isinstance(authorization_key, str):
raise TypeError("Expected argument 'authorization_key' to be a str")
__self__.authorization_key = authorization_key
"""
The authorization key associated with the
Express Route Circuit. This field is present only if the type is an
ExpressRoute connection.
"""
if connection_protocol and not isinstance(connection_protocol, str):
raise TypeError("Expected argument 'connection_protocol' to be a str")
__self__.connection_protocol = connection_protocol
if egress_bytes_transferred and not isinstance(egress_bytes_transferred, float):
raise TypeError("Expected argument 'egress_bytes_transferred' to be a float")
__self__.egress_bytes_transferred = egress_bytes_transferred
if enable_bgp and not isinstance(enable_bgp, bool):
raise TypeError("Expected argument 'enable_bgp' to be a bool")
__self__.enable_bgp = enable_bgp
"""
If `true`, BGP (Border Gateway Protocol) is enabled
for this connection.
"""
if express_route_circuit_id and not isinstance(express_route_circuit_id, str):
raise TypeError("Expected argument 'express_route_circuit_id' to be a str")
__self__.express_route_circuit_id = express_route_circuit_id
"""
The ID of the Express Route Circuit
(i.e. when `type` is `ExpressRoute`).
"""
if express_route_gateway_bypass and not isinstance(express_route_gateway_bypass, bool):
raise TypeError("Expected argument 'express_route_gateway_bypass' to be a bool")
__self__.express_route_gateway_bypass = express_route_gateway_bypass
"""
If `true`, data packets will bypass ExpressRoute Gateway for data forwarding. This is only valid for ExpressRoute connections.
"""
if ingress_bytes_transferred and not isinstance(ingress_bytes_transferred, float):
raise TypeError("Expected argument 'ingress_bytes_transferred' to be a float")
__self__.ingress_bytes_transferred = ingress_bytes_transferred
if ipsec_policies and not isinstance(ipsec_policies, list):
raise TypeError("Expected argument 'ipsec_policies' to be a list")
__self__.ipsec_policies = ipsec_policies
if local_network_gateway_id and not isinstance(local_network_gateway_id, str):
raise TypeError("Expected argument 'local_network_gateway_id' to be a str")
__self__.local_network_gateway_id = local_network_gateway_id
"""
The ID of the local network gateway
when a Site-to-Site connection (i.e. when `type` is `IPsec`).
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The location/region where the connection is
located.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if peer_virtual_network_gateway_id and not isinstance(peer_virtual_network_gateway_id, str):
raise TypeError("Expected argument 'peer_virtual_network_gateway_id' to be a str")
__self__.peer_virtual_network_gateway_id = peer_virtual_network_gateway_id
"""
The ID of the peer virtual
network gateway when a VNet-to-VNet connection (i.e. when `type`
is `Vnet2Vnet`).
"""
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
__self__.resource_guid = resource_guid
if routing_weight and not isinstance(routing_weight, float):
raise TypeError("Expected argument 'routing_weight' to be a float")
__self__.routing_weight = routing_weight
"""
The routing weight.
"""
if shared_key and not isinstance(shared_key, str):
raise TypeError("Expected argument 'shared_key' to be a str")
__self__.shared_key = shared_key
"""
The shared IPSec key.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
(Optional) A mapping of tags to assign to the resource.
"""
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
__self__.type = type
"""
The type of connection. Valid options are `IPsec`
(Site-to-Site), `ExpressRoute` (ExpressRoute), and `Vnet2Vnet` (VNet-to-VNet).
"""
if use_policy_based_traffic_selectors and not isinstance(use_policy_based_traffic_selectors, bool):
raise TypeError("Expected argument 'use_policy_based_traffic_selectors' to be a bool")
__self__.use_policy_based_traffic_selectors = use_policy_based_traffic_selectors
"""
If `true`, policy-based traffic
selectors are enabled for this connection. Enabling policy-based traffic
selectors requires an `ipsec_policy` block.
"""
if virtual_network_gateway_id and not isinstance(virtual_network_gateway_id, str):
raise TypeError("Expected argument 'virtual_network_gateway_id' to be a str")
__self__.virtual_network_gateway_id = virtual_network_gateway_id
"""
The ID of the Virtual Network Gateway
in which the connection is created.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetGatewayConnectionResult(GetGatewayConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGatewayConnectionResult(
authorization_key=self.authorization_key,
connection_protocol=self.connection_protocol,
egress_bytes_transferred=self.egress_bytes_transferred,
enable_bgp=self.enable_bgp,
express_route_circuit_id=self.express_route_circuit_id,
express_route_gateway_bypass=self.express_route_gateway_bypass,
ingress_bytes_transferred=self.ingress_bytes_transferred,
ipsec_policies=self.ipsec_policies,
local_network_gateway_id=self.local_network_gateway_id,
location=self.location,
name=self.name,
peer_virtual_network_gateway_id=self.peer_virtual_network_gateway_id,
resource_group_name=self.resource_group_name,
resource_guid=self.resource_guid,
routing_weight=self.routing_weight,
shared_key=self.shared_key,
tags=self.tags,
type=self.type,
use_policy_based_traffic_selectors=self.use_policy_based_traffic_selectors,
virtual_network_gateway_id=self.virtual_network_gateway_id,
id=self.id)
def get_gateway_connection(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Virtual Network Gateway Connection.
:param str name: Specifies the name of the Virtual Network Gateway Connection.
:param str resource_group_name: Specifies the name of the resource group the Virtual Network Gateway Connection is located in.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/virtual_network_gateway_connection.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:network/getGatewayConnection:getGatewayConnection', __args__, opts=opts).value
return AwaitableGetGatewayConnectionResult(
authorization_key=__ret__.get('authorizationKey'),
connection_protocol=__ret__.get('connectionProtocol'),
egress_bytes_transferred=__ret__.get('egressBytesTransferred'),
enable_bgp=__ret__.get('enableBgp'),
express_route_circuit_id=__ret__.get('expressRouteCircuitId'),
express_route_gateway_bypass=__ret__.get('expressRouteGatewayBypass'),
ingress_bytes_transferred=__ret__.get('ingressBytesTransferred'),
ipsec_policies=__ret__.get('ipsecPolicies'),
local_network_gateway_id=__ret__.get('localNetworkGatewayId'),
location=__ret__.get('location'),
name=__ret__.get('name'),
peer_virtual_network_gateway_id=__ret__.get('peerVirtualNetworkGatewayId'),
resource_group_name=__ret__.get('resourceGroupName'),
resource_guid=__ret__.get('resourceGuid'),
routing_weight=__ret__.get('routingWeight'),
shared_key=__ret__.get('sharedKey'),
tags=__ret__.get('tags'),
type=__ret__.get('type'),
use_policy_based_traffic_selectors=__ret__.get('usePolicyBasedTrafficSelectors'),
virtual_network_gateway_id=__ret__.get('virtualNetworkGatewayId'),
id=__ret__.get('id'))
avg_line_length: 52.448276 | max_line_length: 528 | alphanum_fraction: 0.703391
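A minimal usage sketch based on the docstring above, run inside a Pulumi program; the connection and resource group names are placeholders.

import pulumi
from pulumi_azure.network.get_gateway_connection import get_gateway_connection

conn = get_gateway_connection(
    name="production",                 # placeholder connection name
    resource_group_name="networking",  # placeholder resource group
)
pulumi.export("connection_type", conn.type)
pulumi.export("enable_bgp", conn.enable_bgp)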
Row 10
hexsha: 37e6636c93140a776c3daf42708b9a1126f25b0a | size: 22,599 | ext: py | lang: Python
max_stars:  path: geotrek/maintenance/migrations/0004_auto__del_field_intervention_comments__add_field_intervention_descript.py | repo: jmdecastel/GEOTADMIN | head: 15547c0a99ae4c541ca517cdbc2cf17ab5c96f87 | licenses: ["BSD-2-Clause"] | count: null | events: null to null
max_issues: path: geotrek/maintenance/migrations/0004_auto__del_field_intervention_comments__add_field_intervention_descript.py | repo: jmdecastel/GEOTADMIN | head: 15547c0a99ae4c541ca517cdbc2cf17ab5c96f87 | licenses: ["BSD-2-Clause"] | count: null | events: null to null
max_forks:  path: geotrek/maintenance/migrations/0004_auto__del_field_intervention_comments__add_field_intervention_descript.py | repo: jmdecastel/GEOTADMIN | head: 15547c0a99ae4c541ca517cdbc2cf17ab5c96f87 | licenses: ["BSD-2-Clause"] | count: null | events: null to null
content:
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('m_t_intervention', 'commentaire', 'descriptif')
def backwards(self, orm):
db.rename_column('m_t_intervention', 'descriptif', 'commentaire')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'common.organism': {
'Meta': {'ordering': "['organism']", 'object_name': 'Organism', 'db_table': "'m_b_organisme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organism': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'organisme'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.comfort': {
'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.datasource': {
'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.network': {
'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.path': {
'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"})
},
u'core.pathaggregation': {
'Meta': {'ordering': "['id']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
},
u'core.stake': {
'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.topology': {
'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
},
u'core.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
},
u'maintenance.contractor': {
'Meta': {'ordering': "['contractor']", 'object_name': 'Contractor', 'db_table': "'m_b_prestataire'"},
'contractor': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'prestataire'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'maintenance.funding': {
'Meta': {'object_name': 'Funding', 'db_table': "'m_r_chantier_financement'"},
'amount': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'montant'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organism': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['common.Organism']", 'db_column': "'organisme'"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.Project']", 'db_column': "'chantier'"})
},
u'maintenance.intervention': {
'Meta': {'object_name': 'Intervention', 'db_table': "'m_t_intervention'"},
'area': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_column': "'surface'"}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'db_column': "'date'"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'descriptif'", 'blank': 'True'}),
'disorders': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'interventions'", 'blank': 'True', 'db_table': "'m_r_intervention_desordre'", 'to': u"orm['maintenance.InterventionDisorder']"}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
'height': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'hauteur'"}),
'heliport_cost': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'cout_heliport'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maintenance.InterventionJob']", 'through': u"orm['maintenance.ManDay']", 'symmetrical': 'False'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'material_cost': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'cout_materiel'"}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'interventions'", 'null': 'True', 'db_column': "'chantier'", 'to': u"orm['maintenance.Project']"}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interventions'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.InterventionStatus']", 'db_column': "'status'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'subcontract_cost': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'cout_soustraitant'"}),
'subcontracting': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'sous_traitance'"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'interventions_set'", 'null': 'True', 'to': u"orm['core.Topology']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.InterventionType']", 'null': 'True', 'db_column': "'type'", 'blank': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'largeur'"})
},
u'maintenance.interventiondisorder': {
'Meta': {'ordering': "['disorder']", 'object_name': 'InterventionDisorder', 'db_table': "'m_b_desordre'"},
'disorder': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'desordre'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'maintenance.interventionjob': {
'Meta': {'ordering': "['job']", 'object_name': 'InterventionJob', 'db_table': "'m_b_fonction'"},
'cost': ('django.db.models.fields.DecimalField', [], {'default': '1.0', 'db_column': "'cout_jour'", 'decimal_places': '2', 'max_digits': '8'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'fonction'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'maintenance.interventionstatus': {
'Meta': {'ordering': "['id']", 'object_name': 'InterventionStatus', 'db_table': "'m_b_suivi'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'status'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'maintenance.interventiontype': {
'Meta': {'ordering': "['type']", 'object_name': 'InterventionType', 'db_table': "'m_b_intervention'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'type'"})
},
u'maintenance.manday': {
'Meta': {'object_name': 'ManDay', 'db_table': "'m_r_intervention_fonction'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intervention': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.Intervention']", 'db_column': "'intervention'"}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.InterventionJob']", 'db_column': "'fonction'"}),
'nb_days': ('django.db.models.fields.DecimalField', [], {'db_column': "'nb_jours'", 'decimal_places': '2', 'max_digits': '6'})
},
u'maintenance.project': {
'Meta': {'ordering': "['-begin_year', 'name']", 'object_name': 'Project', 'db_table': "'m_t_chantier'"},
'begin_year': ('django.db.models.fields.IntegerField', [], {'db_column': "'annee_debut'"}),
'comments': ('django.db.models.fields.TextField', [], {'db_column': "'commentaires'", 'blank': 'True'}),
'constraint': ('django.db.models.fields.TextField', [], {'db_column': "'contraintes'", 'blank': 'True'}),
'contractors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'db_table': "'m_r_chantier_prestataire'", 'to': u"orm['maintenance.Contractor']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.ProjectDomain']", 'null': 'True', 'db_column': "'domaine'", 'blank': 'True'}),
'end_year': ('django.db.models.fields.IntegerField', [], {'db_column': "'annee_fin'"}),
'founders': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['common.Organism']", 'through': u"orm['maintenance.Funding']", 'symmetrical': 'False'}),
'global_cost': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_column': "'cout_global'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'project_manager': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'manage'", 'db_column': "'maitre_ouvrage'", 'to': u"orm['common.Organism']"}),
'project_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'own'", 'db_column': "'maitre_oeuvre'", 'to': u"orm['common.Organism']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maintenance.ProjectType']", 'null': 'True', 'db_column': "'type'", 'blank': 'True'})
},
u'maintenance.projectdomain': {
'Meta': {'ordering': "['domain']", 'object_name': 'ProjectDomain', 'db_table': "'m_b_domaine'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'domaine'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'maintenance.projecttype': {
'Meta': {'ordering': "['type']", 'object_name': 'ProjectType', 'db_table': "'m_b_chantier'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'type'"})
}
}
complete_apps = ['maintenance']
| 100.888393
| 252
| 0.57591
|
a1920efd22067084ef1d3df4e99a09efa61481f9
| 408
|
py
|
Python
|
Assignments/a2_7.py
|
gargraghav/Probability-and-Statistics-for-Data-Scientists
|
756e6195e99f829f5e5de0c7389163dfbce1448b
|
[
"MIT"
] | null | null | null |
Assignments/a2_7.py
|
gargraghav/Probability-and-Statistics-for-Data-Scientists
|
756e6195e99f829f5e5de0c7389163dfbce1448b
|
[
"MIT"
] | null | null | null |
Assignments/a2_7.py
|
gargraghav/Probability-and-Statistics-for-Data-Scientists
|
756e6195e99f829f5e5de0c7389163dfbce1448b
|
[
"MIT"
] | null | null | null |
import numpy as np
def steady_state_power(transition_matrix):
# k >> 1
k = 1000
# raise to power function
res = np.linalg.matrix_power(transition_matrix,k)
print ("Steady_State: Power iteration >> " + str(res[1,:]))
return res[1,:]
matrix = [[0.9, 0, 0.1, 0],
[0.8, 0, 0.2, 0],
[0, 0.5, 0, 0.5],
[0, 0.1, 0, 0.9]
]
steady_state_power(matrix)
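# Cross-check (added illustration, not part of the original assignment): the
# steady state is also the left eigenvector of the transition matrix for
# eigenvalue 1, normalised to sum to 1, so both methods should agree up to
# floating-point error.
def steady_state_eig(transition_matrix):
    P = np.asarray(transition_matrix, dtype=float)
    # left eigenvectors of P are the right eigenvectors of P.T
    eigvals, eigvecs = np.linalg.eig(P.T)
    # take the eigenvector whose eigenvalue is closest to 1
    pi = np.real(eigvecs[:, np.argmin(np.abs(eigvals - 1.0))])
    pi = pi / pi.sum()
    print("Steady_State: Eigenvector method >> " + str(pi))
    return pi
steady_state_eig(matrix)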
| 25.5
| 63
| 0.546569
|
194e29921ff23552cf03f738d6ac9194d8bd35b2
| 11,194
|
py
|
Python
|
txjsonrpc/web/jsonrpc.py
|
aborilov/txjsonrpc
|
9501ab023a51ca6f3e37fcad3c9c9ff04223986b
|
[
"MIT"
] | null | null | null |
txjsonrpc/web/jsonrpc.py
|
aborilov/txjsonrpc
|
9501ab023a51ca6f3e37fcad3c9c9ff04223986b
|
[
"MIT"
] | null | null | null |
txjsonrpc/web/jsonrpc.py
|
aborilov/txjsonrpc
|
9501ab023a51ca6f3e37fcad3c9c9ff04223986b
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A generic resource for publishing objects via JSON-RPC.
Requires simplejson; can be downloaded from
http://cheeseshop.python.org/pypi/simplejson
API Stability: unstable
Maintainer: U{Duncan McGreggor<mailto:oubiwann@adytum.us>}
"""
from __future__ import nested_scopes
import urlparse
import xmlrpclib
from twisted.web import resource, server
from twisted.internet import defer, reactor
from twisted.python import log, context
from twisted.web import http
from txjsonrpc import jsonrpclib
from txjsonrpc.jsonrpc import BaseProxy, BaseQueryFactory, BaseSubhandler
# Useful so people don't need to import xmlrpclib directly.
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime
def requires_auth():
def inner(method):
method.requires_auth = True
return method
return inner
class NoSuchFunction(Fault):
"""
There is no function by the given name.
"""
class Unauthorized(jsonrpclib.Fault):
def __init__(self, message):
Fault.__init__(self, 4000, message)
class Handler:
"""
Handle a JSON-RPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
    For example, let's say we want to authenticate against twisted.cred,
run a LDAP query and then pass its result to a database query, all
as a result of a single JSON-RPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
# the JSON-RPC resource we are connected to
self.resource = resource
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
        # event-driven equivalent of 'raise NotImplementedError'
self.result.errback(
NotImplementedError("Implement run() in subclasses"))
class JSONRPC(resource.Resource, BaseSubhandler):
"""
A resource that implements JSON-RPC.
Methods published can return JSON-RPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'jsonrpc_' are published.
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
except_map = {}
auth_token = "Auth-Token"
def __init__(self):
resource.Resource.__init__(self)
BaseSubhandler.__init__(self)
def render(self, request):
request.content.seek(0, 0)
# Unmarshal the JSON-RPC data.
content = request.content.read()
log.msg("Client({}): {}".format(request.client, content))
        if not content and request.method == 'GET' and request.args.has_key('request'):
            content = request.args['request'][0]
self.callback = request.args['callback'][0] if request.args.has_key('callback') else None
self.is_jsonp = True if self.callback else False
parsed = jsonrpclib.loads(content)
functionPath = parsed.get("method")
params = parsed.get('params', {})
args, kwargs = [], {}
if params.__class__ == list:
args = params
else:
kwargs = params
id = parsed.get('id')
token = None
if request.requestHeaders.hasHeader(self.auth_token):
token = request.requestHeaders.getRawHeaders(self.auth_token)[0]
version = parsed.get('jsonrpc')
if version:
version = int(float(version))
elif id and not version:
version = jsonrpclib.VERSION_1
else:
version = jsonrpclib.VERSION_PRE1
# XXX this all needs to be re-worked to support logic for multiple
# versions...
try:
function = self._getFunction(functionPath)
d = None
if hasattr(function, 'requires_auth'):
d = defer.maybeDeferred(self.auth, token, functionPath)
except jsonrpclib.Fault, f:
self._cbRender(f, request, id, version)
else:
if not self.is_jsonp:
request.setHeader("content-type", "application/json")
else:
request.setHeader("content-type", "text/javascript")
if d:
d.addCallback(context.call, function, *args, **kwargs)
else:
d = defer.maybeDeferred(function, *args, **kwargs)
d.addErrback(self._ebRender, id)
d.addCallback(self._cbRender, request, id, version)
def _responseFailed(err, call):
call.cancel()
request.notifyFinish().addErrback(_responseFailed, d)
return server.NOT_DONE_YET
def _cbRender(self, result, request, id, version):
if isinstance(result, Handler):
result = result.result
if version == jsonrpclib.VERSION_PRE1:
if not isinstance(result, jsonrpclib.Fault):
result = (result,)
# Convert the result (python) to JSON-RPC
try:
s = jsonrpclib.dumps(result, id=id, version=version) if not self.is_jsonp else "%s(%s)" %(self.callback,jsonrpclib.dumps(result, id=id, version=version))
except:
f = jsonrpclib.Fault(self.FAILURE, "can't serialize output")
s = jsonrpclib.dumps(f, id=id, version=version) if not self.is_jsonp else "%s(%s)" %(self.callback,jsonrpclib.dumps(f, id=id, version=version))
request.setHeader("content-length", str(len(s)))
request.write(s)
request.finish()
def _map_exception(self, exception):
return self.except_map.get(exception, self.FAILURE)
def _ebRender(self, failure, id):
if isinstance(failure.value, jsonrpclib.Fault):
return failure.value
log.err(failure)
message = failure.value.message
code = self._map_exception(type(failure.value))
return jsonrpclib.Fault(code, message)
def auth(self, token, func):
return True
class QueryProtocol(http.HTTPClient):
def connectionMade(self):
self.sendCommand('POST', self.factory.path)
self.sendHeader('User-Agent', 'Twisted/JSONRPClib')
self.sendHeader('Host', self.factory.host)
self.sendHeader('Content-type', 'application/json')
self.sendHeader('Content-length', str(len(self.factory.payload)))
if self.factory.user:
auth = '%s:%s' % (self.factory.user, self.factory.password)
auth = auth.encode('base64').strip()
self.sendHeader('Authorization', 'Basic %s' % (auth,))
self.endHeaders()
self.transport.write(self.factory.payload)
def handleStatus(self, version, status, message):
if status != '200':
self.factory.badStatus(status, message)
def handleResponse(self, contents):
self.factory.parseResponse(contents)
class QueryFactory(BaseQueryFactory):
deferred = None
protocol = QueryProtocol
def __init__(self, path, host, method, user=None, password=None,
version=jsonrpclib.VERSION_PRE1, *args):
BaseQueryFactory.__init__(self, method, version, *args)
self.path, self.host = path, host
self.user, self.password = user, password
class Proxy(BaseProxy):
"""
A Proxy for making remote JSON-RPC calls.
Pass the URL of the remote JSON-RPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
"""
def __init__(self, url, user=None, password=None,
version=jsonrpclib.VERSION_PRE1, factoryClass=QueryFactory, ssl_ctx_factory = None):
"""
@type url: C{str}
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type user: C{str} or None
@param user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from C{url}
if present.
@type password: C{str} or None
@param password: The password with which to authenticate with the
server when making calls. If specified, overrides any password
information embedded in C{url}. If not specified, a value may be taken
from C{url} if present.
@type version: C{int}
@param version: The version indicates which JSON-RPC spec to support.
The available choices are jsonrpclib.VERSION*. The default is to use
the version of the spec that txJSON-RPC was originally released with,
pre-Version 1.0.
@type ssl_ctx_factory: C{twisted.internet.ssl.ClientContextFactory} or None
@param ssl_ctx_factory: SSL client context factory class to use instead
of default twisted.internet.ssl.ClientContextFactory.
"""
BaseProxy.__init__(self, version, factoryClass)
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
netlocParts = netloc.split('@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
except:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
except:
self.port = None
self.path = path
if self.path in ['', None]:
self.path = '/'
self.secure = (scheme == 'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
self.ssl_ctx_factory = ssl_ctx_factory
def callRemote(self, method, *args, **kwargs):
version = self._getVersion(kwargs)
# XXX generate unique id and pass it as a parameter
factoryClass = self._getFactoryClass(kwargs)
factory = factoryClass(self.path, self.host, method, self.user,
self.password, version, *args)
if self.secure:
from twisted.internet import ssl
if self.ssl_ctx_factory is None:
self.ssl_ctx_factory = ssl.ClientContextFactory
reactor.connectSSL(self.host, self.port or 443,
factory, self.ssl_ctx_factory())
else:
reactor.connectTCP(self.host, self.port or 80, factory)
return factory.deferred
__all__ = ["JSONRPC", "Handler", "Proxy"]
| 35.993569
| 165
| 0.637931
|
73ef43e9b11b24e7c11fc90e7ad23eed08ba90cb
| 6,247
|
py
|
Python
|
mmdet/core/bbox/assigners/cross_assigner.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/bbox/assigners/cross_assigner.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/bbox/assigners/cross_assigner.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
import torch
from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class CrossAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each point.
    Each proposal will be assigned `0` or a positive integer
    indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
"""
def __init__(self, scale=4, pos_num=3):
self.scale = scale
self.pos_num = pos_num
def assign(self, cross, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to points.
This method assign a gt bbox to every points set, each points set
will be assigned with the background_label (-1), or a label number.
-1 is background, and semi-positive number is the index (0-based) of
assigned gt.
The assignment is done in following steps, the order matters.
1. assign every points to the background_label (-1)
2. A point is assigned to some gt bbox if
(i) the point is within the k closest points to the gt bbox
(ii) the distance between this point and the gt is smaller than
other gt bboxes
Args:
points (Tensor): points to be assigned, shape(n, 3) while last
dimension stands for (x, y, stride).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
NOTE: currently unused.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_cross = cross.shape[0]
num_gts = gt_bboxes.shape[0]
if num_gts == 0 or num_cross == 0:
# If no truth assign everything to the background
assigned_gt_inds = cross.new_full((num_cross, ),
0,
dtype=torch.long)
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = cross.new_full((num_cross, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
        cross_xy = cross[:, :2]
        cross_w = cross[:, 2]
        cross_stride = cross_w * 0.25
cross_lvl = torch.log2(
cross_stride).int() # [3...,4...,5...,6...,7...]
lvl_min, lvl_max = cross_lvl.min(), cross_lvl.max()
# assign gt box
gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
scale = self.scale
gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
# stores the assigned gt index of each point
assigned_gt_inds = cross.new_zeros((num_cross, ), dtype=torch.long)
# stores the assigned gt dist (to this point) of each point
assigned_gt_dist = cross.new_full((num_cross, ), float('inf'))
cross_range = torch.arange(cross.shape[0])
for idx in range(num_gts):
gt_lvl = gt_bboxes_lvl[idx]
# get the index of points in this level
lvl_idx = gt_lvl == cross_lvl
cross_index = cross_range[lvl_idx]
# get the points in this level
lvl_cross = cross_xy[lvl_idx, :]
            lvl_cross_w = cross_w[lvl_idx]
# get the center point of gt
gt_cross = gt_bboxes_xy[[idx], :]
# get width and height of gt
gt_wh = gt_bboxes_wh[[idx], :]
# compute the distance between gt center and
# all points in this level
cross_gt_dist = ((lvl_cross - gt_cross) / gt_wh).norm(dim=1)
# find the nearest k points to gt center in this level
min_dist, min_dist_index = torch.topk( #orig,qhq
cross_gt_dist, self.pos_num, largest=False)
# the index of nearest k points to gt center in this level
min_dist_cross_index = cross_index[min_dist_index]
# The less_than_recorded_index stores the index
# of min_dist that is less then the assigned_gt_dist. Where
# assigned_gt_dist stores the dist from previous assigned gt
# (if exist) to each point.
less_than_recorded_index = min_dist < assigned_gt_dist[
min_dist_cross_index]
# The min_dist_points_index stores the index of points satisfy:
# (1) it is k nearest to current gt center in this level.
# (2) it is closer to current gt center than other gt center.
min_dist_cross_index = min_dist_cross_index[
less_than_recorded_index]
assigned_gt_inds[min_dist_cross_index] = idx + 1
assigned_gt_dist[min_dist_cross_index] = min_dist[
less_than_recorded_index]
# assigned_gt_inds[pos_cross_index] = idx + 1
# assigned_gt_dist[pos_cross_index] = min_dist[
# less_than_recorded_index]
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_cross, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
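# Example (illustrative, not part of the module; the tensor values are made
# up).  Three cross points on stride-8 cells (w = 32, so log2(0.25 * w) = 3)
# assigned against a single 32x32 gt box:
#
#     cross = torch.tensor([[16., 16., 32.],
#                           [48., 16., 32.],
#                           [80., 16., 32.]])
#     gt_bboxes = torch.tensor([[0., 0., 32., 32.]])
#     gt_labels = torch.tensor([1])
#     assigner = CrossAssigner(scale=4, pos_num=1)
#     result = assigner.assign(cross, gt_bboxes, gt_labels=gt_labels)
#     # result.gt_inds -> tensor([1, 0, 0]): only the point at the gt centre
#     # becomes positive; the other two stay background.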
| 46.274074
| 79
| 0.576597
|
e90f49d4e163de25f227d57ac8bfed4d4fdec081
| 5,691
|
py
|
Python
|
AdvancedDataStructures/PersistentDS/persistence.py
|
StevenBryceLee/AdvancedDataStructures
|
0a1d5190c618bdd2edab1a6afd2212cdedd95285
|
[
"MIT"
] | null | null | null |
AdvancedDataStructures/PersistentDS/persistence.py
|
StevenBryceLee/AdvancedDataStructures
|
0a1d5190c618bdd2edab1a6afd2212cdedd95285
|
[
"MIT"
] | null | null | null |
AdvancedDataStructures/PersistentDS/persistence.py
|
StevenBryceLee/AdvancedDataStructures
|
0a1d5190c618bdd2edab1a6afd2212cdedd95285
|
[
"MIT"
] | null | null | null |
'''
This file is to follow along with session 1 of
MIT advanced Data Structures: Persistent Data Structures
'''
import numpy as np
from numpy import array
'''
Definitions
------------------
Pointer machine
A class or struct of pointers to other nodes.
Memory model
Operations
x = new node
x = y.field
x.field = y
root node
There is always a root node, and x and y are fields of the root
You can always find a node via the root
Temporal DS
-persistence: Where you don't forget anything
If you make a change in the past, you get a different universe
Persistence:
remember everything and keep all versions of data structures
All DS operations are relative to a specified version
An update makes and returns a new version
4 levels of persistence
Partial persistence
Only allowed to update the latest version, versions are ordered linearly
This allows looking at past versions, but writes are not allowed
Full persistence
update any version
The versions form a tree, through reference to a root
not possible to merge versions
confluent persistence
possible to combine two versions which creates a new version
Not possible to destroy versions
The new versions form a directed acyclic graph
functional persistence
never modify any nodes, can only make new nodes
Partial persistence
Any pointer-machine DS is accepted
There must be a constant number of pointers into any node
Any node can be made to be partially persistent with:
O(1) amortized factor overhead
O(1) space / change in DS
Back pointers are stored, but only for latest version of DS
modifications are stored as (version, field changed, value changed to)
A field read would require a field and version number, so you can see any past value
a field modify (node.field = x):
if node not full: add modification, increment version
else: new node' with all mods, including latest mod
            The new node starts with an empty mod log
update back pointers from node -> node'
recursively update pointers
prof: "I claim this is good"
potential method of amortization analysis
c * sum(number of mods in latest version nodes)
c is a constant factor
When making a new node, since mods are empty, potential cost is low
amortized cost
At most <= c + c + [-2cp + p * number of recursions]
A constant time factor + cost if node not full + cost of changing pointers * cost of recursions
-2cp term comes from cancelling the initial if condition cost, which occurs because you are counting that in recursions
mind bending, since each recursion will cost 2c, the terms will cancel. Since that is the case, we have O(1)
Full persistence:
Versions are now nodes on a tree, rather than a line
    To solve this, we linearize the tree of versions
We linearize by traversing the tree, which is done in linear time
Based on his example, in order traversal, but probably fine to do any ordering
We need to maintain the order of each subtree
Using time travel, we take a DS from lecture 8 called an order-maintenance DS
This is formally called a magical linked list
You may insert an item before or after a given item in O(1)
You may find the relative order of two items in the list in O(1)
Is item X before or after item Y
This allows you to add new versions to the tree in constant time
Formally, is version V an ancestor of version W
True iff bv < bw < ew < ev
This means the first visit to v happens before visiting w
the last visit to v happens after the last visit to w
Any pointer-machine data structure can be made fully persistent with O(1) amortized factor overhead
    In order to store mods, you need 2 * (number of fields or in-degree) + (number of pointers or out-degree) + 1
    To modify, i.e. node.field = x:
if node not full: add mod
else...
split the node into two halves each half full of mods
The old node is where it used to be
Make a new node
Apply half of the mods from the old node to the new node
This is actually splitting a tree of mods such that half the nodes are in a new tree
This will be (d + p + 1) mods
recursively update at most 2d + 2p + 1 pointers to the node
Potential function
-c * sum(# of empty mod slots)
Subtract recursion as c * 2 * (d + p + 1)
Deamortized costs
O(1) worst case in partial persistence modification
Open problem in full persistence modification
Confluent persistence
Consider a string. Every time you split the string, you have one more string
Every time you concatenate, you have 1 less string
If you pick random spots to copy and paste, you can double the size of the string in O(1)
in x updates, you could get a size of 2 ^ x
effective depth of a version ie e(v):
1 + log($ of paths from root to vertex)
overhead:
log(# of updates) + max(effective depth)
Lower bound:
sum(e(v))
disjoint transform
if you assume that confluent operations are performed only on two versions with no shared nodes
Then you can get O(log(n)) overhead
functional data structures
Balanced binary search trees, search and mod takes O(log(n))
        deques with concatenation in O(1)
log(n) separation from functional to optimal
'''
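# Rough sketch (added illustration, not from the lecture notes themselves): a
# partially persistent field store using the "modification log" idea above.
# Each node keeps its original fields plus a bounded list of
# (version, field, value) mods; reads walk the log backwards to find the
# newest mod at or before the requested version.  Back pointers and the
# node-splitting step that keeps the per-node log O(1) are omitted.
class PartiallyPersistentNode(object):
    MAX_MODS = 4  # arbitrary bound for the sketch

    def __init__(self, **fields):
        self.fields = dict(fields)  # version-0 values
        self.mods = []              # (version, field, value), version-ordered

    def read(self, field, version):
        # the newest mod for this field at or before `version` wins
        for mod_version, mod_field, value in reversed(self.mods):
            if mod_field == field and mod_version <= version:
                return value
        return self.fields[field]

    def write(self, field, value, version):
        # partial persistence assumes writes only happen at the latest version
        if len(self.mods) < self.MAX_MODS:
            self.mods.append((version, field, value))
        else:
            # the real structure copies the node with all mods applied and
            # updates back pointers; the sketch just stops here
            raise NotImplementedError("node full: split into a new node")

# n = PartiallyPersistentNode(x=1)
# n.write('x', 2, version=1)
# n.read('x', 0) -> 1    n.read('x', 1) -> 2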
| 47.425
| 123
| 0.68213
|
158c5c0198743e7a21ba0e723ab8fe2453bd9eee
| 4,929
|
py
|
Python
|
Neural_Netowrk.py
|
thakur-nishant/Flappy-Bird-NeuroEvolution
|
93e09d0693c02f7479b4c2f816f0ef25e376004c
|
[
"MIT"
] | 1
|
2018-03-03T23:55:43.000Z
|
2018-03-03T23:55:43.000Z
|
Neural_Netowrk.py
|
thakur-nishant/Flappy-Bird-NeuroEvolution
|
93e09d0693c02f7479b4c2f816f0ef25e376004c
|
[
"MIT"
] | 2
|
2018-03-01T18:39:29.000Z
|
2018-03-03T23:53:43.000Z
|
Neural_Netowrk.py
|
thakur-nishant/Flappy-Bird-NeuroEvolution
|
93e09d0693c02f7479b4c2f816f0ef25e376004c
|
[
"MIT"
] | 3
|
2018-02-25T20:41:23.000Z
|
2018-03-02T15:44:38.000Z
|
import numpy as np
class NeuronLayer:
def __init__(self, neurons, neurons_in_previous_layer, b=[0.0], w=[[0.0]]):
# import time
# np.random.seed(int(time.time()))
# np.random.seed(1)
self.bias = np.random.random()
self.weights_from_previous_to_this = 2 * np.random.random((neurons_in_previous_layer, neurons)) - self.bias
# self.bias = b
# self.weights_from_previous_to_this = w
# print("bias", self.bias)
# print("weights", self.weights_from_previous_to_this)
# print("----")
# self.weights_from_previous_to_this = 2 * np.random.random((neurons_in_previous_layer, neurons)) - 1
class NeuralNetwork:
def __init__(self, input_nodes, hidden_nodes, output_nodes):
self.hidden_layer = NeuronLayer(hidden_nodes, input_nodes, b=[0.8990440160748187],
w=[[-0.05353412, 0.6649129, -0.24340758, -0.0416839],
[-0.55266982, -0.50163635, 0.28309926, -0.07587811]])
self.output_layer = NeuronLayer(output_nodes, hidden_nodes, b=[0.23084011213966216], w=[[1.58517134],
[0.25307128],
[1.08582508],
[1.10956104]])
def get_hidden_weights_and_bias(self):
return self.hidden_layer.weights_from_previous_to_this, self.hidden_layer.bias
def get_output_weights_and_bias(self):
return self.output_layer.weights_from_previous_to_this, self.output_layer.bias
def set_hidden_weights_and_bias(self, hidden_weights, hidden_bias):
self.hidden_layer.weights_from_previous_to_this = hidden_weights
self.hidden_layer.bias = hidden_bias
def set_output_weights_and_bias(self, output_weights, output_bias):
self.output_layer.weights_from_previous_to_this = output_weights
self.output_layer.bias = output_bias
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.predict(training_set_inputs)
output_error = training_set_outputs - output_from_layer_2
output_gradient = output_error * self.sigmoid_derivative(output_from_layer_2)
hidden_error = np.dot(output_gradient, self.output_layer.weights_from_previous_to_this.T)
hidden_gradient = hidden_error * self.sigmoid_derivative(output_from_layer_1)
hidden_adjustment = np.dot(training_set_inputs.T, hidden_gradient)
output_adjustment = np.dot(output_from_layer_1.T, output_gradient)
self.hidden_layer.weights_from_previous_to_this += hidden_adjustment
self.output_layer.weights_from_previous_to_this += output_adjustment
self.hidden_layer.bias = self.hidden_layer.bias + hidden_gradient
self.output_layer.bias = self.output_layer.bias + output_gradient
def predict(self, inputs, test=False):
hidden_output = self.sigmoid(
np.dot(inputs, self.hidden_layer.weights_from_previous_to_this)
)
output = self.sigmoid(
np.dot(hidden_output, self.output_layer.weights_from_previous_to_this)
)
if test:
print("--")
print(inputs)
print(hidden_output)
print(output)
print("--")
return hidden_output, output
def print_weights(self):
print(" Layer 1 (4 neurons, each with 3 inputs): ")
print(self.hidden_layer.weights_from_previous_to_this)
print(" Layer 2 (1 neuron, with 4 inputs):")
print(self.output_layer.weights_from_previous_to_this)
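# Sketch (added, not part of the original file): one way the getter/setter
# pairs above could be used for neuro-evolution -- copy a parent's weights
# into a child and perturb a random subset of them.  The function name,
# mutation_rate and noise_scale are arbitrary choices for the example.
def mutated_copy(parent, mutation_rate=0.1, noise_scale=0.5):
    hidden_w, hidden_b = parent.get_hidden_weights_and_bias()
    output_w, output_b = parent.get_output_weights_and_bias()
    # infer the architecture from the weight shapes
    child = NeuralNetwork(hidden_w.shape[0], hidden_w.shape[1], output_w.shape[1])
    # boolean masks select which weights get Gaussian noise added
    hidden_mask = np.random.rand(*hidden_w.shape) < mutation_rate
    output_mask = np.random.rand(*output_w.shape) < mutation_rate
    child.set_hidden_weights_and_bias(
        hidden_w + noise_scale * np.random.randn(*hidden_w.shape) * hidden_mask,
        hidden_b)
    child.set_output_weights_and_bias(
        output_w + noise_scale * np.random.randn(*output_w.shape) * output_mask,
        output_b)
    return child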
if __name__ == "__main__":
input_nodes = 2
hidden_nodes = 4
output_nodes = 1
neural_network = NeuralNetwork(input_nodes, hidden_nodes, output_nodes)
training_set_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
training_set_outputs = np.array([[0, 1, 1, 0]]).T
# training_set_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# training_set_outputs = np.array([[0, 1, 1, 0]]).T
neural_network.train(training_set_inputs, training_set_outputs, 5000)
hidden_state, output = neural_network.predict(np.array([0, 0]))
print(output)
hidden_state, output = neural_network.predict(np.array([0, 1]))
print(output)
hidden_state, output = neural_network.predict(np.array([1, 0]))
print(output)
hidden_state, output = neural_network.predict(np.array([1, 1]))
print(output)
| 44.405405
| 115
| 0.634409
|
38d1944ffd42849898f6d3320dae46bbf2626b12
| 863
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
xueran1991/CarND-Capstone
|
ed0fe33b5c9b8590788ae9def2d0ea5d9c7439ff
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
xueran1991/CarND-Capstone
|
ed0fe33b5c9b8590788ae9def2d0ea5d9c7439ff
|
[
"MIT"
] | 8
|
2020-09-26T00:43:15.000Z
|
2022-02-10T01:12:34.000Z
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
xueran1991/CarND-Capstone
|
ed0fe33b5c9b8590788ae9def2d0ea5d9c7439ff
|
[
"MIT"
] | null | null | null |
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
def __init__(self):
#TODO load classifier
self.classes = {0: TrafficLight.RED,
1: TrafficLight.YELLOW,
2: TrafficLight.GREEN,
4: TrafficLight.UNKNOWN}
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
uint8 UNKNOWN=4
uint8 GREEN=2
uint8 YELLOW=1
uint8 RED=0
"""
#TODO implement light color prediction
return TrafficLight.UNKNOWN
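# Sketch only (my illustration, not the project's classifier): one common way
# the TODO above gets filled in is a coarse HSV colour vote.  Assumes OpenCV
# (cv2) and numpy are available and that `image` is a BGR cv::Mat; the
# threshold values are rough guesses, not tuned numbers.
#
#     import cv2
#     import numpy as np
#
#     def classify_by_hsv(image):
#         hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
#         red = cv2.bitwise_or(
#             cv2.inRange(hsv, (0, 100, 100), (10, 255, 255)),
#             cv2.inRange(hsv, (160, 100, 100), (180, 255, 255)))
#         yellow = cv2.inRange(hsv, (20, 100, 100), (35, 255, 255))
#         green = cv2.inRange(hsv, (45, 100, 100), (95, 255, 255))
#         counts = [np.count_nonzero(red),
#                   np.count_nonzero(yellow),
#                   np.count_nonzero(green)]
#         if max(counts) < 50:  # too few coloured pixels to decide
#             return TrafficLight.UNKNOWN
#         return [TrafficLight.RED, TrafficLight.YELLOW,
#                 TrafficLight.GREEN][int(np.argmax(counts))]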
| 27.83871
| 80
| 0.531866
|
807a60c4758a6c8e0b1ff9a94fcaea8844adc260
| 888
|
py
|
Python
|
run_story.py
|
grayarea11235/zvm2
|
50afb650b730ab36918d3a4ac1b650d2d48e56bd
|
[
"BSD-3-Clause"
] | null | null | null |
run_story.py
|
grayarea11235/zvm2
|
50afb650b730ab36918d3a4ac1b650d2d48e56bd
|
[
"BSD-3-Clause"
] | null | null | null |
run_story.py
|
grayarea11235/zvm2
|
50afb650b730ab36918d3a4ac1b650d2d48e56bd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
import os.path
from zvm import zmachine, trivialzui
def usage():
print """Usage: %s <story file>
Run a Z-Machine story under ZVM.
""" % sys.argv[0]
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
story_file = sys.argv[1]
if not os.path.isfile(story_file):
print "%s is not a file." % story_file
usage()
try:
# f = file(story_file)
f = open(story_file, 'rb')
story_image = f.read()
print len(story_image)
f.close()
except IOError:
print "Error accessing %s" % story_file
sys.exit(1)
print len(story_image)
machine = zmachine.ZMachine(story_image,
ui=trivialzui.create_zui(),
debugmode=True)
machine.run()
if __name__ == '__main__':
main()
| 22.2
| 59
| 0.545045
|
b8623cb7dc54ff22eb681a4f7d66be2adf87abf6
| 83
|
py
|
Python
|
library/src/detectors/autoencoder/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 3
|
2019-11-01T04:51:51.000Z
|
2019-12-17T04:25:18.000Z
|
library/src/detectors/htm/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 1
|
2019-11-11T18:29:36.000Z
|
2019-11-11T18:29:36.000Z
|
library/src/detectors/autoencoder/__init__.py
|
unSAD-admin/unSAD
|
9f1d0e680a0086d140bc8d1c55fe21dd7de87df5
|
[
"Apache-2.0"
] | 2
|
2019-12-18T11:49:00.000Z
|
2020-03-27T20:06:15.000Z
|
# Created by Xinyu Zhu on 10/5/2019, 11:03 PM
import sys
sys.path.append("../../")
| 20.75
| 45
| 0.650602
|
0ad3bbc834beff47374fc02a0990463ece79798f
| 598
|
py
|
Python
|
python/test.py
|
CiscoDevNet/flare
|
def72b74961a27c441f31039b4c3b1c61a870f1d
|
[
"Apache-2.0"
] | 18
|
2015-10-29T13:02:09.000Z
|
2021-11-15T15:34:34.000Z
|
python/test.py
|
CiscoDevNet/flare
|
def72b74961a27c441f31039b4c3b1c61a870f1d
|
[
"Apache-2.0"
] | 4
|
2015-11-03T17:29:29.000Z
|
2016-03-31T13:41:16.000Z
|
python/test.py
|
CiscoDevNet/flare
|
def72b74961a27c441f31039b4c3b1c61a870f1d
|
[
"Apache-2.0"
] | 11
|
2015-10-28T14:13:37.000Z
|
2021-11-15T15:34:36.000Z
|
import flare
for environment in flare.getEnvironments():
environment_id = environment['_id']
print(environment_id + ' - ' + environment['name'])
for zone in flare.getZones(environment_id):
zone_id = zone['_id']
print(' ' + zone_id + ' - ' + zone['name'])
for thing in flare.getThings(environment_id, zone_id):
thing_id = thing['_id']
print(' ' + thing_id + ' - ' + thing['name'])
for device in flare.getDevices(environment_id):
device_id = device['_id']
print(' ' + device_id + ' - ' + device['name'])
| 33.222222
| 62
| 0.575251
|
5729ce1d7028d6178e54f615400fd3a4ea86f376
| 874
|
py
|
Python
|
tools/workspace_status.py
|
GerritCodeReview/plugins_rename-project
|
f49236d1b697bef4566914d9717ac22222d05c40
|
[
"Apache-2.0"
] | null | null | null |
tools/workspace_status.py
|
GerritCodeReview/plugins_rename-project
|
f49236d1b697bef4566914d9717ac22222d05c40
|
[
"Apache-2.0"
] | 1
|
2019-03-07T09:24:50.000Z
|
2019-03-07T09:24:50.000Z
|
tools/workspace_status.py
|
GerritCodeReview/plugins_rename-project
|
f49236d1b697bef4566914d9717ac22222d05c40
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# This script will be run by bazel when the build process starts to
# generate key-value information that represents the status of the
# workspace. The output should be like
#
# KEY1 VALUE1
# KEY2 VALUE2
#
# If the script exits with non-zero code, it's considered as a failure
# and the output will be discarded.
from __future__ import print_function
import subprocess
import sys
CMD = ['git', 'describe', '--always', '--match', 'v[0-9].*', '--dirty']
def revision():
try:
return subprocess.check_output(CMD).strip().decode("utf-8")
except OSError as err:
print('could not invoke git: %s' % err, file=sys.stderr)
sys.exit(1)
except subprocess.CalledProcessError as err:
print('error using git: %s' % err, file=sys.stderr)
sys.exit(1)
print("STABLE_BUILD_RENAME-PROJECT_LABEL %s" % revision())
| 27.3125
| 71
| 0.685355
|
fa992af92ba91ba7dc8f775a6940d47e4ddffc95
| 1,621
|
py
|
Python
|
problem_19/problem_19.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_19/problem_19.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
problem_19/problem_19.py
|
oltionzefi/daily-coding-problem
|
4fe3ec53e1f3c7d299849671fdfead462d548cd3
|
[
"MIT"
] | null | null | null |
import sys
# give this example
# 1 | 2 | 3
# 4 | 8 | 2
# 1 | 5 | 3
# the minimum cost to reach cell (2, 2) is
# 1 (x) | 2 (x) | 3
# 4 | 8 | 2 (x)
# 1 | 5 | 3 (x)
# to reach (m, n) must be through one of 3 cells (m-1, n-1) or (m-1, n) or (m, n-1)
# plus cost(m, n)
R = 3
C = 3
# using Minimum Cost Path
def min_cost(cost, m, n):
if n < 0 or m < 0:
return sys.maxsize
elif m == 0 and n == 0:
return cost[m][n]
else:
return cost[m][n] + min(min_cost(cost, m-1, n-1), min_cost(cost, m-1, n), min_cost(cost, m, n-1))
# but this solution repeats computation across overlapping recursive subproblems
# min_cost(cost, 2, 2)
# / | \ ...
# min_cost(cost, 1, 1) min_cost(cost, 1, 2) ...
# / \ / ...
# min_cost(cost, 0, 0) min_cost(0,1) ... min_cost(cost, 0, 1) ...
# so we try with bottom up approach
def min_cost_bu(cost, m, n):
total_cost = [[0 for i in range(C)] for i in range(R)]
total_cost[0][0] = cost[0][0]
# first column of total_cost
for i in range(1, m+1):
total_cost[i][0] = total_cost[i-1][0] + cost[i][0]
# first row of total_cost
for j in range(1, n+1):
total_cost[0][j] = total_cost[0][j-1] + cost[0][j]
# rest of cost
for i in range(1, m+1):
for j in range(1, n+1):
total_cost[i][j] = min(total_cost[i-1][j-1], total_cost[i-1][j], total_cost[i][j-1]) + cost[i][j]
return total_cost[m][n]
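# Quick check (added) using the grid from the comment at the top: both
# versions should report the same minimum cost of reaching cell (2, 2),
# namely 1 + 2 + 2 + 3 = 8 along the marked path.
if __name__ == "__main__":
    example_cost = [[1, 2, 3],
                    [4, 8, 2],
                    [1, 5, 3]]
    print(min_cost(example_cost, 2, 2))     # 8
    print(min_cost_bu(example_cost, 2, 2))  # 8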
| 30.584906
| 109
| 0.473782
|
b66dd4ecdc2d223325adc8e1007fcb29157b34c2
| 3,888
|
py
|
Python
|
open_api_tools/test/chain.py
|
specify/open_api_tools
|
97f7a63df1197ca4ecb5612caf82225cf1dddc2e
|
[
"MIT"
] | null | null | null |
open_api_tools/test/chain.py
|
specify/open_api_tools
|
97f7a63df1197ca4ecb5612caf82225cf1dddc2e
|
[
"MIT"
] | null | null | null |
open_api_tools/test/chain.py
|
specify/open_api_tools
|
97f7a63df1197ca4ecb5612caf82225cf1dddc2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Allow to test a chain of requests."""
import json
from typing import Callable, List, Dict, Union
from dataclasses import dataclass
from termcolor import colored
from open_api_tools.common.load_schema import Schema
from open_api_tools.test.test_endpoint import parse_parameters
from open_api_tools.test.utils import create_request_payload
from open_api_tools.validate.index import make_request
@dataclass
class Request:
"""Chain's request definition."""
method: str
endpoint: str
parameters: Union[
None, Dict[str, any], Callable[[any, List[any]], Dict[str, any]]
] = None
@dataclass
class Validate:
"""Chain's validator definition."""
validate: Callable[[any], bool]
def chain(
schema: Schema,
definition: List[Union[Request, Validate]],
before_request_send: Union[Callable[[str, any], any], None] = None,
):
"""Create a chain of requests.
Args:
schema: A schema object
definition:
Chain definition. More info in `README.md`
before_request_send:
            A pre-hook that allows the request object to be amended
"""
response = None
request = {"requestBody": None}
base_url = schema.schema.servers[0].url
for index, line in enumerate(definition):
if type(line) is Request:
print(
colored(f"[{index}/{len(definition)}] ", "cyan")
+ colored(
f"Fetching data from [{line.method}] {line.endpoint}",
"blue",
)
)
if line.endpoint not in schema.schema.paths:
raise Exception(
f"{line.endpoint} endpoint does not exist in your OpenAPI "
f"schema. Make sure to provide a URL without parameters "
f"and with a trailing '/' if it is present in the "
f"definition"
)
parameters = parse_parameters(
endpoint_name=line.endpoint,
endpoint_data=schema.schema.paths[line.endpoint],
method=line.method.lower(),
generate_examples=False,
)
if type(line.parameters) is dict:
request = line.parameters
elif callable(line.parameters):
request = line.parameters(parameters, response, request)
variation = [
request[parameter.name]
if parameter.name in request
else None
for parameter in parameters
]
body, request_url = create_request_payload(
line.endpoint, parameters, variation, base_url
)
response = make_request(
request_url=request_url,
endpoint_name=line.endpoint,
method=line.method.lower(),
body=body,
schema=schema,
before_request_send=lambda request: before_request_send(
line.endpoint, request
),
)
if response.type != "success":
raise Exception(
json.dumps(
response,
indent=4,
default=str,
)
)
response = response.response
elif type(line) is Validate:
print(
colored(f"[{index}/{len(definition)}] ", "cyan")
+ colored(
"Validating the response",
"blue",
)
)
if not line.validate(response):
return
else:
raise Exception(
                f'Invalid chain line detected at index {index}:'
f" {str(line)}"
)
| 29.679389
| 79
| 0.528035
|
ce841cb9e1a16858dcd15f1bdfa19870f546868a
| 9,677
|
py
|
Python
|
sdk/python/pulumi_azure_native/eventgrid/v20180101/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/v20180101/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/v20180101/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'EventHubEventSubscriptionDestinationResponse',
'EventSubscriptionFilterResponse',
'WebHookEventSubscriptionDestinationResponse',
]
@pulumi.output_type
class EventHubEventSubscriptionDestinationResponse(dict):
"""
Information about the event hub destination for an event subscription
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointType":
suggest = "endpoint_type"
elif key == "resourceId":
suggest = "resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EventHubEventSubscriptionDestinationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EventHubEventSubscriptionDestinationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EventHubEventSubscriptionDestinationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_type: str,
resource_id: Optional[str] = None):
"""
Information about the event hub destination for an event subscription
:param str endpoint_type: Type of the endpoint for the event subscription destination
Expected value is 'EventHub'.
:param str resource_id: The Azure Resource Id that represents the endpoint of an Event Hub destination of an event subscription.
"""
pulumi.set(__self__, "endpoint_type", 'EventHub')
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> str:
"""
Type of the endpoint for the event subscription destination
Expected value is 'EventHub'.
"""
return pulumi.get(self, "endpoint_type")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[str]:
"""
The Azure Resource Id that represents the endpoint of an Event Hub destination of an event subscription.
"""
return pulumi.get(self, "resource_id")
@pulumi.output_type
class EventSubscriptionFilterResponse(dict):
"""
Filter for the Event Subscription
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includedEventTypes":
suggest = "included_event_types"
elif key == "isSubjectCaseSensitive":
suggest = "is_subject_case_sensitive"
elif key == "subjectBeginsWith":
suggest = "subject_begins_with"
elif key == "subjectEndsWith":
suggest = "subject_ends_with"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EventSubscriptionFilterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EventSubscriptionFilterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EventSubscriptionFilterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
included_event_types: Optional[Sequence[str]] = None,
is_subject_case_sensitive: Optional[bool] = None,
subject_begins_with: Optional[str] = None,
subject_ends_with: Optional[str] = None):
"""
Filter for the Event Subscription
:param Sequence[str] included_event_types: A list of applicable event types that need to be part of the event subscription.
If it is desired to subscribe to all event types, the string "all" needs to be specified as an element in this list.
:param bool is_subject_case_sensitive: Specifies if the SubjectBeginsWith and SubjectEndsWith properties of the filter
should be compared in a case sensitive manner.
:param str subject_begins_with: An optional string to filter events for an event subscription based on a resource path prefix.
The format of this depends on the publisher of the events.
Wildcard characters are not supported in this path.
:param str subject_ends_with: An optional string to filter events for an event subscription based on a resource path suffix.
Wildcard characters are not supported in this path.
"""
if included_event_types is not None:
pulumi.set(__self__, "included_event_types", included_event_types)
if is_subject_case_sensitive is None:
is_subject_case_sensitive = False
if is_subject_case_sensitive is not None:
pulumi.set(__self__, "is_subject_case_sensitive", is_subject_case_sensitive)
if subject_begins_with is not None:
pulumi.set(__self__, "subject_begins_with", subject_begins_with)
if subject_ends_with is not None:
pulumi.set(__self__, "subject_ends_with", subject_ends_with)
@property
@pulumi.getter(name="includedEventTypes")
def included_event_types(self) -> Optional[Sequence[str]]:
"""
A list of applicable event types that need to be part of the event subscription.
If it is desired to subscribe to all event types, the string "all" needs to be specified as an element in this list.
"""
return pulumi.get(self, "included_event_types")
@property
@pulumi.getter(name="isSubjectCaseSensitive")
def is_subject_case_sensitive(self) -> Optional[bool]:
"""
Specifies if the SubjectBeginsWith and SubjectEndsWith properties of the filter
should be compared in a case sensitive manner.
"""
return pulumi.get(self, "is_subject_case_sensitive")
@property
@pulumi.getter(name="subjectBeginsWith")
def subject_begins_with(self) -> Optional[str]:
"""
An optional string to filter events for an event subscription based on a resource path prefix.
The format of this depends on the publisher of the events.
Wildcard characters are not supported in this path.
"""
return pulumi.get(self, "subject_begins_with")
@property
@pulumi.getter(name="subjectEndsWith")
def subject_ends_with(self) -> Optional[str]:
"""
An optional string to filter events for an event subscription based on a resource path suffix.
Wildcard characters are not supported in this path.
"""
return pulumi.get(self, "subject_ends_with")
@pulumi.output_type
class WebHookEventSubscriptionDestinationResponse(dict):
"""
Information about the webhook destination for an event subscription
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointBaseUrl":
suggest = "endpoint_base_url"
elif key == "endpointType":
suggest = "endpoint_type"
elif key == "endpointUrl":
suggest = "endpoint_url"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WebHookEventSubscriptionDestinationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WebHookEventSubscriptionDestinationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WebHookEventSubscriptionDestinationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_base_url: str,
endpoint_type: str,
endpoint_url: Optional[str] = None):
"""
Information about the webhook destination for an event subscription
:param str endpoint_base_url: The base URL that represents the endpoint of the destination of an event subscription.
:param str endpoint_type: Type of the endpoint for the event subscription destination
Expected value is 'WebHook'.
:param str endpoint_url: The URL that represents the endpoint of the destination of an event subscription.
"""
pulumi.set(__self__, "endpoint_base_url", endpoint_base_url)
pulumi.set(__self__, "endpoint_type", 'WebHook')
if endpoint_url is not None:
pulumi.set(__self__, "endpoint_url", endpoint_url)
@property
@pulumi.getter(name="endpointBaseUrl")
def endpoint_base_url(self) -> str:
"""
The base URL that represents the endpoint of the destination of an event subscription.
"""
return pulumi.get(self, "endpoint_base_url")
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> str:
"""
Type of the endpoint for the event subscription destination
Expected value is 'WebHook'.
"""
return pulumi.get(self, "endpoint_type")
@property
@pulumi.getter(name="endpointUrl")
def endpoint_url(self) -> Optional[str]:
"""
The URL that represents the endpoint of the destination of an event subscription.
"""
return pulumi.get(self, "endpoint_url")
| 41.178723
| 164
| 0.666736
|
a91e5207a71341bd5eebb3cf9f266898ad5eae57
| 43,542
|
py
|
Python
|
python/src/keyczar/keys.py
|
piplcom/keyczar
|
ac750b95c2dd496f906e5d0cc5e5ebdb42925fa8
|
[
"Apache-2.0"
] | null | null | null |
python/src/keyczar/keys.py
|
piplcom/keyczar
|
ac750b95c2dd496f906e5d0cc5e5ebdb42925fa8
|
[
"Apache-2.0"
] | null | null | null |
python/src/keyczar/keys.py
|
piplcom/keyczar
|
ac750b95c2dd496f906e5d0cc5e5ebdb42925fa8
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents cryptographic keys in Keyczar.
Identifies a key by its hash_id and type. Includes several subclasses
of base class Key.
@author: arkajit.dey@gmail.com (Arkajit Dey)
"""
from __future__ import division
from builtins import zip
from builtins import chr
from builtins import str
from past.utils import old_div
from builtins import object
import hmac
import math
import random
try:
# Import hashlib if Python >= 2.5
from hashlib import sha1
except ImportError:
import sha as sha1
from Crypto.Cipher import AES
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
try:
import simplejson as json
except ImportError:
import json
# do we have access to M2Crypto?
try:
from M2Crypto import EVP
except ImportError:
EVP = None
# overideable crypt library selection
ACTIVE_CRYPT_LIB = 'm2crypto' if EVP else 'pycrypto'
from . import errors
from . import keyczar
from . import keyinfo
from . import util
#TODO: Note that simplejson deals in Unicode strings. So perhaps we should
#modify all Read() methods to wrap data obtained from simplejson with str().
#Currently, only problem arose with base64 conversions -- this was dealt with
#directly in the encode/decode methods. Luckily 'hello' == u'hello'.
def GenKey(key_type, size=None):
"""
Generates a key of the given key_type and length.
@param key_type: the key_type of key to generate
@key_type key_type: L{keyinfo.KeyType}
@param size: the length in bits of the key to be generated
@key_type size: integer
@return: the generated key of the given key_type and size
@raise KeyczarError: if key_type is a public key or unsupported or if key size
is unsupported.
"""
if size is None:
size = key_type.default_size
if not key_type.IsValidSize(size):
raise errors.KeyczarError("Unsupported key size %d bits." % size)
try:
return {keyinfo.AES: AesKey.Generate,
keyinfo.HMAC_SHA1: HmacKey.Generate,
keyinfo.DSA_PRIV: DsaPrivateKey.Generate,
keyinfo.RSA_PRIV: RsaPrivateKey.Generate}[key_type](size)
except KeyError:
if key_type == keyinfo.DSA_PUB or key_type == keyinfo.RSA_PUB:
msg = "Public keys of key_type %s must be exported from private keys."
else:
msg = "Unsupported key key_type: %s"
raise errors.KeyczarError(msg % key_type)
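# Illustrative usage of GenKey (a sketch, not from the original module), using
# the keyinfo constants imported above:
#   aes_key = GenKey(keyinfo.AES)            # uses keyinfo.AES.default_size
#   hmac_key = GenKey(keyinfo.HMAC_SHA1)     # default HMAC-SHA1 size
# Passing an unsupported size raises errors.KeyczarError, per the check above.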
def ReadKey(key_type, key):
"""
Reads a key of the given key_type from a JSON string representation.
@param key_type: the key_type of key to read
@key_type key_type: L{keyinfo.KeyType}
@param key: the JSON string representation of the key
@key_type key: string
@return: the key object read from the JSON string
@raise KeyczarError: if key_type is unsupported
"""
try:
return {keyinfo.AES: AesKey.Read,
keyinfo.HMAC_SHA1: HmacKey.Read,
keyinfo.DSA_PRIV: DsaPrivateKey.Read,
keyinfo.RSA_PRIV: RsaPrivateKey.Read,
keyinfo.DSA_PUB: DsaPublicKey.Read,
keyinfo.RSA_PUB: RsaPublicKey.Read}[key_type](key)
except KeyError:
raise errors.KeyczarError("Unsupported key key_type: %s" % key_type)
class Key(object):
"""Parent class for Keyczar Keys."""
def __init__(self, key_type):
self.type = key_type
self.__size = self.type.default_size # initially default
def __eq__(self, other):
return (self.type == other.type and
self.size == other.size and
self.key_string == other.key_string)
def __SetSize(self, new_size):
if self.type.IsValidSize(new_size):
self.__size = new_size
def _GetKeyString(self):
"""Return the key as a string. Abstract method."""
def __GetKeyString(self):
"""Indirect getter for the key string."""
return self._GetKeyString()
def _Hash(self):
"""Compute and return the hash_id id of this key. Can override default hash_id."""
fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)
return util.Base64WSEncode(fullhash[:keyczar.KEY_HASH_SIZE])
def __Hash(self):
"""Indirect getter for hash_id."""
return self._Hash()
  hash_id = property(__Hash, doc="""The hash_id of the key.""")
size = property(lambda self: self.__size, __SetSize,
doc="""The size of the key in bits.""")
key_string = property(__GetKeyString, doc="""The key as a Base64 string.""")
key_bytes = property(lambda self: util.Base64WSDecode(self.key_string),
doc="""The key as bytes.""")
def Header(self):
"""Return the 5-byte header string including version byte, 4-byte hash_id."""
return chr(keyczar.VERSION) + util.Base64WSDecode(self.hash_id)
class SymmetricKey(Key):
"""Parent class for symmetric keys such as AES, HMAC-SHA1"""
def __init__(self, key_type, key_string):
Key.__init__(self, key_type)
self.__key_string = key_string
def _GetKeyString(self):
"""Return the key as a string."""
return self.__key_string
class AsymmetricKey(Key):
"""Parent class for asymmetric keys."""
def __init__(self, key_type, params):
Key.__init__(self, key_type)
self._params = params
class AesKey(SymmetricKey):
"""Represents AES symmetric private keys."""
class AESAdaptor(object):
"""
Adaptor class to make PyCrypto's Cipher behave the same as M2Crypto's
EVP.Cipher class
"""
def __init__(self, key_bytes, iv_bytes, mode):
"""
Constructor
@param key_bytes: the key for this cipher
      @type key_bytes: string
@param iv_bytes: the initialization vector for this cipher
@type iv_bytes: string
@param mode: the cipher mode
@type mode: integer (using AES values, e.g. AES.MODE_CBC)
"""
self.cipher = AES.new(key_bytes, mode, iv_bytes)
def __getattr__(self, name):
# defer everything to the actual cipher instance
return getattr(self.cipher, name)
def final(self):
"""
Collect any remaining encrypted data i.e. non-block size conforming
@return: remaining encrypted data, if any
"""
      # ...except 'final', which is a no-op here because the PyCrypto cipher never buffers data
return ''
class EVPAdaptor(object):
"""
Adaptor class to make M2Crypto's EVP.Cipher behave the same as PyCrypto's
Cipher class
"""
# cipher selection mode - EVP needs a different cipher for each
OP_ACTIVE = -1 # indicator that the request is for an existing cipher
OP_DECRYPT = 0
OP_ENCRYPT = 1
OP_TYPES = (OP_ACTIVE, OP_DECRYPT, OP_ENCRYPT)
def __init__(self, key_bytes, iv_bytes, mode):
"""
Constructor
@param key_bytes: the key for this cipher
      @type key_bytes: string
@param iv_bytes: the initialization vector for this cipher
@type iv_bytes: string
@param mode: the cipher mode
@type mode: integer (using AES values, e.g. AES.MODE_CBC)
"""
# defer construction of ciphers until encrypt/decrypt request made
self.ciphers = {}
# preserve the data needed for cipher construction
self.key_bytes = key_bytes
self.IV = iv_bytes
self.mode = mode
self.block_size = AES.block_size
self.key_size = len(key_bytes)
def __Cipher(self, selector):
"""
Helper to get the cipher for this adaptor, creates if required
@param selector: type of cipher required (active/encrypt/decrypt)
@type selector: integer one of OP_TYPES
@return: EVP.Cipher
"""
assert selector in self.OP_TYPES, 'Invalid selector :%s' %selector
if selector == self.OP_ACTIVE and (len(list(self.ciphers.keys())) > 1 or
not len(list(self.ciphers.keys()))):
assert 0, 'If both encryption and decryption used then selector must \
be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'
cipher = None
if selector == self.OP_ACTIVE:
# should only be one cipher active
cipher = list(self.ciphers.values())[0]
else:
cipher = self.ciphers.get(selector)
# have we been created a cipher for this selector yet?
if not cipher:
# no, so set it up as requested
# convert between AES and EVP modes
# NOTE: AES auto-selects based on key size using the same mode, but
# EVP requires different mode strings for each key size (in bits)
mode = 'aes_%s_cbc' %(self.key_size*8)
cipher = EVP.Cipher(alg=mode,
key=self.key_bytes,
iv=self.IV,
op=selector,
padding=0
)
self.ciphers[selector] = cipher
return cipher
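    # For example, a 32-byte AES key gives key_size * 8 == 256, so the EVP
    # cipher name built above is 'aes_256_cbc'; a 16-byte key yields
    # 'aes_128_cbc'.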
def decrypt(self, string):
"""
Return decrypted byte string
@param string: bytes to be decrypted.
@type string: string
@return: plaintext string
@rtype: string
"""
return self.__Cipher(self.OP_DECRYPT).update(string)
def encrypt(self, string):
"""
Return encrypted byte string
@param string: plaintext to be encrypted.
@type string: string
@return: raw byte encrypted string
@rtype: string
"""
return self.__Cipher(self.OP_ENCRYPT).update(string)
def final(self, selector=OP_ACTIVE):
"""
Collect any remaining encrypted data i.e. non-block size conforming
@return: remaining encrypted data, if any
"""
return self.__Cipher(selector).final()
def __init__(self, key_string, hmac_key, size=keyinfo.AES.default_size,
mode=keyinfo.CBC):
SymmetricKey.__init__(self, keyinfo.AES, key_string)
self.hmac_key = hmac_key
    # sanity check in case other code was dependent on this specific value,
# prior to it being changed to AES.block_size
assert AES.block_size == 16
self.block_size = AES.block_size
self.size = size
# Only CBC mode is actually supported, in spite of what the signature leads you to believe.
assert mode == keyinfo.CBC
def __str__(self):
return json.dumps({"mode": str(keyinfo.CBC),
"size": self.size,
"aesKeyString": self.key_string,
"hmacKey": json.loads(str(self.hmac_key))})
def _Hash(self):
fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)),
self.key_bytes,
self.hmac_key.key_bytes)
return util.Base64WSEncode(fullhash[:keyczar.KEY_HASH_SIZE])
@staticmethod
def Generate(size=keyinfo.AES.default_size):
"""
Return a newly generated AES key.
@param size: length of key in bits to generate
@type size: integer
@return: an AES key
@rtype: L{AesKey}
"""
key_bytes = util.RandBytes(old_div(size, 8))
key_string = util.Base64WSEncode(key_bytes)
hmac_key = HmacKey.Generate() # use default HMAC-SHA1 key size
return AesKey(key_string, hmac_key, size)
@staticmethod
def Read(key):
"""
Reads an AES key from a JSON string representation of it.
@param key: a JSON representation of an AES key
@type key: string
@return: an AES key
@rtype: L{AesKey}
"""
aes = json.loads(key)
hmac_val = aes['hmacKey']
return AesKey(aes['aesKeyString'],
HmacKey(hmac_val['hmacKeyString'], hmac_val['size']),
aes['size'], keyinfo.GetMode(aes['mode']))
def _Pad(self, data):
"""
Returns the data padded using PKCS5.
For a block size B and data with N bytes in the last block, PKCS5
pads the data with B-N bytes of the value B-N.
@param data: data to be padded
@type data: string
@return: PKCS5 padded string
@rtype: string
"""
pad = self.block_size - len(data) % self.block_size
return data + pad * chr(pad)
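  # Worked example: with AES.block_size == 16, _Pad('hello') appends 11 bytes
  # of chr(11); a 16-byte input gets a full extra block of 16 chr(16) bytes,
  # so _UnPad can always strip an unambiguous pad length.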
def _UnPad(self, padded):
"""
Returns the unpadded version of a data padded using PKCS5.
@param padded: string padded with PKCS5
@type padded: string
@return: original, unpadded string
@rtype: string
"""
pad = ord(padded[-1])
return padded[:-pad]
def _NoPadBufferSize(self, buffer_size):
"""
    Return the buffer size closest to the requested size that does not require
    padding. Minimum size is 1 block.
    The result is a multiple of the cipher block size, so NO PADDING is required
    on any block of this size.
    @param buffer_size: requested buffer size
    @type buffer_size: int
@return: best buffer size
@rtype: int
"""
no_pad_size = self.block_size * (old_div(buffer_size, self.block_size))
return max(no_pad_size, self.block_size)
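  # For example, with a 16-byte block size _NoPadBufferSize(1000) returns 992
  # (62 full blocks) and _NoPadBufferSize(10) returns 16, the one-block minimum.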
def Encrypt(self, data):
"""
Return ciphertext byte string containing Header|IV|Ciph|Sig.
@param data: plaintext to be encrypted.
@type data: string
@return: raw byte string ciphertext formatted to have Header|IV|Ciph|Sig.
@rtype: string
"""
data = self._Pad(data)
iv_bytes = util.RandBytes(self.block_size)
cipher = self.__CreateCipher(self.key_bytes, iv_bytes)
ciph_bytes = cipher.encrypt(data)
ciph_bytes += cipher.final()
msg_bytes = self.Header() + iv_bytes + ciph_bytes
sig_bytes = self.hmac_key.Sign(msg_bytes) # Sign bytes
return msg_bytes + sig_bytes
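  # Size sketch (derived from the docstrings in this class): the output is
  # 5 header bytes + 16 IV bytes (one AES block) + the PKCS5-padded ciphertext
  # + a 20-byte HMAC-SHA1 signature (util.HLEN).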
def Decrypt(self, input_bytes):
"""
Decrypts the given ciphertext.
@param input_bytes: raw byte string formatted as Header|IV|Ciph|Sig where
Sig is the signature over the entire payload (Header|IV|Ciph).
@type input_bytes: string
@return: plaintext message
@rtype: string
@raise ShortCiphertextError: if the ciphertext is too short to have IV & Sig
@raise InvalidSignatureError: if the signature doesn't correspond to payload
"""
data_bytes = input_bytes[keyczar.HEADER_SIZE:] # remove header
if len(data_bytes) < self.block_size + util.HLEN: # IV + sig
raise errors.ShortCiphertextError(len(data_bytes))
iv_bytes = data_bytes[:self.block_size] # first block of bytes is the IV
ciph_bytes = data_bytes[self.block_size:-util.HLEN]
sig_bytes = data_bytes[-util.HLEN:] # last 20 bytes are sig
if not self.hmac_key.Verify(input_bytes[:-util.HLEN], sig_bytes):
raise errors.InvalidSignatureError()
plain = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).decrypt(ciph_bytes)
return self._UnPad(plain)
def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):
"""
Factory function for creating cipher of specified type using the active
crypto library
@param key_bytes: the key for this cipher
    @type key_bytes: string
@param iv_bytes: the initialization vector for this cipher
@type iv_bytes: string
@param mode: the cipher mode
@type mode: integer (using AES values, e.g. AES.MODE_CBC)
@return: the cipher object
"""
# can we use M2Crypto and was it requested?
if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:
# yes, so do so
return self.EVPAdaptor(key_bytes, iv_bytes, mode)
else:
# default to PyCrypto
return self.AESAdaptor(key_bytes, iv_bytes, mode)
class HmacKey(SymmetricKey):
"""Represents HMAC-SHA1 symmetric private keys."""
def __init__(self, key_string, size=keyinfo.HMAC_SHA1.default_size):
SymmetricKey.__init__(self, keyinfo.HMAC_SHA1, key_string)
self.size = size
def __str__(self):
return json.dumps({"size": self.size, "hmacKeyString": self.key_string})
def _Hash(self):
fullhash = util.Hash(self.key_bytes)
return util.Base64WSEncode(fullhash[:keyczar.KEY_HASH_SIZE])
def CreateStreamable(self):
"""Return a streaming version of this key"""
return HmacKeyStream(self)
@staticmethod
def Generate(size=keyinfo.HMAC_SHA1.default_size):
"""
Return a newly generated HMAC-SHA1 key.
@param size: length of key in bits to generate
@type size: integer
@return: an HMAC-SHA1 key
@rtype: L{HmacKey}
"""
key_bytes = util.RandBytes(old_div(size, 8))
key_string = util.Base64WSEncode(key_bytes)
return HmacKey(key_string, size)
@staticmethod
def Read(key):
"""
Reads an HMAC-SHA1 key from a JSON string representation of it.
@param key: a JSON representation of an HMAC-SHA1 key
@type key: string
@return: an HMAC-SHA1 key
@rtype: L{HmacKey}
"""
mac = json.loads(key)
return HmacKey(mac['hmacKeyString'], mac['size'])
def Sign(self, msg):
"""
Return raw byte string of signature on the message.
@param msg: message to be signed
@type msg: string
@return: raw byte string signature
@rtype: string
"""
return hmac.new(self.key_bytes, msg, sha1).digest()
def Verify(self, msg, sig_bytes):
"""
Return True if the signature corresponds to the message.
@param msg: message to be signed
@type msg: string
@param sig_bytes: raw byte string of the signature
@type sig_bytes: string
@return: True if signature is valid for message. False otherwise.
@rtype: boolean
"""
return self.VerifySignedData(self.Sign(msg), sig_bytes)
def VerifySignedData(self, mac_bytes, sig_bytes):
"""
    Return True if the signature matches the expected MAC of the message.
    @param mac_bytes: raw byte string of the expected MAC, computed locally
    @type mac_bytes: string
    @param sig_bytes: raw byte string of the signature to verify
    @type sig_bytes: string
@return: True if signature is valid for message. False otherwise.
@rtype: boolean
"""
if len(sig_bytes) != len(mac_bytes):
return False
result = 0
for x, y in zip(mac_bytes, sig_bytes):
result |= ord(x) ^ ord(y)
return result == 0
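  # Note: the XOR-accumulate loop above compares the two MACs in constant time
  # instead of short-circuiting on the first mismatch, so timing does not leak
  # how many leading bytes matched.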
class HmacKeyStream(object):
"""Represents streamable HMAC-SHA1 symmetric private keys."""
def __init__(self, hmac_key):
self.hmac_key = hmac_key
self.hmac = hmac.new(self.hmac_key.key_bytes, '', sha1)
def Update(self, data):
self.hmac.update(data)
def Sign(self):
"""
Return raw byte string of signature on the streamed message.
@return: raw byte string signature
@rtype: string
"""
return self.hmac.digest()
class PrivateKey(AsymmetricKey):
"""Represents private keys in Keyczar for asymmetric key pairs."""
def __init__(self, key_type, params, pub):
AsymmetricKey.__init__(self, key_type, params)
self.public_key = pub
def _Hash(self):
return self.public_key.hash_id
class PublicKey(AsymmetricKey):
"""Represents public keys in Keyczar for asymmetric key pairs."""
def __init__(self, key_type, params):
AsymmetricKey.__init__(self, key_type, params)
class DsaPrivateKey(PrivateKey):
"""Represents DSA private keys in an asymmetric DSA key pair."""
def __init__(self, params, pub, key,
size=keyinfo.DSA_PRIV.default_size):
PrivateKey.__init__(self, keyinfo.DSA_PRIV, params, pub)
self.key = key
self.public_key = pub
self.params = params
self.size = size
def __str__(self):
return json.dumps({"publicKey": json.loads(str(self.public_key)),
"x": util.Base64WSEncode(self.params['x']),
"size": self.size})
@staticmethod
def Generate(size=keyinfo.DSA_PRIV.default_size):
"""
Return a newly generated DSA private key.
@param size: length of key in bits to generate
@type size: integer
@return: a DSA private key
@rtype: L{DsaPrivateKey}
"""
key = DSA.generate(size, util.RandBytes)
params = { 'x': util.PadBytes(util.BigIntToBytes(key.x), 1) }
pubkey = key.publickey()
pub_params = { 'g': util.PadBytes(util.BigIntToBytes(pubkey.g), 1),
'p': util.PadBytes(util.BigIntToBytes(pubkey.p), 1),
'q': util.PadBytes(util.BigIntToBytes(pubkey.q), 1),
'y': util.PadBytes(util.BigIntToBytes(pubkey.y), 1)
}
pub = DsaPublicKey(pub_params, pubkey, size)
return DsaPrivateKey(params, pub, key, size)
@staticmethod
def Read(key):
"""
Reads a DSA private key from a JSON string representation of it.
@param key: a JSON representation of a DSA private key
@type key: string
    @return: a DSA private key
@rtype: L{DsaPrivateKey}
"""
dsa = json.loads(key)
pub = DsaPublicKey.Read(json.dumps(dsa['publicKey']))
params = { 'x' : util.Base64WSDecode(dsa['x']) }
key = DSA.construct((util.BytesToLong(pub._params['y']),
util.BytesToLong(pub._params['g']),
util.BytesToLong(pub._params['p']),
util.BytesToLong(pub._params['q']),
util.BytesToLong(params['x'])))
return DsaPrivateKey(params, pub, key, dsa['size'])
def Sign(self, msg):
"""
Return raw byte string of signature on the message.
@param msg: message to be signed
@type msg: string
    @return: byte string formatted as an ASN.1 sequence of r and s
@rtype: string
"""
    # Need to choose a random k per message; SystemRandom() is available
# since Python 2.4.
k = random.SystemRandom().randint(2, self.key.q-1)
(r, s) = self.key.sign(util.Hash(msg), k)
return util.MakeDsaSig(r, s)
def Verify(self, msg, sig):
"""@see: L{DsaPublicKey.Verify}"""
return self.public_key.Verify(msg, sig)
class RsaPrivateKey(PrivateKey):
"""Represents RSA private keys in an asymmetric RSA key pair."""
def __init__(self, params, pub, key, size=keyinfo.RSA_PRIV.default_size):
PrivateKey.__init__(self, keyinfo.RSA_PRIV, params, pub)
self.key = key # instance of PyCrypto RSA key
self.public_key = pub # instance of Keyczar RsaPublicKey
self.params = params
self.size = size
# em - encoded message
def __Decode(self, encoded_message, label=""):
# See PKCS#1 v2.1: ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.pdf
if len(label) >= 2**61:
# 2^61 = the input limit for SHA-1
raise errors.KeyczarError("OAEP Decoding Error - label is too large %d" % len(label))
if len(encoded_message) < 2 * util.HLEN + 2:
raise errors.KeyczarError(
"OAEP Decoding Error - encoded_message is too small: %d" % len(encoded_message))
# Step 3b EM = Y || maskedSeed || maskedDB
k = int(math.floor(math.log(self.key.n, 256)) + 1) # num bytes in n
diff_len = k - len(encoded_message)
# PyCrypto strips out leading zero bytes.
# In OAEP, the first byte is expected to be a zero, so we can ignore it
if diff_len > 1:
# If more bytes were chopped by PyCrypto, add zero bytes back on
encoded_message = '\x00' * (diff_len - 1) + encoded_message
masked_seed = encoded_message[:util.HLEN]
masked_datablock = encoded_message[util.HLEN:]
# Step 3c,d
seed_mask = util.MGF(masked_datablock, util.HLEN)
seed = util.Xor(masked_seed, seed_mask)
# Step 3e
datablock_mask = util.MGF(seed, len(masked_datablock)) # encoded_message already stripped of 0
# Step 3f
datablock = util.Xor(masked_datablock, datablock_mask)
label_hash = datablock[:util.HLEN]
expected_label_hash = util.Hash(label) # Debugging
if label_hash != expected_label_hash:
raise errors.KeyczarError("OAEP Decoding Error - hash_id is invalid")
delimited_message = datablock[util.HLEN:].lstrip('\x00')
if delimited_message[0] != '\x01':
raise errors.KeyczarError("OAEP Decoding Error - expected a 1 value")
return delimited_message[1:] # The message
def __str__(self):
return json.dumps({
"publicKey": json.loads(str(self.public_key)),
"privateExponent": util.Base64WSEncode(self.params['privateExponent']),
"primeP": util.Base64WSEncode(self.params['primeP']),
"primeQ": util.Base64WSEncode(self.params['primeQ']),
"primeExponentP": util.Base64WSEncode(self.params['primeExponentP']),
"primeExponentQ": util.Base64WSEncode(self.params['primeExponentQ']),
"crtCoefficient": util.Base64WSEncode(self.params['crtCoefficient']),
"size": self.size})
@staticmethod
def Generate(size=keyinfo.RSA_PRIV.default_size):
"""
Return a newly generated RSA private key.
@param size: length of key in bits to generate
@type size: integer
    @return: an RSA private key
@rtype: L{RsaPrivateKey}
"""
key = RSA.generate(size, util.RandBytes)
#NOTE: PyCrypto stores p < q, u = p^{-1} mod q
#But OpenSSL and PKCS8 stores q < p, invq = q^{-1} mod p
#So we have to reverse the p and q values
params = { 'privateExponent': util.PadBytes(util.BigIntToBytes(key.d), 1),
'primeP': util.PadBytes(util.BigIntToBytes(key.q), 1),
'primeQ': util.PadBytes(util.BigIntToBytes(key.p), 1),
'primeExponentP': util.PadBytes(util.BigIntToBytes(key.d % (key.q - 1)), 1),
'primeExponentQ': util.PadBytes(util.BigIntToBytes(key.d % (key.p - 1)), 1),
'crtCoefficient': util.PadBytes(util.BigIntToBytes(key.u), 1)}
pubkey = key.publickey()
pub_params = { 'modulus': util.PadBytes(util.BigIntToBytes(key.n), 1),
'publicExponent': util.PadBytes(util.BigIntToBytes(key.e), 1)}
pub = RsaPublicKey(pub_params, pubkey, size)
return RsaPrivateKey(params, pub, key, size)
@staticmethod
def Read(key):
"""
    Reads an RSA private key from a JSON string representation of it.
    @param key: a JSON representation of an RSA private key
    @type key: string
    @return: an RSA private key
@rtype: L{RsaPrivateKey}
"""
rsa = json.loads(key)
pub = RsaPublicKey.Read(json.dumps(rsa['publicKey']))
params = {'privateExponent': util.Base64WSDecode(rsa['privateExponent']),
'primeP': util.Base64WSDecode(rsa['primeP']),
'primeQ': util.Base64WSDecode(rsa['primeQ']),
'primeExponentP': util.Base64WSDecode(rsa['primeExponentP']),
'primeExponentQ': util.Base64WSDecode(rsa['primeExponentQ']),
'crtCoefficient': util.Base64WSDecode(rsa['crtCoefficient'])
}
key = RSA.construct((util.BytesToLong(pub.params['modulus']),
util.BytesToLong(pub.params['publicExponent']),
util.BytesToLong(params['privateExponent']),
util.BytesToLong(params['primeQ']),
util.BytesToLong(params['primeP']),
util.BytesToLong(params['crtCoefficient'])))
return RsaPrivateKey(params, pub, key, rsa['size'])
def Encrypt(self, data):
"""@see: L{RsaPublicKey.Encrypt}"""
return self.public_key.Encrypt(data)
def Decrypt(self, input_bytes):
"""
Decrypts the given ciphertext.
@param input_bytes: raw byte string formatted as Header|Ciphertext.
@type input_bytes: string
@return: plaintext message
@rtype: string
"""
ciph_bytes = input_bytes[keyczar.HEADER_SIZE:]
decrypted = self.key.decrypt(ciph_bytes)
return self.__Decode(decrypted)
def Sign(self, msg):
"""
Return raw byte string of signature on the SHA-1 hash_id of the message.
@param msg: message to be signed
@type msg: string
@return: string representation of long int signature over message
@rtype: string
"""
emsa_encoded = util.MakeEmsaMessage(msg, self.size)
byte_string = util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])
return util.PadBytes(byte_string, old_div(self.size,8) - len(byte_string))
def Verify(self, msg, sig):
"""@see: L{RsaPublicKey.Verify}"""
return self.public_key.Verify(msg, sig)
class DsaPublicKey(PublicKey):
"""Represents DSA public keys in an asymmetric DSA key pair."""
def __init__(self, params, key, size=keyinfo.DSA_PUB.default_size):
PublicKey.__init__(self, keyinfo.DSA_PUB, params)
self.key = key
self.params = params
self.size = size
def __str__(self):
return json.dumps({"p": util.Base64WSEncode(self.params['p']),
"q": util.Base64WSEncode(self.params['q']),
"g": util.Base64WSEncode(self.params['g']),
"y": util.Base64WSEncode(self.params['y']),
"size": self.size})
def _Hash(self):
fullhash = util.PrefixHash(util.TrimBytes(self._params['p']),
util.TrimBytes(self._params['q']),
util.TrimBytes(self._params['g']),
util.TrimBytes(self._params['y']))
return util.Base64WSEncode(fullhash[:keyczar.KEY_HASH_SIZE])
@staticmethod
def Read(key):
"""
Reads a DSA public key from a JSON string representation of it.
@param key: a JSON representation of a DSA public key
@type key: string
@return: a DSA public key
@rtype: L{DsaPublicKey}
"""
dsa = json.loads(key)
params = {'y': util.Base64WSDecode(dsa['y']),
'p': util.Base64WSDecode(dsa['p']),
'g': util.Base64WSDecode(dsa['g']),
'q': util.Base64WSDecode(dsa['q'])}
pubkey = DSA.construct((util.BytesToLong(params['y']),
util.BytesToLong(params['g']),
util.BytesToLong(params['p']),
util.BytesToLong(params['q'])))
return DsaPublicKey(params, pubkey, dsa['size'])
def Verify(self, msg, sig):
"""
Return True if the signature corresponds to the message.
@param msg: message that has been signed
@type msg: string
@param sig: raw byte string of the signature formatted as an ASN.1 sequence
of r and s
@type sig: string
@return: True if signature is valid for message. False otherwise.
@rtype: boolean
"""
try:
(r, s) = util.ParseDsaSig(sig)
return self.key.verify(util.Hash(msg), (r, s))
except errors.KeyczarError:
# if signature is not in correct format
return False
class RsaPublicKey(PublicKey):
"""Represents RSA public keys in an asymmetric RSA key pair."""
def __init__(self, params, key, size=keyinfo.RSA_PUB.default_size):
PublicKey.__init__(self, keyinfo.RSA_PUB, params)
self.key = key
self.params = params
self.size = size
def __Encode(self, msg, label=""):
if len(label) >= 2**61: # the input limit for SHA-1
raise errors.KeyczarError("OAEP parameter string too long.")
k = int(math.floor(math.log(self.key.n, 256)) + 1) # num bytes in n
if len(msg) > k - 2 * util.HLEN - 2:
raise errors.KeyczarError("Message too long to OAEP encode.")
label_hash = util.Hash(label)
pad_octets = (k - len(msg) - 2 * util.HLEN - 2) # Number of zeros to pad
if pad_octets < 0:
raise errors.KeyczarError("Message is too long: %d" % len(msg))
datablock = label_hash + ('\x00' * pad_octets) + '\x01' + msg
seed = util.RandBytes(util.HLEN)
# Steps 2e, f
datablock_mask = util.MGF(seed, k - util.HLEN - 1)
masked_datablock = util.Xor(datablock, datablock_mask)
# Steps 2g, h
seed_mask = util.MGF(masked_datablock, util.HLEN)
masked_seed = util.Xor(seed, seed_mask)
# Step 2i: Construct the encoded message
return '\x00' + masked_seed + masked_datablock
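  # Worked example (assuming util.HLEN == 20 for SHA-1, as used elsewhere in
  # this module): for a 2048-bit modulus, k == 256 bytes and the longest
  # message that can be OAEP-encoded is 256 - 2*20 - 2 = 214 bytes.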
def __str__(self):
return json.dumps(
{"modulus": util.Base64WSEncode(self.params['modulus']),
"publicExponent": util.Base64WSEncode(self.params['publicExponent']),
"size": self.size})
def _Hash(self):
fullhash = util.PrefixHash(util.TrimBytes(self._params['modulus']),
util.TrimBytes(self._params['publicExponent']))
return util.Base64WSEncode(fullhash[:keyczar.KEY_HASH_SIZE])
@staticmethod
def Read(key):
"""
    Reads an RSA public key from a JSON string representation of it.
    @param key: a JSON representation of an RSA public key
    @type key: string
    @return: an RSA public key
@rtype: L{RsaPublicKey}
"""
rsa = json.loads(key)
params = {'modulus': util.Base64WSDecode(rsa['modulus']),
'publicExponent': util.Base64WSDecode(rsa['publicExponent'])}
pubkey = RSA.construct((util.BytesToLong(params['modulus']),
util.BytesToLong(params['publicExponent'])))
return RsaPublicKey(params, pubkey, rsa['size'])
def Encrypt(self, data):
"""
Return a raw byte string of the ciphertext in the form Header|Ciph.
@param data: message to be encrypted
@type data: string
@return: ciphertext formatted as Header|Ciph
@rtype: string
"""
data = self.__Encode(data)
ciph_bytes = self.key.encrypt(data, None)[0] # PyCrypto returns 1-tuple
return self.Header() + ciph_bytes
def Verify(self, msg, sig):
"""
Return True if the signature corresponds to the message.
@param msg: message that has been signed
@type msg: string
@param sig: string representation of long int signature
@type sig: string
@return: True if signature is valid for the message hash_id. False otherwise.
@rtype: boolean
"""
try:
return self.key.verify(util.MakeEmsaMessage(msg, self.size), (util.BytesToLong(sig),))
except ValueError:
# if sig is not a long, it's invalid
return False
class EncryptingStreamWriter(object):
"""
An encrypting stream capable of creating a ciphertext byte stream
containing Header|IV|Ciph|Sig.
"""
def __init__(self, key, output_stream):
"""
Constructor
@param key: Keyczar Key to perform the padding, verification, cipher
creation needed by this stream
@type key: Key
@param output_stream: stream for encrypted output
@type output_stream: 'file-like' object
"""
self.__key = key
self.__output_stream = output_stream
self.__data = ''
self.__closed = False
self.__hmac_stream = key.hmac_key.CreateStreamable()
iv_bytes = util.RandBytes(key.block_size)
self.__cipher = AES.new(key.key_bytes, AES.MODE_CBC, iv_bytes)
hdr = key.Header()
self.__hmac_stream.Update(hdr + iv_bytes)
self.__output_stream.write(hdr + iv_bytes)
def write(self, data):
"""
Write the data in encrypted form to the output stream
@param data: data to be encrypted.
@type data: string
"""
self.__CheckOpen('write')
self.__data += data
encrypt_buffer_size = self.__key._NoPadBufferSize(len(self.__data))
if len(self.__data) >= encrypt_buffer_size:
self.__WriteEncrypted(self.__data[:encrypt_buffer_size])
else:
encrypt_buffer_size = 0
self.__data = self.__data[encrypt_buffer_size:]
def flush(self):
"""
Flush this stream.
Writes all remaining encrypted data to the output stream.
Will also flush the associated output stream.
"""
self.__CheckOpen('flush')
self.__WriteEncrypted(self.__data, pad=True)
self.__output_stream.write(self.__hmac_stream.Sign())
self.__output_stream.flush()
def close(self):
"""
Close this stream.
Discards any and all buffered data
Does *not* close the associated output stream.
"""
self.__CheckOpen('close')
self.__closed = True
def __WriteEncrypted(self, data, pad=False):
"""
Helper to write encrypted bytes to output stream.
Must *only* pad the last block as PKCS5 *always* pads, even when the data
length is a multiple of the block size - it adds block_size chars.
We cannot pad intermediate blocks as there is no guarantee that a streaming
read will receive the data in the same blocks as the writes were made.
@param data: data to be written.
@type data: string
@param pad: add padding to data
@type pad: boolean
"""
if pad:
data = self.__key._Pad(data)
encrypted_bytes = self.__cipher.encrypt(data)
self.__output_stream.write(encrypted_bytes)
self.__hmac_stream.Update(encrypted_bytes)
def __CheckOpen(self, operation):
"""Helper to ensure this stream is open"""
if self.__closed:
raise ValueError('%s() on a closed stream is not permitted' %operation)
class DecryptingStreamReader(object):
"""
A stream capable of decrypting a source ciphertext byte stream
containing Header|IV|Ciph|Sig into plain text.
"""
def __init__(self, key_set, input_stream,
buffer_size=util.DEFAULT_STREAM_BUFF_SIZE):
"""
Constructor
    @param key_set: Keyczar key set used to look up the key specified in the message header
    @type key_set: Keyczar
@param input_stream: source of encrypted input
@type input_stream: 'file-like' object
@param buffer_size: Suggested buffer size for reading data (will be
adjusted to suit the underlying cipher).
Use -1 to read as much data as possible from the source stream
@type buffer_size: integer
"""
self.__key_set = key_set
self.__input_stream = input_stream
self.__buffer_size = buffer_size
self.__key = None
self.__cipher = None
self.__encrypted_buffer = ''
self.__decrypted_buffer = ''
self.__closed = False
def read(self, chars=-1):
"""
Decrypts data from the source stream and returns the resulting plaintext.
NOTE: the signature validation is performed on the final read if sufficient
data is available. Streaming => it isn't possible to validate up front as
done by Decrypt().
@param chars: indicates the number of characters to read from the stream.
read() will never return more than chars characters, but it might return
less, if there are not enough characters available.
@type chars: integer
@raise ShortCiphertextError: if the ciphertext is too short to have IV & Sig
@raise InvalidSignatureError: if the signature doesn't correspond to payload
@raise KeyNotFoundError: if key specified in header doesn't exist
@raise ValueError: if stream closed
"""
self.__CheckOpen('read')
is_data_avail = True
if not self.__key:
is_data_avail = self.__CreateKey()
if is_data_avail and self.__key and not self.__cipher:
is_data_avail = self.__CreateCipher()
if is_data_avail and self.__key and self.__cipher:
data_to_decrypt = ''
need_more_data = True
while need_more_data:
read_bytes, is_data_avail = self.__ReadBytes(self.__key.block_size,
block=False)
if read_bytes:
self.__encrypted_buffer += read_bytes
reserved_data_len = util.HLEN
if is_data_avail:
reserved_data_len += self.__key.block_size
available_data = self.__encrypted_buffer[:-reserved_data_len]
if is_data_avail:
no_decrypt_len = len(available_data) % self.__key.block_size
else:
no_decrypt_len = 0
# slicing with [:-0] does not work!
if no_decrypt_len:
data_to_decrypt = available_data[:-no_decrypt_len]
else:
data_to_decrypt = available_data
need_more_data = (is_data_avail and not data_to_decrypt)
if data_to_decrypt:
self.__hmac_stream.Update(data_to_decrypt)
self.__encrypted_buffer = self.__encrypted_buffer[len(data_to_decrypt):]
decrypted_data = self.__cipher.decrypt(data_to_decrypt)
if not is_data_avail:
if len(self.__encrypted_buffer) != util.HLEN:
raise errors.ShortCiphertextError(len(self.__encrypted_buffer))
current_sig_bytes = self.__hmac_stream.Sign()
msg_sig_bytes = self.__encrypted_buffer
self.__encrypted_buffer = ''
if not self.__key.hmac_key.VerifySignedData(current_sig_bytes,
msg_sig_bytes):
raise errors.InvalidSignatureError()
decrypted_data = self.__key._UnPad(decrypted_data)
self.__decrypted_buffer += decrypted_data
if chars < 0:
result = self.__decrypted_buffer
self.__decrypted_buffer = ''
else:
result = self.__decrypted_buffer[:chars]
self.__decrypted_buffer = self.__decrypted_buffer[chars:]
if not result and is_data_avail:
result = None
return result
def close(self):
"""
Close this stream.
Assumes all data has been read or is thrown away as no signature validation
is done until all the data is read.
"""
self.__closed = True
def __CheckOpen(self, operation):
"""Helper to ensure this stream is open"""
if self.__closed:
raise ValueError('%s() on a closed stream is not permitted' %operation)
def __ReadBytes(self, size, block=True):
"""
Helper to read bytes from the input stream. If requested will block until
required number of bytes is read or input data is exhausted. Returns a
tuple of (the data bytes read, is more data available).
"""
need_more_data = True
result = ''
while need_more_data:
read_bytes = self.__input_stream.read(size)
if read_bytes:
result += read_bytes
elif read_bytes is not None:
return (result, False)
elif not block:
return (result, True)
need_more_data = (len(result) < size)
return (result, True)
def __CreateKey(self):
"""
Helper to create the actual key from the Header
    NOTE: The key determines what the optimal read buffer size will be. It is a
    size that does not require any padding, so data can be encrypted without a
    stream and decrypted with a stream,
i.e. Encrypt() => DecryptingStreamReader()
"""
is_data_avail = True
if not self.__key:
read_bytes, is_data_avail = self.__ReadBytes(keyczar.HEADER_SIZE -
len(self.__encrypted_buffer))
if read_bytes:
self.__encrypted_buffer += read_bytes
if len(self.__encrypted_buffer) >= keyczar.HEADER_SIZE:
hdr_bytes = self.__encrypted_buffer[:keyczar.HEADER_SIZE]
self.__encrypted_buffer = self.__encrypted_buffer[keyczar.HEADER_SIZE:]
self.__key = self.__key_set._ParseHeader(hdr_bytes)
self.__hmac_stream = self.__key.hmac_key.CreateStreamable()
self.__hmac_stream.Update(hdr_bytes)
if self.__buffer_size >= 0:
self.__buffer_size = self.__key._NoPadBufferSize(self.__buffer_size)
return is_data_avail
def __CreateCipher(self):
"""
Helper to create the cipher using the IV from the message
"""
is_data_avail = True
if not self.__cipher:
reqd_block_size = self.__key.block_size
new_bytes_reqd = reqd_block_size - len(self.__encrypted_buffer)
read_bytes, is_data_avail = self.__ReadBytes(new_bytes_reqd)
if read_bytes:
self.__encrypted_buffer += read_bytes
if len(self.__encrypted_buffer) >= reqd_block_size:
iv_bytes = self.__encrypted_buffer[:reqd_block_size]
self.__encrypted_buffer = self.__encrypted_buffer[
reqd_block_size:]
self.__hmac_stream.Update(iv_bytes)
self.__cipher = AES.new(self.__key.key_bytes, AES.MODE_CBC, iv_bytes)
return is_data_avail
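# Illustrative round trip (a sketch; aes_key, crypter, buf and plaintext are
# hypothetical names, with buf being any readable/writable file-like object):
#   writer = EncryptingStreamWriter(aes_key, buf)    # aes_key: an AesKey
#   writer.write(plaintext)
#   writer.flush()                                   # appends the HMAC signature
#   ...rewind buf...
#   reader = DecryptingStreamReader(crypter, buf)    # crypter provides _ParseHeader
#   recovered = reader.read()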
| 33.136986
| 99
| 0.661706
|
1c487dc61b9ee3171cbff46d329fc9b97936f78e
| 386
|
py
|
Python
|
profiles_api/urls.py
|
doglzz0806/profiles-rest-api
|
11f9ee0ee6e278570b1edab30e27d0a41382ffca
|
[
"MIT"
] | 1
|
2021-03-17T00:21:20.000Z
|
2021-03-17T00:21:20.000Z
|
profiles_api/urls.py
|
doglzz0806/profiles-rest-api
|
11f9ee0ee6e278570b1edab30e27d0a41382ffca
|
[
"MIT"
] | null | null | null |
profiles_api/urls.py
|
doglzz0806/profiles-rest-api
|
11f9ee0ee6e278570b1edab30e27d0a41382ffca
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
urlpatterns = [
path('hello-view/',views.HelloApiView.as_view()),
path('', include(router.urls))
]
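# Note: with DefaultRouter, the 'profile' registration above exposes list/create
# at /profile/ and retrieve/update/destroy at /profile/<pk>/, alongside the
# explicit /hello-view/ path.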
| 27.571429
| 79
| 0.772021
|
56abd34b321ed57ce6250e8f5f98a85905618662
| 9,147
|
py
|
Python
|
DjangoBlog/settings.py
|
snxkxk/blog
|
7c018c5a64c705a7aad2e50a94c863f77d22e992
|
[
"MIT"
] | null | null | null |
DjangoBlog/settings.py
|
snxkxk/blog
|
7c018c5a64c705a7aad2e50a94c863f77d22e992
|
[
"MIT"
] | 6
|
2021-03-19T01:47:07.000Z
|
2022-03-12T00:22:55.000Z
|
DjangoBlog/settings.py
|
snxkxk/blog
|
7c018c5a64c705a7aad2e50a94c863f77d22e992
|
[
"MIT"
] | null | null | null |
"""
Django settings for DjangoBlog project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import sys
import os
def env_to_bool(env, default):
str_val = os.environ.get(env)
return default if str_val is None else str_val == 'True'
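# For example, env_to_bool('DJANGO_DEBUG', True) returns True when the variable
# is unset, and returns False when it is set to anything other than the exact
# string 'True'.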
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'DJANGO_SECRET_KEY') or 'n9ceqv38)#&mwuat@(mjb_p%em$e8$qyr#fw9ot!=ba6lijx-6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env_to_bool('DJANGO_DEBUG', True)
# DEBUG = False
TESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'
# ALLOWED_HOSTS = []
ALLOWED_HOSTS = ['*', '127.0.0.1', 'batgm.com']
# Application definition
SITE_ROOT = os.path.dirname(os.path.abspath(__file__))
SITE_ROOT = os.path.abspath(os.path.join(SITE_ROOT, '../'))
INSTALLED_APPS = [
# 'django.contrib.admin',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'mdeditor',
'haystack',
'blog',
'accounts',
'comments',
'oauth',
'servermanager',
'owntracks',
'compressor'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.gzip.GZipMiddleware',
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'blog.middleware.OnlineMiddleware'
]
ROOT_URLCONF = 'DjangoBlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'blog.context_processors.seo_processor'
],
},
},
]
WSGI_APPLICATION = 'DjangoBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DJANGO_MYSQL_DATABASE') or 'djangoblog',
'USER': os.environ.get('DJANGO_MYSQL_USER') or 'root',
'PASSWORD': os.environ.get('DJANGO_MYSQL_PASSWORD') or 'Abc123!@#',
'HOST': os.environ.get('DJANGO_MYSQL_HOST') or 'localhost',
'PORT': int(
os.environ.get('DJANGO_MYSQL_PORT') or 3306),
'OPTIONS': {
'charset': 'utf8mb4'},
}}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'www.batgm.com:9200'
},
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'DjangoBlog.elasticsearch_backend.ElasticSearchEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
},
}
# Automatically update searching index
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# Allow user login with username and password
AUTHENTICATION_BACKENDS = [
'accounts.user_login_backend.EmailOrUsernameModelBackend']
STATIC_ROOT = os.path.join(SITE_ROOT, 'collectedstatic')
STATIC_URL = '/static/'
STATICFILES = os.path.join(BASE_DIR, 'static')
AUTH_USER_MODEL = 'accounts.BlogUser'
LOGIN_URL = '/login/'
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
DATE_TIME_FORMAT = '%Y-%m-%d'
# bootstrap color styles
BOOTSTRAP_COLOR_TYPES = [
'default', 'primary', 'success', 'info', 'warning', 'danger'
]
# paginate
PAGINATE_BY = 10
# http cache timeout
CACHE_CONTROL_MAX_AGE = 2592000
# cache setting
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': os.environ.get('DJANGO_MEMCACHED_LOCATION') or '127.0.0.1:11211',
'KEY_PREFIX': 'django_test' if TESTING else 'djangoblog',
'TIMEOUT': 60 * 60 * 10
} if env_to_bool('DJANGO_MEMCACHED_ENABLE', True) else {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'TIMEOUT': 10800,
'LOCATION': 'unique-snowflake',
}
}
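# Illustrative: setting the environment variable DJANGO_MEMCACHED_ENABLE to
# 'False' before startup makes env_to_bool() above pick the local-memory cache
# branch instead of memcached.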
SITE_ID = 1
BAIDU_NOTIFY_URL = os.environ.get('DJANGO_BAIDU_NOTIFY_URL') \
or 'http://data.zz.baidu.com/urls?site=https://www.lylinux.net&token=1uAOGrMsUm5syDGn'
# Email:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = env_to_bool('DJANGO_EMAIL_TLS', False)
EMAIL_USE_SSL = env_to_bool('DJANGO_EMAIL_SSL', True)
EMAIL_HOST = os.environ.get('DJANGO_EMAIL_HOST') or 'smtp.qq.com'
EMAIL_PORT = int(os.environ.get('DJANGO_EMAIL_PORT') or 465)
EMAIL_HOST_USER = os.environ.get('DJANGO_EMAIL_USER') or 'snxkxk@qq.com'
EMAIL_HOST_PASSWORD = os.environ.get('DJANGO_EMAIL_PASSWORD') or 'nnnlwdjilrzccbdi'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
# When DEBUG is False, unhandled exceptions are not shown but are emailed to the ADMINS listed below
ADMINS = [('admin', os.environ.get('DJANGO_ADMIN_EMAIL') or 'snxkxk@qq.com')]
# WX ADMIN password(Two times md5)
WXADMIN = os.environ.get(
'DJANGO_WXADMIN_PASSWORD') or '995F03AC401D6CABABAEF756FC4D43C7'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'INFO',
'handlers': ['console', 'log_file'],
},
'formatters': {
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d %(module)s] %(message)s',
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'log_file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'djangoblog.log',
'maxBytes': 16777216, # 16 MB
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djangoblog': {
'handlers': ['log_file', 'console'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
}
}
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# other
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
# COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
# creates absolute urls from relative ones
'compressor.filters.css_default.CssAbsoluteFilter',
# css minimizer
'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_JS_FILTERS = [
'compressor.filters.jsmin.JSMinFilter'
]
MEDIA_ROOT = os.path.join(SITE_ROOT, 'uploads')
MEDIA_URL = '/media/'
X_FRAME_OPTIONS = 'SAMEORIGIN'
| 30.188119
| 110
| 0.660216
|
7a368755fc6de8b911a77abcd0eab1852fcfdaa1
| 166
|
py
|
Python
|
blogapi/posts/urls.py
|
HyanBatista/blog-api-prometheus-grafana
|
144ed51de4e0b7997afd45440f4c0448b9f81c6f
|
[
"MIT"
] | null | null | null |
blogapi/posts/urls.py
|
HyanBatista/blog-api-prometheus-grafana
|
144ed51de4e0b7997afd45440f4c0448b9f81c6f
|
[
"MIT"
] | null | null | null |
blogapi/posts/urls.py
|
HyanBatista/blog-api-prometheus-grafana
|
144ed51de4e0b7997afd45440f4c0448b9f81c6f
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import PostList, PostDetail
urlpatterns = [
path('<int:pk>/', PostDetail.as_view()),
path('', PostList.as_view()),
]
| 23.714286
| 44
| 0.680723
|
b1a41e2c7147a214e83621c71c7f5ffefe6fafd4
| 416
|
py
|
Python
|
Aula 07 - Operadores Aritméticos/desafio011.py
|
josue-rosa/Python---Curso-em-Video
|
2d74c7421a49952b7c3eadb1010533525f2de338
|
[
"MIT"
] | 3
|
2020-10-07T03:21:07.000Z
|
2020-10-13T14:18:49.000Z
|
Aula 07 - Operadores Aritméticos/desafio011.py
|
josue-rosa/Python---Curso-em-Video
|
2d74c7421a49952b7c3eadb1010533525f2de338
|
[
"MIT"
] | null | null | null |
Aula 07 - Operadores Aritméticos/desafio011.py
|
josue-rosa/Python---Curso-em-Video
|
2d74c7421a49952b7c3eadb1010533525f2de338
|
[
"MIT"
] | null | null | null |
# Read the width and height of a wall in meters and compute its area and the
# amount of paint needed to paint it, given that each liter of paint covers an area of 2 m².
largura = float(input('largura da parede em Metros: '))
altura = float(input('altura da parede em Metros: '))
area = largura*altura
litro_tinta = area/2
print(f'Area da parede: {area} m²')
print(f'Você precisará de {litro_tinta:.0f} litro(s) de tinta')
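# Worked example (illustrative): a wall 4.0 m wide and 2.5 m high has an area of
# 10.0 m²; at 2 m² per liter, 5 liters of paint are required.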
| 37.818182
| 91
| 0.735577
|
e970cb46cbe26d72d96aa796ad69c15fe2073f9c
| 577
|
py
|
Python
|
layerserver/widgets/modificationdatetime.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 5
|
2018-06-07T12:54:35.000Z
|
2022-01-14T10:38:38.000Z
|
layerserver/widgets/modificationdatetime.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 140
|
2018-06-18T10:27:28.000Z
|
2022-03-23T09:53:15.000Z
|
layerserver/widgets/modificationdatetime.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T11:20:54.000Z
|
2021-04-13T11:20:54.000Z
|
from datetime import datetime
from django.utils.timezone import get_current_timezone
from .datetime import DatetimeWidget
class ModificationDatetimeWidget(DatetimeWidget):
base_type = 'datetime'
@staticmethod
def update(request, instance, validated_data, widget):
validated_data[widget['name']] = datetime.now(tz=get_current_timezone())
@staticmethod
def is_valid(cleaned_data):
if not cleaned_data['readonly']:
return ModificationDatetimeWidget.ERROR_READONLY_REQUIRED
return DatetimeWidget.is_valid(cleaned_data)
| 28.85
| 80
| 0.757366
|
c3cd24fbaceabdb175f21b073a8d3d4559c5e3f2
| 5,160
|
py
|
Python
|
airbyte-integrations/connectors/source-instagram/source_instagram/source.py
|
datacequia/airbyte
|
99ec90c1116bd6b2be9b3c38b0096ae11a40495e
|
[
"MIT"
] | 2
|
2021-08-04T03:17:38.000Z
|
2021-11-15T10:16:08.000Z
|
airbyte-integrations/connectors/source-instagram/source_instagram/source.py
|
datacequia/airbyte
|
99ec90c1116bd6b2be9b3c38b0096ae11a40495e
|
[
"MIT"
] | 52
|
2021-06-11T12:39:05.000Z
|
2022-03-30T04:59:35.000Z
|
airbyte-integrations/connectors/source-instagram/source_instagram/source.py
|
datacequia/airbyte
|
99ec90c1116bd6b2be9b3c38b0096ae11a40495e
|
[
"MIT"
] | 1
|
2021-08-04T03:25:02.000Z
|
2021-08-04T03:25:02.000Z
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from datetime import datetime
from typing import Any, Iterator, List, Mapping, MutableMapping, Tuple
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import AirbyteMessage, ConfiguredAirbyteCatalog, ConnectorSpecification, DestinationSyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from pydantic import BaseModel, Field
from source_instagram.api import InstagramAPI
from source_instagram.streams import Media, MediaInsights, Stories, StoryInsights, UserInsights, UserLifetimeInsights, Users
class ConnectorConfig(BaseModel):
class Config:
title = "Source Instagram"
start_date: datetime = Field(
description="The date from which you'd like to replicate data for User Insights, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated.",
pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$",
examples=["2017-01-25T00:00:00Z"],
)
access_token: str = Field(
description='The value of the access token generated. See the <a href="https://docs.airbyte.io/integrations/sources/instagram">docs</a> for more information',
airbyte_secret=True,
)
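# Illustrative raw config accepted by ConnectorConfig above (values are
# placeholders, not real credentials):
#   {
#     "start_date": "2017-01-25T00:00:00Z",
#     "access_token": "<INSTAGRAM_ACCESS_TOKEN>"
#   }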
class SourceInstagram(AbstractSource):
def check_connection(self, logger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
"""Connection check to validate that the user-provided config can be used to connect to the underlying API
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, Any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
ok = False
error_msg = None
try:
            config = ConnectorConfig.parse_obj(config) # FIXME: this will not be needed after we fix CDK
api = InstagramAPI(access_token=config.access_token)
logger.info(f"Available accounts: {api.accounts}")
ok = True
except Exception as exc:
error_msg = repr(exc)
return ok, error_msg
def read(
self, logger: AirbyteLogger, config: Mapping[str, Any], catalog: ConfiguredAirbyteCatalog, state: MutableMapping[str, Any] = None
) -> Iterator[AirbyteMessage]:
for stream in self.streams(config):
state_key = str(stream.name)
if state_key in state and hasattr(stream, "upgrade_state_to_latest_format"):
state[state_key] = stream.upgrade_state_to_latest_format(state[state_key])
return super().read(logger, config, catalog, state)
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""Discovery method, returns available streams
:param config: A Mapping of the user input configuration as defined in the connector spec.
"""
        config: ConnectorConfig = ConnectorConfig.parse_obj(config)  # FIXME: this will not be needed once the CDK is fixed
api = InstagramAPI(access_token=config.access_token)
return [
Media(api=api),
MediaInsights(api=api),
Stories(api=api),
StoryInsights(api=api),
Users(api=api),
UserLifetimeInsights(api=api),
UserInsights(api=api, start_date=config.start_date),
]
def spec(self, *args, **kwargs) -> ConnectorSpecification:
"""
Returns the spec for this integration. The spec is a JSON-Schema object describing the required configurations (e.g: username and password)
required to run this integration.
"""
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.io/integrations/sources/instagram",
changelogUrl="https://docs.airbyte.io/integrations/sources/instagram",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.append],
connectionSpecification=ConnectorConfig.schema(),
)
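# --- Illustrative usage sketch (not part of the original module) ---
# Exercises check_connection() with a placeholder config shaped like ConnectorConfig;
# the access token is a dummy value, so a real API call made with it will fail and
# the method will return (False, error).
if __name__ == "__main__":
    example_config = {
        "start_date": "2017-01-25T00:00:00Z",
        "access_token": "<placeholder-access-token>",
    }
    ok, error = SourceInstagram().check_connection(AirbyteLogger(), example_config)
    print(ok, error)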
| 45.663717
| 181
| 0.702132
|
cc1ac93e369f6ee9998b693f383581403d3369c4
| 5,790
|
py
|
Python
|
COTR/datasets/megadepth_dataset.py
|
jiangwei221/COTR-1
|
96abd8f95e23c7bf4d04811db6dd131887a2f37a
|
[
"Apache-2.0"
] | 283
|
2021-04-30T17:56:13.000Z
|
2022-03-30T10:33:02.000Z
|
COTR/datasets/megadepth_dataset.py
|
jiangwei221/COTR-1
|
96abd8f95e23c7bf4d04811db6dd131887a2f37a
|
[
"Apache-2.0"
] | 27
|
2021-06-04T10:36:53.000Z
|
2022-03-31T04:58:07.000Z
|
COTR/datasets/megadepth_dataset.py
|
jiangwei221/COTR-1
|
96abd8f95e23c7bf4d04811db6dd131887a2f37a
|
[
"Apache-2.0"
] | 34
|
2021-05-13T04:15:28.000Z
|
2022-01-17T21:22:03.000Z
|
'''
dataset-specific layer for MegaDepth
'''
import os
import json
import random
from collections import namedtuple
import numpy as np
from COTR.datasets import colmap_helper
from COTR.global_configs import dataset_config
from COTR.sfm_scenes import knn_search
from COTR.utils import debug_utils, utils, constants
SceneCapIndex = namedtuple('SceneCapIndex', ['scene_index', 'capture_index'])
def prefix_of_img_path_for_magedepth(img_path):
'''
    get the path prefix for an image of the MegaDepth dataset
'''
prefix = os.path.abspath(os.path.join(img_path, '../../../..')) + '/'
return prefix
class MegadepthSceneDataBase():
scenes = {}
knn_engine_dict = {}
@classmethod
def _load_scene(cls, opt, scene_dir_dict):
if scene_dir_dict['scene_dir'] not in cls.scenes:
if opt.info_level == 'rgb':
assert 0
elif opt.info_level == 'rgbd':
scene_dir = scene_dir_dict['scene_dir']
images_dir = scene_dir_dict['image_dir']
depth_dir = scene_dir_dict['depth_dir']
scene = colmap_helper.ColmapWithDepthAsciiReader.read_sfm_scene_given_valid_list_path(scene_dir, images_dir, depth_dir, dataset_config[opt.dataset_name]['valid_list_json'], opt.crop_cam)
if opt.use_ram:
scene.read_data_to_ram(['image', 'depth'])
else:
raise ValueError()
knn_engine = knn_search.ReprojRatioKnnSearch(scene)
cls.scenes[scene_dir_dict['scene_dir']] = scene
cls.knn_engine_dict[scene_dir_dict['scene_dir']] = knn_engine
else:
pass
class MegadepthDataset():
def __init__(self, opt, dataset_type):
assert dataset_type in ['train', 'val', 'test']
assert len(opt.scenes_name_list) > 0
self.opt = opt
self.dataset_type = dataset_type
self.use_ram = opt.use_ram
self.scenes_name_list = opt.scenes_name_list
self.scenes = None
self.knn_engine_list = None
self.total_caps_set = None
self.query_caps_set = None
self.db_caps_set = None
self.img_path_to_scene_cap_index_dict = {}
self.scene_index_to_db_caps_mask_dict = {}
self._load_scenes()
@property
def num_scenes(self):
return len(self.scenes)
@property
def num_queries(self):
return len(self.query_caps_set)
@property
def num_db(self):
return len(self.db_caps_set)
def get_scene_cap_index_by_index(self, index):
assert index < len(self.query_caps_set)
img_path = sorted(list(self.query_caps_set))[index]
scene_cap_index = self.img_path_to_scene_cap_index_dict[img_path]
return scene_cap_index
def _get_common_subset_caps_from_json(self, json_path, total_caps):
prefix = prefix_of_img_path_for_magedepth(list(total_caps)[0])
with open(json_path, 'r') as f:
common_caps = [prefix + cap for cap in json.load(f)]
common_caps = set(total_caps) & set(common_caps)
return common_caps
def _extend_img_path_to_scene_cap_index_dict(self, img_path_to_cap_index_dict, scene_id):
for key in img_path_to_cap_index_dict.keys():
self.img_path_to_scene_cap_index_dict[key] = SceneCapIndex(scene_id, img_path_to_cap_index_dict[key])
def _create_scene_index_to_db_caps_mask_dict(self, db_caps_set):
scene_index_to_db_caps_mask_dict = {}
for cap in db_caps_set:
scene_id, cap_id = self.img_path_to_scene_cap_index_dict[cap]
if scene_id not in scene_index_to_db_caps_mask_dict:
scene_index_to_db_caps_mask_dict[scene_id] = []
scene_index_to_db_caps_mask_dict[scene_id].append(cap_id)
for _k, _v in scene_index_to_db_caps_mask_dict.items():
scene_index_to_db_caps_mask_dict[_k] = np.array(sorted(_v))
return scene_index_to_db_caps_mask_dict
def _load_scenes(self):
scenes = []
knn_engine_list = []
total_caps_set = set()
for scene_id, scene_dir_dict in enumerate(self.scenes_name_list):
MegadepthSceneDataBase._load_scene(self.opt, scene_dir_dict)
scene = MegadepthSceneDataBase.scenes[scene_dir_dict['scene_dir']]
knn_engine = MegadepthSceneDataBase.knn_engine_dict[scene_dir_dict['scene_dir']]
total_caps_set = total_caps_set | set(scene.img_path_to_index_dict.keys())
self._extend_img_path_to_scene_cap_index_dict(scene.img_path_to_index_dict, scene_id)
scenes.append(scene)
knn_engine_list.append(knn_engine)
self.scenes = scenes
self.knn_engine_list = knn_engine_list
self.total_caps_set = total_caps_set
self.query_caps_set = self._get_common_subset_caps_from_json(dataset_config[self.opt.dataset_name][f'{self.dataset_type}_json'], total_caps_set)
self.db_caps_set = self._get_common_subset_caps_from_json(dataset_config[self.opt.dataset_name]['train_json'], total_caps_set)
self.scene_index_to_db_caps_mask_dict = self._create_scene_index_to_db_caps_mask_dict(self.db_caps_set)
def get_query_with_knn(self, index):
scene_index, cap_index = self.get_scene_cap_index_by_index(index)
query_cap = self.scenes[scene_index].captures[cap_index]
knn_engine = self.knn_engine_list[scene_index]
if scene_index in self.scene_index_to_db_caps_mask_dict:
db_mask = self.scene_index_to_db_caps_mask_dict[scene_index]
else:
db_mask = None
pool = knn_engine.get_knn(query_cap, self.opt.pool_size, db_mask=db_mask)
nn_caps = random.sample(pool, min(len(pool), self.opt.k_size))
return query_cap, nn_caps
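# --- Illustrative construction sketch (not part of the original module) ---
# The option object below only carries the attributes this file actually reads;
# every path, the dataset name and the crop_cam value are placeholders, and
# dataset_config must already know about the chosen dataset_name.
from argparse import Namespace

_example_opt = Namespace(
    dataset_name='megadepth',
    info_level='rgbd',
    use_ram=False,
    crop_cam='no_crop',
    pool_size=20,
    k_size=1,
    scenes_name_list=[{
        'scene_dir': '/path/to/scene/sparse/',
        'image_dir': '/path/to/scene/images/',
        'depth_dir': '/path/to/scene/depths/',
    }],
)
# dataset = MegadepthDataset(_example_opt, 'val')
# query_cap, nn_caps = dataset.get_query_with_knn(0)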
| 41.06383
| 202
| 0.692228
|
8685153218025a7affb222c5cb69e18c9319c560
| 1,886
|
py
|
Python
|
smoke/convert/modis_aod_to_nc.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/convert/modis_aod_to_nc.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
smoke/convert/modis_aod_to_nc.py
|
minnieteng/smoke_project
|
cc3c8f16f7759fe29e46d3cec32a3ed6ca86bd5f
|
[
"Apache-2.0"
] | null | null | null |
"""
Functions to transform MODIS Aerosol Optical Depth files to netCDF files
"""
import datetime
import logging
import os
import sys
import xarray as xr
import click
import smoke.utils.utilities as utilities
from pathlib import Path
from smoke.load.parsers import *
logging.getLogger(__name__).addHandler(logging.NullHandler())
def convert_modis_aod(modis_aod_file, output_directory):
    """Convert the raw MODIS Aerosol Optical Depth file MODIS_AOD_FILE to a netCDF file in OUTPUT_DIRECTORY."""
    ds = modis_aod_to_xr(modis_aod_file)
    name = os.path.join(output_directory, Path(modis_aod_file).stem)
    utilities.mkdir(output_directory)  # ensure the output directory exists
ds.to_netcdf(
name, mode="w", format="NETCDF4",
)
def modis_aod_to_xr(modis_aod_file):
"""loading and returning data from modis Aerosol Optical Depth
files as of file format including and previous too 2020/06/30
:param modis_aod_file: path to raw modis aod data file
:type modis_aod_file: str
:returns: xarray dataset with specs for GeographicalDataset
:rtype: xr.Dataset
"""
parser = MODISAODParser()
return parser.parse_file(modis_aod_file)
@click.command(help=convert_modis_aod.__doc__)
@click.argument("modis_aod_file", type=click.Path(exists=True))
@click.argument("output_directory", type=click.Path(writable=True))
@click.option(
"-v",
"--verbosity",
default="WARNING",
show_default=True,
type=click.Choice(("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")),
help="""
Choose the logging level. Defaults to WARNING.
WARNING, ERROR, and CRITICAL will only report when Murphy's law kicks in
""",
)
def cli(modis_aod_file, output_directory, verbosity):
    logging.basicConfig(
        level=verbosity,
format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
stream=sys.stdout,
)
convert_modis_aod(modis_aod_file, output_directory)
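# --- Illustrative additions (not part of the original module) ---
# A programmatic call; both paths are placeholders:
#     convert_modis_aod("data/MOD04_L2.A2019213.0000.hdf", "converted_nc/")
# And an entry point so the click command can be invoked as a script:
if __name__ == "__main__":
    cli()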
| 29.015385
| 80
| 0.71474
|
a676ef9a12e09e1b750fc3e11e9e41a83edb3329
| 48,447
|
py
|
Python
|
tensorflow/python/training/optimizer.py
|
ashuven63/tf_audio
|
bc561b81069001da01a1c7df4c16f6b9ba9a400b
|
[
"Apache-2.0"
] | 1
|
2018-05-30T00:34:05.000Z
|
2018-05-30T00:34:05.000Z
|
tensorflow/python/training/optimizer.py
|
timctho/tensorflow
|
015c72eac3f4e448dd8ab852843e902771496532
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/training/optimizer.py
|
timctho/tensorflow
|
015c72eac3f4e448dd8ab852843e902771496532
|
[
"Apache-2.0"
] | 1
|
2021-11-16T19:59:48.000Z
|
2021-11-16T19:59:48.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import checkpointable
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import slot_creator
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_filtered_grad_fn(grad_fn):
# `distributed_context.join()` requires that its arguments are parallel
# across threads, and in particular that `grads_and_vars` has the same
# variables in the same order.
# When computing gradients in eager mode with multiple threads, you
# can get extra variables with a gradient of `None`. This happens when
# those variables are accessed in another thread during the gradient
# computation. To get a consistent set of variables, we filter out
# those with `None` gradients.
def filtered_grad_fn(x=None):
return [(g, v) for g, v in grad_fn(x) if g is not None]
return filtered_grad_fn
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
def _var_key(var):
if context.executing_eagerly():
return var._unique_id # pylint: disable=protected-access
return (var.op.graph, var.op.name)
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def __str__(self):
return "<_RefVariableProcessor(%s)>" % self._v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices)
update_op = optimizer._resource_apply_dense(g, self._v)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if isinstance(
v, resource_variable_ops.ResourceVariable) and not v._in_graph_mode: # pylint: disable=protected-access
# True if and only if `v` was initialized eagerly.
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
@tf_export("train.Optimizer")
class Optimizer(
# Optimizers inherit from CheckpointableBase rather than Checkpointable
# since they do most of their dependency management themselves (slot
# variables are special-cased, and non-slot variables are keyed to graphs).
checkpointable.CheckpointableBase):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
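  For example (illustrative only, reusing `opt` and `cost` from the usage sketch above):

  ```python
  # Gate at the graph level for maximum reproducibility.
  opt_op = opt.minimize(cost, gate_gradients=GradientDescentOptimizer.GATE_GRAPH)
  ```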
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
  This can be useful if you want to log or debug a training algorithm, report stats
about the slots, etc.
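  For example (illustrative only; `var` is any variable passed to `minimize()`):

  ```python
  opt = MomentumOptimizer(learning_rate=0.1, momentum=0.9)
  opt_op = opt.minimize(cost, var_list=<list of variables>)
  print(opt.get_slot_names())               # e.g. ['momentum']
  slot_var = opt.get_slot(var, "momentum")  # None if no such slot was created
  ```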
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
      use_locking: Bool. If True, use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name :
# {_var_key(variable_to_train): slot_for_the_variable, ... },
# ... }
self._slots = {}
self._non_slot_dict = {}
# For implementing Checkpointable. Stores information about how to restore
# slot variables which have not yet been created
# (checkpointable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
# TODO(isaprykin): When using a DistributionStrategy, and when an
# optimizer is created in each tower, it might be dangerous to
    # rely on some Optimizer methods. When such methods are called on a
# per-tower optimizer, an exception needs to be thrown. We do
# allow creation per-tower optimizers however, because the
# compute_gradients()->apply_gradients() sequence is safe.
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
    This method simply combines calls to `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking
no arguments which returns the value to minimize. When eager execution
is enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
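    For example (illustrative only; `loss_fn`, `model`, `inputs` and `labels` are placeholders):

    ```python
    # Eager mode: pass a zero-argument callable instead of a Tensor.
    grads_and_vars = opt.compute_gradients(lambda: loss_fn(model(inputs), labels))
    ```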
"""
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
# Scale loss if using a "mean" loss reduction and multiple towers.
# Have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
if distribute_lib.get_loss_reduction() == "mean":
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
loss_value *= (1. / num_towers)
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
# Non-callable/Tensor loss case
if context.executing_eagerly():
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# Scale loss if using a "mean" loss reduction and multiple towers.
if distribute_lib.get_loss_reduction() == "mean":
num_towers = distribute_lib.get_distribution_strategy().num_towers
if num_towers > 1:
loss *= (1. / num_towers)
if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if gate_gradients == Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If you should use `_distributed_apply()` instead.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
# Handle DistributionStrategy case.
if distribute_lib.get_cross_tower_context():
raise RuntimeError("Use `_distributed_apply()` instead of "
"`apply_gradients()` in a cross-tower context.")
# TODO(isaprykin): Get rid of `has_distribution_strategy()` check by
# always calling _distributed_apply(), using the default distribution
# as needed.
if distribute_lib.has_distribution_strategy():
grads_and_vars = get_filtered_grad_fn(lambda _: grads_and_vars)()
return distribute_lib.get_tower_context().merge_call(
self._distributed_apply, grads_and_vars, global_step, name)
# No DistributionStrategy case.
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
converted_grads_and_vars = []
for g, v in grads_and_vars:
if g is not None:
try:
# Convert the grad to Tensor or IndexedSlices if necessary.
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError(
"Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
p = _get_processor(v)
converted_grads_and_vars.append((g, v, p))
converted_grads_and_vars = tuple(converted_grads_and_vars)
var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
if not var_list:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, _, v in converted_grads_and_vars],))
with ops.init_scope():
self._create_slots(var_list)
update_ops = []
with ops.name_scope(name, self._name) as name:
self._prepare()
for grad, var, processor in converted_grads_and_vars:
if grad is None:
continue
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
if context.executing_eagerly() or isinstance(
var,
resource_variable_ops.ResourceVariable) and not var._in_graph_mode: # pylint: disable=protected-access
scope_name = ""
else:
scope_name = var.op.name
with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
update_ops.append(processor.update_op(self, grad))
if global_step is None:
apply_updates = self._finish(update_ops, name)
else:
with ops.control_dependencies([self._finish(update_ops, "update")]):
with ops.colocate_with(global_step):
if isinstance(global_step, resource_variable_ops.ResourceVariable):
# TODO(apassos): the implicit read in assign_add is slow; consider
# making it less so.
apply_updates = resource_variable_ops.assign_add_variable_op(
global_step.handle,
ops.convert_to_tensor(1, dtype=global_step.dtype),
name=name)
else:
apply_updates = state_ops.assign_add(global_step, 1, name=name)
if not context.executing_eagerly():
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def _distributed_apply(self,
distribution,
grads_and_vars,
global_step=None,
name=None):
"""A version of `apply_gradients` for cross-tower context.
This is a version of `apply_gradients()` for when you are using a
`DistributionStrategy` and are in a cross-tower context. If in a
tower context, use `apply_gradients()` as normal.
Args:
distribution: A `DistributionStrategy` object.
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`, and then aggregated across towers.
global_step: Optional (mirrored) `Variable` to increment by one
after the variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients across all
towers. If `global_step` was not None, that operation also
increments `global_step`.
"""
reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
# Note that this is called in a cross-tower context.
self._create_slots(var_list)
def update(v, g):
"""Apply gradients to a replica variable."""
assert v is not None
try:
# Convert the grad to Tensor or IndexedSlices if necessary.
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError("Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
p = _get_processor(v)
scope_name = "" if context.executing_eagerly() else v.op.name
# device_policy is set because non-mirrored tensors will be read in
# `update_op`. `_resource_apply_dense`, `lr_t`, `beta1_t` and `beta2_t`
# is an example.
with ops.name_scope("update_" + scope_name):
return p.update_op(self, g)
with ops.name_scope(name, self._name) as name:
self._prepare()
update_ops = [
op
for grad, var in grads_and_vars
for op in distribution.unwrap(distribution.update(var, update, grad))
]
def finish(self, update_ops):
return self._finish(update_ops, "update")
non_slot_devices = distribution.non_slot_devices(var_list)
finish_updates = distribution.update_non_slot(
non_slot_devices, finish, self, update_ops)
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(distribution.unwrap(finish_updates)):
apply_updates = distribution.group(distribution.update(
global_step, state_ops.assign_add, 1, name=name))
if not context.executing_eagerly():
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
# pylint: disable=protected-access
named_slots = self._slots.get(name, None)
if not named_slots:
return None
if hasattr(var, "_mirrored_container"):
# NOTE: If this isn't patched, then there is no `handle` in
# `_resource_apply_dense`.
mirrored_container = var._mirrored_container()
assert mirrored_container is not None
if context.executing_eagerly():
key = mirrored_container._unique_id
else:
key = (mirrored_container.graph, mirrored_container._shared_name)
# pylint: enable=protected-access
mirrored_slot = named_slots.get(key, None)
if mirrored_slot is None: return None
return mirrored_slot.get(device=var.device)
return named_slots.get(_var_key(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
executing_eagerly = context.executing_eagerly()
current_graph = ops.get_default_graph()
def _from_current_graph(variable):
if executing_eagerly:
# No variable.op in eager mode. We don't expect lots of eager graphs,
# but behavior should be consistent with graph mode.
return variable._graph_key == current_graph._graph_key # pylint: disable=protected-access
else:
return variable.op.graph is current_graph
optimizer_variables = [v for v in self._non_slot_variables()
if _from_current_graph(v)]
for _, variable_dict in self._slots.items():
for _, slot_for_variable in variable_dict.items():
if _from_current_graph(slot_for_variable):
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _create_non_slot_variable(self, initial_value, name, colocate_with):
"""Add an extra variable, not associated with a slot."""
# Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
eager = context.executing_eagerly()
graph = None if eager else colocate_with.graph
key = (name, graph)
v = self._non_slot_dict.get(key, None)
if v is None:
self._maybe_initialize_checkpointable()
distribution_strategy = distribute_lib.get_distribution_strategy()
with distribution_strategy.colocate_vars_with(colocate_with):
if eager:
restored_initial_value = self._preload_simple_restoration(
name=name, shape=None)
if restored_initial_value is not None:
initial_value = restored_initial_value
v = variable_scope.variable(initial_value, name=name, trainable=False)
# Restore this variable by name if necessary, but don't add a
# Checkpointable dependency. Optimizers return the current graph's
# non-slot variables from _checkpoint_dependencies explicitly rather
# than unconditionally adding dependencies (since there may be multiple
# non-slot variables with the same name in different graphs, trying to
# save all of them would result in errors).
self._handle_deferred_dependencies(name=name, checkpointable=v)
self._non_slot_dict[key] = v
return v
@property
def _checkpoint_dependencies(self):
"""From Checkpointable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
current_graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
for (name, _), variable_object in sorted(self._non_slot_dict.items(),
# Avoid comparing graphs
key=lambda item: item[0][0]):
if variable_object._graph_key == current_graph_key: # pylint: disable=protected-access
current_graph_non_slot_variables.append(
checkpointable.CheckpointableReference(
name=name, ref=variable_object))
return (super(Optimizer, self)._checkpoint_dependencies
+ current_graph_non_slot_variables)
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
unconditional = super(Optimizer, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
graph = None if context.executing_eagerly() else ops.get_default_graph()
return self._get_non_slot_variable(name, graph=graph)
def _get_non_slot_variable(self, name, graph=None):
non_slot = self._non_slot_dict.get((name, graph), None)
if hasattr(non_slot, "_mirrored_container"):
# This is a mirrored non-slot. In order to enable code like `_finish`
# to assign to a non-slot, return the current context replica.
return non_slot.get()
else:
return non_slot
def _non_slot_variables(self):
"""Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
"""
return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _create_slots(self, var_list):
"""Create all slots needed by the variables.
Args:
var_list: A list of `Variable` objects.
"""
# No slots needed by default
pass
def _prepare(self):
"""Create all needed tensors before applying gradients.
This is called with the name_scope using the "name" that
users have chosen for the application of gradients.
"""
pass
def _apply_dense(self, grad, var):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
def _apply_sparse(self, grad, var):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, update_ops, name_scope):
"""Do what is needed to finish the update.
This is called with the `name_scope` using the "name" that
users have chosen for the application of gradients.
Args:
update_ops: List of `Operation` objects to update variables. This list
contains the values returned by the `_apply_dense()` and
`_apply_sparse()` calls.
name_scope: String. Name to use for the returned operation.
Returns:
The operation to apply updates.
"""
return control_flow_ops.group(*update_ops, name=name_scope)
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
# --------------
# For implementing the Checkpointable interface.
# --------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
named_slots = self._slot_dict(slot_name)
variable_key = _var_key(variable)
slot_variable = named_slots.get(variable_key, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = checkpointable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self._get_or_make_slot(
var=variable,
val=initializer,
slot_name=slot_name,
op_name=self._name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
| 39.906919
| 115
| 0.696927
|
305dff128ceb1c0c4ee229afab49462dc249dd35
| 2,613
|
py
|
Python
|
src/pyams_portal/portlets/spacer/skin/__init__.py
|
Py-AMS/pyams-portal
|
a19f48079e683711394b8e57c05cf7cd9d20a888
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_portal/portlets/spacer/skin/__init__.py
|
Py-AMS/pyams-portal
|
a19f48079e683711394b8e57c05cf7cd9d20a888
|
[
"ZPL-2.1"
] | null | null | null |
src/pyams_portal/portlets/spacer/skin/__init__.py
|
Py-AMS/pyams-portal
|
a19f48079e683711394b8e57c05cf7cd9d20a888
|
[
"ZPL-2.1"
] | null | null | null |
#
# Copyright (c) 2015-2021 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_portal.portlets.spacer.skin module
"""
from persistent import Persistent
from zope.container.contained import Contained
from zope.interface import Interface
from zope.schema.fieldproperty import FieldProperty
from pyams_layer.interfaces import IPyAMSLayer
from pyams_portal.interfaces import IPortalContext, IPortletRenderer
from pyams_portal.portlets.spacer import ISpacerPortletSettings
from pyams_portal.portlets.spacer.interfaces import ISpacerPortletRendererSettings
from pyams_portal.skin import PortletRenderer
from pyams_template.template import template_config
from pyams_utils.adapter import adapter_config
from pyams_utils.factory import factory_config
__docformat__ = 'restructuredtext'
from pyams_portal import _ # pylint: disable=ungrouped-imports
@factory_config(provided=ISpacerPortletRendererSettings)
class SpacerPortletRendererSettings(Persistent, Contained):
"""Spacer portlet renderer settings"""
transparent = FieldProperty(ISpacerPortletRendererSettings['transparent'])
background_color = FieldProperty(ISpacerPortletRendererSettings['background_color'])
with_ruler = FieldProperty(ISpacerPortletRendererSettings['with_ruler'])
#
# Spacer portlet renderers
#
class BaseSpacerPortletRenderer(PortletRenderer):
"""Base spacer renderer"""
settings_interface = ISpacerPortletRendererSettings
@adapter_config(required=(IPortalContext, IPyAMSLayer, Interface, ISpacerPortletSettings),
provides=IPortletRenderer)
@template_config(template='templates/spacer.pt', layer=IPyAMSLayer)
class SpacerPortletDefaultRenderer(BaseSpacerPortletRenderer):
"""Spacer portlet renderer"""
label = _("Simple spacer (default)")
weight = 10
@adapter_config(name='double-spacer',
required=(IPortalContext, IPyAMSLayer, Interface, ISpacerPortletSettings),
provides=IPortletRenderer)
@template_config(template='templates/spacer-double.pt', layer=IPyAMSLayer)
class DoubleSpacerPortletDefaultRenderer(BaseSpacerPortletRenderer):
"""Double spacer portlet renderer"""
label = _("Double spacer")
weight = 20
| 34.84
| 90
| 0.798316
|
79e9856d8e13dc948385709553647c71ef698485
| 2,476
|
py
|
Python
|
tiledb/__init__.py
|
georgeSkoumas/TileDB-Py
|
e02824be50fdac445c81f78c6b1586ab1ec79696
|
[
"MIT"
] | 1
|
2020-10-21T08:14:43.000Z
|
2020-10-21T08:14:43.000Z
|
tiledb/__init__.py
|
georgeSkoumas/TileDB-Py
|
e02824be50fdac445c81f78c6b1586ab1ec79696
|
[
"MIT"
] | null | null | null |
tiledb/__init__.py
|
georgeSkoumas/TileDB-Py
|
e02824be50fdac445c81f78c6b1586ab1ec79696
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import ctypes
import os
import sys
if os.name == "posix":
if sys.platform == "darwin":
lib_name = "libtiledb.dylib"
else:
lib_name = "libtiledb.so"
else:
lib_name = "tiledb"
# On Windows and whl builds, we may have a shared library already linked, or
# adjacent to, the cython .pyd shared object. In this case, we can import directly
# from .libtiledb
try:
import tiledb
from .libtiledb import Ctx
except:
try:
lib_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "native")
ctypes.CDLL(os.path.join(lib_dir, lib_name))
    except OSError:
# Otherwise try loading by name only.
ctypes.CDLL(lib_name)
from .libtiledb import (
Array,
Ctx,
Config,
Dim,
Domain,
Attr,
ArraySchema,
TileDBError,
VFS,
FileIO,
FilterList,
NoOpFilter,
GzipFilter,
ZstdFilter,
LZ4Filter,
Bzip2Filter,
RleFilter,
DoubleDeltaFilter,
BitShuffleFilter,
ByteShuffleFilter,
BitWidthReductionFilter,
PositiveDeltaFilter,
ChecksumMD5Filter,
ChecksumSHA256Filter,
consolidate,
default_ctx,
group_create,
object_type,
ls,
walk,
remove,
move,
schema_like,
stats_enable,
stats_disable,
stats_reset,
stats_dump,
vacuum
)
from .array import DenseArray, SparseArray
from .highlevel import (
open,
save,
from_numpy,
empty_like,
array_exists
)
# TODO restricted imports
from .dataframe_ import from_csv, from_pandas, from_dataframe, open_dataframe
from .version import version as __version__
# Note: we use a modified namespace packaging to allow continuity of existing TileDB-Py imports.
# Therefore, 'tiledb/__init__.py' must *only* exist in this package.
# Furthermore, in sub-packages, the `find_packages` helper will not work at the
# root directory due to lack of 'tiledb/__init__.py'. Sub-package 'setup.py' scripts
# must declare constituents accordingly, such as by running 'find_packages' on a sub-directory
# and applying prefixes accordingly.
# 1) https://packaging.python.org/guides/packaging-namespace-packages/#native-namespace-packages
# 2) https://stackoverflow.com/a/53486554
#
# Note: 'pip -e' in particular will not work without this declaration:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
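# As a rough illustration of the sub-package convention described above, a
# hypothetical sub-package 'setup.py' (names below are assumptions, not part of
# this package) could prefix its packages explicitly instead of calling
# find_packages() at the repository root:
#
#     from setuptools import find_packages, setup
#     setup(
#         name="tiledb-some-extension",
#         packages=["tiledb." + p for p in find_packages("tiledb")],
#     )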
| 25.791667
| 100
| 0.682956
|
8736f8588a223c729cacecbc04a33c175ef52985
| 923
|
py
|
Python
|
smdebug_rulesconfig/debugger_rules/_ruleGroups.py
|
tomassosorio/sagemaker-debugger-rulesconfig
|
60b348e12f939d87404b44b96151596782f70b46
|
[
"Apache-2.0"
] | 8
|
2020-02-09T19:57:56.000Z
|
2021-10-20T14:51:04.000Z
|
smdebug_rulesconfig/debugger_rules/_ruleGroups.py
|
tomassosorio/sagemaker-debugger-rulesconfig
|
60b348e12f939d87404b44b96151596782f70b46
|
[
"Apache-2.0"
] | 6
|
2020-06-30T04:29:29.000Z
|
2021-03-09T03:27:41.000Z
|
smdebug_rulesconfig/debugger_rules/_ruleGroups.py
|
tomassosorio/sagemaker-debugger-rulesconfig
|
60b348e12f939d87404b44b96151596782f70b46
|
[
"Apache-2.0"
] | 7
|
2019-12-08T20:17:04.000Z
|
2021-07-08T09:36:21.000Z
|
# set of rules that are expected to work for all supported frameworks
# Supported Frameworks: Mxnet, Pytorch, Tensorflow, Xgboost
UNIVERSAL_RULES = {
"AllZero",
"ClassImbalance",
"Confusion",
"LossNotDecreasing",
"Overfit",
"Overtraining",
"SimilarAcrossRuns",
"StalledTrainingRule",
"UnchangedTensor",
}
# set of rules that are expected to work only for supported deep learning frameworks
# Supported Deep Learning Frameworks: Mxnet, Pytorch, Tensorflow
DEEP_LEARNING_RULES = {
"DeadRelu",
"ExplodingTensor",
"PoorWeightInitialization",
"SaturatedActivation",
"TensorVariance",
"VanishingGradient",
"WeightUpdateRatio",
}
# Rules intended to be used as part of a DL Application
DEEP_LEARNING_APPLICATION_RULES = {"CheckInputImages", "NLPSequenceRatio"}
# Rules only compatible with XGBOOST
XGBOOST_RULES = {"FeatureImportanceOverweight", "TreeDepth"}
| 28.84375
| 88
| 0.735645
|
157eaacd9ca79022ac942c50bcc78019e9ea719e
| 869
|
py
|
Python
|
members/apps.py
|
looselycoupled/partisan-discourse
|
8579924094c92e25e21ce59a26232269cf6b34bc
|
[
"Apache-2.0"
] | 25
|
2017-02-27T19:44:23.000Z
|
2021-04-11T00:11:49.000Z
|
members/apps.py
|
looselycoupled/partisan-discourse
|
8579924094c92e25e21ce59a26232269cf6b34bc
|
[
"Apache-2.0"
] | 26
|
2016-07-16T15:41:07.000Z
|
2016-10-11T16:44:04.000Z
|
members/apps.py
|
looselycoupled/partisan-discourse
|
8579924094c92e25e21ce59a26232269cf6b34bc
|
[
"Apache-2.0"
] | 9
|
2016-08-08T17:19:34.000Z
|
2020-03-04T00:31:26.000Z
|
# members.apps
# Describes the Members application for Django
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Sat Aug 22 10:41:24 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: apps.py [d011c91] benjamin@bengfort.com $
"""
Describes the Members application for Django
"""
##########################################################################
## Imports
##########################################################################
from django.apps import AppConfig
##########################################################################
## Members Config
##########################################################################
class MembersConfig(AppConfig):
name = 'members'
verbose_name = "Member Profiles"
def ready(self):
import members.signals
| 27.15625
| 74
| 0.472957
|
65f1576963aaeaefe29edf090f737e0efaa6ebcd
| 761
|
pyde
|
Python
|
mode/examples/Basics/Transform/Rotate/Rotate.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | 4
|
2016-08-09T14:14:36.000Z
|
2021-12-10T07:51:35.000Z
|
mode/examples/Basics/Transform/Rotate/Rotate.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | null | null | null |
mode/examples/Basics/Transform/Rotate/Rotate.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | null | null | null |
"""
Rotate.
Rotating a square around the Z axis. To get the results
you expect, send the rotate function angle parameters that are
values between 0 and PI*2 (TWO_PI which is roughly 6.28). If you prefer to
think about angles as degrees (0-360), you can use the radians()
method to convert your values. For example: scale(radians(90))
is identical to the statement scale(PI/2).
"""
angle = 0
jitter = 0
def setup():
size(640, 360)
noStroke()
fill(255)
rectMode(CENTER)
def draw():
    global angle, jitter
    background(51)
# during even-numbered seconds (0, 2, 4, 6...)
if second() % 2 == 0:
jitter = random(-0.1, 0.1)
angle = angle + jitter
c = cos(angle)
translate(width / 2, height / 2)
rotate(c)
rect(0, 0, 180, 180)
| 21.742857
| 75
| 0.641261
|
6f8110ddcd796d50c60f5e5ae09010caf8840c52
| 3,639
|
py
|
Python
|
pills_online/settings.py
|
TermiNutZ/PillsOnline
|
d524cf076262285f1d6e1b2368e2653477e0eccc
|
[
"MIT"
] | null | null | null |
pills_online/settings.py
|
TermiNutZ/PillsOnline
|
d524cf076262285f1d6e1b2368e2653477e0eccc
|
[
"MIT"
] | null | null | null |
pills_online/settings.py
|
TermiNutZ/PillsOnline
|
d524cf076262285f1d6e1b2368e2653477e0eccc
|
[
"MIT"
] | null | null | null |
"""
Django settings for pills_online project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from .personal_settings import get_db_settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n6jn3o(1mbgd)_d3zq(8nll*$8=r*z*2-xw^i^gz!#rk3r=7q1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'rest_framework',
'rest_framework.authtoken'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pills_online.urls'
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pills_online.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': get_db_settings()
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
REST_FRAMEWORK = {
'PAGE_SIZE': 4000,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
LOCAL_APPS = (
'project.api',
)
ALLOWED_HOSTS = ['10.91.83.175', 'localhost', '127.0.0.1', '10.240.20.81']
| 25.626761
| 91
| 0.699368
|
4502fb02b6c6aafecc11908a3319e662003a4e6f
| 13,148
|
py
|
Python
|
corehq/apps/reports/standard/inspect.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/reports/standard/inspect.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/reports/standard/inspect.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
import functools
from couchdbkit.exceptions import ResourceNotFound
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from jsonobject import DateTimeProperty
from corehq.apps.reports import util
from corehq.apps.reports.filters.users import ExpandedMobileWorkerFilter
from corehq import feature_previews, privileges
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.standard import ProjectReport, ProjectReportParametersMixin, DatespanMixin
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.display import xmlns_to_name
from corehq.apps.reports.dont_use.fields import StrongFilterUsersField
from corehq.apps.reports.filters.forms import MISSING_APP_ID
from corehq.apps.reports.generic import GenericTabularReport, ProjectInspectionReportParamsMixin, ElasticProjectInspectionReport
from corehq.apps.reports.standard.monitoring import MultiFormDrilldownMixin, CompletionOrSubmissionTimeMixin
from corehq.apps.reports.util import datespan_from_beginning
from corehq.apps.users.models import CouchUser
from corehq.elastic import es_query, ADD_TO_ES_FILTER
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX
from dimagi.utils.couch import get_cached_property, IncompatibleDocument, safe_index
from corehq.apps.reports.graph_models import PieChart
from corehq import elastic
from dimagi.utils.decorators.memoized import memoized
class ProjectInspectionReport(ProjectInspectionReportParamsMixin, GenericTabularReport, ProjectReport, ProjectReportParametersMixin):
"""
Base class for this reporting section
"""
exportable = False
asynchronous = False
ajax_pagination = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.users.SelectMobileWorkerFilter']
class SubmitHistory(ElasticProjectInspectionReport, ProjectReport,
ProjectReportParametersMixin,
CompletionOrSubmissionTimeMixin, MultiFormDrilldownMixin,
DatespanMixin):
name = ugettext_noop('Submit History')
slug = 'submit_history'
fields = [
'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
]
ajax_pagination = True
filter_users_field_class = StrongFilterUsersField
include_inactive = True
# Feature preview flag for Submit History Filters
def __init__(self, request, **kwargs):
if feature_previews.SUBMIT_HISTORY_FILTERS.enabled(request.domain):
# create a new instance attribute instead of modifying the
# class attribute
self.fields = self.fields + [
'corehq.apps.reports.filters.forms.FormDataFilter',
'corehq.apps.reports.filters.forms.CustomFieldFilter',
]
super(SubmitHistory, self).__init__(request, **kwargs)
@property
def other_fields(self):
return filter(None, self.request.GET.get('custom_field', "").split(","))
@property
def headers(self):
h = [
DataTablesColumn(_("View Form")),
DataTablesColumn(_("Username"), prop_name='form.meta.username'),
DataTablesColumn(
_("Submission Time") if self.by_submission_time
else _("Completion Time"),
prop_name=self.time_field
),
DataTablesColumn(_("Form"), prop_name='form.@name'),
]
h.extend([DataTablesColumn(field) for field in self.other_fields])
return DataTablesHeader(*h)
@property
def default_datespan(self):
return datespan_from_beginning(self.domain, self.datespan_default_days, self.timezone)
def _es_extra_filters(self):
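        # Yields elasticsearch filter clauses (form/xmlns selection, user
        # selection, and any custom form-data properties); es_results combines
        # them with the base form filters under a single 'and' filter.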
def form_filter(form):
app_id = form.get('app_id', None)
if app_id and app_id != MISSING_APP_ID:
return {'and': [{'term': {'xmlns.exact': form['xmlns']}},
{'term': {'app_id': app_id}}]}
return {'term': {'xmlns.exact': form['xmlns']}}
truthy_only = functools.partial(filter, None)
form_values = self.all_relevant_forms.values()
if form_values:
yield {'or': [form_filter(f) for f in form_values]}
users_data = ExpandedMobileWorkerFilter.pull_users_and_groups(
self.domain, self.request, True, True)
all_mobile_workers_selected = 't__0' in self.request.GET.getlist('emw')
if not all_mobile_workers_selected or users_data.admin_and_demo_users:
yield {
'terms': {
'form.meta.userID': truthy_only(
u.user_id for u in users_data.combined_users
)
}
}
else:
negated_ids = util.get_all_users_by_domain(
self.domain,
user_filter=HQUserType.all_but_users(),
simplified=True,
)
yield {
'not': {
'terms': {
'form.meta.userID': truthy_only(
user.user_id for user in negated_ids
)
}
}
}
props = truthy_only(self.request.GET.get('form_data', '').split(','))
for prop in props:
yield {
'term': {'__props_for_querying': prop.lower()}
}
@property
@memoized
def es_results(self):
return es_query(
params={'domain.exact': self.domain},
q={
'query': {
'range': {
self.time_field: {
'from': self.datespan.startdate_param,
'to': self.datespan.enddate_param,
'include_upper': False,
}
}
},
'filter': {
'and': (ADD_TO_ES_FILTER['forms'] +
list(self._es_extra_filters()))
},
'sort': self.get_sorting_block(),
},
es_url=XFORM_INDEX + '/xform/_search',
start_at=self.pagination.start,
size=self.pagination.count,
)
def get_sorting_block(self):
sorting_block = super(SubmitHistory, self).get_sorting_block()
if sorting_block:
return sorting_block
else:
return [{self.time_field: {'order': 'desc'}}]
@property
def time_field(self):
return 'received_on' if self.by_submission_time else 'form.meta.timeEnd'
@property
def total_records(self):
return int(self.es_results['hits']['total'])
@property
def rows(self):
def form_data_link(instance_id):
return "<a class='ajax_dialog' href='%(url)s'>%(text)s</a>" % {
"url": reverse('render_form_data', args=[self.domain, instance_id]),
"text": _("View Form")
}
submissions = [res['_source'] for res in self.es_results.get('hits', {}).get('hits', [])]
for form in submissions:
uid = form["form"]["meta"]["userID"]
username = form["form"]["meta"].get("username")
try:
if username not in ['demo_user', 'admin']:
full_name = get_cached_property(CouchUser, uid, 'full_name', expiry=7*24*60*60)
name = '"%s"' % full_name if full_name else ""
else:
name = ""
except (ResourceNotFound, IncompatibleDocument):
name = "<b>[unregistered]</b>"
init_cells = [
form_data_link(form["_id"]),
(username or _('No data for username')) + (" %s" % name if name else ""),
DateTimeProperty().wrap(safe_index(form, self.time_field.split('.'))).strftime("%Y-%m-%d %H:%M:%S"),
xmlns_to_name(self.domain, form.get("xmlns"), app_id=form.get("app_id")),
]
def cell(field):
return form["form"].get(field)
init_cells.extend([cell(field) for field in self.other_fields])
yield init_cells
class GenericPieChartReportTemplate(ProjectReport, GenericTabularReport):
"""this is a report TEMPLATE to conduct analytics on an arbitrary case property
or form question. all values for the property/question from cases/forms matching
the filters are tabulated and displayed as a pie chart. values are compared via
string comparison only.
this report class is a TEMPLATE -- it must be subclassed and configured with the
actual case/form info to be useful. coming up with a better way to configure this
is a work in progress. for now this report is effectively de-activated, with no
way to reach it from production HQ.
see the reports app readme for a configuration example
"""
name = ugettext_noop('Generic Pie Chart (sandbox)')
slug = 'generic_pie'
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'corehq.apps.reports.filters.fixtures.AsyncLocationFilter']
# define in subclass
#mode = 'case' or 'form'
#submission_type = <case type> or <xform xmlns>
#field = <case property> or <path to form instance node>
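    # A hypothetical subclass might therefore look roughly like this (the
    # values below are illustrative assumptions, not real project config):
    #
    #     class OutcomePieChartReport(GenericPieChartReportTemplate):
    #         mode = 'case'
    #         submission_type = 'pregnancy'
    #         field = 'outcome'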
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return True
@property
def headers(self):
return DataTablesHeader(*(DataTablesColumn(text) for text in [
_('Response'), _('# Responses'), _('% of responses'),
]))
def _es_query(self):
es_config_case = {
'index': 'report_cases',
'type': 'report_case',
'field_to_path': lambda f: '%s.#value' % f,
'fields': {
'date': 'server_modified_on',
'submission_type': 'type',
}
}
es_config_form = {
'index': 'report_xforms',
'type': 'report_xform',
'field_to_path': lambda f: 'form.%s.#value' % f,
'fields': {
'date': 'received_on',
'submission_type': 'xmlns',
}
}
es_config = {
'case': es_config_case,
'form': es_config_form,
}[self.mode]
MAX_DISTINCT_VALUES = 50
es = elastic.get_es()
filter_criteria = [
{"term": {"domain": self.domain}},
{"term": {es_config['fields']['submission_type']: self.submission_type}},
{"range": {es_config['fields']['date']: {
"from": self.start_date,
"to": self.end_date,
}}},
]
if self.location_id:
filter_criteria.append({"term": {"location_": self.location_id}})
result = es.get('%s/_search' % es_config['index'], data={
"query": {"match_all": {}},
"size": 0, # no hits; only aggregated data
"facets": {
"blah": {
"terms": {
"field": "%s.%s" % (es_config['type'], es_config['field_to_path'](self.field)),
"size": MAX_DISTINCT_VALUES
},
"facet_filter": {
"and": filter_criteria
}
}
},
})
result = result['facets']['blah']
raw = dict((k['term'], k['count']) for k in result['terms'])
if result['other']:
raw[_('Other')] = result['other']
return raw
def _data(self):
raw = self._es_query()
return sorted(raw.iteritems())
@property
def rows(self):
data = self._data()
total = sum(v for k, v in data)
def row(k, v):
pct = v / float(total) if total > 0 else None
fmtpct = ('%.1f%%' % (100. * pct)) if pct is not None else u'\u2014'
return (k, v, fmtpct)
return [row(*r) for r in data]
def _chart_data(self):
return {
'key': _('Tallied by Response'),
'values': [{'label': k, 'value': v} for k, v in self._data()],
}
@property
def location_id(self):
return self.request.GET.get('location_id')
@property
def start_date(self):
return self.request.GET.get('startdate')
@property
def end_date(self):
return self.request.GET.get('enddate')
@property
def charts(self):
if 'location_id' in self.request.GET: # hack: only get data if we're loading an actual report
return [PieChart(None, **self._chart_data())]
return []
| 38.557185
| 133
| 0.581685
|
425c927f11250887d0b8d2c283d0737c79a87888
| 8,219
|
py
|
Python
|
experiments/plotting.py
|
xin-alice/cs159_safe_learning
|
44761774c38cec36f156b2978b5eb5ec1ca712e9
|
[
"MIT"
] | 169
|
2017-11-08T17:05:12.000Z
|
2022-03-01T21:30:41.000Z
|
examples/plotting.py
|
hubbs5/safe_learning
|
98ecd359b41fd96aef542340b5dcfcfc616a3698
|
[
"MIT"
] | 8
|
2018-08-23T14:55:48.000Z
|
2020-12-09T15:51:41.000Z
|
examples/plotting.py
|
hubbs5/safe_learning
|
98ecd359b41fd96aef542340b5dcfcfc616a3698
|
[
"MIT"
] | 66
|
2017-11-08T17:07:06.000Z
|
2022-03-17T20:05:44.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from IPython.display import display, HTML
from mpl_toolkits.mplot3d import Axes3D
from safe_learning.utilities import (with_scope, get_storage, set_storage,
get_feed_dict)
__all__ = ['plot_lyapunov_1d', 'plot_triangulation', 'show_graph']
# An object to store graph elements
_STORAGE = {}
@with_scope('plot_lyapunov_1d')
def plot_lyapunov_1d(lyapunov, true_dynamics, legend=False):
"""Plot the lyapunov function of a 1D system
Parameters
----------
lyapunov : instance of `Lyapunov`
true_dynamics : callable
legend : bool, optional
"""
sess = tf.get_default_session()
feed_dict = get_feed_dict(sess.graph)
# Get the storage (specific to the lyapunov function)
storage = get_storage(_STORAGE, index=lyapunov)
if storage is None:
# Lyapunov function
states = lyapunov.discretization.all_points
actions = lyapunov.policy(states)
next_states = lyapunov.dynamics(states, actions)
v_bounds = lyapunov.v_decrease_confidence(states, next_states)
true_next_states = true_dynamics(states, actions, noise=False)
delta_v_true, _ = lyapunov.v_decrease_confidence(states,
true_next_states)
storage = [('states', states),
('next_states', next_states),
('v_bounds', v_bounds),
('true_next_states', true_next_states),
('delta_v_true', delta_v_true)]
set_storage(_STORAGE, storage, index=lyapunov)
else:
(states, next_states, v_bounds,
true_next_states, delta_v_true) = storage.values()
extent = [np.min(states), np.max(states)]
safe_set = lyapunov.safe_set
threshold = lyapunov.threshold(states)
# Create figure axes
fig, axes = plt.subplots(2, 1, figsize=(10, 12))
# Format axes
axes[0].set_title('GP model of the dynamics')
axes[0].set_xlim(extent)
axes[1].set_xlim(extent)
axes[1].set_xlabel('$x$')
axes[1].set_ylabel(r'Upper bound of $\Delta V(x)$')
axes[1].set_title(r'Determining stability with $\Delta V(x)$')
# Plot dynamics
axes[0].plot(states,
true_next_states.eval(feed_dict=feed_dict),
color='black', alpha=0.8)
mean, bound = sess.run(next_states, feed_dict=feed_dict)
axes[0].fill_between(states[:, 0],
mean[:, 0] - bound[:, 0],
mean[:, 0] + bound[:, 0],
color=(0.8, 0.8, 1))
if hasattr(lyapunov.dynamics, 'X'):
axes[0].plot(lyapunov.dynamics.X[:, 0],
lyapunov.dynamics.Y[:, 0],
'x', ms=8, mew=2)
v_dot_mean, v_dot_bound = sess.run(v_bounds, feed_dict=feed_dict)
# # Plot V_dot
print(v_dot_mean.shape)
print(v_dot_bound.shape)
plt.fill_between(states[:, 0],
v_dot_mean[:, 0] - v_dot_bound[:, 0],
v_dot_mean[:, 0] + v_dot_bound[:, 0],
color=(0.8, 0.8, 1))
threshold_plot = plt.plot(extent, [threshold, threshold],
'k-.', label=r'Safety threshold ($L \tau$ )')
# # Plot the true V_dot or Delta_V
delta_v = delta_v_true.eval(feed_dict=feed_dict)
v_dot_true_plot = axes[1].plot(states[:, 0],
delta_v,
color='k',
label=r'True $\Delta V(x)$')
# # Create twin axis
ax2 = axes[1].twinx()
ax2.set_ylabel(r'$V(x)$')
ax2.set_xlim(extent)
# # Plot Lyapunov function
V_unsafe = np.ma.masked_where(safe_set, lyapunov.values)
V_safe = np.ma.masked_where(~safe_set, lyapunov.values)
unsafe_plot = ax2.plot(states, V_unsafe,
color='b',
label=r'$V(x)$ (unsafe, $\Delta V(x) > L \tau$)')
safe_plot = ax2.plot(states, V_safe,
color='r',
label=r'$V(x)$ (safe, $\Delta V(x) \leq L \tau$)')
if legend:
lns = unsafe_plot + safe_plot + threshold_plot + v_dot_true_plot
labels = [x.get_label() for x in lns]
plt.legend(lns, labels, loc=4, fancybox=True, framealpha=0.75)
# Create helper lines
if np.any(safe_set):
max_id = np.argmax(lyapunov.values[safe_set])
x_safe = states[safe_set][max_id]
y_range = axes[1].get_ylim()
axes[1].plot([x_safe, x_safe], y_range, 'k-.')
axes[1].plot([-x_safe, -x_safe], y_range, 'k-.')
# Show plot
plt.show()
def plot_triangulation(triangulation, axis=None, three_dimensional=False,
xlabel=None, ylabel=None, zlabel=None, **kwargs):
"""Plot a triangulation.
Parameters
----------
    triangulation: the triangulation-based function approximator to plot
axis: optional
three_dimensional: bool, optional
Whether to plot 3D
Returns
-------
axis:
The axis on which we plotted.
"""
values = triangulation.parameters[0].eval()
if three_dimensional:
if axis is None:
axis = Axes3D(plt.figure())
# Get the simplices and plot
delaunay = triangulation.tri
state_space = triangulation.discretization.all_points
simplices = delaunay.simplices(np.arange(delaunay.nsimplex))
c = axis.plot_trisurf(state_space[:, 0], state_space[:, 1], values[:, 0],
triangles=simplices.copy(),
cmap='viridis', lw=0.1, **kwargs)
cbar = plt.colorbar(c)
else:
if axis is None:
axis = plt.figure().gca()
domain = triangulation.discretization.limits.tolist()
num_points = triangulation.discretization.num_points
# Some magic reshaping to go to physical coordinates
vals = values.reshape(num_points[0], num_points[1]).T[::-1]
axis = plt.imshow(vals, origin='upper',
extent=domain[0] + domain[1],
aspect='auto', cmap='viridis', interpolation='bilinear', **kwargs)
cbar = plt.colorbar(axis)
axis = axis.axes
if xlabel is not None:
axis.set_xlabel(xlabel)
if ylabel is not None:
axis.set_ylabel(ylabel)
if zlabel is not None:
cbar.set_label(zlabel)
return axis
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def.
Taken from
http://stackoverflow.com/questions/38189119/simple-way-to-visualize-a-
tensorflow-graph-in-jupyter
"""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = str.encode("<stripped %d bytes>" % size)
return strip_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph.
Taken from
http://stackoverflow.com/questions/38189119/simple-way-to-visualize-a-
tensorflow-graph-in-jupyter
"""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script src="//cdnjs.cloudflare.com/ajax/libs/polymer/0.3.3/platform.js"></script>
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)),
id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:100%;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe))
| 34.245833
| 106
| 0.580971
|
47f4b99e97196bf4b7c67b16b8ceb677b35fa63c
| 28,178
|
py
|
Python
|
external/mysql/mysql-connector-python-8.0.11/lib/mysql/connector/protocol.py
|
Army-Ant/ArmyAntServer
|
3e292b9d38dd53807b03748fc767970dc8adbfb0
|
[
"BSD-3-Clause"
] | 1
|
2018-05-30T01:38:23.000Z
|
2018-05-30T01:38:23.000Z
|
Others/Source/17/17.1/dbapp/mysql/connector/protocol.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
Others/Source/17/17.1/dbapp/mysql/connector/protocol.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implements the MySQL Client/Server protocol
"""
import struct
import datetime
from decimal import Decimal
from .constants import (
FieldFlag, ServerCmd, FieldType, ClientFlag)
from . import errors, utils
from .authentication import get_auth_plugin
from .catch23 import PY2, struct_unpack
from .errors import get_exception
class MySQLProtocol(object):
"""Implements MySQL client/server protocol
Create and parses MySQL packets.
"""
def _connect_with_db(self, client_flags, database):
"""Prepare database string for handshake response"""
if client_flags & ClientFlag.CONNECT_WITH_DB and database:
return database.encode('utf8') + b'\x00'
return b'\x00'
def _auth_response(self, client_flags, username, password, database,
auth_plugin, auth_data, ssl_enabled):
"""Prepare the authentication response"""
if not password:
return b'\x00'
try:
auth = get_auth_plugin(auth_plugin)(
auth_data,
username=username, password=password, database=database,
ssl_enabled=ssl_enabled)
plugin_auth_response = auth.auth_response()
except (TypeError, errors.InterfaceError) as exc:
raise errors.InterfaceError(
"Failed authentication: {0}".format(str(exc)))
if client_flags & ClientFlag.SECURE_CONNECTION:
resplen = len(plugin_auth_response)
auth_response = struct.pack('<B', resplen) + plugin_auth_response
else:
auth_response = plugin_auth_response + b'\x00'
return auth_response
def make_auth(self, handshake, username=None, password=None, database=None,
charset=33, client_flags=0,
max_allowed_packet=1073741824, ssl_enabled=False,
auth_plugin=None):
"""Make a MySQL Authentication packet"""
try:
auth_data = handshake['auth_data']
auth_plugin = auth_plugin or handshake['auth_plugin']
except (TypeError, KeyError) as exc:
raise errors.ProgrammingError(
"Handshake misses authentication info ({0})".format(exc))
if not username:
username = b''
try:
username_bytes = username.encode('utf8') # pylint: disable=E1103
except AttributeError:
# Username is already bytes
username_bytes = username
packet = struct.pack('<IIB{filler}{usrlen}sx'.format(
filler='x' * 23, usrlen=len(username_bytes)),
client_flags, max_allowed_packet, charset,
username_bytes)
packet += self._auth_response(client_flags, username, password,
database,
auth_plugin,
auth_data, ssl_enabled)
packet += self._connect_with_db(client_flags, database)
if client_flags & ClientFlag.PLUGIN_AUTH:
packet += auth_plugin.encode('utf8') + b'\x00'
return packet
def make_auth_ssl(self, charset=33, client_flags=0,
max_allowed_packet=1073741824):
"""Make a SSL authentication packet"""
return utils.int4store(client_flags) + \
utils.int4store(max_allowed_packet) + \
utils.int1store(charset) + \
b'\x00' * 23
def make_command(self, command, argument=None):
"""Make a MySQL packet containing a command"""
data = utils.int1store(command)
if argument is not None:
data += argument
return data
def make_stmt_fetch(self, statement_id, rows=1):
"""Make a MySQL packet with Fetch Statement command"""
return utils.int4store(statement_id) + utils.int4store(rows)
def make_change_user(self, handshake, username=None, password=None,
database=None, charset=33, client_flags=0,
ssl_enabled=False, auth_plugin=None):
"""Make a MySQL packet with the Change User command"""
try:
auth_data = handshake['auth_data']
auth_plugin = auth_plugin or handshake['auth_plugin']
except (TypeError, KeyError) as exc:
raise errors.ProgrammingError(
"Handshake misses authentication info ({0})".format(exc))
if not username:
username = b''
try:
username_bytes = username.encode('utf8') # pylint: disable=E1103
except AttributeError:
# Username is already bytes
username_bytes = username
packet = struct.pack('<B{usrlen}sx'.format(usrlen=len(username_bytes)),
ServerCmd.CHANGE_USER, username_bytes)
packet += self._auth_response(client_flags, username, password,
database,
auth_plugin,
auth_data, ssl_enabled)
packet += self._connect_with_db(client_flags, database)
packet += struct.pack('<H', charset)
if client_flags & ClientFlag.PLUGIN_AUTH:
packet += auth_plugin.encode('utf8') + b'\x00'
return packet
def parse_handshake(self, packet):
"""Parse a MySQL Handshake-packet"""
res = {}
res['protocol'] = struct_unpack('<xxxxB', packet[0:5])[0]
(packet, res['server_version_original']) = utils.read_string(
packet[5:], end=b'\x00')
(res['server_threadid'],
auth_data1,
capabilities1,
res['charset'],
res['server_status'],
capabilities2,
auth_data_length
) = struct_unpack('<I8sx2sBH2sBxxxxxxxxxx', packet[0:31])
res['server_version_original'] = res['server_version_original'].decode()
packet = packet[31:]
capabilities = utils.intread(capabilities1 + capabilities2)
auth_data2 = b''
if capabilities & ClientFlag.SECURE_CONNECTION:
size = min(13, auth_data_length - 8) if auth_data_length else 13
auth_data2 = packet[0:size]
packet = packet[size:]
if auth_data2[-1] == 0:
auth_data2 = auth_data2[:-1]
if capabilities & ClientFlag.PLUGIN_AUTH:
if (b'\x00' not in packet
and res['server_version_original'].startswith("5.5.8")):
# MySQL server 5.5.8 has a bug where end byte is not send
(packet, res['auth_plugin']) = (b'', packet)
else:
(packet, res['auth_plugin']) = utils.read_string(
packet, end=b'\x00')
res['auth_plugin'] = res['auth_plugin'].decode('utf-8')
else:
res['auth_plugin'] = 'mysql_native_password'
res['auth_data'] = auth_data1 + auth_data2
res['capabilities'] = capabilities
return res
def parse_ok(self, packet):
"""Parse a MySQL OK-packet"""
if not packet[4] == 0:
raise errors.InterfaceError("Failed parsing OK packet (invalid).")
ok_packet = {}
try:
ok_packet['field_count'] = struct_unpack('<xxxxB', packet[0:5])[0]
(packet, ok_packet['affected_rows']) = utils.read_lc_int(packet[5:])
(packet, ok_packet['insert_id']) = utils.read_lc_int(packet)
(ok_packet['status_flag'],
ok_packet['warning_count']) = struct_unpack('<HH', packet[0:4])
packet = packet[4:]
if packet:
(packet, ok_packet['info_msg']) = utils.read_lc_string(packet)
ok_packet['info_msg'] = ok_packet['info_msg'].decode('utf-8')
except ValueError:
raise errors.InterfaceError("Failed parsing OK packet.")
return ok_packet
def parse_column_count(self, packet):
"""Parse a MySQL packet with the number of columns in result set"""
try:
count = utils.read_lc_int(packet[4:])[1]
return count
except (struct.error, ValueError):
raise errors.InterfaceError("Failed parsing column count")
def parse_column(self, packet, charset='utf-8'):
"""Parse a MySQL column-packet"""
(packet, _) = utils.read_lc_string(packet[4:]) # catalog
(packet, _) = utils.read_lc_string(packet) # db
(packet, _) = utils.read_lc_string(packet) # table
(packet, _) = utils.read_lc_string(packet) # org_table
(packet, name) = utils.read_lc_string(packet) # name
(packet, _) = utils.read_lc_string(packet) # org_name
try:
(_, _, field_type,
flags, _) = struct_unpack('<xHIBHBxx', packet)
except struct.error:
raise errors.InterfaceError("Failed parsing column information")
return (
name.decode(charset),
field_type,
None, # display_size
None, # internal_size
None, # precision
None, # scale
~flags & FieldFlag.NOT_NULL, # null_ok
flags, # MySQL specific
)
def parse_eof(self, packet):
"""Parse a MySQL EOF-packet"""
if packet[4] == 0:
# EOF packet deprecation
return self.parse_ok(packet)
err_msg = "Failed parsing EOF packet."
res = {}
try:
unpacked = struct_unpack('<xxxBBHH', packet)
except struct.error:
raise errors.InterfaceError(err_msg)
if not (unpacked[1] == 254 and len(packet) <= 9):
raise errors.InterfaceError(err_msg)
res['warning_count'] = unpacked[2]
res['status_flag'] = unpacked[3]
return res
def parse_statistics(self, packet, with_header=True):
"""Parse the statistics packet"""
errmsg = "Failed getting COM_STATISTICS information"
res = {}
# Information is separated by 2 spaces
if with_header:
pairs = packet[4:].split(b'\x20\x20')
else:
pairs = packet.split(b'\x20\x20')
for pair in pairs:
try:
(lbl, val) = [v.strip() for v in pair.split(b':', 2)]
except:
raise errors.InterfaceError(errmsg)
# It's either an integer or a decimal
lbl = lbl.decode('utf-8')
try:
res[lbl] = int(val)
except:
try:
res[lbl] = Decimal(val.decode('utf-8'))
except:
raise errors.InterfaceError(
"{0} ({1}:{2}).".format(errmsg, lbl, val))
return res
def read_text_result(self, sock, version, count=1):
"""Read MySQL text result
Reads all or given number of rows from the socket.
Returns a tuple with 2 elements: a list with all rows and
the EOF packet.
"""
rows = []
eof = None
rowdata = None
i = 0
while True:
if eof or i == count:
break
packet = sock.recv()
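            # A 0xffffff length prefix means the row payload spans multiple
            # packets (it hit the 2**24 - 1 byte maximum), so keep reading
            # and concatenating until a shorter packet arrives.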
if packet.startswith(b'\xff\xff\xff'):
datas = [packet[4:]]
packet = sock.recv()
while packet.startswith(b'\xff\xff\xff'):
datas.append(packet[4:])
packet = sock.recv()
datas.append(packet[4:])
rowdata = utils.read_lc_string_list(bytearray(b'').join(datas))
elif packet[4] == 254 and packet[0] < 7:
eof = self.parse_eof(packet)
rowdata = None
else:
eof = None
rowdata = utils.read_lc_string_list(packet[4:])
if eof is None and rowdata is not None:
rows.append(rowdata)
elif eof is None and rowdata is None:
raise get_exception(packet)
i += 1
return rows, eof
def _parse_binary_integer(self, packet, field):
"""Parse an integer from a binary packet"""
if field[1] == FieldType.TINY:
format_ = 'b'
length = 1
elif field[1] == FieldType.SHORT:
format_ = 'h'
length = 2
elif field[1] in (FieldType.INT24, FieldType.LONG):
format_ = 'i'
length = 4
elif field[1] == FieldType.LONGLONG:
format_ = 'q'
length = 8
if field[7] & FieldFlag.UNSIGNED:
format_ = format_.upper()
return (packet[length:], struct_unpack(format_, packet[0:length])[0])
def _parse_binary_float(self, packet, field):
"""Parse a float/double from a binary packet"""
if field[1] == FieldType.DOUBLE:
length = 8
format_ = 'd'
else:
length = 4
format_ = 'f'
return (packet[length:], struct_unpack(format_, packet[0:length])[0])
def _parse_binary_timestamp(self, packet, field):
"""Parse a timestamp from a binary packet"""
length = packet[0]
value = None
if length == 4:
value = datetime.date(
year=struct_unpack('H', packet[1:3])[0],
month=packet[3],
day=packet[4])
elif length >= 7:
mcs = 0
if length == 11:
mcs = struct_unpack('I', packet[8:length + 1])[0]
value = datetime.datetime(
year=struct_unpack('H', packet[1:3])[0],
month=packet[3],
day=packet[4],
hour=packet[5],
minute=packet[6],
second=packet[7],
microsecond=mcs)
return (packet[length + 1:], value)
def _parse_binary_time(self, packet, field):
"""Parse a time value from a binary packet"""
length = packet[0]
data = packet[1:length + 1]
mcs = 0
if length > 8:
mcs = struct_unpack('I', data[8:])[0]
days = struct_unpack('I', data[1:5])[0]
if data[0] == 1:
days *= -1
tmp = datetime.timedelta(days=days,
seconds=data[7],
microseconds=mcs,
minutes=data[6],
hours=data[5])
return (packet[length + 1:], tmp)
def _parse_binary_values(self, fields, packet, charset='utf-8'):
"""Parse values from a binary result packet"""
null_bitmap_length = (len(fields) + 7 + 2) // 8
null_bitmap = [int(i) for i in packet[0:null_bitmap_length]]
packet = packet[null_bitmap_length:]
values = []
for pos, field in enumerate(fields):
if null_bitmap[int((pos+2)/8)] & (1 << (pos + 2) % 8):
values.append(None)
continue
elif field[1] in (FieldType.TINY, FieldType.SHORT,
FieldType.INT24,
FieldType.LONG, FieldType.LONGLONG):
(packet, value) = self._parse_binary_integer(packet, field)
values.append(value)
elif field[1] in (FieldType.DOUBLE, FieldType.FLOAT):
(packet, value) = self._parse_binary_float(packet, field)
values.append(value)
elif field[1] in (FieldType.DATETIME, FieldType.DATE,
FieldType.TIMESTAMP):
(packet, value) = self._parse_binary_timestamp(packet, field)
values.append(value)
elif field[1] == FieldType.TIME:
(packet, value) = self._parse_binary_time(packet, field)
values.append(value)
else:
(packet, value) = utils.read_lc_string(packet)
values.append(value.decode(charset))
return tuple(values)
def read_binary_result(self, sock, columns, count=1, charset='utf-8'):
"""Read MySQL binary protocol result
Reads all or given number of binary resultset rows from the socket.
"""
rows = []
eof = None
values = None
i = 0
while True:
if eof is not None:
break
if i == count:
break
packet = sock.recv()
if packet[4] == 254:
eof = self.parse_eof(packet)
values = None
elif packet[4] == 0:
eof = None
values = self._parse_binary_values(columns, packet[5:], charset)
if eof is None and values is not None:
rows.append(values)
elif eof is None and values is None:
raise get_exception(packet)
i += 1
return (rows, eof)
def parse_binary_prepare_ok(self, packet):
"""Parse a MySQL Binary Protocol OK packet"""
if not packet[4] == 0:
raise errors.InterfaceError("Failed parsing Binary OK packet")
ok_pkt = {}
try:
(packet, ok_pkt['statement_id']) = utils.read_int(packet[5:], 4)
(packet, ok_pkt['num_columns']) = utils.read_int(packet, 2)
(packet, ok_pkt['num_params']) = utils.read_int(packet, 2)
packet = packet[1:] # Filler 1 * \x00
(packet, ok_pkt['warning_count']) = utils.read_int(packet, 2)
except ValueError:
raise errors.InterfaceError("Failed parsing Binary OK packet")
return ok_pkt
def _prepare_binary_integer(self, value):
"""Prepare an integer for the MySQL binary protocol"""
field_type = None
flags = 0
if value < 0:
if value >= -128:
format_ = 'b'
field_type = FieldType.TINY
elif value >= -32768:
format_ = 'h'
field_type = FieldType.SHORT
elif value >= -2147483648:
format_ = 'i'
field_type = FieldType.LONG
else:
format_ = 'q'
field_type = FieldType.LONGLONG
else:
flags = 128
if value <= 255:
format_ = 'B'
field_type = FieldType.TINY
elif value <= 65535:
format_ = 'H'
field_type = FieldType.SHORT
elif value <= 4294967295:
format_ = 'I'
field_type = FieldType.LONG
else:
field_type = FieldType.LONGLONG
format_ = 'Q'
return (struct.pack(format_, value), field_type, flags)
def _prepare_binary_timestamp(self, value):
"""Prepare a timestamp object for the MySQL binary protocol
This method prepares a timestamp of type datetime.datetime or
datetime.date for sending over the MySQL binary protocol.
A tuple is returned with the prepared value and field type
as elements.
Raises ValueError when the argument value is of invalid type.
Returns a tuple.
"""
if isinstance(value, datetime.datetime):
field_type = FieldType.DATETIME
elif isinstance(value, datetime.date):
field_type = FieldType.DATE
else:
raise ValueError(
"Argument must a datetime.datetime or datetime.date")
packed = (utils.int2store(value.year) +
utils.int1store(value.month) +
utils.int1store(value.day))
if isinstance(value, datetime.datetime):
packed = (packed + utils.int1store(value.hour) +
utils.int1store(value.minute) +
utils.int1store(value.second))
if value.microsecond > 0:
packed += utils.int4store(value.microsecond)
packed = utils.int1store(len(packed)) + packed
return (packed, field_type)
def _prepare_binary_time(self, value):
"""Prepare a time object for the MySQL binary protocol
This method prepares a time object of type datetime.timedelta or
datetime.time for sending over the MySQL binary protocol.
A tuple is returned with the prepared value and field type
as elements.
Raises ValueError when the argument value is of invalid type.
Returns a tuple.
"""
if not isinstance(value, (datetime.timedelta, datetime.time)):
raise ValueError(
"Argument must a datetime.timedelta or datetime.time")
field_type = FieldType.TIME
negative = 0
mcs = None
packed = b''
if isinstance(value, datetime.timedelta):
if value.days < 0:
negative = 1
(hours, remainder) = divmod(value.seconds, 3600)
(mins, secs) = divmod(remainder, 60)
packed += (utils.int4store(abs(value.days)) +
utils.int1store(hours) +
utils.int1store(mins) +
utils.int1store(secs))
mcs = value.microseconds
else:
packed += (utils.int4store(0) +
utils.int1store(value.hour) +
utils.int1store(value.minute) +
utils.int1store(value.second))
mcs = value.microsecond
if mcs:
packed += utils.int4store(mcs)
packed = utils.int1store(negative) + packed
packed = utils.int1store(len(packed)) + packed
return (packed, field_type)
def _prepare_stmt_send_long_data(self, statement, param, data):
"""Prepare long data for prepared statements
Returns a string.
"""
packet = (
utils.int4store(statement) +
utils.int2store(param) +
data)
return packet
def make_stmt_execute(self, statement_id, data=(), parameters=(),
flags=0, long_data_used=None, charset='utf8'):
"""Make a MySQL packet with the Statement Execute command"""
iteration_count = 1
null_bitmap = [0] * ((len(data) + 7) // 8)
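        # One bit per parameter; a set bit marks the corresponding value NULL.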
values = []
types = []
packed = b''
if charset == 'utf8mb4':
charset = 'utf8'
if long_data_used is None:
long_data_used = {}
if parameters and data:
if len(data) != len(parameters):
raise errors.InterfaceError(
"Failed executing prepared statement: data values does not"
" match number of parameters")
for pos, _ in enumerate(parameters):
value = data[pos]
flags = 0
if value is None:
null_bitmap[(pos // 8)] |= 1 << (pos % 8)
types.append(utils.int1store(FieldType.NULL) +
utils.int1store(flags))
continue
elif pos in long_data_used:
if long_data_used[pos][0]:
# We suppose binary data
field_type = FieldType.BLOB
else:
# We suppose text data
field_type = FieldType.STRING
elif isinstance(value, int):
(packed, field_type,
flags) = self._prepare_binary_integer(value)
values.append(packed)
elif isinstance(value, str):
if PY2:
values.append(utils.lc_int(len(value)) +
value)
else:
value = value.encode(charset)
values.append(
utils.lc_int(len(value)) + value)
field_type = FieldType.VARCHAR
elif isinstance(value, bytes):
values.append(utils.lc_int(len(value)) + value)
field_type = FieldType.BLOB
elif PY2 and \
isinstance(value, unicode): # pylint: disable=E0602
value = value.encode(charset)
values.append(utils.lc_int(len(value)) + value)
field_type = FieldType.VARCHAR
elif isinstance(value, Decimal):
values.append(
utils.lc_int(len(str(value).encode(
charset))) + str(value).encode(charset))
field_type = FieldType.DECIMAL
elif isinstance(value, float):
values.append(struct.pack('d', value))
field_type = FieldType.DOUBLE
elif isinstance(value, (datetime.datetime, datetime.date)):
(packed, field_type) = self._prepare_binary_timestamp(
value)
values.append(packed)
elif isinstance(value, (datetime.timedelta, datetime.time)):
(packed, field_type) = self._prepare_binary_time(value)
values.append(packed)
else:
raise errors.ProgrammingError(
"MySQL binary protocol can not handle "
"'{classname}' objects".format(
classname=value.__class__.__name__))
types.append(utils.int1store(field_type) +
utils.int1store(flags))
packet = (
utils.int4store(statement_id) +
utils.int1store(flags) +
utils.int4store(iteration_count) +
b''.join([struct.pack('B', bit) for bit in null_bitmap]) +
utils.int1store(1)
)
for a_type in types:
packet += a_type
for a_value in values:
packet += a_value
return packet
def parse_auth_switch_request(self, packet):
"""Parse a MySQL AuthSwitchRequest-packet"""
if not packet[4] == 254:
raise errors.InterfaceError(
"Failed parsing AuthSwitchRequest packet")
(packet, plugin_name) = utils.read_string(packet[5:], end=b'\x00')
if packet and packet[-1] == 0:
packet = packet[:-1]
return plugin_name.decode('utf8'), packet
def parse_auth_more_data(self, packet):
"""Parse a MySQL AuthMoreData-packet"""
if not packet[4] == 1:
raise errors.InterfaceError(
"Failed parsing AuthMoreData packet")
return packet[5:]
| 38.233379
| 80
| 0.548158
|
f7c9b51e8ae1ff253ab63706d6e7258fa8566f78
| 6,141
|
py
|
Python
|
src/experiments/compare_filters/experiment_compare_filters.py
|
gummz/cell
|
a741ca4900a11f1080b7572ac969f765e5ac2ffd
|
[
"MIT"
] | null | null | null |
src/experiments/compare_filters/experiment_compare_filters.py
|
gummz/cell
|
a741ca4900a11f1080b7572ac969f765e5ac2ffd
|
[
"MIT"
] | null | null | null |
src/experiments/compare_filters/experiment_compare_filters.py
|
gummz/cell
|
a741ca4900a11f1080b7572ac969f765e5ac2ffd
|
[
"MIT"
] | null | null | null |
import os
from os import listdir, makedirs
from os.path import join
import pickle
import sys
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from time import time
from skimage import filters # threshold_yen, frangi
from skimage.exposure import rescale_intensity
import src.data.constants as c
import src.data.utils.utils as utils
mode = 'train'
img_idx = 1500
tic = time()
utils.setcwd(__file__)
DIR = c.RAW_DATA_DIR
ext = c.IMG_EXT
files = c.RAW_FILES
KERNEL = c.MEDIAN_FILTER_KERNEL
imgs_path = join('..', c.DATA_DIR, mode, c.IMG_DIR)
filename = os.path.basename(__file__)
filename = os.path.splitext(filename)[0]
images = sorted([image for image in listdir(imgs_path) if '.npy' in image])
# Get full image paths from filename list `images`
image_paths = sorted([join(imgs_path, image) for image in images])
path = image_paths[img_idx]
img_name = images[img_idx].split('.')[0]
save = join(c.FIG_DIR, mode, img_name)
# Create image-specific directory
utils.make_dir(save)
img = np.int16(np.load(path))
img = cv2.normalize(img, None, alpha=0, beta=255,
dtype=cv2.CV_8UC1, norm_type=cv2.NORM_MINMAX)
# hist = cv2.calcHist([img], [0], None, [256], [0, 256])
cv2.imwrite(join(save, f'img_cv.{ext}'), img)
plt.imsave(join(save, f'img_plt.{ext}'), img)
# Operation: mean blur
operation = 'meanblur'
utils.make_dir(join(save, operation))
for i in range(1, 21, 2):
img_blur = cv2.blur(img, (i, i))
# img_blur = np.array(img_blur)
# img_blur = np.where(img_blur > 5, img_blur, 0)
name = f'{operation}_{i}'
utils.imsave(join(save, operation, name), img_blur)
# Operation
# Median Blur
operation = 'medianblur'
utils.make_dir(join(save, operation))
for i in range(1, 21, 2):
name = f'{operation}_{i}'
if os.path.exists(join(save, operation, name)):
break
img_blur = cv2.medianBlur(img, i)
# img_blur = np.array(img_blur)
# img_blur = np.where(img_blur > 5, img_blur, 0)
utils.imsave(join(save, operation, name), img_blur)
# Operation
# Denoise
operation = 'denoise'
utils.make_dir(join(save, operation))
for i in range(1, 21, 2):
for j in range(1, 10, 2):
for k in range(1, 30, 4):
name = f'{operation}_{i}_{j}_{k}'
if os.path.exists(join(save, operation, name)):
break
img_denoise = cv2.fastNlMeansDenoising(img, None, i, j, k)
utils.imsave(join(save, operation, name),
img_denoise)
# Operation: Gaussian blur
operation = 'gaussianblur'
utils.make_dir(join(save, operation))
for kernel_size in [1, 5, 9, 15]:
for sigma_x in [1, 5, 9]:
for sigma_y in [1, 5, 9]:
name = f'{operation}_{kernel_size}_{sigma_x}_{sigma_y}'
if os.path.exists(join(save, operation, name)):
break
img_gauss = cv2.GaussianBlur(
img, (kernel_size, kernel_size),
sigma_x, sigmaY=sigma_y)
utils.imsave(join(save, operation, name), img_gauss)
# Operation: Bilateral filter
operation = 'bilateral'
utils.make_dir(join(save, operation))
for filter_size in [50, 150]:
for sigma_color in [50, 150]:
for sigma_space in [5, 9]:
name = f'{operation}_{filter_size}_{sigma_color}_{sigma_space}'
if os.path.exists(join(save, operation, name)):
break
img_bilateral = cv2.bilateralFilter(
img, filter_size, sigma_color, sigma_space)
utils.imsave(join(save, operation, name), img_bilateral)
operation = 'frangi'
utils.make_dir(join(save, operation))
for alpha in np.linspace(0.1, 1, 10):
for beta in np.linspace(0.1, 1, 10):
for gamma in np.linspace(1, 30, 5):
            name = f'{operation}_plt_{img_name}_{alpha:.2f}_{beta}_{gamma}'
            if os.path.exists(join(save, operation, name)):
                break
            # frangi lives in skimage.filters, imported above as `filters`
            img_frangi = filters.frangi(img, alpha=alpha, beta=beta,
                                        gamma=gamma, black_ridges=False)
            utils.imsave(join(save, operation, name), img_frangi)
operation = 'canny'
utils.make_dir(join(save, operation))
for thresh1 in [20, 50, 80, 100, 150, 200][-2:]:
for thresh2 in [20, 50, 80, 100, 150, 200][-2:]:
for aperture_size in [3, 5, 7]:
for L2_gradient in [True, False]:
                name = (f'canny_{thresh1}_{thresh2}'
                        f'_{aperture_size}_{L2_gradient}')
                if os.path.exists(join(save, operation, name)):
                    break
                img = cv2.fastNlMeansDenoising(img, None, 11, 7, 21)
                # img = cv2.normalize(img, None, alpha=0,
                #                     beta=1, dtype=cv2.CV_32FC1,
                #                     norm_type=cv2.NORM_MINMAX)
                # img *= np.where((0.05 < img) & (img < 0.3), img * 3, img)
                # img = cv2.normalize(img, None, alpha=0,
                #                     beta=255, dtype=cv2.CV_8UC1,
                #                     norm_type=cv2.NORM_MINMAX)
                img_canny = cv2.Canny(
                    img, thresh1, thresh2, None,
                    apertureSize=aperture_size, L2gradient=L2_gradient)
                utils.imsave(join(save, operation, name), img_canny, 512)
# Operation
# Simple Threshold
# operation = 'simple_threshold'
# _, thresh = cv2.threshold(img_blur, SIMPLE_THRESHOLD, 255, cv2.THRESH_BINARY)
# cv2.imwrite(f'{save}/{operation}_{img_name}.png', thresh)
# Operation
# Rescale intensity
operation = 'rescale_intensity'
utils.make_dir(join(save, operation))
yen_threshold = filters.threshold_yen(img_blur)
for thresh in range(80, 220, 20):
    # rescale_intensity comes from skimage.exposure (imported above)
    bright = rescale_intensity(
        img_blur, (0, yen_threshold), (220, 255))
    utils.imsave(join(save, operation, str(thresh)), bright)
# bright = Image.fromarray(bright)
# # Operation
# # Generate and save histogram of intensified image
# operation = 'histogram_intense'
# plt.hist(bright.ravel(), 256, [0, 256])
# plt.show()
# plt.savefig(f'{save}/{img_name}_{operation}.jpg')
elapsed = utils.time_report(tic, time())
print(f'{filename} complete after {elapsed}.')
| 33.741758
| 79
| 0.623351
|
69886bbbfb81337731eadce0420cbbe659ee3281
| 1,020
|
py
|
Python
|
programme/migrations/0025_auto_20160202_2237.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13
|
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
programme/migrations/0025_auto_20160202_2237.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23
|
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
programme/migrations/0025_auto_20160202_2237.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11
|
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-02 20:31
import re
from django.db import migrations
SLUGIFY_CHAR_MAP = {
'ä': 'a',
'å': 'a',
'ö': 'o',
'ü': 'u',
' ': '-',
'_': '-',
'.': '-',
}
SLUGIFY_FORBANNAD_RE = re.compile(r'[^a-z0-9-]', re.UNICODE)
SLUGIFY_MULTIDASH_RE = re.compile(r'-+', re.UNICODE)
def slugify(ustr):
ustr = ustr.lower()
ustr = ''.join(SLUGIFY_CHAR_MAP.get(c, c) for c in ustr)
ustr = SLUGIFY_FORBANNAD_RE.sub('', ustr)
ustr = SLUGIFY_MULTIDASH_RE.sub('-', ustr)
return ustr
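# For illustration (input is an assumed example, not project data):
# slugify(u'Ääkkösiä ja Öitä') -> 'aakkosia-ja-oita'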
def populate_slug(apps, schema_editor):
Programme = apps.get_model('programme', 'programme')
for programme in Programme.objects.all():
if not programme.slug:
programme.slug = slugify(programme.title)
programme.save()
class Migration(migrations.Migration):
dependencies = [
('programme', '0024_auto_20160202_2236'),
]
operations = [
migrations.RunPython(populate_slug, elidable=True)
]
| 20.816327
| 60
| 0.614706
|
520d7912c1a10e802341f148485206262a565f7c
| 19,157
|
py
|
Python
|
python/pyspark/ml/recommendation.py
|
ChenWeiye83/spark
|
1f1d98c6facd556b70f457184231b5af78de8d53
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2018-09-11T15:27:22.000Z
|
2019-06-24T21:46:53.000Z
|
python/pyspark/ml/recommendation.py
|
ChenWeiye83/spark
|
1f1d98c6facd556b70f457184231b5af78de8d53
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5
|
2015-07-14T14:03:07.000Z
|
2018-06-06T12:40:44.000Z
|
python/pyspark/ml/recommendation.py
|
ChenWeiye83/spark
|
1f1d98c6facd556b70f457184231b5af78de8d53
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14
|
2015-10-31T14:19:10.000Z
|
2022-01-31T05:52:41.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
__all__ = ['ALS', 'ALSModel']
@inherit_doc
class ALS(JavaEstimator, HasCheckpointInterval, HasMaxIter, HasPredictionCol, HasRegParam, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communication by only sending
one copy of each user vector to each product block on each
iteration, and only for the product blocks that need that user's
feature vector. This is achieved by pre-computing some information
about the ratings matrix to determine the "out-links" of each user
(which blocks of products it will contribute to) and "in-link"
information for each product (which of the feature vectors it
receives from each user block it will depend on). This allows us to
send only an array of feature vectors between each user block and
product block, and have the product block find the users' ratings
and update the products based on these messages.
For implicit preference data, the algorithm used is based on
`"Collaborative Filtering for Implicit Feedback Datasets",
<https://doi.org/10.1109/ICDM.2008.22>`_, adapted for the blocked
approach used here.
Essentially instead of finding the low-rank approximations to the
rating matrix `R`, this finds the approximations for a preference
matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
The ratings then act as 'confidence' values related to strength of
indicated user preferences rather than explicit ratings given to
items.
>>> df = spark.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
>>> als = ALS(rank=10, maxIter=5, seed=0)
>>> model = als.fit(df)
>>> model.rank
10
>>> model.userFactors.orderBy("id").collect()
[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
>>> test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
>>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
>>> predictions[0]
Row(user=0, item=2, prediction=-0.13807615637779236)
>>> predictions[1]
Row(user=1, item=0, prediction=2.6258413791656494)
>>> predictions[2]
Row(user=2, item=0, prediction=-1.5018409490585327)
>>> user_recs = model.recommendForAllUsers(3)
>>> user_recs.where(user_recs.user == 0)\
.select("recommendations.item", "recommendations.rating").collect()
[Row(item=[0, 1, 2], rating=[3.910..., 1.992..., -0.138...])]
>>> item_recs = model.recommendForAllItems(3)
>>> item_recs.where(item_recs.item == 2)\
.select("recommendations.user", "recommendations.rating").collect()
[Row(user=[2, 1, 0], rating=[4.901..., 3.981..., -0.138...])]
>>> user_subset = df.where(df.user == 2)
>>> user_subset_recs = model.recommendForUserSubset(user_subset, 3)
>>> user_subset_recs.select("recommendations.item", "recommendations.rating").first()
Row(item=[2, 1, 0], rating=[4.901..., 1.056..., -1.501...])
>>> item_subset = df.where(df.item == 0)
>>> item_subset_recs = model.recommendForItemSubset(item_subset, 3)
>>> item_subset_recs.select("recommendations.user", "recommendations.rating").first()
Row(user=[0, 1, 2], rating=[3.910..., 2.625..., -1.501...])
>>> als_path = temp_path + "/als"
>>> als.save(als_path)
>>> als2 = ALS.load(als_path)
>>> als.getMaxIter()
5
>>> model_path = temp_path + "/als_model"
>>> model.save(model_path)
>>> model2 = ALSModel.load(model_path)
>>> model.rank == model2.rank
True
>>> sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
True
>>> sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
True
.. versionadded:: 1.4.0
"""
rank = Param(Params._dummy(), "rank", "rank of the factorization",
typeConverter=TypeConverters.toInt)
numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks",
typeConverter=TypeConverters.toInt)
numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks",
typeConverter=TypeConverters.toInt)
implicitPrefs = Param(Params._dummy(), "implicitPrefs", "whether to use implicit preference",
typeConverter=TypeConverters.toBoolean)
alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference",
typeConverter=TypeConverters.toFloat)
userCol = Param(Params._dummy(), "userCol", "column name for user ids. Ids must be within " +
"the integer value range.", typeConverter=TypeConverters.toString)
itemCol = Param(Params._dummy(), "itemCol", "column name for item ids. Ids must be within " +
"the integer value range.", typeConverter=TypeConverters.toString)
ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings",
typeConverter=TypeConverters.toString)
nonnegative = Param(Params._dummy(), "nonnegative",
"whether to use nonnegative constraint for least squares",
typeConverter=TypeConverters.toBoolean)
intermediateStorageLevel = Param(Params._dummy(), "intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'.",
typeConverter=TypeConverters.toString)
finalStorageLevel = Param(Params._dummy(), "finalStorageLevel",
"StorageLevel for ALS model factors.",
typeConverter=TypeConverters.toString)
coldStartStrategy = Param(Params._dummy(), "coldStartStrategy", "strategy for dealing with " +
"unknown or new users/items at prediction time. This may be useful " +
"in cross-validation or production scenarios, for handling " +
"user/item ids the model has not seen in the training data. " +
"Supported values: 'nan', 'drop'.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan"):
"""
__init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
                 implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
                 ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
"""
super(ALS, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.ALS", self.uid)
self._setDefault(rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item",
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan"):
"""
setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
Sets params for ALS.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ALSModel(java_model)
@since("1.4.0")
def setRank(self, value):
"""
Sets the value of :py:attr:`rank`.
"""
return self._set(rank=value)
@since("1.4.0")
def getRank(self):
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def setNumUserBlocks(self, value):
"""
Sets the value of :py:attr:`numUserBlocks`.
"""
return self._set(numUserBlocks=value)
@since("1.4.0")
def getNumUserBlocks(self):
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def setNumItemBlocks(self, value):
"""
Sets the value of :py:attr:`numItemBlocks`.
"""
return self._set(numItemBlocks=value)
@since("1.4.0")
def getNumItemBlocks(self):
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def setNumBlocks(self, value):
"""
Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
"""
self._set(numUserBlocks=value)
return self._set(numItemBlocks=value)
@since("1.4.0")
def setImplicitPrefs(self, value):
"""
Sets the value of :py:attr:`implicitPrefs`.
"""
return self._set(implicitPrefs=value)
@since("1.4.0")
def getImplicitPrefs(self):
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def setAlpha(self, value):
"""
Sets the value of :py:attr:`alpha`.
"""
return self._set(alpha=value)
@since("1.4.0")
def getAlpha(self):
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def setUserCol(self, value):
"""
Sets the value of :py:attr:`userCol`.
"""
return self._set(userCol=value)
@since("1.4.0")
def getUserCol(self):
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def setItemCol(self, value):
"""
Sets the value of :py:attr:`itemCol`.
"""
return self._set(itemCol=value)
@since("1.4.0")
def getItemCol(self):
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("1.4.0")
def setRatingCol(self, value):
"""
Sets the value of :py:attr:`ratingCol`.
"""
return self._set(ratingCol=value)
@since("1.4.0")
def getRatingCol(self):
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def setNonnegative(self, value):
"""
Sets the value of :py:attr:`nonnegative`.
"""
return self._set(nonnegative=value)
@since("1.4.0")
def getNonnegative(self):
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
@since("2.0.0")
def setIntermediateStorageLevel(self, value):
"""
Sets the value of :py:attr:`intermediateStorageLevel`.
"""
return self._set(intermediateStorageLevel=value)
@since("2.0.0")
def getIntermediateStorageLevel(self):
"""
Gets the value of intermediateStorageLevel or its default value.
"""
return self.getOrDefault(self.intermediateStorageLevel)
@since("2.0.0")
def setFinalStorageLevel(self, value):
"""
Sets the value of :py:attr:`finalStorageLevel`.
"""
return self._set(finalStorageLevel=value)
@since("2.0.0")
def getFinalStorageLevel(self):
"""
Gets the value of finalStorageLevel or its default value.
"""
return self.getOrDefault(self.finalStorageLevel)
@since("2.2.0")
def setColdStartStrategy(self, value):
"""
Sets the value of :py:attr:`coldStartStrategy`.
"""
return self._set(coldStartStrategy=value)
@since("2.2.0")
def getColdStartStrategy(self):
"""
Gets the value of coldStartStrategy or its default value.
"""
return self.getOrDefault(self.coldStartStrategy)
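# A minimal illustrative sketch (not part of the pyspark API and not used by the
# classes in this file): the implicit-preference transform described in the ALS
# docstring above. A rating r is mapped to a binary preference, and its magnitude
# acts as a confidence weight of the common form 1 + alpha * |r| from the
# ICDM 2008 paper cited in that docstring.
def _implicit_preference_sketch(rating, alpha=1.0):
    preference = 1.0 if rating > 0 else 0.0
    confidence = 1.0 + alpha * abs(rating)
    return preference, confidence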
class ALSModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by ALS.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def rank(self):
"""rank of the matrix factorization model"""
return self._call_java("rank")
@property
@since("1.4.0")
def userFactors(self):
"""
a DataFrame that stores user factors in two columns: `id` and
`features`
"""
return self._call_java("userFactors")
@property
@since("1.4.0")
def itemFactors(self):
"""
a DataFrame that stores item factors in two columns: `id` and
`features`
"""
return self._call_java("itemFactors")
@since("2.2.0")
def recommendForAllUsers(self, numItems):
"""
Returns top `numItems` items recommended for each user, for all users.
:param numItems: max number of recommendations for each user
:return: a DataFrame of (userCol, recommendations), where recommendations are
stored as an array of (itemCol, rating) Rows.
"""
return self._call_java("recommendForAllUsers", numItems)
@since("2.2.0")
def recommendForAllItems(self, numUsers):
"""
Returns top `numUsers` users recommended for each item, for all items.
:param numUsers: max number of recommendations for each item
:return: a DataFrame of (itemCol, recommendations), where recommendations are
stored as an array of (userCol, rating) Rows.
"""
return self._call_java("recommendForAllItems", numUsers)
@since("2.3.0")
def recommendForUserSubset(self, dataset, numItems):
"""
Returns top `numItems` items recommended for each user id in the input data set. Note that
if there are duplicate ids in the input dataset, only one set of recommendations per unique
id will be returned.
:param dataset: a Dataset containing a column of user ids. The column name must match
`userCol`.
:param numItems: max number of recommendations for each user
:return: a DataFrame of (userCol, recommendations), where recommendations are
stored as an array of (itemCol, rating) Rows.
"""
return self._call_java("recommendForUserSubset", dataset, numItems)
@since("2.3.0")
def recommendForItemSubset(self, dataset, numUsers):
"""
Returns top `numUsers` users recommended for each item id in the input data set. Note that
if there are duplicate ids in the input dataset, only one set of recommendations per unique
id will be returned.
:param dataset: a Dataset containing a column of item ids. The column name must match
`itemCol`.
:param numUsers: max number of recommendations for each item
:return: a DataFrame of (itemCol, recommendations), where recommendations are
stored as an array of (userCol, rating) Rows.
"""
return self._call_java("recommendForItemSubset", dataset, numUsers)
if __name__ == "__main__":
import doctest
import pyspark.ml.recommendation
from pyspark.sql import SparkSession
globs = pyspark.ml.recommendation.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.recommendation tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| 39.417695
| 100
| 0.627186
|
13b3d71425a3836d7448536464038ef001322542
| 1,225
|
py
|
Python
|
setup.py
|
richClubb/python-lin
|
de837ab4c7c602ada4f4eefeae3a4749f82a7b1f
|
[
"MIT"
] | 5
|
2019-03-18T19:01:22.000Z
|
2022-03-14T06:51:43.000Z
|
setup.py
|
richClubb/python-lin
|
de837ab4c7c602ada4f4eefeae3a4749f82a7b1f
|
[
"MIT"
] | 6
|
2020-01-14T13:51:11.000Z
|
2021-08-25T12:15:21.000Z
|
setup.py
|
richClubb/python-lin
|
de837ab4c7c602ada4f4eefeae3a4749f82a7b1f
|
[
"MIT"
] | 2
|
2020-01-14T12:58:00.000Z
|
2022-03-14T06:51:53.000Z
|
#!/usr/bin/env python
__author__ = "Richard Clubb"
__copyrights__ = "Copyright 2018, the python-uds project"
__credits__ = ["Richard Clubb"]
__license__ = "MIT"
__maintainer__ = "Richard Clubb"
__email__ = "richard.clubb@embeduk.com"
__status__ = "Development"
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
# Needed to silence warnings (and to be a worthwhile package)
name='python-lin',
url='https://github.com/richClubb/python-lin',
author='Richard Clubb',
author_email='richard.clubb@embeduk.com',
# Needed to actually package something
packages=find_packages(exclude=["test", "test.*"]),
# Needed for dependencies
install_requires=[''],
# *strongly* suggested for sharing
version='0.1.1',
# The license can be anything you like
license='MIT',
description='A library for interfacing with LIN devices using python',
# We will also need a readme eventually (there will be a warning)
# long_description=open('README.txt').read(),
classifiers=[
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent"
],
include_package_data=True
)
| 29.166667
| 74
| 0.689796
|
d13da94272848df7191e18587dbbdb6bfd54e4ff
| 916
|
py
|
Python
|
migrations/versions/71e498fe9d0f_.py
|
sumedhbala/catalog
|
ab969ccf39ce343ba0172e92221f56c18478f743
|
[
"MIT"
] | null | null | null |
migrations/versions/71e498fe9d0f_.py
|
sumedhbala/catalog
|
ab969ccf39ce343ba0172e92221f56c18478f743
|
[
"MIT"
] | null | null | null |
migrations/versions/71e498fe9d0f_.py
|
sumedhbala/catalog
|
ab969ccf39ce343ba0172e92221f56c18478f743
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 71e498fe9d0f
Revises:
Create Date: 2018-11-07 12:56:41.179420
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "71e498fe9d0f"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=80), nullable=True),
sa.Column("email", sa.String(length=80), nullable=True),
sa.Column("password_hash", sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("user")
# ### end Alembic commands ###
| 24.756757
| 73
| 0.649563
|
38085516bcda0cdaefafef39c86989498c704cf5
| 4,131
|
py
|
Python
|
LibrairieVideoCompress/Test_SophieCompression.py
|
JostTim/custom_libs
|
8f9e3f45c6f5f7e47b6582e072d09a8910efddd3
|
[
"MIT"
] | null | null | null |
LibrairieVideoCompress/Test_SophieCompression.py
|
JostTim/custom_libs
|
8f9e3f45c6f5f7e47b6582e072d09a8910efddd3
|
[
"MIT"
] | null | null | null |
LibrairieVideoCompress/Test_SophieCompression.py
|
JostTim/custom_libs
|
8f9e3f45c6f5f7e47b6582e072d09a8910efddd3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 14:28:30 2019
@author: Timothe
"""
from VideoCompression.HirisSeqReader import HirisSeqReader, VideoArrayWrite, Foldersearch, Seq_to_Video
from termcolor import colored
import os
import sys
import re
import logging
from datetime import datetime
import tkinter as tk
from tkinter import filedialog
now = datetime.now()
logsBasename = r"C:\Users\Timothe\NasgoyaveOC\Professionnel\ThèseUNIC\Scripts\Logs\VideoCompression"
logsFilename = now.strftime("LOGS_%y%m%d_%H-%M-%S.log")
logging.basicConfig(filename=os.path.join(logsBasename,logsFilename),level=logging.DEBUG,format='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt = '%d/%m/%Y %H:%M:%S %p --')
logging.info("")
logging.info("NEW PROGRAMM CALL AT DATE :" + now.strftime("%Y%m%d AND HOUR %H:%M:%S"))
logging.info("-------------------------------------------------------")
logger = logging.getLogger("root")
try :
root = tk.Tk()
root.withdraw()
file_path = filedialog.askdirectory(parent=root,initialdir="D:/",title='Please select a directory containing mouses')
print(file_path)
# Root = r"D:\TestSophie"
# output_path_RAW = r"D:\TestSophie"
Root = file_path
output_path_RAW = file_path
regex1 = (r"^[Ss]ouris\d+$")
regex2 = (r"(\w*\\[sS]ouris\d*\\\d{6}_?(VSD)?_?\d*)")
SubdirStorage = "Compressed"
DirsList = os.listdir(Root)
logger.setLevel(logging.WARNING)
TOTAL_FILES = 1
PROCESSED_FILES = 0
for Dir in DirsList:
dirpath = os.path.join(Root,Dir)
print(dirpath)
print(Dir)
if os.path.isdir(dirpath) :
matches = re.finditer(regex1, Dir, re.MULTILINE)
Mouseno = False
for matchnum, match in enumerate(matches, start = 1):
Mouseno = match.group()
print(Mouseno)
if not Mouseno:
continue
            if not os.path.exists(os.path.join(dirpath,SubdirStorage)):
                try :
                    os.makedirs(os.path.join(dirpath,SubdirStorage))
                except FileExistsError:
                    pass
            # resolve the output directory in every case, not only when it was just created
            Output_Root = os.path.join(dirpath,SubdirStorage)
print(Output_Root)
ListOfFiles = Foldersearch(dirpath,"1.seq")
print(ListOfFiles)
for Video in ListOfFiles:
path,file = os.path.split(Video)
vout_name = os.path.basename(path)
Status = Seq_to_Video(Video,Output_Root,output_name = Mouseno+"_"+vout_name , extension = ".avi", codec = "MJPG" )
if Status :
PROCESSED_FILES = PROCESSED_FILES + 1
print(colored("Video n°{:2d} of {:2d} - {:2d}% complete".format(PROCESSED_FILES,TOTAL_FILES,int((PROCESSED_FILES/TOTAL_FILES)*100)),"magenta"))
logger.info("Video n°{:2d} of {:2d} - {:2d}% complete".format(PROCESSED_FILES,TOTAL_FILES,int((PROCESSED_FILES/TOTAL_FILES)*100)))
# print(colored("Video n°{:2d} of {:2d} - {:2d}% complete".format(n,len(ListOfFiles),int((n/len(ListOfFiles))*100)),"magenta"))
# logger.info("Video n°{:2d} of {:2d} - {:2d}% complete".format(n,len(ListOfFiles),int((n/len(ListOfFiles))*100)))
# else :
# print("Video {} Already Exist, searching next".format(output_name+".avi"))
# logger.debug("Video {} Already Exist, searching next".format(output_name+".avi"))
print()
except Exception as e :
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
errors = "Exception : " + str(exc_type) + "Object : " + str(exc_obj) + "TB : " + str(exc_tb) + "File : " + str(fname) + " Line : " + str(exc_tb.tb_lineno)
print(colored("Invalid error {} from : {}\n".format(e,errors),"red"))
logger.error("Invalid error {} from : {}\n".format(e,errors))
| 42.153061
| 182
| 0.591382
|
94f6bb13ba98181ee7c6be301ee451d0871f69f9
| 9,918
|
py
|
Python
|
src/rtransformer/masked_transformer.py
|
cxqj/51-recurrent-transformer
|
e43647217ca30133aace0bce94b750a19d0deb70
|
[
"MIT"
] | 143
|
2020-05-18T22:10:19.000Z
|
2022-03-22T06:28:38.000Z
|
src/rtransformer/masked_transformer.py
|
Tikquuss/mart
|
81b0ec274ab598f0b5b3e1cb00bfa238c2569099
|
[
"MIT"
] | 10
|
2020-07-09T18:37:12.000Z
|
2021-07-13T18:52:28.000Z
|
src/rtransformer/masked_transformer.py
|
Tikquuss/mart
|
81b0ec274ab598f0b5b3e1cb00bfa238c2569099
|
[
"MIT"
] | 28
|
2020-07-11T08:03:10.000Z
|
2022-02-17T08:07:46.000Z
|
"""
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
References:
https://github.com/salesforce/densecap/blob/master/model/transformer.py
Modified by Jie Lei
"""
import torch
from torch import nn
from torch.nn import functional as F
import math
import numpy as np
from src.rtransformer.model import LabelSmoothingLoss
INF = 1e10
def positional_encodings_like(x, t=None):
if t is None:
positions = torch.arange(0, x.size(1)).float()
if x.is_cuda:
positions = positions.cuda(x.get_device())
else:
positions = t
encodings = torch.zeros(*x.size()[1:])
if x.is_cuda:
encodings = encodings.cuda(x.get_device())
for channel in range(x.size(-1)):
if channel % 2 == 0:
encodings[:, channel] = torch.sin(positions / 10000 ** (channel / x.size(2)))
else:
encodings[:, channel] = torch.cos(positions / 10000 ** ((channel - 1) / x.size(2)))
return encodings
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-6):
super(LayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super(ResidualBlock, self).__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super(Attention, self).__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = torch.bmm(query, key.transpose(1, 2))
if query.dim() == 3 and (self is None or self.causal):
tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
if key.is_cuda:
tri = tri.cuda(key.get_device())
dot_products.data.sub_(tri.unsqueeze(0))
return torch.bmm(self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)
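# Illustrative sketch (not called anywhere in this module): the additive causal
# mask that Attention.forward builds when causal=True. Entries above the
# diagonal are pushed towards -INF so the softmax gives future positions
# effectively zero weight.
def _causal_mask_sketch(seq_len):
    tri = torch.ones(seq_len, seq_len).triu(1) * INF
    return -tri  # add this to the attention logits before the softmax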
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super(MultiHead, self).__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (
x.chunk(self.n_heads, -1) for x in (query, key, value))
return self.wo(torch.cat([self.attention(q, k, v)
for q, k, v in zip(query, key, value)], -1))
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super(FeedForward, self).__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.linear2 = nn.Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super(EncoderLayer, self).__init__()
self.selfattn = ResidualBlock(
MultiHead(d_model, d_model, n_heads, drop_ratio, causal=False),
d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, x):
return self.feedforward(self.selfattn(x, x, x))
class DecoderLayer(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super(DecoderLayer, self).__init__()
self.selfattn = ResidualBlock(
MultiHead(d_model, d_model, n_heads, drop_ratio, causal=True),
d_model, drop_ratio)
self.attention = ResidualBlock(
MultiHead(d_model, d_model, n_heads, drop_ratio),
d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, x, encoding):
"""
Args:
x: (N, Lt, D)
encoding: (N, Lv, D)
"""
x = self.selfattn(x, x, x) # (N, Lt, D)
return self.feedforward(self.attention(x, encoding, encoding)) # (N, Lt, D)
class Encoder(nn.Module):
def __init__(self, vfeat_size, d_model, d_hidden, n_layers, n_heads, drop_ratio):
super(Encoder, self).__init__()
self.video_embeddings = nn.Sequential(
LayerNorm(vfeat_size),
nn.Dropout(drop_ratio),
nn.Linear(vfeat_size, d_model)
)
self.layers = nn.ModuleList(
[EncoderLayer(d_model, d_hidden, n_heads, drop_ratio)
for i in range(n_layers)])
self.dropout = nn.Dropout(drop_ratio)
def forward(self, x, mask=None):
"""
Args:
x: (N, Lv, Dv)
mask: (N, Lv)
Returns:
"""
x = self.video_embeddings(x) # (N, Lv, D)
x = x + positional_encodings_like(x)
x = self.dropout(x)
        if mask is not None:
            mask.unsqueeze_(-1)
            x = x*mask
encoding = []
for layer in self.layers:
x = layer(x)
if mask is not None:
x = x*mask
encoding.append(x)
return encoding
class Decoder(nn.Module):
def __init__(self, d_model, d_hidden, vocab_size, n_layers, n_heads,
drop_ratio):
super(Decoder, self).__init__()
self.layers = nn.ModuleList(
[DecoderLayer(d_model, d_hidden, n_heads, drop_ratio)
for i in range(n_layers)])
self.out = nn.Linear(d_model, vocab_size)
self.dropout = nn.Dropout(drop_ratio)
self.d_model = d_model
self.d_out = vocab_size
def forward(self, x, encoding):
"""
Args:
x: (N, Lt)
encoding: [(N, Lv, D), ] * num_hidden_layers
"""
x = F.embedding(x, self.out.weight * math.sqrt(self.d_model)) # (N, Lt, D)
x = x + positional_encodings_like(x) # (N, Lt, D)
x = self.dropout(x) # (N, Lt, D)
for layer, enc in zip(self.layers, encoding):
x = layer(x, enc) # (N, Lt, D)
return x # (N, Lt, D) at last layer
class MTransformer(nn.Module):
def __init__(self, config):
super(MTransformer, self).__init__()
self.config = config
vfeat_size = config.video_feature_size
d_model = config.hidden_size # 1024
d_hidden = config.intermediate_size # 2048
n_layers = config.num_hidden_layers # 6
n_heads = config.num_attention_heads # 8
drop_ratio = config.hidden_dropout_prob # 0.1
self.vocab_size = config.vocab_size
self.encoder = Encoder(vfeat_size, d_model, d_hidden, n_layers,
n_heads, drop_ratio)
self.decoder = Decoder(d_model, d_hidden, self.vocab_size,
n_layers, n_heads, drop_ratio)
self.loss_func = LabelSmoothingLoss(config.label_smoothing, config.vocab_size, ignore_index=-1) \
if "label_smoothing" in config and config.label_smoothing > 0 else nn.CrossEntropyLoss(ignore_index=-1)
def encode(self, video_features, video_masks):
"""
Args:
video_features: (N, Lv, Dv)
video_masks: (N, Lv) with 1 indicates valid bits
"""
return self.encoder(video_features, video_masks)
def decode(self, text_input_ids, text_masks, text_input_labels, encoder_outputs, video_masks):
"""
Args:
text_input_ids: (N, Lt)
text_masks: (N, Lt) with 1 indicates valid bits,
text_input_labels: (N, Lt) with `-1` on ignored positions
encoder_outputs: (N, Lv, D)
video_masks: not used, leave here to maintain a common API with untied model
"""
# the triangular mask is generated and applied inside the attention module
h = self.decoder(text_input_ids, encoder_outputs) # (N, Lt, D)
prediction_scores = self.decoder.out(h) # (N, Lt, vocab_size)
caption_loss = self.loss_func(prediction_scores.view(-1, self.config.vocab_size),
text_input_labels.view(-1)) # float
return caption_loss, prediction_scores
def forward(self, video_features, video_masks, text_input_ids, text_masks, text_input_labels):
"""
Args:
video_features: (N, Lv, Dv)
video_masks: (N, Lv) with 1 indicates valid bits
text_input_ids: (N, Lt)
text_masks: (N, Lt) with 1 indicates valid bits
text_input_labels: (N, Lt) with `-1` on ignored positions (in some sense duplicate with text_masks)
"""
encoder_layer_outputs = self.encode(video_features, video_masks) # [(N, Lv, D), ] * num_hidden_layers
caption_loss, prediction_scores = self.decode(
text_input_ids, text_masks, text_input_labels, encoder_layer_outputs, None) # float, (N, Lt, vocab_size)
return caption_loss, prediction_scores
| 36.869888
| 117
| 0.604658
|
9efd82e3927dd7c8e09d92eeb8ed30ae98e4b411
| 895
|
py
|
Python
|
nicos_mlz/reseda/setups/guide_fields.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos_mlz/reseda/setups/guide_fields.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos_mlz/reseda/setups/guide_fields.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# -*- coding: utf-8 -*-
description = 'Guide fields'
group = 'lowlevel'
display_order = 20
tango_base = 'tango://resedahw2.reseda.frm2:10000/reseda'
devices = {
'gf%i' % i: device('nicos.devices.entangle.PowerSupply',
description = 'Guide field %i' % i,
tangodevice = '%s/coil/gf%i' % (tango_base, i),
fmtstr = '%.3f',
tangotimeout = 30.0,
pollinterval = 60,
maxage = 119, #maxage should not be a multiple of pollinterval!
unit = 'A',
precision = 0.005,
) for i in ([0, 1, 2] + list(range(4, 11)))
}
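# Note: gf3 is not generated by the comprehension above (its index list skips 3);
# gf4 is generated there but overridden below with a shorter Tango timeout
# (5.0 s instead of 30.0 s).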
devices.update({
'gf4': device('nicos.devices.entangle.PowerSupply',
description = 'Guide field 4',
tangodevice = '%s/coil/gf4' % tango_base,
fmtstr = '%.3f',
tangotimeout = 5.0,
pollinterval = 60,
maxage = 119,
unit = 'A',
precision = 0.005,
)
})
| 27.96875
| 71
| 0.556425
|
16ccdd69811e44fe64f9c7e75d8328873f1790a7
| 2,364
|
py
|
Python
|
backend/similarConferenceFinder.py
|
vunetsys/paper_search
|
f53222204761852f97b72876b79bef117cdfd463
|
[
"MIT"
] | null | null | null |
backend/similarConferenceFinder.py
|
vunetsys/paper_search
|
f53222204761852f97b72876b79bef117cdfd463
|
[
"MIT"
] | null | null | null |
backend/similarConferenceFinder.py
|
vunetsys/paper_search
|
f53222204761852f97b72876b79bef117cdfd463
|
[
"MIT"
] | 1
|
2021-10-29T20:43:27.000Z
|
2021-10-29T20:43:27.000Z
|
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import linear_kernel
from scipy import spatial
def create_collaborative_keyworddb(db): # counts finger prints of keywords from each conference
panda_keywords = []
stop_update_header = False
header = ['conference']
for conference_data in db.get_all_data_from_table('conferences_temp'):
relative_keywords = dict()
for keyword in db.get_distinct_keywords():
relative_keywords['conference'] = conference_data[0]
keyword_df = keyword[0]
if stop_update_header is False:
header.append(keyword_df)
relative_keywords[keyword_df] = conference_data[1].count(keyword_df)
panda_keywords.append(relative_keywords)
stop_update_header = True
return pd.DataFrame(panda_keywords)
# cosine similarity
# def calc_cosine_similarity(X):
# cosine_sim = linear_kernel(X, X)
# return cosine_sim
# sklearn euclidean function
# def calc_euclidean_dist(X):
# eucli_dist = euclidean_distances(X, X)
# return eucli_dist
# scipy more precise euclidean function
def calc_euclidean_dist(X):
scipy_eucli_dist = list()
for index in range(np.size(X, 0)):
row_list = list()
for index_2 in range(np.size(X, 0)):
row_list.append(spatial.distance.euclidean(X[index], X[index_2]))
scipy_eucli_dist.append(row_list)
return np.array(scipy_eucli_dist)
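# A minimal vectorized alternative to the loop above (illustrative; not used by
# get_neigbors below): scipy's cdist builds the same pairwise Euclidean distance
# matrix in a single call.
def calc_euclidean_dist_cdist(X):
    return spatial.distance.cdist(X, X, metric='euclidean')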
def get_neigbors(df): # extract shortest distance and create dataframe of all conferences' similar conference
df = df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1)
conferences = df['conference']
df = df.drop(df.columns[df.columns.str.contains('conference', case=False)], axis=1)
neighbor_conf_index_data = []
X = df.values
eucli_dist = calc_euclidean_dist(X)
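    # self-distances on the diagonal are 0, so swap them for the matrix maximum
    # before taking the row-wise minimum; min(1) then yields the distance to each
    # conference's nearest *other* conference.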
min_values = np.where(eucli_dist > 0., eucli_dist, eucli_dist.max()).min(1)
neighbor_conf_index = [list(eucli_dist[i, :]).index(min_values[i]) for i in range(len(min_values))]
i = 0
for conference in conferences:
neighbor_conf_index_data.append({'conferenceName': conference, 'similar conference': conferences[neighbor_conf_index[i]]})
i = i + 1
return pd.DataFrame(neighbor_conf_index_data)
| 38.129032
| 130
| 0.711083
|
31082376927734d93ab61d91725275821f9b43c1
| 907
|
py
|
Python
|
share/qt/clean_mac_info_plist.py
|
bogdanoffcoin/bogdanoffcoin
|
a860f5f4c29c020b635d9d7e7e240365cd69e322
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
bogdanoffcoin/bogdanoffcoin
|
a860f5f4c29c020b635d9d7e7e240365cd69e322
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
bogdanoffcoin/bogdanoffcoin
|
a860f5f4c29c020b635d9d7e7e240365cd69e322
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Bogdanoffcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Bogdanoffcoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print("Info.plist fresh created")
| 30.233333
| 109
| 0.728776
|
fba551c42003afa852e57f357b649d108c419c48
| 9,673
|
py
|
Python
|
Api/utils.py
|
erfanhs/Tuky
|
11bd23ca31892e4579ec904b14a3ee701e58f9d8
|
[
"MIT"
] | 3
|
2020-05-04T11:23:38.000Z
|
2020-06-19T14:04:16.000Z
|
Api/utils.py
|
erfanhs/Tuky
|
11bd23ca31892e4579ec904b14a3ee701e58f9d8
|
[
"MIT"
] | 6
|
2021-03-30T13:15:49.000Z
|
2022-03-12T00:27:41.000Z
|
Api/utils.py
|
erfanhs/Tuky
|
11bd23ca31892e4579ec904b14a3ee701e58f9d8
|
[
"MIT"
] | 1
|
2020-09-02T10:23:01.000Z
|
2020-09-02T10:23:01.000Z
|
from django.conf import settings
from django.core.mail import send_mail
from django.utils.crypto import get_random_string
from hashlib import md5, sha1
import re
import requests
import datetime
from dateutil.relativedelta import relativedelta
import pytz
from . import models
tz = pytz.timezone('Asia/Tehran')
class ClickAnalyse:
def __init__(self, link):
self.clicks = models.Click.objects.filter(short_url=link)
self.now = datetime.datetime.now(tz)
    def os(self, clicks=None):
        # count clicks per attribute value, preserving first-seen key order
        if clicks is None: clicks = self.clicks
        counts = {}
        for click in clicks:
            counts[click.os] = counts.get(click.os, 0) + 1
        return counts
    def browser(self, clicks=None):
        if clicks is None: clicks = self.clicks
        counts = {}
        for click in clicks:
            counts[click.browser] = counts.get(click.browser, 0) + 1
        return counts
    def device(self, clicks=None):
        if clicks is None: clicks = self.clicks
        counts = {}
        for click in clicks:
            counts[click.device] = counts.get(click.device, 0) + 1
        return counts
    def country(self, clicks=None):
        if clicks is None: clicks = self.clicks
        counts = {}
        for click in clicks:
            counts[click.country] = counts.get(click.country, 0) + 1
        return counts
def roundClicks(self, clicks):
for click in clicks:
date = click.dateTime.astimezone(tz)
if date.minute >= 30: date += datetime.timedelta(minutes=60 - date.minute)
else: date -= datetime.timedelta(minutes=date.minute)
click.dateTime = date
return clicks
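    # Illustrative examples of the rounding above: 13:42 rounds up to 14:00 and
    # 13:12 rounds down to 13:00, so day() can bucket clicks into hourly slots.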
def day(self):
def filterByHourDay(hour, day):
return [click for click in clicks if click.dateTime.hour == hour and click.dateTime.day == day]
now = self.now
clicks = self.clicks.filter(dateTime__range=[now - relativedelta(days=1), now + datetime.timedelta(hours=1)])
clicks = self.roundClicks(clicks)
TimeLine_Day_List = [ len(filterByHourDay((now - datetime.timedelta(hours=i)).hour, (now - datetime.timedelta(hours=i)).day)) for i in range(0,24)]
TimeLine_Day_List[0] += len(filterByHourDay((now + datetime.timedelta(hours=1)).hour, now.day))
TimeLine_Day_List.reverse()
return {
'totalClicks': len(clicks),
'browser': self.browser(clicks),
'os': self.os(clicks),
'country': self.country(clicks),
'device': self.device(clicks),
'timeLine': TimeLine_Day_List,
'time_interval': 'lastDay'
}
def week(self):
now = self.now
clicks = self.clicks.filter(dateTime__range = [now - datetime.timedelta(days=6), now + datetime.timedelta(days=1)])
TimeLine_Week_List = [ len(clicks.filter(dateTime__day = (now - datetime.timedelta(days=i)).day)) for i in range(0, 7)]
TimeLine_Week_List.reverse()
return {
'totalClicks': len(clicks),
'browser': self.browser(clicks),
'os': self.os(clicks),
'country': self.country(clicks),
'device': self.device(clicks),
'timeLine': TimeLine_Week_List,
'time_interval': 'lastWeek'
}
def month(self):
now = self.now.date()
clicks = self.clicks.filter(dateTime__range = [now - relativedelta(months=1), now + datetime.timedelta(days=1)])
TimeLine_Month_List = [ len(clicks.filter(dateTime__date = (now - datetime.timedelta(days=i)))) for i in range(0, 30)]
TimeLine_Month_List.reverse()
return {
'totalClicks': len(clicks),
'browser': self.browser(clicks),
'os': self.os(clicks),
'country': self.country(clicks),
'device': self.device(clicks),
'timeLine': TimeLine_Month_List,
'time_interval': 'lastMonth'
}
def alltime(self):
now = self.now.date()
clicks = self.clicks.filter(dateTime__range = [now - relativedelta(months=18), now + datetime.timedelta(days=1)])
TimeLine_AllTime_List = [ len(clicks.filter(dateTime__month = (now - relativedelta(months=i)).month, dateTime__year = (now - relativedelta(months=i)).year )) for i in range(0, 18)]
TimeLine_AllTime_List.reverse()
return {
'totalClicks': len(clicks),
'browser': self.browser(clicks),
'os': self.os(clicks),
'country': self.country(clicks),
'device': self.device(clicks),
'timeLine': TimeLine_AllTime_List,
'time_interval': 'allTime'
}
def send_verify_mail(target):
verify_id = get_random_string(length=32)
send_mail(
'email verify link',
('thanks for sign up.\nverify link: http://%s/verify/' % settings.HOST_NAME) + verify_id,
'erfanharirsaz071@gmail.com',
[target],
fail_silently=False
)
return verify_id
def password_hashing(passw):
return sha1(md5(passw.encode('utf8')).hexdigest().encode('utf8')).hexdigest()
url_validator_regex = re.compile(re.compile(
u"^"
u"(?:(?:https?|ftp)://)"
u"(?:\S+(?::\S*)?@)?"
u"(?:"
u"(?P<private_ip>"
u"(?:(?:10|127)" + u"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5]))" + u"{2}" + u"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + u")|"
u"(?:(?:169\.254|192\.168)" + u"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5]))" + u"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + u")|"
u"(?:172\.(?:1[6-9]|2\d|3[0-1])" + u"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5]))" + u"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + u"))"
u"|"
u"(?P<public_ip>"
u"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
u"" + u"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5]))" + u"{2}"
u"" + u"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + u")"
u"|"
u"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
u"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
u"(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
u")"
u"(?::\d{2,5})?"
u"(?:/\S*)?"
u"(?:\?\S*)?"
u"$",
re.UNICODE | re.IGNORECASE
))
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def recaptcha_validation(request):
captcha_rs = request.data.get('recaptchaToken')
if not captcha_rs: return False
url = "https://www.google.com/recaptcha/api/siteverify"
params = {
'secret': settings.CAPTCHA_SECRET_KEY,
'response': captcha_rs,
'remoteip': get_client_ip(request)
}
verify_rs = requests.get(url, params=params, verify=True).json()
return verify_rs['success']
def check_input_data(data):
if hasattr(data, '_mutable'): data._mutable = True
for key in data:
if key not in ('user', 'url_id', 'long_url', 'password', 'recaptchaToken', 'expiration_date'):
return {'error': "فیلد ناشناخته !"}
if not url_validator_regex.match(data['long_url']):
return {'error': 'لینک وارد شده اشتباه است !' + '\n' + 'توجه: لینک باید با //:http یا //:https شروع شود.'}
if 'url_id' in data and data['url_id']:
if data['url_id'] in ['registration', 'settings', 'report', 'admin']:
return {'error': 'شما نمی توانید از این آدرس استفاده کنید !'}
try:
models.Link.objects.get(url_id=data['url_id'])
return {'error': 'از این آدرس در یک لینک دیگر استفاده شده است !'}
except models.Link.DoesNotExist:
if not data['url_id'].isalnum():
return {'error': 'در آدرس دلخواه از کاراکتر غیر مجاز استفاده شده است !'}
elif len(data['url_id']) > 65:
return {'error': 'حداکثر طول آدرس دلخواه 65 کاراکتر می باشد !'}
if 'password' in data and data['password']:
data['password'] = password_hashing(data['password'])
if 'expiration_date' in data:
if data['expiration_date']:
date_str = data['expiration_date']
try:
date = datetime.datetime.strptime(date_str, '%Y/%m/%d')
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
tomorrow = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day)
if date < tomorrow:
return {'error': 'تاریخ وارد شده گذشته است !'}
else:
data['expiration_date'] = date
except:
return {'error': 'فرمت تاریخ وارد شده اشتباه است !'}
else:
del data['expiration_date']
return data
| 38.233202
| 189
| 0.574072
|
b7d752aa1a360efb6bb96228e083bcb76e78e64c
| 10,539
|
py
|
Python
|
src/tools/test_ndn.py
|
funalab/QCANet
|
6e8503a3ac78e7cbde18881314c8ad53774f59e5
|
[
"MIT"
] | 26
|
2018-06-02T01:50:28.000Z
|
2022-01-18T20:20:13.000Z
|
src/tools/test_ndn.py
|
funalab/QCANet
|
6e8503a3ac78e7cbde18881314c8ad53774f59e5
|
[
"MIT"
] | 7
|
2018-11-30T13:43:10.000Z
|
2021-01-16T11:15:28.000Z
|
src/tools/test_ndn.py
|
funalab/QCANet
|
6e8503a3ac78e7cbde18881314c8ad53774f59e5
|
[
"MIT"
] | 7
|
2018-06-20T07:58:59.000Z
|
2022-03-17T07:37:28.000Z
|
# -*- coding: utf-8 -*-
import csv
import sys
import time
import random
import copy
import math
import os
import numpy as np
import os.path as pt
from skimage import io
from skimage import transform as tr
from skimage import morphology as mor
from argparse import ArgumentParser
from chainer import cuda
sys.path.append(os.getcwd())
from src.lib.model import Model_L2, Model_L3, Model_L4
from src.lib.utils import mirror_extension_image, Utils  # assumption: the Utils helper (createOpbase/loadModel) lives alongside mirror_extension_image
class TestNDN():
def __init__(
self,
model=None,
patchsize=128,
stride=64,
resolution=(1.0, 1.0, 2.18),
scaling=True,
delv=3,
opbase=None,
gpu=False,
ndim=3
):
self.model = model
self.patchsize = (patchsize, patchsize, patchsize)
self.stride = (stride, stride, stride)
self.resolution = resolution
self.scaling = scaling
self.delv = delv
self.opbase = opbase
self.gpu = gpu
self.psep = '/'
self.ndim = ndim
def NuclearDetection(self, image_path):
segbase = 'DetectionImages'
if not (pt.exists(self.opbase + self.psep + segbase)):
os.mkdir(self.opbase + self.psep + segbase)
labbase = 'LabelingDetectionImages'
if not (pt.exists(self.opbase + self.psep + labbase)):
os.mkdir(self.opbase + self.psep + labbase)
image = io.imread(image_path)
im_size = image.shape
if self.ndim == 2:
ip_size = (int(image.shape[0] * self.resolution[1]), int(image.shape[1] * self.resolution[0]))
sh = [int(self.stride[0]/2), int(self.stride[1]/2)]
elif self.ndim == 3:
ip_size = (int(image.shape[0] * self.resolution[2]), int(image.shape[1] * self.resolution[1]), int(image.shape[2] * self.resolution[0]))
sh = [int(self.stride[0]/2), int(self.stride[1]/2), int(self.stride[2]/2)]
image = tr.resize(image, ip_size, order = 1, preserve_range = True)
im_size_ip = image.shape
# Scaling
if self.scaling:
image = image.astype(np.float32)
#image = image / image.max()
image = (image - image.min()) / (image.max() - image.min())
#image = (image - image.mean()) / image.std()
''' calculation for pad size'''
#if np.min(self.patchsize) > np.max(im_size):
if np.min(self.patchsize) > np.max(np.array(im_size) + np.array(sh)*2):
if self.ndim == 2:
pad_size = [self.patchsize[0], self.patchsize[1]]
elif self.ndim == 3:
pad_size = [self.patchsize[0], self.patchsize[1], self.patchsize[2]]
else:
pad_size = []
for axis in range(len(im_size_ip)):
if (im_size_ip[axis] + 2*sh[axis] - self.patchsize[axis]) % self.stride[axis] == 0:
stride_num = int((im_size_ip[axis] + 2*sh[axis] - self.patchsize[axis]) / self.stride[axis])
else:
stride_num = int((im_size_ip[axis] + 2*sh[axis] - self.patchsize[axis]) / self.stride[axis]) + 1
pad_size.append(int(self.stride[axis] * stride_num + self.patchsize[axis]))
pre_img = np.zeros(pad_size)
if self.ndim == 2:
image = mirror_extension_image(image=image, ndim=self.ndim, length=int(np.max(self.patchsize)))[self.patchsize[0]-sh[0]:self.patchsize[0]-sh[0]+pad_size[0], self.patchsize[1]-sh[1]:self.patchsize[1]-sh[1]+pad_size[1]]
for y in range(0, pad_size[0]-self.stride[0], self.stride[0]):
for x in range(0, pad_size[1]-self.stride[1], self.stride[1]):
x_patch = image[y:y+self.patchsize[0], x:x+self.patchsize[1]]
x_patch = np.expand_dims(np.expand_dims(x_patch.astype(np.float32), axis=0), axis=0)
if self.gpu >= 0:
x_patch = cuda.to_gpu(x_patch)
s_output = self.model(x=x_patch, t=None, seg=True)
if self.gpu >= 0:
s_output = cuda.to_cpu(s_output)
pred = copy.deepcopy((0 < (s_output[0][1] - s_output[0][0])) * 255)
# Add segmentation image
pre_img[y:y+self.stride[0], x:x+self.stride[1]] += pred[sh[0]:-sh[0], sh[1]:-sh[1]]
seg_img = (pre_img > 0) * 255
seg_img = seg_img[0:im_size_ip[0], 0:im_size_ip[1]]
elif self.ndim == 3:
image = mirror_extension_image(image=image, ndim=self.ndim, length=int(np.max(self.patchsize)))[self.patchsize[0]-sh[0]:self.patchsize[0]-sh[0]+pad_size[0], self.patchsize[1]-sh[1]:self.patchsize[1]-sh[1]+pad_size[1], self.patchsize[2]-sh[2]:self.patchsize[2]-sh[2]+pad_size[2]]
for z in range(0, pad_size[0]-self.stride[0], self.stride[0]):
for y in range(0, pad_size[1]-self.stride[1], self.stride[1]):
for x in range(0, pad_size[2]-self.stride[2], self.stride[2]):
x_patch = image[z:z+self.patchsize[0], y:y+self.patchsize[1], x:x+self.patchsize[2]]
x_patch = np.expand_dims(np.expand_dims(x_patch.astype(np.float32), axis=0), axis=0)
if self.gpu >= 0:
x_patch = cuda.to_gpu(x_patch)
s_output = self.model(x=x_patch, t=None, seg=True)
if self.gpu >= 0:
s_output = cuda.to_cpu(s_output)
pred = copy.deepcopy((0 < (s_output[0][1] - s_output[0][0])) * 255)
# Add segmentation image
pre_img[z:z+self.stride[0], y:y+self.stride[1], x:x+self.stride[2]] += pred[sh[0]:-sh[0], sh[1]:-sh[1], sh[2]:-sh[2]]
seg_img = (pre_img > 0) * 255
seg_img = seg_img[0:im_size_ip[0], 0:im_size_ip[1], 0:im_size_ip[2]]
seg_img = (tr.resize(seg_img, im_size, order = 1, preserve_range = True) > 0) * 255
filename = os.path.join(self.opbase, segbase, os.path.basename(image_path)[:os.path.basename(image_path).rfind('.')] + '.tif')
# filename = self.opbase + self.psep + segbase + self.psep + 'detimg_t{0:03d}.tif'.format(int(image_path[image_path.rfind('/')+1:image_path.rfind('.')]))
io.imsave(filename, seg_img.astype(np.uint8))
lab_img = mor.label(seg_img.astype(np.uint16), neighbors=4)
mask_size = np.unique(lab_img, return_counts=True)[1] < (self.delv + 1)
remove_voxel = mask_size[lab_img]
lab_img[remove_voxel] = 0
labels = np.unique(lab_img)
lab_img = np.searchsorted(labels, lab_img)
filename = os.path.join(self.opbase, labbase, os.path.basename(image_path)[:os.path.basename(image_path).rfind('.')] + '.tif')
# filename = self.opbase + self.psep + labbase + self.psep + 'labimg_t{0:03d}.tif'.format(int(image_path[image_path.rfind('/')+1:image_path.rfind('.')]))
io.imsave(filename, lab_img.astype(np.uint16))
return lab_img.astype(np.uint16)
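# Illustrative sketch (not used by TestNDN above): the per-axis padded length
# computed inside NuclearDetection when the mirror-extended image is larger than
# one patch. The image is extended by stride // 2 on each side and padded up to
# the smallest stride * n + patchsize that covers it, so the sliding window
# tiles it exactly.
def _pad_size_for_axis(image_len, patchsize, stride):
    extended = image_len + 2 * (stride // 2)
    stride_num = -(-(extended - patchsize) // stride)  # ceiling division
    return stride * stride_num + patchsize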
if __name__ == '__main__':
start_time = time.time()
ap = ArgumentParser(description='python test_ndn.py')
ap.add_argument('--indir', '-i', nargs='?', default='../images/example_input', help='Specify input image')
ap.add_argument('--outdir', '-o', nargs='?', default='result_test_ndn', help='Specify output files directory for create detection image')
ap.add_argument('--model', '-m', nargs='?', default='../models/p128/learned_ndn.model', help='Specify loading file path of learned NDN Model')
ap.add_argument('--gpu', '-g', type=int, default=-1, help='Specify GPU ID (negative value indicates CPU)')
ap.add_argument('--patchsize', '-p', type=int, default=128, help='Specify patch size')
ap.add_argument('--stride', type=int, default=64, help='Specify stride size')
ap.add_argument('--delete', '-d', type=int, default=0, help='Specify Pixel Size of Delete Region')
ap.add_argument('--scaling', '-s', action='store_true', help='Specify Image-wise Scaling Flag')
ap.add_argument('--resolution_x', '-x', type=float, default=1.0, help='Specify microscope resolution of x axis (default=1.0)')
ap.add_argument('--resolution_y', '-y', type=float, default=1.0, help='Specify microscope resolution of y axis (default=1.0)')
ap.add_argument('--resolution_z', '-z', type=float, default=2.18, help='Specify microscope resolution of z axis (default=2.18)')
args = ap.parse_args()
argvs = sys.argv
util = Utils()
psep = '/'
opbase = util.createOpbase(args.outdir)
patchsize = args.patchsize
stride = args.stride
print('Patch Size: {}'.format(patchsize))
print('Stride Size: {}'.format(stride))
print('Delete Voxels: {}'.format(args.delete))
with open(opbase + psep + 'result.txt', 'w') as f:
f.write('python ' + ' '.join(argvs) + '\n')
f.write('[Properties of parameter]\n')
f.write('Output Directory: {}\n'.format(opbase))
f.write('Patch Size: {}\n'.format(patchsize))
f.write('Stride Size: {}\n'.format(stride))
f.write('Delete Voxels: {}\n'.format(args.delete))
# Create Model
class_weight = np.array([1, 1]).astype(np.float32)
if args.gpu >= 0:
class_weight = cuda.to_gpu(class_weight)
# Adam
ndn = Model_L4(class_weight=class_weight, n_class=2, init_channel=12,
kernel_size=5, pool_size=2, ap_factor=2, gpu=args.gpu)
# SGD
# ndn = Model_L3(class_weight=class_weight, n_class=2, init_channel=16,
# kernel_size=3, pool_size=2, ap_factor=2, gpu=args.gpu)
# Load Model
if not args.model == '0':
util.loadModel(args.model, ndn)
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
ndn.to_gpu()
# Detection Phase
test_ndn = TestNDN(model=ndn, patchsize=patchsize, stride=stride,
resolution=(args.resolution_x, args.resolution_y, args.resolution_z),
scaling=args.scaling, delv=args.delete,
opbase=opbase, gpu=args.gpu)
dlist = os.listdir(args.indir)
for l in dlist:
test_ndn.NuclearDetection(args.indir + psep + l)
end_time = time.time()
etime = end_time - start_time
print('Elapsed time is (sec) {}'.format(etime))
with open(opbase + psep + 'result.txt', 'a') as f:
f.write('======================================\n')
f.write('Elapsed time is (sec) {} \n'.format(etime))
print('NDN Test Completed Process!')
| 49.247664
| 290
| 0.591707
|
bb293fa030d1402dd37326f11fd8ad23be4afcbc
| 5,094
|
py
|
Python
|
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from dagster_aws.emr import EmrJobRunner, emr_pyspark_resource
from dagster_pyspark import pyspark_resource, pyspark_solid
from moto import mock_emr
from dagster import (
DagsterInvalidDefinitionError,
ModeDefinition,
RunConfig,
execute_pipeline,
pipeline,
)
from dagster.seven import mock
from dagster.utils.test import create_test_pipeline_execution_context
@pyspark_solid
def example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pyspark_solid(name='blah', description='this is a test', config={'foo': str, 'bar': int})
def other_example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pipeline(
mode_defs=[
ModeDefinition('prod', resource_defs={'pyspark': emr_pyspark_resource}),
ModeDefinition('local', resource_defs={'pyspark': pyspark_resource}),
]
)
def example_pipe():
example_solid()
other_example_solid()
def test_local():
result = execute_pipeline(
example_pipe,
environment_dict={'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},},
run_config=RunConfig(mode='local'),
)
assert result.success
@mock_emr
@mock.patch('dagster_aws.emr.emr.EmrJobRunner.wait_for_steps_to_complete')
def test_pyspark_emr(mock_wait):
run_job_flow_args = dict(
Instances={
'InstanceCount': 1,
'KeepJobFlowAliveWhenNoSteps': True,
'MasterInstanceType': 'c3.medium',
'Placement': {'AvailabilityZone': 'us-west-1a'},
'SlaveInstanceType': 'c3.xlarge',
},
JobFlowRole='EMR_EC2_DefaultRole',
LogUri='s3://mybucket/log',
Name='cluster',
ServiceRole='EMR_DefaultRole',
VisibleToAllUsers=True,
)
# Doing cluster setup outside of a solid here, because run_job_flow is not yet plumbed through
# to the pyspark EMR resource.
job_runner = EmrJobRunner(region='us-west-1')
context = create_test_pipeline_execution_context()
cluster_id = job_runner.run_job_flow(context, run_job_flow_args)
result = execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': cluster_id,
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert result.success
assert mock_wait.called_once
def test_bad_requirements_txt():
with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'requirements_file_path': 'DOES_NOT_EXIST',
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': 'some_cluster_id',
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert 'The requirements.txt file that was specified does not exist' in str(exc_info.value)
@pytest.mark.skipif(
'AWS_EMR_TEST_DO_IT_LIVE' not in os.environ,
reason='This test is slow and requires a live EMR cluster; run only upon explicit request',
)
def test_do_it_live_emr():
result = execute_pipeline(
example_pipe,
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': os.environ.get('AWS_EMR_JOB_FLOW_ID'),
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
'wait_for_logs': True,
}
}
},
},
run_config=RunConfig(mode='prod'),
)
assert result.success
| 33.513158
| 98
| 0.561837
|
d2555a3bfdb15bd379659fd32924764af5f1cb8c
| 13,293
|
py
|
Python
|
luna/gateware/soc/peripheral.py
|
modwizcode/luna
|
a401e5240d210ccc59526660604451bca92dc17c
|
[
"BSD-3-Clause"
] | 609
|
2019-10-17T07:17:21.000Z
|
2022-03-29T02:31:28.000Z
|
luna/gateware/soc/peripheral.py
|
modwizcode/luna
|
a401e5240d210ccc59526660604451bca92dc17c
|
[
"BSD-3-Clause"
] | 132
|
2020-01-19T11:48:03.000Z
|
2022-03-29T20:31:14.000Z
|
luna/gateware/soc/peripheral.py
|
modwizcode/luna
|
a401e5240d210ccc59526660604451bca92dc17c
|
[
"BSD-3-Clause"
] | 113
|
2019-12-17T02:31:25.000Z
|
2022-03-18T13:01:17.000Z
|
#
# This file is part of LUNA.
#
# Adapted from lambdasoc.
# This file includes content Copyright (C) 2020 LambdaConcept.
#
# Per our BSD license, derivative files must include this license disclaimer.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Peripheral helpers for LUNA devices. """
from contextlib import contextmanager
from amaranth import Module, Elaboratable
from amaranth import tracer
from amaranth.utils import log2_int
from amaranth_soc import csr, wishbone
from amaranth_soc.memory import MemoryMap
from amaranth_soc.csr.wishbone import WishboneCSRBridge
from .event import EventSource, IRQLine, InterruptSource
__all__ = ["Peripheral", "CSRBank", "PeripheralBridge"]
class Peripheral:
"""Wishbone peripheral.
A helper class to reduce the boilerplate needed to control a peripheral with a Wishbone interface.
It provides facilities for instantiating CSR registers, requesting windows to subordinate busses
and sending interrupt requests to the CPU.
The ``Peripheral`` class is not meant to be instantiated as-is, but rather as a base class for
actual peripherals.
Usage example
-------------
```
class ExamplePeripheral(Peripheral, Elaboratable):
def __init__(self):
super().__init__()
bank = self.csr_bank()
self._foo = bank.csr(8, "r")
self._bar = bank.csr(8, "w")
self._rdy = self.event(mode="rise")
self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
self.irq = self._bridge.irq
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
# ...
return m
```
Arguments
---------
name : str
Name of this peripheral. If ``None`` (default) the name is inferred from the variable
name this peripheral is assigned to.
Properties
----------
name : str
Name of the peripheral.
"""
def __init__(self, name=None, src_loc_at=1):
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
self.name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
self._csr_banks = []
self._windows = []
self._events = []
self._bus = None
self._irq = None
@property
def bus(self):
"""Wishbone bus interface.
Return value
------------
An instance of :class:`Interface`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have a Wishbone bus.
"""
if self._bus is None:
raise NotImplementedError("Peripheral {!r} does not have a bus interface"
.format(self))
return self._bus
@bus.setter
def bus(self, bus):
if not isinstance(bus, wishbone.Interface):
raise TypeError("Bus interface must be an instance of wishbone.Interface, not {!r}"
.format(bus))
self._bus = bus
@property
def irq(self):
"""Interrupt request line.
Return value
------------
An instance of :class:`IRQLine`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have an IRQ line.
"""
if self._irq is None:
raise NotImplementedError("Peripheral {!r} does not have an IRQ line"
.format(self))
return self._irq
@irq.setter
def irq(self, irq):
if not isinstance(irq, IRQLine):
raise TypeError("IRQ line must be an instance of IRQLine, not {!r}"
.format(irq))
self._irq = irq
def csr_bank(self, *, addr=None, alignment=None, desc=None):
"""Request a CSR bank.
Arguments
---------
addr : int or None
Address of the bank. If ``None``, the implicit next address will be used.
Otherwise, the exact specified address (which must be a multiple of
``2 ** max(alignment, bridge_alignment)``) will be used.
alignment : int or None
Alignment of the bank. If not specified, the bridge alignment is used.
See :class:`amaranth_soc.csr.Multiplexer` for details.
desc: (str, optional):
Documentation of the given CSR bank.
Return value
------------
An instance of :class:`CSRBank`.
"""
bank = CSRBank(name_prefix=self.name)
self._csr_banks.append((bank, addr, alignment))
return bank
def window(self, *, addr_width, data_width, granularity=None, features=frozenset(),
alignment=0, addr=None, sparse=None):
"""Request a window to a subordinate bus.
See :meth:`amaranth_soc.wishbone.Decoder.add` for details.
Return value
------------
An instance of :class:`amaranth_soc.wishbone.Interface`.
"""
window = wishbone.Interface(addr_width=addr_width, data_width=data_width,
granularity=granularity, features=features)
granularity_bits = log2_int(data_width // window.granularity)
window.memory_map = MemoryMap(addr_width=addr_width + granularity_bits,
data_width=window.granularity, alignment=alignment)
self._windows.append((window, addr, sparse))
return window
def event(self, *, mode="level", name=None, src_loc_at=0, desc=None):
"""Request an event source.
See :class:`EventSource` for details.
Return value
------------
An instance of :class:`EventSource`.
"""
event = EventSource(mode=mode, name=name, src_loc_at=1 + src_loc_at)
self._events.append(event)
return event
def bridge(self, *, data_width=8, granularity=None, features=frozenset(), alignment=0):
"""Request a bridge to the resources of the peripheral.
See :class:`PeripheralBridge` for details.
Return value
------------
A :class:`PeripheralBridge` providing access to local resources.
"""
return PeripheralBridge(self, data_width=data_width, granularity=granularity,
features=features, alignment=alignment)
def iter_csr_banks(self):
"""Iterate requested CSR banks and their parameters.
Yield values
------------
A tuple ``bank, addr, alignment`` describing the bank and its parameters.
"""
for bank, addr, alignment in self._csr_banks:
yield bank, addr, alignment
def iter_windows(self):
"""Iterate requested windows and their parameters.
Yield values
------------
        A tuple ``window, addr, sparse`` describing the window and its parameters, as
        given to :meth:`Peripheral.window`.
"""
for window, addr, sparse in self._windows:
yield window, addr, sparse
def iter_events(self):
"""Iterate requested event sources.
Yield values
------------
An instance of :class:`EventSource`.
"""
for event in self._events:
yield event
class CSRBank:
"""CSR register bank.
Parameters
----------
name_prefix : str
Name prefix of the bank registers.
"""
def __init__(self, *, name_prefix=""):
self._name_prefix = name_prefix
self._csr_regs = []
def csr(self, width, access, *, addr=None, alignment=None, name=None, desc=None,
src_loc_at=0):
"""Request a CSR register.
Parameters
----------
width : int
Width of the register. See :class:`amaranth_soc.csr.Element`.
access : :class:`Access`
Register access mode. See :class:`amaranth_soc.csr.Element`.
addr : int
Address of the register. See :meth:`amaranth_soc.csr.Multiplexer.add`.
alignment : int
Register alignment. See :class:`amaranth_soc.csr.Multiplexer`.
name : str
Name of the register. If ``None`` (default) the name is inferred from the variable
name this register is assigned to.
desc: str
Documentation for the provided register, if available.
Used to capture register documentation automatically.
Return value
------------
An instance of :class:`amaranth_soc.csr.Element`.
"""
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
elem_name = "{}_{}".format(self._name_prefix, name)
elem = csr.Element(width, access, name=elem_name)
self._csr_regs.append((elem, addr, alignment))
return elem
def iter_csr_regs(self):
"""Iterate requested CSR registers and their parameters.
Yield values
------------
A tuple ``elem, addr, alignment`` describing the register and its parameters.
"""
for elem, addr, alignment in self._csr_regs:
yield elem, addr, alignment
class PeripheralBridge(Elaboratable):
"""Peripheral bridge.
A bridge providing access to the registers and windows of a peripheral, and support for
interrupt requests from its event sources.
    Event management is performed by an :class:`InterruptSource` submodule.
Parameters
    ----------
periph : :class:`Peripheral`
The peripheral whose resources are exposed by this bridge.
data_width : int
Data width. See :class:`amaranth_soc.wishbone.Interface`.
granularity : int or None
Granularity. See :class:`amaranth_soc.wishbone.Interface`.
features : iter(str)
Optional signal set. See :class:`amaranth_soc.wishbone.Interface`.
alignment : int
Resource alignment. See :class:`amaranth_soc.memory.MemoryMap`.
Attributes
----------
bus : :class:`amaranth_soc.wishbone.Interface`
Wishbone bus providing access to the resources of the peripheral.
irq : :class:`IRQLine`, out
Interrupt request. It is raised if any event source is enabled and has a pending
notification.
"""
def __init__(self, periph, *, data_width, granularity, features, alignment):
if not isinstance(periph, Peripheral):
raise TypeError("Peripheral must be an instance of Peripheral, not {!r}"
.format(periph))
self._wb_decoder = wishbone.Decoder(addr_width=1, data_width=data_width,
granularity=granularity,
features=features, alignment=alignment)
self._csr_subs = []
for bank, bank_addr, bank_alignment in periph.iter_csr_banks():
if bank_alignment is None:
bank_alignment = alignment
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=bank_alignment)
for elem, elem_addr, elem_alignment in bank.iter_csr_regs():
if elem_alignment is None:
elem_alignment = alignment
csr_mux.add(elem, addr=elem_addr, alignment=elem_alignment, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, addr=bank_addr, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
for window, window_addr, window_sparse in periph.iter_windows():
self._wb_decoder.add(window, addr=window_addr, sparse=window_sparse, extend=True)
events = list(periph.iter_events())
if len(events) > 0:
self._int_src = InterruptSource(events, name="{}_ev".format(periph.name))
self.irq = self._int_src.irq
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=alignment)
csr_mux.add(self._int_src.status, extend=True)
csr_mux.add(self._int_src.pending, extend=True)
csr_mux.add(self._int_src.enable, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
else:
self._int_src = None
self.irq = None
self.bus = self._wb_decoder.bus
def elaborate(self, platform):
m = Module()
for i, (csr_mux, csr_bridge) in enumerate(self._csr_subs):
m.submodules[ "csr_mux_{}".format(i)] = csr_mux
m.submodules["csr_bridge_{}".format(i)] = csr_bridge
if self._int_src is not None:
m.submodules._int_src = self._int_src
m.submodules.wb_decoder = self._wb_decoder
return m
| 35.073879
| 102
| 0.599714
|
b75d4c9dcabdde04b3574e84021e6cb9e0c111bf
| 680
|
py
|
Python
|
python-threatexchange/threatexchange/cli/tests/cli_smoke_test.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 997
|
2015-03-13T18:04:03.000Z
|
2022-03-30T12:09:10.000Z
|
python-threatexchange/threatexchange/cli/tests/cli_smoke_test.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 444
|
2015-03-26T17:28:49.000Z
|
2022-03-28T19:34:05.000Z
|
python-threatexchange/threatexchange/cli/tests/cli_smoke_test.py
|
b-bold/ThreatExchange
|
6f8d0dc803faccf576c9398569bb52d54a4f9a87
|
[
"BSD-3-Clause"
] | 294
|
2015-03-13T22:19:43.000Z
|
2022-03-30T08:42:45.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import pytest
from threatexchange.cli import main
def test_all_helps():
"""
Just executes all the commands to make sure they don't throw on help.
View the pretty output with py.test -s
"""
def help(command=None):
args = [command.get_name()] if command else []
args.append("--help")
with pytest.raises(SystemExit) as exc:
print("\n$ threatexchange", " ".join(args), file=sys.stderr)
main.main(args)
assert exc.value.code == 0
help() # root help
for command in main.get_subcommands():
help(command)
| 24.285714
| 73
| 0.630882
|
fa66763cb6972a92806bcf929a87e7d65ee83a88
| 425
|
py
|
Python
|
soustypes.py
|
geocot/Python_ArcGIS_Desktop
|
aef5d855d8ce3f564dd4fba80599be32b89fcb5b
|
[
"Apache-2.0"
] | null | null | null |
soustypes.py
|
geocot/Python_ArcGIS_Desktop
|
aef5d855d8ce3f564dd4fba80599be32b89fcb5b
|
[
"Apache-2.0"
] | null | null | null |
soustypes.py
|
geocot/Python_ArcGIS_Desktop
|
aef5d855d8ce3f564dd4fba80599be32b89fcb5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import arcpy
arcpy.env.workspace = "C:/Temp/donnees.gdb"
soustypes = arcpy.da.ListSubtypes("villes")
for stcode, stdict in list(soustypes.items()):
print("code: ", stcode, " nom: ", stdict['Name'], " defaut: ", stdict['Default'])
"""
Returns the following:
('code: ', 0, ' nom: ', u'Villes', ' defaut: ', True)
('code: ', 1, ' nom: ', u'Grande ville', ' defaut: ', False)
"""
| 22.368421
| 86
| 0.592941
|
0ebfc566cb1815ab821093f5020c7b817895e3a4
| 594
|
py
|
Python
|
fleet_management/core/notification/models.py
|
nahidsaikat/Fleet-Management-Backend
|
d3a20a9b971600eb039bcc62068599cbbc72537e
|
[
"MIT"
] | 3
|
2018-08-09T14:06:09.000Z
|
2021-10-31T08:49:56.000Z
|
fleet_management/core/notification/models.py
|
nahidsaikat/Fleet-Management-Backend
|
d3a20a9b971600eb039bcc62068599cbbc72537e
|
[
"MIT"
] | null | null | null |
fleet_management/core/notification/models.py
|
nahidsaikat/Fleet-Management-Backend
|
d3a20a9b971600eb039bcc62068599cbbc72537e
|
[
"MIT"
] | 2
|
2018-10-09T08:38:28.000Z
|
2022-01-19T12:27:58.000Z
|
from django.db import models
from django.contrib.auth.models import User
# from django.contrib.auth.models import User as SecondUser
class Notification(models.Model):
message = models.TextField(null=False)
from_employee_id = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_from_employee')
to_employee_id = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_to_employee')
table_name = models.CharField(max_length=255, null=False)
table_id = models.IntegerField(null=False)
mark_as_read = models.IntegerField(null=False)
| 45.692308
| 112
| 0.784512
|
d6f4e33b92fa12180d10f3edeecfda8217d401ca
| 4,248
|
py
|
Python
|
docker/jupyterhub_config.py
|
GoogleCloudDataproc/jupyterhub-dataprocspawner
|
8859c088088cf7e15f0b395aaa4b1334a1d1c894
|
[
"Apache-2.0"
] | 7
|
2020-06-01T15:35:21.000Z
|
2022-02-04T18:45:51.000Z
|
docker/jupyterhub_config.py
|
GoogleCloudDataproc/jupyterhub-dataprocspawner
|
8859c088088cf7e15f0b395aaa4b1334a1d1c894
|
[
"Apache-2.0"
] | 20
|
2020-08-12T10:54:53.000Z
|
2021-06-25T19:55:17.000Z
|
docker/jupyterhub_config.py
|
GoogleCloudDataproc/jupyterhub-dataprocspawner
|
8859c088088cf7e15f0b395aaa4b1334a1d1c894
|
[
"Apache-2.0"
] | 5
|
2020-04-28T12:02:13.000Z
|
2021-02-12T22:55:59.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
import socket
from tornado import web
from google.cloud import secretmanager_v1beta1 as secretmanager
def is_true(boolstring: str):
""" Converts an environment variables to a Python boolean. """
if boolstring.lower() in ('true', '1'):
return True
return False
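# Illustrative behaviour of the helper above (comment added for clarity, not part
# of the original config): is_true('True') and is_true('1') return True, while
# is_true('') and is_true('False') return False.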
# Listens on all interfaces.
c.JupyterHub.hub_ip = '0.0.0.0'
# Hostname that Cloud Dataproc can access to connect to the Hub.
c.JupyterHub.hub_connect_ip = socket.gethostbyname(socket.gethostname())
# Template for the user form.
c.JupyterHub.template_paths = ['/etc/jupyterhub/templates']
# Opens on JupyterLab instead of Jupyter's tree
c.Spawner.default_url = os.environ.get('SPAWNER_DEFAULT_URL', '/lab')
# The port that the spawned notebook listens on for the hub to connect
c.Spawner.port = 12345
print(os.environ)
# JupyterHub (Port must be 8080 to meet Inverting Proxy requirements.)
c.JupyterHub.spawner_class = 'dataprocspawner.DataprocSpawner'
c.JupyterHub.proxy_class = 'redirect-proxy'
c.JupyterHub.port = 8080
c.JupyterHub.allow_named_servers = is_true(os.environ.get('HUB_ALLOW_NAMED_SERVERS', ''))
# Authenticator
from gcpproxiesauthenticator.gcpproxiesauthenticator import GCPProxiesAuthenticator
c.JupyterHub.authenticator_class = GCPProxiesAuthenticator
c.GCPProxiesAuthenticator.check_header = 'X-Inverting-Proxy-User-Id'
c.GCPProxiesAuthenticator.template_to_render = 'welcome.html'
# Spawner
c.DataprocSpawner.project = os.environ.get('PROJECT', '')
c.DataprocSpawner.dataproc_configs = os.environ.get('DATAPROC_CONFIGS', '')
c.DataprocSpawner.region = os.environ.get('JUPYTERHUB_REGION', '')
c.DataprocSpawner.dataproc_default_subnet = os.environ.get('DATAPROC_DEFAULT_SUBNET', '')
c.DataprocSpawner.dataproc_service_account = os.environ.get('DATAPROC_SERVICE_ACCOUNT', '')
c.DataprocSpawner.dataproc_locations_list = os.environ.get('DATAPROC_LOCATIONS_LIST', '')
c.DataprocSpawner.machine_types_list = os.environ.get('DATAPROC_MACHINE_TYPES_LIST', '')
c.DataprocSpawner.cluster_name_pattern = os.environ.get('CLUSTER_NAME_PATTERN', 'dataprochub-{}')
c.DataprocSpawner.allow_custom_clusters = is_true(os.environ.get('DATAPROC_ALLOW_CUSTOM_CLUSTERS', ''))
c.DataprocSpawner.allow_random_cluster_names = is_true(os.environ.get('ALLOW_RANDOM_CLUSTER_NAMES', ''))
c.DataprocSpawner.show_spawned_clusters_in_notebooks_list = is_true(os.environ.get('SHOW_SPAWNED_CLUSTERS', ''))
c.DataprocSpawner.force_single_user = is_true(os.environ.get('FORCE_SINGLE_USER', ''))
c.DataprocSpawner.gcs_notebooks = os.environ.get('GCS_NOTEBOOKS', '')
if not c.DataprocSpawner.gcs_notebooks:
c.DataprocSpawner.gcs_notebooks = os.environ.get('NOTEBOOKS_LOCATION', '')
c.DataprocSpawner.default_notebooks_gcs_path = os.environ.get('GCS_EXAMPLES_PATH', '')
if not c.DataprocSpawner.default_notebooks_gcs_path:
c.DataprocSpawner.default_notebooks_gcs_path = os.environ.get('NOTEBOOKS_EXAMPLES_LOCATION', '')
admins = os.environ.get('ADMINS', '')
if admins:
c.Authenticator.admin_users = admins.split(',')
# # Idle checker https://github.com/blakedubois/dataproc-idle-check
idle_job_path = os.environ.get('IDLE_JOB_PATH', '')
idle_path = os.environ.get('IDLE_PATH', '')
idle_timeout = os.environ.get('IDLE_TIMEOUT', '1d')
if (idle_job_path and idle_path):
c.DataprocSpawner.idle_checker = {
'idle_job_path': idle_job_path, # gcs path to https://github.com/blakedubois/dataproc-idle-check/blob/master/isIdleJob.sh
'idle_path': idle_path, # gcs path to https://github.com/blakedubois/dataproc-idle-check/blob/master/isIdle.sh
'timeout': idle_timeout # idle time after which cluster will be shutdown
}
## End of common setup ##
| 44.715789
| 126
| 0.780603
|
7b2f18da68a0f5d2dcefb8f1a6940803a206757a
| 2,555
|
py
|
Python
|
tenma_dev.py
|
ScopeFoundry/HW_tenma_power
|
e29f9f432b8888a0b250d7892c312be6c4566d4b
|
[
"BSD-3-Clause"
] | null | null | null |
tenma_dev.py
|
ScopeFoundry/HW_tenma_power
|
e29f9f432b8888a0b250d7892c312be6c4566d4b
|
[
"BSD-3-Clause"
] | null | null | null |
tenma_dev.py
|
ScopeFoundry/HW_tenma_power
|
e29f9f432b8888a0b250d7892c312be6c4566d4b
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Created on Jul 6, 2017
@author: Alan Buckley <alanbuckley@lbl.gov>
'''
import serial
import time
import logging
logger = logging.getLogger(__name__)
class TenmaDev(object):
name = 'tenma_dev'
def __init__(self, port="COM5", debug = False):
self.port = port
self.debug = debug
if self.debug:
logger.debug("ButtonBoardInterface.__init__, port={}".format(self.port))
self.ser = serial.Serial(port=self.port, baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout = 0.1)
self.ser.flush()
time.sleep(0.2)
def ask_cmd(self, cmd):
if self.debug:
logger.debug("ask_cmd: {}".format(cmd))
message = cmd.encode()+b'\n'
self.ser.write(message)
resp = self.ser.readline().decode()
if self.debug:
logger.debug("readout: {}".format(cmd))
self.ser.flush()
return resp
def write_voltage(self, voltage, chan=1):
resp = self.ask_cmd("VSET{}:{:05.2f}".format(chan, voltage))
return resp
def read_set_voltage(self, chan=1):
resp = self.ask_cmd("VSET{}?".format(chan))
return resp
def read_actual_voltage(self, chan=1):
resp = self.ask_cmd("VOUT{}?".format(chan))
return resp
def write_current(self, current, chan=1):
resp = self.ask_cmd("ISET{}:{:1.3f}".format(chan, current))
return resp
def read_set_current(self, chan=1):
resp = self.ask_cmd("ISET{}?".format(chan))
return resp
def read_actual_current(self, chan=1):
resp = self.ask_cmd("IOUT{}?".format(chan))
return resp
# def write_ocp(self, on=False):
# if on:
# _setting = 1
# else:
# _setting = 0
# resp = self.ask_cmd("OCP{}".format(_setting))
def lock(self, locked=False):
if locked:
_setting = 1
else:
_setting = 0
self.ask_cmd("LOCK{}".format(_setting))
def get_status(self):
resp = self.ask_cmd("STATUS?").strip()
#print(resp)
return resp
def read_device_name(self):
resp = self.ask_cmd("*IDN?")
return resp
def close(self):
self.ser.close()
del self.ser
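# A minimal usage sketch (illustrative only; the COM port below is an assumption,
# not part of the original driver):
#
#   psu = TenmaDev(port="COM5", debug=True)
#   psu.write_voltage(5.0, chan=1)        # sends the ASCII command b"VSET1:05.00\n"
#   print(psu.read_actual_voltage())      # queries with b"VOUT1?\n"
#   psu.close()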
| 27.771739
| 84
| 0.526419
|
a4814887e2012d699b9ebb5f8b9828534740f6fd
| 58
|
py
|
Python
|
Storylines/storyline_evaluation/__init__.py
|
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
|
914b6912c5f5b522107aa9406fb3d823e61c2ebe
|
[
"Apache-2.0"
] | null | null | null |
Storylines/storyline_evaluation/__init__.py
|
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
|
914b6912c5f5b522107aa9406fb3d823e61c2ebe
|
[
"Apache-2.0"
] | null | null | null |
Storylines/storyline_evaluation/__init__.py
|
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
|
914b6912c5f5b522107aa9406fb3d823e61c2ebe
|
[
"Apache-2.0"
] | null | null | null |
"""
Author: Matt Hanson
Created: 6/04/2021 10:33 AM
"""
| 14.5
| 28
| 0.62069
|
b7682276177fd6ab33aa343ed422541b827f460f
| 3,948
|
py
|
Python
|
Performance_Evaluation.py
|
alhomayani/OutFin
|
34cf8e8126ebb9bb1c47c62cbbaed1f56c8ded29
|
[
"MIT"
] | null | null | null |
Performance_Evaluation.py
|
alhomayani/OutFin
|
34cf8e8126ebb9bb1c47c62cbbaed1f56c8ded29
|
[
"MIT"
] | null | null | null |
Performance_Evaluation.py
|
alhomayani/OutFin
|
34cf8e8126ebb9bb1c47c62cbbaed1f56c8ded29
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
import math
os.chdir('C:/Users/uf11/Desktop/OutFin/Coordinates/') # NOTE: change directory to where OutFin dataset resides
points_mapping = pd.read_csv('Site4_Local.csv') # load Site 4 local coordinates
RP = points_mapping['RP_ID'].to_numpy()
df_all = pd.DataFrame()
os.chdir('C:/Users/uf11/Desktop/OutFin/Measurements/')
# concatenate all Bluetooth measurements to get the unique MAC addresses observed on Site 4
for i in range(2):
for j in RP:
df_temp = pd.read_csv('Phone'+str(i+1)+'_Bluetooth_'+str(j)+'.csv')
df_all = df_all.append(df_temp, ignore_index=True)
MAC_address = df_all.MAC_address.unique()
df_MAC_address = pd.DataFrame({'MAC_address': MAC_address})
df_all = pd.DataFrame()
for i in range(2):
for j in RP:
df_temp = pd.read_csv('Phone'+str(i+1)+'_Bluetooth_'+str(j)+'.csv')
df1 = df_temp.groupby('MAC_address')['RSS'].apply(list).reset_index(name='RSS_ALL')
df2 = pd.DataFrame(df1['RSS_ALL'].to_list())
df3 = pd.concat([df1[['MAC_address']], df2], axis=1)
result = pd.merge(df_MAC_address, df3, on='MAC_address', how='left')
result = result.T
new_header = result.iloc[0]
result = result[1:]
result.columns = new_header
result = result[MAC_address]
result['RP'] = np.ones(len(result))*(j)
df_all = pd.concat([df_all, result], ignore_index=True)
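# At this point each row of df_all is one RSS fingerprint over the unique MAC
# addresses observed on Site 4 (NaN where a beacon was not seen), with the
# reference point ID stored in the trailing 'RP' column (comment added for clarity).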
# shuffle the data and split into training and testing
data = shuffle(df_all, random_state=100)
data = data.values
X = data[:,0:len(MAC_address)]
y = data[:,len(MAC_address)]
y = y.astype('int')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=100)
# perform preprocessing
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_train = np.nan_to_num(X_train)
X_test = scaler.transform(X_test)
X_test = np.nan_to_num(X_test)
# specify the classifiers under comparison and perform the comparison analysis
names = ["Nearest Neighbors", "RBF SVM", "Decision Tree", "Naive Bayes"]
classifiers = [KNeighborsClassifier(3), SVC(gamma='auto', C=100000), DecisionTreeClassifier(), GaussianNB()]
for name, clf in zip(names, classifiers):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
all_distances = []
for i in range(len(y_pred)):
distance = 0
for j in range(len(points_mapping)):
if y_pred[i] == points_mapping.RP_ID[j]:
x1 = points_mapping.X[j]
y1 = points_mapping.Y[j]
if y_test[i] == points_mapping.RP_ID[j]:
x2 = points_mapping.X[j]
y2 = points_mapping.Y[j]
distance = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
all_distances.append(distance)
print("================",name,"================")
print("Accuracy: ",accuracy_score(y_test, y_pred))
print("Precision: ",precision_score(y_test, y_pred, average='weighted'))
print("Recall", recall_score(y_test, y_pred, average='weighted'))
print("F1", f1_score(y_test, y_pred, average='weighted'))
print("---------------------------------------------")
print("Min. distance is: ", min(all_distances))
print("Max. distance is: ", max(all_distances))
print('Mean distance is:', np.mean(all_distances))
print('STD is:', np.std(all_distances))
| 40.701031
| 112
| 0.66464
|
7aa4fa934c1256120ad178cb78b93a531007672d
| 821
|
py
|
Python
|
data/signals/rel_coords.py
|
TYSSSY/Apb-gcn
|
b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7
|
[
"MIT"
] | null | null | null |
data/signals/rel_coords.py
|
TYSSSY/Apb-gcn
|
b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7
|
[
"MIT"
] | 1
|
2020-10-30T02:01:39.000Z
|
2020-10-30T02:01:39.000Z
|
data/signals/rel_coords.py
|
TYSSSY/Apb-gcn
|
b7c9324d3ef3baafa2fe85d57fc1f81f24e0b1e7
|
[
"MIT"
] | null | null | null |
import numpy as np
def get_relative_coordinates(sample,
references=(4, 8, 12, 16)):
# input: C, T, V, M
c, t, v, m = sample.shape
final_sample = np.zeros((4 * c, t, v, m))
valid_frames = (sample != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
start = valid_frames.argmax()
end = len(valid_frames) - valid_frames[::-1].argmax()
sample = sample[:, start:end, :, :]
rel_coords = []
for i in range(len(references)):
ref_loc = sample[:, :, references[i], :]
coords_diff = (sample.transpose((2, 0, 1, 3)) - ref_loc).transpose((1, 2, 0, 3))
rel_coords.append(coords_diff)
# Shape: 4*C, t, V, M
rel_coords = np.vstack(rel_coords)
# Shape: C, T, V, M
final_sample[:, start:end, :, :] = rel_coords
return final_sample
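# Shape sketch (added for illustration): with C=3 coordinate channels and the four
# default reference joints, an input of shape (3, T, V, M) yields an output of
# shape (12, T, V, M); frames outside the valid [start, end) range stay zero-filled.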
| 31.576923
| 88
| 0.5676
|
0983163a0587338bb5d55dcb80bc7b466b5f512c
| 6,809
|
py
|
Python
|
pytext/data/utils.py
|
czHP0616/pytext
|
64ab1835905dea2e7797e6bc11398c55941fa728
|
[
"BSD-3-Clause"
] | null | null | null |
pytext/data/utils.py
|
czHP0616/pytext
|
64ab1835905dea2e7797e6bc11398c55941fa728
|
[
"BSD-3-Clause"
] | null | null | null |
pytext/data/utils.py
|
czHP0616/pytext
|
64ab1835905dea2e7797e6bc11398c55941fa728
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import itertools
import re
from typing import Dict, List, Tuple
import torch
from pytext.config.component import Component, ComponentType
from pytext.utils import cuda
def should_iter(i):
"""Whether or not an object looks like a python iterable (not including strings)."""
return (
hasattr(i, "__iter__")
and not isinstance(i, str)
and not (isinstance(i, torch.Tensor) and len(i) == 0)
)
def _infer_pad_shape(nested_lists):
"""Return the minimal tensor shape which could contain the input data."""
yield len(nested_lists)
while nested_lists and all(should_iter(i) for i in nested_lists):
yield max(len(nested) for nested in nested_lists)
nested_lists = list(itertools.chain.from_iterable(nested_lists))
def _make_nested_padding(pad_shape, pad_token):
"""Create nested lists of pad_token of shape pad_shape."""
result = [pad_token]
for dimension in reversed(pad_shape):
result = [result * dimension]
return result[0]
def pad(nested_lists, pad_token, pad_shape=None):
"""Pad the input lists with the pad token. If pad_shape is provided, pad to that
shape, otherwise infer the input shape and pad out to a square tensor shape."""
if pad_shape is None:
pad_shape = list(_infer_pad_shape(nested_lists))
if not pad_shape:
return nested_lists
dimension, *rest = pad_shape
result = [pad(nested, pad_token, rest) for nested in nested_lists]
result += [_make_nested_padding(rest, pad_token)] * (dimension - len(result))
return result
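# Worked example (added for illustration; not part of the original module):
#
#     pad([[1, 2], [3]], pad_token=0)  ->  [[1, 2], [3, 0]]
#
# The inferred pad_shape is [2, 2], so the ragged inner list is padded out to a
# square tensor shape.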
def pad_and_tensorize(batch, pad_token=0, pad_shape=None, dtype=torch.long):
batch = list(batch)
if not batch:
return torch.Tensor()
return cuda.tensor(
pad(batch, pad_token=pad_token, pad_shape=pad_shape), dtype=dtype
)
def shard(rows, rank, num_workers):
"""Only return every num_workers example for distributed training."""
queue = []
for row in rows:
queue.append(row)
# might discard remainder %num_workers rows because distributed
# training needs to be in sync
if len(queue) == num_workers:
yield queue[rank]
queue = []
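# Illustrative behaviour (comment added, not in the original module):
# list(shard(range(10), rank=1, num_workers=3)) == [1, 4, 7]; the trailing
# incomplete group (row 9) is discarded so that all workers stay in sync.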
class SpecialToken(str):
def __eq__(self, other):
# We don't want to compare as equal to actual strings, but we want to behave
# like a string code-wise.
return self is other
__hash__ = str.__hash__
UNK = SpecialToken("__UNKNOWN__")
PAD = SpecialToken("__PAD__")
BOS = SpecialToken("__BEGIN_OF_SENTENCE__")
EOS = SpecialToken("__END_OF_SENTENCE__")
MASK = SpecialToken("__MASK__")
class Vocabulary:
"""A mapping from indices to vocab elements."""
def __init__(self, vocab_list, counts=None, replacements=None):
self._vocab = vocab_list
self.counts = counts
self.idx = {word: i for i, word in enumerate(vocab_list)}
if replacements:
self.replace_tokens(replacements)
def replace_tokens(self, replacements):
"""Replace tokens in vocab with given replacement.
Used for replacing special strings for special tokens.
e.g. '[UNK]' for UNK"""
for token, replacement in replacements.items():
idx = self.idx.pop(token)
self._vocab[idx] = replacement
self.idx[replacement] = idx
def lookup_all(self, nested_values):
"""
Look up a value or nested container of values in the vocab index.
The return value will have the same shape as the input, with all values
        replaced with their respective indices.
"""
if UNK in self.idx:
unk_idx = self.idx[UNK]
lookup = lambda value: self.idx.get(value, unk_idx)
else:
lookup = self.idx.__getitem__
def lookup_value(value):
return self.lookup_all(value) if should_iter(value) else lookup(value)
if not should_iter(nested_values):
return lookup_value(nested_values)
else:
return [lookup_value(value) for value in nested_values]
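    # Example (illustrative only): with vocab list [UNK, "a", "b"],
    # lookup_all([["a", "b"], ["c"]]) returns [[1, 2], [0]], because the unseen
    # token "c" falls back to the UNK index.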
def __getitem__(self, item):
return self._vocab[item]
def __len__(self):
return len(self._vocab)
class VocabBuilder:
"""Helper class for aggregating and building `Vocabulary` objects."""
def __init__(self):
self._counter = collections.Counter()
self.use_unk = True
self.unk_index = 0
self.use_pad = True
self.pad_index = 1
self.use_bos = False
self.bos_index = 2
self.use_eos = False
self.eos_index = 3
def add_all(self, values) -> None:
"""Count a value or nested container of values in the vocabulary."""
if should_iter(values):
for value in values:
self.add_all(value)
else:
self.add(values)
def add(self, value) -> None:
"""Count a single value in the vocabulary."""
self._counter[value] += 1
def make_vocab(self) -> Vocabulary:
"""Build a Vocabulary object from the values seen by the builder."""
vocab_list = list(self._counter)
tokens_to_insert: List[Tuple[int, object]] = []
if self.use_unk:
tokens_to_insert.append((self.unk_index, UNK))
if self.use_pad:
tokens_to_insert.append((self.pad_index, PAD))
if self.use_bos:
tokens_to_insert.append((self.bos_index, BOS))
if self.use_eos:
tokens_to_insert.append((self.eos_index, EOS))
for index, token in sorted(tokens_to_insert):
vocab_list.insert(index, token)
return Vocabulary(vocab_list, counts=self._counter)
def align_target_labels(
targets_list: List[List[float]],
labels_list: List[List[str]],
label_vocab: Dict[str, int],
) -> List[List[float]]:
"""
Given `targets_list` that are ordered according to `labels_list`, align the targets
to match the order of `label_vocab`.
"""
return [
align_target_label(targets, labels, label_vocab)
for targets, labels in zip(targets_list, labels_list)
]
def align_target_label(
targets: List[float], labels: List[str], label_vocab: Dict[str, int]
) -> List[float]:
"""
Given `targets` that are ordered according to `labels`, align the targets to match
the order of `label_vocab`.
"""
assert sorted(labels) == sorted(label_vocab)
assert len(targets) == len(labels)
aligned_targets = [None] * len(targets)
for target, label in zip(targets, labels):
aligned_targets[label_vocab[label]] = target
assert all(t is not None for t in aligned_targets)
return aligned_targets
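# Worked example (added for illustration, not part of the original module):
# align_target_label([0.1, 0.9], ["b", "a"], {"a": 0, "b": 1}) == [0.9, 0.1],
# i.e. each score is moved to the position its label occupies in label_vocab.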
| 32.42381
| 88
| 0.653694
|
c95f82ba7f5dfc324e2aa3c928eea25928ca6a08
| 603
|
py
|
Python
|
listings/python_code.py
|
VsevolodKozlov-git/cs-lab-7
|
7526401b08033a7aa042b8ad183c4b38a2268b21
|
[
"MIT"
] | null | null | null |
listings/python_code.py
|
VsevolodKozlov-git/cs-lab-7
|
7526401b08033a7aa042b8ad183c4b38a2268b21
|
[
"MIT"
] | null | null | null |
listings/python_code.py
|
VsevolodKozlov-git/cs-lab-7
|
7526401b08033a7aa042b8ad183c4b38a2268b21
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt


def f(x):
    return (5*x / (4-x**2))
def main():
#init figure
fig = plt.figure()
    x = np.linspace(-5, 5, 1000)  # x was not defined in the listing; assumed sampling grid matching xlim below
    y = f(x)
    #break the curve near the vertical asymptotes
y[y>30] = np.inf
y[y<-30] = -np.inf
#plot main graphic
plt.plot(x, y)
    #plot vertical asymptotes
for i in [-2, 2]:
plt.axvline(x=i, linestyle='dashed', color="black")
    #plot the horizontal asymptote
plt.axhline(0, linestyle='dashed', color="black")
#configuring plot size
plt.xlim(-5, 5)
plt.ylim(-20, 20)
#saving picture
fig.savefig("mainPlot.eps", format = "eps", dpi = 1200)
if __name__ == '__main__':
main()
| 22.333333
| 60
| 0.570481
|
36385416dc0cc6937df2057d82d96e3458ce5efb
| 506
|
py
|
Python
|
test_utils.py
|
drscotthawley/SPNet
|
94f1195c91e2373bee1f36bc7d834c4e07388369
|
[
"MIT"
] | 1
|
2021-02-02T16:06:23.000Z
|
2021-02-02T16:06:23.000Z
|
test_utils.py
|
drscotthawley/SPNet
|
94f1195c91e2373bee1f36bc7d834c4e07388369
|
[
"MIT"
] | 8
|
2021-01-25T15:53:26.000Z
|
2022-03-12T00:54:07.000Z
|
test_utils.py
|
drscotthawley/SPNet
|
94f1195c91e2373bee1f36bc7d834c4e07388369
|
[
"MIT"
] | 1
|
2022-02-03T10:35:21.000Z
|
2022-02-03T10:35:21.000Z
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from utils import *
n = 100
features = 3
img_dims = [256.0,256.0]
X_test = np.random.rand(n,features)-.5
Y_test = np.random.rand(n,features)-.5
Y_pred = Y_test + 0.02*(np.random.rand(n,features)-.5)
#utils.plot_prediction(X_test, Y_test, Y_pred,img_dims)
filename = 'Test/steelpan_49990.txt'
#arrs = parse_txt_file(filename)
#print("arrs = ",arrs)
X, Y, img_dims, img_file_list = build_dataset(load_frac=1)
| 20.24
| 58
| 0.729249
|
cd667a87ce3e86a19a062d8a98d86c3bf6d387a1
| 3,444
|
py
|
Python
|
pypureclient/flashblade/FB_2_1/models/rapid_data_locking.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_1/models/rapid_data_locking.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_1/models/rapid_data_locking.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.1, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_1 import models
class RapidDataLocking(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'kmip_server': 'Reference'
}
attribute_map = {
'enabled': 'enabled',
'kmip_server': 'kmip_server'
}
required_args = {
}
def __init__(
self,
enabled=None, # type: bool
kmip_server=None, # type: models.Reference
):
"""
Keyword args:
enabled (bool): `True` if the Rapid Data Locking feature is enabled.
kmip_server (Reference): The KMIP server configuration associated with RDL.
"""
if enabled is not None:
self.enabled = enabled
if kmip_server is not None:
self.kmip_server = kmip_server
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RapidDataLocking`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RapidDataLocking, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RapidDataLocking):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.186441
| 116
| 0.551103
|
b970fb6dfae526db8989800a6a62e9f472714a7c
| 20,723
|
py
|
Python
|
glocaltokens/client.py
|
leikoilja/google-home-local-authentication-tokens
|
f253172f71f39ecb633050e3655a8d140c9e8c79
|
[
"MIT"
] | 1
|
2020-12-29T15:40:03.000Z
|
2020-12-29T15:40:03.000Z
|
glocaltokens/client.py
|
leikoilja/google-home-local-authentication-tokens
|
f253172f71f39ecb633050e3655a8d140c9e8c79
|
[
"MIT"
] | null | null | null |
glocaltokens/client.py
|
leikoilja/google-home-local-authentication-tokens
|
f253172f71f39ecb633050e3655a8d140c9e8c79
|
[
"MIT"
] | null | null | null |
"""Client"""
from __future__ import annotations
from datetime import datetime
import json
import logging
import random
from gpsoauth import perform_master_login, perform_oauth
import grpc
from zeroconf import Zeroconf
from .const import (
ACCESS_TOKEN_APP_NAME,
ACCESS_TOKEN_CLIENT_SIGNATURE,
ACCESS_TOKEN_DURATION,
ACCESS_TOKEN_SERVICE,
ANDROID_ID_LENGTH,
DEFAULT_DISCOVERY_PORT,
DISCOVERY_TIMEOUT,
GOOGLE_HOME_FOYER_API,
HOMEGRAPH_DURATION,
)
from .google.internal.home.foyer.v1_pb2 import GetHomeGraphRequest, GetHomeGraphResponse
from .google.internal.home.foyer.v1_pb2_grpc import StructuresServiceStub
from .scanner import NetworkDevice, discover_devices
from .types import DeviceDict
from .utils import network as net_utils, token as token_utils
from .utils.logs import censor
from .utils.network import is_valid_ipv4_address
logging.basicConfig(level=logging.ERROR)
LOGGER = logging.getLogger(__name__)
class Device:
"""Device representation"""
def __init__(
self,
device_id: str,
device_name: str,
local_auth_token: str,
network_device: NetworkDevice | None = None,
hardware: str | None = None,
):
"""
Initializes a Device.
"""
log_prefix = f"[Device - {device_name}(id={device_id})]"
LOGGER.debug("%s Initializing new Device instance", log_prefix)
self.device_id = device_id
self.device_name = device_name
self.local_auth_token = None
self.network_device = network_device
self.hardware = hardware
# Token and name validations
if not self.device_name:
LOGGER.error("%s device_name must be provided", log_prefix)
return
if not token_utils.is_local_auth_token(local_auth_token):
LOGGER.warning(
"%s local_auth_token does not follow Google Home token format. "
"Ignore for non-Google Home devices",
log_prefix,
)
return
# Setting IP and PORT
if network_device:
LOGGER.debug(
"%s network_device is provided, using its IP and PORT", log_prefix
)
self.ip_address: str | None = network_device.ip_address
self.port: int | None = network_device.port
else:
self.ip_address = None
self.port = None
# IP and PORT validation
if (
self.ip_address
and not net_utils.is_valid_ipv4_address(self.ip_address)
and not net_utils.is_valid_ipv6_address(self.ip_address)
):
LOGGER.error("%s IP(%s) is invalid", log_prefix, self.ip_address)
return
if self.port and not net_utils.is_valid_port(self.port):
LOGGER.error("%s PORT(%s) is invalid", log_prefix, self.port)
return
LOGGER.debug(
'%s Set device_name to "%s", '
'local_auth_token to "%s", '
'IP to "%s", PORT to "%s" and hardware to "%s"',
log_prefix,
device_name,
censor(local_auth_token),
self.ip_address,
self.port,
hardware,
)
self.local_auth_token = local_auth_token
def __str__(self) -> str:
return str(self.as_dict())
def as_dict(self) -> DeviceDict:
"""Dictionary representation"""
return {
"device_id": self.device_id,
"device_name": self.device_name,
"network_device": {
"ip": self.ip_address,
"port": self.port,
},
"hardware": self.hardware,
"local_auth_token": self.local_auth_token,
}
class GLocalAuthenticationTokens:
"""Client"""
def __init__(
self,
username: str | None = None,
password: str | None = None,
master_token: str | None = None,
android_id: str | None = None,
verbose: bool = False,
):
"""
Initialize an GLocalAuthenticationTokens instance with google account
credentials
:params
username: google account username;
password: google account password (can be an app password);
master_token: google master token (instead of username/password
combination);
android_id: the id of an android device. Will be randomly generated
if not set;
verbose: whether or not print debug logging information
"""
self.logging_level = logging.DEBUG if verbose else logging.ERROR
LOGGER.setLevel(self.logging_level)
LOGGER.debug("Initializing new GLocalAuthenticationTokens instance.")
self.username: str | None = username
self.password: str | None = password
self.master_token: str | None = master_token
self.android_id: str | None = android_id
self.access_token: str | None = None
self.access_token_date: datetime | None = None
self.homegraph: GetHomeGraphResponse | None = None
self.homegraph_date: datetime | None = None
LOGGER.debug(
"Set GLocalAuthenticationTokens client access_token, homegraph, "
"access_token_date and homegraph_date to None"
)
LOGGER.debug(
"Set GLocalAuthenticationTokens client "
'username to "%s", password to "%s", '
'master_token to "%s" and android_id to %s',
censor(username),
censor(password),
censor(master_token),
censor(android_id),
)
# Validation
if (not self.username or not self.password) and not self.master_token:
LOGGER.error(
"You must either provide google username/password "
"or google master token"
)
return
if self.master_token and not token_utils.is_aas_et(self.master_token):
LOGGER.error("master_token doesn't follow the AAS_ET format")
return
@staticmethod
def _generate_android_id() -> str:
"""Generate random 16 char long string"""
LOGGER.debug("Generating android id...")
mac_string = "".join(
[f"{random.randrange(16):x}" for _ in range(ANDROID_ID_LENGTH)]
)
LOGGER.debug("Generated android id: %s", mac_string)
return mac_string
def get_android_id(self) -> str:
"""Return existing or generate android id"""
if not self.android_id:
LOGGER.debug("There is no stored android_id, generating a new one")
self.android_id = self._generate_android_id()
return self.android_id
@staticmethod
def _has_expired(creation_dt: datetime, duration: int) -> bool:
"""Checks if an specified token/object has expired"""
return datetime.now().timestamp() - creation_dt.timestamp() > duration
def get_master_token(self) -> str | None:
"""Get google master token from username and password"""
if self.username is None or self.password is None:
LOGGER.error("Username and password are not set.")
return None
if not self.master_token:
LOGGER.debug(
"There is no stored master_token, "
"logging in using username and password"
)
res = {}
try:
res = perform_master_login(
self.username, self.password, self.get_android_id()
)
except ValueError:
LOGGER.error(
"A ValueError exception has been thrown, this usually is related"
"to a password length that exceeds the boundaries (too long)."
)
if "Token" not in res:
LOGGER.error("[!] Could not get master token.")
LOGGER.debug("Request response: %s", res)
return None
self.master_token = res["Token"]
LOGGER.debug("Master token: %s", censor(self.master_token))
return self.master_token
def get_access_token(self) -> str | None:
"""Return existing or fetch access_token"""
if (
self.access_token is None
or self.access_token_date is None
or self._has_expired(self.access_token_date, ACCESS_TOKEN_DURATION)
):
LOGGER.debug(
"There is no access_token stored, "
"or it has expired, getting a new one..."
)
master_token = self.get_master_token()
if master_token is None:
LOGGER.debug("Unable to obtain master token.")
return None
if self.username is None:
LOGGER.error("Username is not set.")
return None
res = perform_oauth(
self.username,
master_token,
self.get_android_id(),
app=ACCESS_TOKEN_APP_NAME,
service=ACCESS_TOKEN_SERVICE,
client_sig=ACCESS_TOKEN_CLIENT_SIGNATURE,
)
if "Auth" not in res:
LOGGER.error("[!] Could not get access token.")
LOGGER.debug("Request response: %s", res)
return None
self.access_token = res["Auth"]
self.access_token_date = datetime.now()
LOGGER.debug(
"Access token: %s, datetime %s",
censor(self.access_token),
self.access_token_date,
)
return self.access_token
def get_homegraph(self, auth_attempts: int = 3) -> GetHomeGraphResponse | None:
"""Returns the entire Google Home Foyer V2 service"""
if (
self.homegraph is None
or self.homegraph_date is None
or self._has_expired(self.homegraph_date, HOMEGRAPH_DURATION)
):
if auth_attempts == 0:
LOGGER.error("Reached maximum number of authentication attempts")
return None
LOGGER.debug(
"There is no stored homegraph, or it has expired, getting a new one..."
)
log_prefix = "[GRPC]"
access_token = self.get_access_token()
if not access_token:
LOGGER.debug("%s Unable to obtain access token.", log_prefix)
return None
try:
LOGGER.debug("%s Creating SSL channel credentials...", log_prefix)
scc = grpc.ssl_channel_credentials(root_certificates=None)
LOGGER.debug("%s Creating access token call credentials...", log_prefix)
tok = grpc.access_token_call_credentials(access_token)
LOGGER.debug("%s Compositing channel credentials...", log_prefix)
channel_credentials = grpc.composite_channel_credentials(scc, tok)
LOGGER.debug(
"%s Establishing secure channel with "
"the Google Home Foyer API...",
log_prefix,
)
with grpc.secure_channel(
GOOGLE_HOME_FOYER_API, channel_credentials
) as channel:
LOGGER.debug(
"%s Getting channels StructuresServiceStub...", log_prefix
)
rpc_service = StructuresServiceStub(channel)
LOGGER.debug("%s Getting HomeGraph request...", log_prefix)
request = GetHomeGraphRequest(string1="", num2="")
LOGGER.debug("%s Fetching HomeGraph...", log_prefix)
response = rpc_service.GetHomeGraph(request)
LOGGER.debug("%s Storing obtained HomeGraph...", log_prefix)
self.homegraph = response
self.homegraph_date = datetime.now()
except grpc.RpcError as rpc_error:
LOGGER.debug("%s Got an RpcError", log_prefix)
if (
rpc_error.code().name # pylint: disable=no-member
== "UNAUTHENTICATED"
):
LOGGER.warning(
"%s The access token has expired. Getting a new one.",
log_prefix,
)
self.invalidate_access_token()
return self.get_homegraph(auth_attempts - 1)
LOGGER.error(
"%s Received unknown RPC error: code=%s message=%s",
log_prefix,
rpc_error.code(), # pylint: disable=no-member
rpc_error.details(), # pylint: disable=no-member
)
return None
return self.homegraph
def get_google_devices(
self,
models_list: list[str] | None = None,
disable_discovery: bool = False,
addresses: dict[str, str] | None = None,
zeroconf_instance: Zeroconf | None = None,
force_homegraph_reload: bool = False,
discovery_timeout: int = DISCOVERY_TIMEOUT,
) -> list[Device]:
"""
Returns a list of google devices with their local authentication tokens,
and IP and ports if set in models_list.
models_list: The list of accepted model names.
disable_discovery: Whether or not the device's IP and port should
be searched for in the network.
addresses: Dict of network devices from the local network
({"name": "ip_address"}). If set to `None` will try to automatically
discover network devices. Disable discovery by setting to `{}`.
zeroconf_instance: If you already have an initialized zeroconf instance,
use it here.
force_homegraph_reload: If the stored homegraph should be generated again.
discovery_timeout: Timeout for zeroconf discovery in seconds.
"""
# Set models_list to empty list if None
LOGGER.debug("Initializing models list if empty...")
models_list = models_list if models_list else []
if force_homegraph_reload:
LOGGER.debug("Forcing homegraph reload")
self.invalidate_homegraph()
LOGGER.debug("Getting homegraph...")
homegraph = self.get_homegraph()
devices: list[Device] = []
def is_dict_with_valid_ipv4_addresses(data: dict[str, str]) -> bool:
# Validate the data structure is correct and that each entry contains a
# valid IPv4 address.
return isinstance(data, dict) and all(
isinstance(x, str) and is_valid_ipv4_address(x) for x in data.values()
)
if addresses and not is_dict_with_valid_ipv4_addresses(addresses):
# We need to disable flake8-use-fstring because of the brackets,
# it causes a false positive.
LOGGER.error(
"Invalid dictionary structure for addresses dictionary "
"argument. Correct structure is {'device_name': 'ipaddress'}" # noqa
)
return devices
if homegraph is None:
LOGGER.debug("Failed to fetch homegraph")
return devices
network_devices: list[NetworkDevice] = []
if disable_discovery is False:
LOGGER.debug("Automatically discovering network devices...")
network_devices = discover_devices(
models_list,
timeout=discovery_timeout,
zeroconf_instance=zeroconf_instance,
logging_level=self.logging_level,
)
def find_device(unique_id: str) -> NetworkDevice | None:
for device in network_devices:
if device.unique_id == unique_id:
return device
return None
address_dict = addresses if addresses else {}
LOGGER.debug("Iterating in %d homegraph devices", len(homegraph.home.devices))
for item in homegraph.home.devices:
if item.local_auth_token != "":
# This checks if the current item is a valid model,
# only if there are models in models_list.
# If models_list is empty, the check should be omitted,
# and accept all items.
if models_list and item.hardware.model not in models_list:
LOGGER.debug("%s not in models_list", item.hardware.model)
continue
network_device = None
if network_devices:
unique_id = item.device_info.agent_info.unique_id
LOGGER.debug(
"Looking for '%s' (id=%s) in local network",
item.device_name,
unique_id,
)
network_device = find_device(unique_id)
elif item.device_name in address_dict:
network_device = NetworkDevice(
name=item.device_name,
ip_address=address_dict[item.device_name],
port=DEFAULT_DISCOVERY_PORT,
model=item.hardware.model,
unique_id=item.device_info.device_id,
)
device = Device(
device_id=item.device_info.device_id,
device_name=network_device.name
if network_device is not None
else item.device_name,
local_auth_token=item.local_auth_token,
network_device=network_device,
hardware=item.hardware.model,
)
if device.local_auth_token:
LOGGER.debug("Adding %s to devices list", device.device_name)
devices.append(device)
else:
LOGGER.warning(
"%s device initialization failed "
"because of missing local_auth_token, skipping.",
device.device_name,
)
else:
LOGGER.debug(
"'%s' local_auth_token is not found in Homegraph, skipping",
item.device_name,
)
LOGGER.debug("Successfully initialized %d Google Home devices", len(devices))
return devices
def get_google_devices_json(
self,
models_list: list[str] | None = None,
indent: int = 2,
disable_discovery: bool = False,
addresses: dict[str, str] | None = None,
zeroconf_instance: Zeroconf | None = None,
force_homegraph_reload: bool = False,
) -> str:
"""
Returns a json list of google devices with their local authentication tokens,
and IP and ports if set in models_list.
models_list: The list of accepted model names.
indent: The indentation for the json formatting.
disable_discovery: Whether or not the device's IP and port should
be searched for in the network.
addresses: Dict of network devices from the local network
({"name": "ip_address"}). If set to `None` will try to automatically
discover network devices. Disable discovery by setting to `{}`.
zeroconf_instance: If you already have an initialized zeroconf instance,
use it here.
force_homegraph_reload: If the stored homegraph should be generated again.
"""
google_devices = self.get_google_devices(
models_list=models_list,
disable_discovery=disable_discovery,
addresses=addresses,
zeroconf_instance=zeroconf_instance,
force_homegraph_reload=force_homegraph_reload,
)
json_string = json.dumps(
[obj.as_dict() for obj in google_devices], indent=indent
)
return json_string
def invalidate_access_token(self) -> None:
"""Invalidates the current access token"""
self.access_token = None
self.access_token_date = None
LOGGER.debug("Invalidated access_token")
def invalidate_master_token(self) -> None:
"""Invalidates the current master token"""
self.master_token = None
LOGGER.debug("Invalidated master_token")
def invalidate_homegraph(self) -> None:
"""Invalidates the stored homegraph data"""
self.homegraph = None
self.homegraph_date = None
LOGGER.debug("Invalidated homegraph")
| 39.248106
| 88
| 0.578536
|
22ec7e40360d5091f7ca8a4c99cda8bbb2b61db0
| 1,512
|
py
|
Python
|
flowmater/graph_util.py
|
KanHatakeyama/flowmater
|
d8b3bec06ee10c41cc2a83ada4a2966fd61f9535
|
[
"MIT"
] | null | null | null |
flowmater/graph_util.py
|
KanHatakeyama/flowmater
|
d8b3bec06ee10c41cc2a83ada4a2966fd61f9535
|
[
"MIT"
] | null | null | null |
flowmater/graph_util.py
|
KanHatakeyama/flowmater
|
d8b3bec06ee10c41cc2a83ada4a2966fd61f9535
|
[
"MIT"
] | null | null | null |
"""
******************************
process graph files
******************************
"""
import copy
import networkx as nx
import matplotlib.pyplot as plt
# draw a graph with its node labels, using a spring layout
def draw_graph(g):
    pos = nx.spring_layout(g, k=0.2)
    nx.draw_networkx_labels(g, pos, labels=nx.get_node_attributes(g, 'label'))
    plt.figure(1, figsize=(120, 120))
    nx.draw(g, pos)
    plt.show()
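# Illustrative sketch (not part of the original file): drawing a tiny labelled
# graph; node names, labels and attributes are assumptions for demonstration.
#
#   g = nx.DiGraph()
#   g.add_node(0, label="start", x=0, y=0, value=None)
#   g.add_node(1, label="stir", x=1, y=0, value=None)
#   g.add_edge(0, 1)
#   draw_graph(g)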
def make_simplifierd_graphs(g_list):
    simplified_graph_list = []
    for g in g_list:
        temp_g = copy.deepcopy(g)
        # drop node attributes that are irrelevant for structural comparison
        for node in temp_g.nodes:
            node_dict = temp_g.nodes[node]
            node_dict.pop("x")
            node_dict.pop("y")
            node_dict.pop("value")
        # relabel each node by its "label" attribute
        rename_dict = {node: temp_g.nodes[node]["label"] for node in temp_g.nodes}
        temp_g = nx.relabel_nodes(temp_g, rename_dict)
        simplified_graph_list.append(temp_g)
    return simplified_graph_list
def categorize_graphs(g_list):
    simplified_graph_list = make_simplifierd_graphs(g_list)
    # compare graphs by enumerating the graph nodes
    num_graphs = len(simplified_graph_list)
    graph_category = list(range(num_graphs))
    graph_set_list = [sorted(list(set(g))) for g in simplified_graph_list]
    # stringify each sorted label set so it can serve as a dictionary key
    graph_set_list = [str(i) for i in graph_set_list]
    graph_name_dict = {v: num for num, v in enumerate(set(graph_set_list))}
    # set graph numbers
    graph_type_list = [graph_name_dict[gr] for gr in graph_set_list]
    return {k: v for k, v in zip(range(len(g_list)), graph_type_list)}
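# Illustrative sketch (not part of the original file): categorizing two graphs
# whose node label sets differ; labels and attributes are demonstration-only.
#
#   g1 = nx.DiGraph(); g1.add_node(0, label="heat", x=0, y=0, value=None)
#   g2 = nx.DiGraph(); g2.add_node(0, label="cool", x=0, y=0, value=None)
#   categorize_graphs([g1, g2])  # e.g. {0: 0, 1: 1} (category ids may differ)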
| 27
| 80
| 0.654101
|
109d50c07c93d41c991371b3a5d9107d6d0487f7
| 52,304
|
py
|
Python
|
src/assisted_test_infra/test_infra/helper_classes/cluster.py
|
mhrivnak/assisted-test-infra
|
5db2d3bf9999dda2f6756a412ecd6968cc55e95a
|
[
"Apache-2.0"
] | null | null | null |
src/assisted_test_infra/test_infra/helper_classes/cluster.py
|
mhrivnak/assisted-test-infra
|
5db2d3bf9999dda2f6756a412ecd6968cc55e95a
|
[
"Apache-2.0"
] | 23
|
2022-01-13T21:50:32.000Z
|
2022-03-28T09:14:43.000Z
|
src/assisted_test_infra/test_infra/helper_classes/cluster.py
|
mhrivnak/assisted-test-infra
|
5db2d3bf9999dda2f6756a412ecd6968cc55e95a
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Union
import requests
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
import consts
from assisted_test_infra.test_infra import BaseClusterConfig, BaseInfraEnvConfig, ClusterName, utils
from assisted_test_infra.test_infra.controllers.load_balancer_controller import LoadBalancerController
from assisted_test_infra.test_infra.controllers.node_controllers import Node
from assisted_test_infra.test_infra.helper_classes.cluster_host import ClusterHost
from assisted_test_infra.test_infra.helper_classes.entity import Entity
from assisted_test_infra.test_infra.helper_classes.events_handler import EventsHandler
from assisted_test_infra.test_infra.helper_classes.infra_env import InfraEnv
from assisted_test_infra.test_infra.helper_classes.nodes import Nodes
from assisted_test_infra.test_infra.tools import static_network, terraform_utils
from assisted_test_infra.test_infra.utils import logs_utils, network_utils, operators_utils
from assisted_test_infra.test_infra.utils.waiting import wait_till_all_hosts_are_in_status
from service_client import InventoryClient, log
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500 # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto
_config: BaseClusterConfig
def __init__(
self,
api_client: InventoryClient,
config: BaseClusterConfig,
infra_env_config: BaseInfraEnvConfig,
nodes: Optional[Nodes] = None,
):
super().__init__(api_client, config, nodes)
self._infra_env_config = infra_env_config
self._infra_env = None
# Update infraEnv configurations
self._infra_env_config.cluster_id = config.cluster_id
self._infra_env_config.openshift_version = self._config.openshift_version
self._infra_env_config.pull_secret = self._config.pull_secret
self._high_availability_mode = config.high_availability_mode
self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
return self._config.kubeconfig_path
@property
def iso_download_path(self):
return self._config.iso_download_path
@property
def enable_image_download(self):
return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)
self.update_config(
**dict(
openshift_version=day2_cluster.openshift_version,
cluster_name=ClusterName(day2_cluster.name),
additional_ntp_source=day2_cluster.additional_ntp_source,
user_managed_networking=day2_cluster.user_managed_networking,
high_availability_mode=day2_cluster.high_availability_mode,
olm_operators=day2_cluster.monitored_operators,
base_dns_domain=day2_cluster.base_dns_domain,
vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
)
)
def _create(self) -> str:
disk_encryption = models.DiskEncryption(
enable_on=self._config.disk_encryption_roles,
mode=self._config.disk_encryption_mode,
)
if self._config.cluster_id:
log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
self._update_day2_config(self.api_client, self._config.cluster_id)
return self._config.cluster_id
cluster = self.api_client.create_cluster(
self._config.cluster_name.get(),
ssh_public_key=self._config.ssh_public_key,
openshift_version=self._config.openshift_version,
pull_secret=self._config.pull_secret,
base_dns_domain=self._config.base_dns_domain,
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
additional_ntp_source=self._config.additional_ntp_source,
user_managed_networking=self._config.user_managed_networking,
high_availability_mode=self._config.high_availability_mode,
olm_operators=[{"name": name} for name in self._config.olm_operators],
network_type=self._config.network_type,
disk_encryption=disk_encryption,
)
self._config.cluster_id = cluster.id
return cluster.id
def delete(self):
self.api_client.delete_cluster(self.id)
def deregister_infraenv(self):
if self._infra_env:
self._infra_env.deregister()
def get_details(self):
return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
return self.get_details().name
def get_hosts(self):
return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
return [host["id"] for host in self.get_hosts()]
def get_host_ids_names_mapping(self):
return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}
def get_host_assigned_roles(self):
hosts = self.get_hosts()
return {h["id"]: h["role"] for h in hosts}
def get_operators(self):
return self.api_client.get_cluster_operators(self.id)
# TODO remove in favor of generate_infra_env
def generate_image(self):
warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
self._infra_env_config.static_network_config = static_network_config
self._infra_env_config.ignition_config_override = ignition_info
self._infra_env_config.proxy = proxy or self._config.proxy
infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
self._infra_env = infra_env
return infra_env
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
self._infra_env_config.proxy = proxy
self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
iso_download_path = iso_download_path or self._config.iso_download_path
return self._infra_env.download_image(iso_download_path=iso_download_path)
@JunitTestCase()
def generate_and_download_infra_env(
self,
iso_download_path=None,
static_network_config=None,
iso_image_type=None,
ssh_key=None,
ignition_info=None,
proxy=None,
) -> Path:
if self._config.is_static_ip and static_network_config is None:
static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
self.generate_infra_env(
static_network_config=static_network_config,
iso_image_type=iso_image_type,
ssh_key=ssh_key,
ignition_info=ignition_info,
proxy=proxy,
)
return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
warnings.warn(
"generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
DeprecationWarning,
)
iso_download_path = iso_download_path or self._config.iso_download_path
# ensure file path exists before downloading
if not os.path.exists(iso_download_path):
utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
self.api_client.generate_and_download_image(
cluster_id=self.id,
ssh_key=ssh_key or self._config.ssh_public_key,
image_path=iso_download_path,
image_type=iso_image_type or self._config.iso_image_type,
static_network_config=static_network_config,
)
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
statuses = [consts.NodesStatus.DISCONNECTED]
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.DISCONNECTED_TIMEOUT,
)
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
if allow_insufficient:
statuses.append(consts.NodesStatus.INSUFFICIENT)
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.NODES_REGISTERED_TIMEOUT,
)
def _get_matching_hosts(self, host_type, count):
hosts = self.get_hosts()
return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]
def set_cluster_name(self, cluster_name: str):
log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
self.api_client.update_cluster(self.id, {"name": cluster_name})
def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
def set_ocs(self, properties=None):
self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
def set_cnv(self, properties=None):
self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
def unset_ocs(self):
self.unset_olm_operator(consts.OperatorType.OCS)
def unset_cnv(self):
self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
log.info(f"Unsetting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
olm_operators = []
for operator in cluster.monitored_operators:
if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_olm_operator(self, operator_name, properties=None):
log.info(f"Setting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
if operator_name in [o.name for o in cluster.monitored_operators]:
return
olm_operators = []
for operator in cluster.monitored_operators:
if operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
olm_operators.append({"name": operator_name, "properties": properties})
self._config.olm_operators = olm_operators
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
if requested_roles is None:
requested_roles = Counter(
master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
)
assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
assigned_roles.extend(
self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
)
for role in assigned_roles:
self._infra_env.update_host(host_id=role["id"], host_role=role["role"])
return assigned_roles
def set_specific_host_role(self, host, role):
self._infra_env.update_host(host_id=host["id"], host_role=role)
def set_network_params(self, controller=None):
        # The controller argument is here only for backward compatibility. TODO - remove after QE refactors all e2e tests
        controller = controller or self.nodes.controller  # TODO - remove after QE refactors all e2e tests
if self._config.platform == consts.Platforms.NONE:
log.info("On None platform, leaving network management to the user")
api_vip = ingress_vip = machine_networks = None
elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
log.info("Letting access VIPs be deducted from machine networks")
api_vip = ingress_vip = None
machine_networks = self.get_machine_networks()
else:
log.info("Assigning VIPs statically")
access_vips = controller.get_ingress_and_api_vips()
api_vip = access_vips["api_vip"]
ingress_vip = access_vips["ingress_vip"]
machine_networks = None
if self._config.is_ipv4 and self._config.is_ipv6:
machine_networks = controller.get_all_machine_addresses()
self.set_advanced_networking(
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
cluster_networks=self._config.cluster_networks,
service_networks=self._config.service_networks,
machine_networks=machine_networks,
api_vip=api_vip,
ingress_vip=ingress_vip,
)
def get_primary_machine_cidr(self):
cidr = self.nodes.controller.get_primary_machine_cidr()
if not cidr:
            # Support controllers where the machine CIDR is not configurable, taking it from the AI instead
matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not matching_cidrs:
raise RuntimeError("No matching cidr for DHCP")
cidr = next(iter(matching_cidrs))
return cidr
def get_machine_networks(self):
networks = []
primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
if primary_machine_cidr:
networks.append(primary_machine_cidr)
secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
if secondary_machine_cidr:
networks.append(secondary_machine_cidr)
if not networks:
            # Support controllers where the machine CIDR is not configurable, taking it from the AI instead
networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not networks:
raise RuntimeError("No matching cidr for DHCP")
return networks
def set_ingress_and_api_vips(self, vips):
log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
self.api_client.update_cluster(self.id, vips)
def set_ssh_key(self, ssh_key: str):
log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
self.update_config(ssh_public_key=ssh_key)
self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
def set_base_dns_domain(self, base_dns_domain: str):
log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
self.update_config(base_dns_domain=base_dns_domain)
self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
def set_advanced_networking(
self,
vip_dhcp_allocation: Optional[bool] = None,
cluster_networks: Optional[List[models.ClusterNetwork]] = None,
service_networks: Optional[List[models.ServiceNetwork]] = None,
machine_networks: Optional[List[models.MachineNetwork]] = None,
api_vip: Optional[str] = None,
ingress_vip: Optional[str] = None,
):
if machine_networks is None:
machine_networks = self._config.machine_networks
else:
machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]
if vip_dhcp_allocation is None:
vip_dhcp_allocation = self._config.vip_dhcp_allocation
advanced_networking = {
"vip_dhcp_allocation": vip_dhcp_allocation,
"cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
"service_networks": service_networks if service_networks is not None else self._config.service_networks,
"machine_networks": machine_networks,
"api_vip": api_vip if api_vip is not None else self._config.api_vip,
"ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
}
log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")
self.update_config(**advanced_networking)
self.api_client.update_cluster(self.id, advanced_networking)
def set_pull_secret(self, pull_secret: str):
log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
self.update_config(pull_secret=pull_secret)
self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
def set_host_name(self, host_id, requested_name):
log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
log.info(f"Setting Additional NTP source:{ntp_source}")
if isinstance(ntp_source, List):
ntp_source_string = ",".join(ntp_source)
elif isinstance(ntp_source, str):
ntp_source_string = ntp_source
else:
raise TypeError(
f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
)
self.update_config(additional_ntp_source=ntp_source_string)
self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
def patch_discovery_ignition(self, ignition):
self._infra_env.patch_discovery_ignition(ignition_info=ignition)
def set_proxy_values(self, proxy_values: models.Proxy) -> None:
log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
self.update_config(proxy=proxy_values)
self.api_client.set_cluster_proxy(
self.id,
http_proxy=self._config.proxy.http_proxy,
https_proxy=self._config.proxy.https_proxy,
no_proxy=self._config.proxy.no_proxy,
)
@JunitTestCase()
def start_install(self):
self.api_client.install_cluster(cluster_id=self.id)
def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
logs_utils.wait_for_logs_complete(
client=self.api_client,
cluster_id=self.id,
timeout=timeout,
interval=interval,
check_host_logs_only=check_host_logs_only,
)
def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
nodes_count=nodes_count,
timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
)
def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
)
def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
utils.waiting.wait_till_specific_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
statuses=statuses,
nodes_count=nodes_count,
)
def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
utils.waiting.wait_till_specific_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
)
def wait_for_cluster_in_error_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.ERROR],
timeout=consts.ERROR_TIMEOUT,
)
def wait_for_pending_for_input_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.CONFIGURING],
nodes_count=num_masters - 1,
)
def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.JOINED],
nodes_count=num_masters - 1,
)
def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
nodes_count=self.nodes.nodes_count,
)
@JunitTestCase()
def start_install_and_wait_for_installed(
self,
wait_for_hosts=True,
wait_for_operators=True,
wait_for_cluster_install=True,
download_kubeconfig=True,
):
self.start_install()
if wait_for_hosts:
self.wait_for_hosts_to_install()
if wait_for_operators:
self.wait_for_operators_to_finish()
if wait_for_cluster_install:
self.wait_for_install()
if download_kubeconfig:
self.download_kubeconfig()
def disable_worker_hosts(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
for host in hosts:
self.disable_host(host)
def disable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
self._infra_env.unbind_host(host_id=host["id"])
def enable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
def delete_host(self, host):
host_id = host["id"]
log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
self._infra_env.delete_host(host_id=host_id)
def cancel_install(self):
self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
for host in hosts:
if host.get("bootstrap"):
log.info("Bootstrap node is: %s", host["requested_hostname"])
return host["requested_hostname"]
def get_hosts_by_role(self, role, hosts=None):
hosts = hosts or self.api_client.get_cluster_hosts(self.id)
nodes_by_role = []
for host in hosts:
if host["role"] == role:
nodes_by_role.append(host)
log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
return nodes_by_role
def get_random_host_by_role(self, role):
return random.choice(self.get_hosts_by_role(role))
def get_reboot_required_hosts(self):
return self.api_client.get_hosts_in_statuses(
cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
)
def reboot_required_nodes_into_iso_after_reset(self):
hosts_to_reboot = self.get_reboot_required_hosts()
self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_hosts_to_be_in_wrong_boot_order(
self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
):
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
nodes_count=nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_ready_to_install(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
        # This code was added due to BZ:1909997; temporarily checking whether it helps to prevent an unexpected failure
time.sleep(10)
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
def is_in_cancelled_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
)
def is_in_error(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
)
def is_finalizing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
)
def is_installing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
)
def reset_install(self):
self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
)
def wait_for_hosts_to_install(
self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
):
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
nodes_count=nodes_count or self.nodes.nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
operators = self.get_operators()
if fall_on_error_status:
statuses = [consts.OperatorStatus.AVAILABLE]
else:
statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
operator_types=[OperatorType.BUILTIN],
statuses=statuses,
timeout=timeout,
fall_on_error_status=False,
)
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
operator_types=[OperatorType.OLM],
statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def is_operator_in_status(self, operator_name, status):
return operators_utils.is_operator_in_status(
operators=self.get_operators(), operator_name=operator_name, status=status
)
def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
timeout=timeout,
)
def _set_hostnames_and_roles(self):
cluster_id = self.id
hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
nodes = self.nodes.get_nodes(refresh=True)
for host in hosts:
if host.has_hostname():
continue
name = self.find_matching_node_name(host, nodes)
assert name is not None, (
f"Failed to find matching node for host with mac address {host.macs()}"
f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
)
if self.nodes.nodes_count == 1:
role = None
else:
role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER
self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
return (
self._high_availability_mode != consts.HighAvailabilityMode.NONE
and self._config.platform != consts.Platforms.NONE
)
def download_image(self, iso_download_path: str = None) -> Path:
if self._infra_env is None:
log.warning("No infra_env found. Generating infra_env and downloading ISO")
return self.generate_and_download_infra_env(
iso_download_path=iso_download_path or self._config.iso_download_path,
iso_image_type=self._config.iso_image_type,
)
return self._infra_env.download_image(iso_download_path)
@JunitTestCase()
def prepare_for_installation(self, **kwargs):
super(Cluster, self).prepare_for_installation(**kwargs)
self.nodes.wait_for_networking()
self._set_hostnames_and_roles()
if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))
self.set_network_params(controller=self.nodes.controller)
        # In the case of the None platform, we need to specify DNS records before hosts are ready
if self._config.platform == consts.Platforms.NONE:
self._configure_load_balancer()
self.nodes.controller.set_dns_for_user_managed_network()
elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
main_cidr = self.get_primary_machine_cidr()
ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
self.nodes.controller.set_single_node_ip(ip)
self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)
self.wait_for_ready_to_install()
        # In the case of a regular cluster, DNS needs to be set after the VIPs exist.
        # In our case, when the nodes are ready, the VIPs will be there for sure.
if self._ha_not_none():
vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_kubeconfig(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_installation_logs(self, cluster_tar_path):
self.api_client.download_cluster_logs(self.id, cluster_tar_path)
def get_install_config(self):
return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
def get_admin_credentials(self):
return self.api_client.get_cluster_admin_credentials(self.id)
def register_dummy_host(self):
dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
self.api_client.register_host(self.id, dummy_host_id)
def host_get_next_step(self, host_id):
return self.api_client.host_get_next_step(self.id, host_id)
def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
self.api_client.host_post_step_result(
self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
)
def host_update_install_progress(self, host_id, current_stage, progress_info=None):
self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
def host_complete_install(self):
self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
self._infra_env = InfraEnv.generate(
self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
)
self._infra_env.download_image(iso_download_path=self._config.iso_download_path)
nodes.start_all()
self.wait_until_hosts_are_discovered()
return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
def wait_for_cluster_validation(
self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_cluster_validation_in_status(
validation_section=validation_section, validation_id=validation_id, statuses=statuses
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Cluster validation to be in status {statuses}",
)
except BaseException:
log.error(
"Cluster validation status is: %s",
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
),
)
raise
def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
try:
return (
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_host_validation(
self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_host_validation_in_status(
host_id=host_id,
validation_section=validation_section,
validation_id=validation_id,
statuses=statuses,
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Host validation to be in status {statuses}",
)
except BaseException:
log.error(
"Host validation status is: %s",
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
),
)
raise
def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
try:
return (
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_cluster_to_be_in_installing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING],
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
)
def wait_for_cluster_to_be_in_finalizing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
break_statuses=[consts.ClusterStatus.ERROR],
)
def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
timeout=timeout,
)
@classmethod
def reset_cluster_and_wait_for_ready(cls, cluster):
# Reset cluster install
cluster.reset_install()
assert cluster.is_in_insufficient_status()
# Reboot required nodes into ISO
cluster.reboot_required_nodes_into_iso_after_reset()
# Wait for hosts to be rediscovered
cluster.wait_until_hosts_are_discovered()
cluster.wait_for_ready_to_install()
def get_events(self, host_id="", infra_env_id=""):
warnings.warn(
"Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.get_events(host_id, self.id, infra_env_id)
def _configure_load_balancer(self):
main_cidr = self.get_primary_machine_cidr()
secondary_cidr = self.nodes.controller.get_provisioning_cidr()
master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
self.api_client, self.id, secondary_cidr
)
worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)
load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)
tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
lb_controller = LoadBalancerController(tf)
lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
# Hack to retrieve namespace index - does not exist in tests
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
warnings.warn(
"Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.wait_for_event(
event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
)
@staticmethod
def get_inventory_host_nics_data(host: dict, ipv4_first=True):
def get_network_interface_ip(interface):
addresses = (
interface.ipv4_addresses + interface.ipv6_addresses
if ipv4_first
else interface.ipv6_addresses + interface.ipv4_addresses
)
return addresses[0].split("/")[0] if len(addresses) > 0 else None
inventory = models.Inventory(**json.loads(host["inventory"]))
interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
return [
{
"name": interface.name,
"model": interface.product,
"mac": interface.mac_address,
"ip": get_network_interface_ip(interface),
"speed": interface.speed_mbps,
}
for interface in interfaces_list
]
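    # Illustrative sketch (not part of the original class) of the per-host NIC data
    # this helper returns; all values below are assumptions for demonstration only.
    #
    #   Cluster.get_inventory_host_nics_data(host)
    #   # -> [{"name": "ens3", "model": "Virtio network device",
    #   #      "mac": "52:54:00:aa:bb:cc", "ip": "192.0.2.20", "speed": 1000}]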
@staticmethod
def get_hosts_nics_data(hosts: list, ipv4_first=True):
return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
@staticmethod
def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
return [ClusterHost(h) for h in cluster.hosts]
@staticmethod
def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
return [ClusterHost(models.Host(**h)) for h in hosts]
def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cidrs = set()
for host in hosts:
ips = []
if self.nodes.is_ipv4:
ips += host.ipv4_addresses()
if self.nodes.is_ipv6:
ips += host.ipv6_addresses()
for host_ip in ips:
cidr = network_utils.get_cidr_by_interface(host_ip)
cidrs.add(cidr)
return cidrs
def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cluster_cidrs = self.get_cluster_cidrs(hosts)
matching_cidrs = set()
for cidr in cluster_cidrs:
for host in hosts:
interfaces = []
if self.nodes.is_ipv4:
interfaces += host.ipv4_addresses()
if self.nodes.is_ipv6:
interfaces += host.ipv6_addresses()
if not network_utils.any_interface_in_cidr(interfaces, cidr):
break
matching_cidrs.add(cidr)
return matching_cidrs
@staticmethod
def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
cluster_info = client.cluster_get(cluster_id).to_dict()
if len(cluster_info["hosts"]) == 0:
raise Exception("No host found")
network = IPNetwork(machine_cidr)
interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
for intf in interfaces:
ip = intf["ip"]
if IPAddress(ip) in network:
return ip
raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
@staticmethod
def get_master_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
@staticmethod
def get_worker_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
@staticmethod
def get_vips_from_cluster(client, cluster_id):
cluster_info = client.cluster_get(cluster_id)
return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
def get_inventory_host_ips_data(self, host: dict):
nics = self.get_inventory_host_nics_data(host)
return [nic["ip"] for nic in nics]
    # Needed for the None platform and single node:
    # we need to get the IP where the API is running.
def get_kube_api_ip(self, hosts):
for host in hosts:
for ip in self.get_inventory_host_ips_data(host):
if self.is_kubeapi_service_ready(ip):
return ip
def get_api_vip(self, cluster):
cluster = cluster or self.get_details()
api_vip = cluster.api_vip
if not api_vip and cluster.user_managed_networking:
log.info("API VIP is not set, searching for api ip on masters")
masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
api_vip = self._wait_for_api_vip(masters)
log.info("api vip is %s", api_vip)
return api_vip
def _wait_for_api_vip(self, hosts, timeout=180):
"""Enable some grace time for waiting for API's availability."""
return waiting.wait(
lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP"
)
def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
        # Look for the node that matches the given host by its MAC address (which is unique)
for node in nodes:
for mac in node.macs:
if mac.lower() in host.macs():
return node.name
# IPv6 static ips
if self._config.is_static_ip:
mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
for mac in host.macs():
for name, macs in mappings.items():
if mac in macs:
return name
return None
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
"""Validate if kube-api is ready on given address."""
with contextlib.suppress(ValueError):
# IPv6 addresses need to be surrounded with square-brackets
# to differentiate them from domain names
if ipaddress.ip_address(ip_or_dns).version == 6:
ip_or_dns = f"[{ip_or_dns}]"
try:
response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1)
return response.ok
except BaseException:
return False
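    # Hypothetical usage sketch (not part of the original class): probing kube-api
    # readiness by address; the addresses below are illustrative assumptions.
    #
    #   Cluster.is_kubeapi_service_ready("192.0.2.10")    # True once /readyz answers
    #   Cluster.is_kubeapi_service_ready("2001:db8::10")  # IPv6 is wrapped in brackets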
def wait_and_kill_installer(self, host):
# Wait for specific host to be in installing in progress
self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
# Kill installer to simulate host error
selected_node = self.nodes.get_node_from_cluster_host(host)
selected_node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
import warnings
from tests.config import ClusterConfig, InfraEnvConfig
warnings.warn(
"Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
"this function. The function and solution for that case have not been determined yet. It might be "
"on another module, or as a classmethod within Cluster class."
" For more information see https://issues.redhat.com/browse/MGMT-4975",
PendingDeprecationWarning,
)
if isinstance(cluster_info, dict):
cluster_info = models.cluster.Cluster(**cluster_info)
cluster = Cluster(
api_client=api_client,
infra_env_config=InfraEnvConfig(),
config=ClusterConfig(
cluster_name=ClusterName(cluster_info.name),
pull_secret=pull_secret,
ssh_public_key=cluster_info.ssh_public_key,
cluster_id=cluster_info.id,
),
nodes=None,
)
return cluster.get_api_vip(cluster=cluster_info)
| 42.214689
| 120
| 0.679394
|
de697138aebfebdd6f4c178215b3bcaea9938718
| 6,434
|
py
|
Python
|
src/robot/model/testsuite.py
|
mbrzozowski/robotframework
|
3bb3301a715d2809647915b5150f54ddde83b5e0
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-09-13T08:56:49.000Z
|
2021-01-10T11:21:34.000Z
|
src/robot/model/testsuite.py
|
sotayamashita/robotframework
|
63cc1dac3d3ab8c9019d15e2fb0c61da99f026df
|
[
"ECL-2.0",
"Apache-2.0"
] | 55
|
2021-03-10T01:16:34.000Z
|
2022-03-14T01:27:43.000Z
|
src/robot/model/testsuite.py
|
sotayamashita/robotframework
|
63cc1dac3d3ab8c9019d15e2fb0c61da99f026df
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-02-29T15:42:22.000Z
|
2018-05-08T08:58:18.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .configurer import SuiteConfigurer
from .filter import Filter, EmptySuiteRemover
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .metadata import Metadata
from .modelobject import ModelObject
from .tagsetter import TagSetter
from .testcase import TestCase, TestCases
class TestSuite(ModelObject):
"""Base model for single suite.
Extended by :class:`robot.running.model.TestSuite` and
:class:`robot.result.model.TestSuite`.
"""
__slots__ = ['parent', 'source', '_name', 'doc', '_my_visitors', 'rpa']
test_class = TestCase #: Internal usage only.
keyword_class = Keyword #: Internal usage only.
def __init__(self, name='', doc='', metadata=None, source=None, rpa=False):
self.parent = None #: Parent suite. ``None`` with the root suite.
self._name = name
self.doc = doc #: Test suite documentation.
self.metadata = metadata
self.source = source #: Path to the source file or directory.
self.rpa = rpa
self.suites = None
self.tests = None
self.keywords = None
self._my_visitors = []
@property
def _visitors(self):
parent_visitors = self.parent._visitors if self.parent else []
return self._my_visitors + parent_visitors
@property
def name(self):
"""Test suite name. If not set, constructed from child suite names."""
return self._name or ' & '.join(s.name for s in self.suites)
@name.setter
def name(self, name):
self._name = name
@property
def longname(self):
"""Suite name prefixed with the long name of the parent suite."""
if not self.parent:
return self.name
return '%s.%s' % (self.parent.longname, self.name)
@setter
def metadata(self, metadata):
"""Free test suite metadata as a dictionary."""
return Metadata(metadata)
@setter
def suites(self, suites):
"""Child suites as a :class:`~.TestSuites` object."""
return TestSuites(self.__class__, self, suites)
@setter
def tests(self, tests):
"""Tests as a :class:`~.TestCases` object."""
return TestCases(self.test_class, self, tests)
@setter
def keywords(self, keywords):
"""Suite setup and teardown as a :class:`~.Keywords` object."""
return Keywords(self.keyword_class, self, keywords)
@property
def id(self):
"""An automatically generated unique id.
The root suite has id ``s1``, its child suites have ids ``s1-s1``,
``s1-s2``, ..., their child suites get ids ``s1-s1-s1``, ``s1-s1-s2``,
..., ``s1-s2-s1``, ..., and so on.
The first test in a suite has an id like ``s1-t1``, the second has an
id ``s1-t2``, and so on. Similarly keywords in suites (setup/teardown)
and in tests get ids like ``s1-k1``, ``s1-t1-k1``, and ``s1-s4-t2-k5``.
"""
if not self.parent:
return 's1'
return '%s-s%d' % (self.parent.id, self.parent.suites.index(self)+1)
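    # Illustrative sketch (not part of the original module) of the id scheme
    # described above, using hypothetical suite and test names:
    #
    #   root = TestSuite(name='Root')
    #   child = root.suites.create(name='Child')
    #   test = child.tests.create(name='Example')
    #   (root.id, child.id, test.id)  # -> ('s1', 's1-s1', 's1-s1-t1')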
@property
def test_count(self):
"""Number of the tests in this suite, recursively."""
return len(self.tests) + sum(suite.test_count for suite in self.suites)
def set_tags(self, add=None, remove=None, persist=False):
"""Add and/or remove specified tags to the tests in this suite.
:param add: Tags to add as a list or, if adding only one,
as a single string.
:param remove: Tags to remove as a list or as a single string.
Can be given as patterns where ``*`` and ``?`` work as wildcards.
:param persist: Add/remove specified tags also to new tests added
to this suite in the future.
"""
setter = TagSetter(add, remove)
self.visit(setter)
if persist:
self._my_visitors.append(setter)
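    # Hypothetical usage sketch (not part of the original module); the tag names
    # are illustrative assumptions:
    #
    #   suite.set_tags(add=['smoke'], remove=['wip-*'], persist=True)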
def filter(self, included_suites=None, included_tests=None,
included_tags=None, excluded_tags=None):
"""Select test cases and remove others from this suite.
Parameters have the same semantics as ``--suite``, ``--test``,
``--include``, and ``--exclude`` command line options. All of them
can be given as a list of strings, or when selecting only one, as
a single string.
Child suites that contain no tests after filtering are automatically
removed.
Example::
suite.filter(included_tests=['Test 1', '* Example'],
included_tags='priority-1')
"""
self.visit(Filter(included_suites, included_tests,
included_tags, excluded_tags))
def configure(self, **options):
"""A shortcut to configure a suite using one method call.
Can only be used with the root test suite.
:param options: Passed to
:class:`~robot.model.configurer.SuiteConfigurer` that will then
set suite attributes, call :meth:`filter`, etc. as needed.
"""
if self.parent is not None:
raise ValueError("'TestSuite.configure()' can only be used with "
"the root test suite.")
if options:
self.visit(SuiteConfigurer(**options))
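    # Illustrative sketch (not part of the original module): configuring the root
    # suite in one call. The option names are assumed to match SuiteConfigurer's
    # keyword arguments; the values are made up for demonstration.
    #
    #   suite.configure(name='My Run', include_tags=['priority-1'])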
def remove_empty_suites(self):
"""Removes all child suites not containing any tests, recursively."""
self.visit(EmptySuiteRemover())
def visit(self, visitor):
""":mod:`Visitor interface <robot.model.visitor>` entry-point."""
visitor.visit_suite(self)
class TestSuites(ItemList):
__slots__ = []
def __init__(self, suite_class=TestSuite, parent=None, suites=None):
ItemList.__init__(self, suite_class, {'parent': parent}, suites)
| 36.556818
| 79
| 0.632577
|
97747ae2932ba2dba81daffe4ef0804fc426f464
| 13,146
|
py
|
Python
|
pnc_cli/buildconfigurationsets.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | null | null | null |
pnc_cli/buildconfigurationsets.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | null | null | null |
pnc_cli/buildconfigurationsets.py
|
SakuragawaAsaba/pnc-cli
|
0e0c5976766f6d2e32980c39ebc30950fc02960e
|
[
"Apache-2.0"
] | null | null | null |
import logging
from argh import arg
from six import iteritems
import json
import pnc_cli.common as common
import pnc_cli.cli_types as types
import pnc_cli.utils as utils
from pnc_cli import swagger_client
from pnc_cli.swagger_client.apis.buildconfigurations_api import BuildconfigurationsApi
from pnc_cli.swagger_client.apis.buildconfigurationsets_api import BuildconfigurationsetsApi
import pnc_cli.user_config as uc
import sys
sets_api = BuildconfigurationsetsApi(uc.user.get_api_client())
configs_api = BuildconfigurationsApi(uc.user.get_api_client())
def _create_build_config_set_object(**kwargs):
created_build_config_set = swagger_client.BuildConfigurationSetRest()
for key, value in iteritems(kwargs):
setattr(created_build_config_set, key, value)
return created_build_config_set
def list_build_configuration_sets_raw(page_size=200, page_index=0, sort="", q=""):
response = utils.checked_api_call(sets_api, 'get_all', page_size=page_size,
page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-p", "--page-size", help="Limit the amount of build records returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
"""
List all build configuration sets
"""
data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def create_build_configuration_set_raw(**kwargs):
"""
Create a new BuildConfigurationSet.
"""
config_set = _create_build_config_set_object(**kwargs)
response = utils.checked_api_call(sets_api, 'create_new', body=config_set)
if response:
return response.content
@arg("name", help="Name for the new BuildConfigurationSet.", type=types.unique_bc_set_name)
@arg("-pvi", "--product-version-id",
help="ID of the product version to associate this BuildConfigurationSet.",
type=types.existing_product_version)
@arg("-bcs", "--build-configuration-ids", type=types.existing_bc_id, nargs='+',
help="Space separated list of build-configurations to include in the set.")
def create_build_configuration_set(**kwargs):
"""
Create a new BuildConfigurationSet.
"""
content = create_build_configuration_set_raw(**kwargs)
if content:
return utils.format_json(content)
def get_build_configuration_set_raw(id=None, name=None):
"""
Get a specific BuildConfigurationSet by name or ID
"""
found_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'get_specific', id=found_id)
if response:
return response.content
@arg("-id", "--id", help="ID of the BuildConfigurationSet to retrieve", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet to retrieve", type=types.existing_bc_set_name)
def get_build_configuration_set(id=None, name=None):
"""
Get a specific BuildConfigurationSet by name or ID
"""
content = get_build_configuration_set_raw(id, name)
if content:
return utils.format_json(content)
def update_build_configuration_set_raw(id, **kwargs):
set_to_update = utils.checked_api_call(sets_api, 'get_specific', id=id).content
for key, value in kwargs.items():
if value is not None:
setattr(set_to_update, key, value)
response = utils.checked_api_call(sets_api, 'update', id=id, body=set_to_update)
if response:
return response.content
@arg("id", help="ID of the BuildConfigurationSet to update.", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Updated name for the BuildConfigurationSet.", type=types.unique_bc_set_name)
@arg("-pvi", "--product-version-id",
help="Updated product version ID for the BuildConfigurationSet.", type=types.existing_product_version)
@arg("-bcs", "--build-configuration-ids", type=types.existing_bc_id, nargs='+',
help="Space separated list of build-configurations to include in the set.")
def update_build_configuration_set(id, **kwargs):
"""
Update a BuildConfigurationSet
"""
data = update_build_configuration_set_raw(id, **kwargs)
if data:
return utils.format_json(data)
def delete_build_configuration_set_raw(id=None, name=None):
set_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'delete_specific', id=set_id)
if response:
return response.content
@arg("-i", "--id", help="ID of the BuildConfigurationSet to delete.", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet to delete.", type=types.existing_bc_set_name)
# TODO: in order to delete a config set successfully, any buildconfigsetrecords must be deleted first
# TODO: it may be impossible / undesirable to remove
# buildconfigsetrecords, so perhaps just check and abort
def delete_build_configuration_set(id=None, name=None):
    data = delete_build_configuration_set_raw(id, name)
if data:
return utils.format_json(data)
def build_set_raw(id=None, name=None,
tempbuild=False, timestamp_alignment=False,
force=False):
"""
Start a build of the given BuildConfigurationSet
"""
logging.debug("temp_build: " + str(tempbuild))
logging.debug("timestamp_alignment: " + str(timestamp_alignment))
logging.debug("force: " + str(force))
if tempbuild is False and timestamp_alignment is True:
logging.error("You can only activate timestamp alignment with the temporary build flag!")
sys.exit(1)
found_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'build', id=found_id,
temporary_build=tempbuild,
timestamp_alignment=timestamp_alignment,
force_rebuild=force)
if response:
return response.content
@arg("-i", "--id", help="ID of the BuildConfigurationSet to build.", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet to build.", type=types.existing_bc_set_name)
@arg("--temporary-build", help="Temporary builds")
@arg("--timestamp-alignment", help="Enable timestamp alignment for the temporary builds")
@arg("-f", "--force", help="Force rebuild of all configurations")
def build_set(id=None, name=None,
temporary_build=False, timestamp_alignment=False,
force=False):
"""
Start a build of the given BuildConfigurationSet
"""
content = build_set_raw(id, name,
temporary_build, timestamp_alignment, force)
if content:
return utils.format_json_list(content)
def list_build_configurations_for_set_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
found_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'get_configurations', id=found_id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="ID of the BuildConfigurationSet to build.", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet to build.", type=types.existing_bc_set_name)
@arg("-p", "--page-size", help="Limit the amount of build records returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_configurations_for_set(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all build configurations in a given BuildConfigurationSet.
"""
content = list_build_configurations_for_set_raw(id, name, page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
def add_build_configuration_to_set_raw(
set_id=None, set_name=None, config_id=None, config_name=None):
config_set_id = common.set_id(sets_api, set_id, set_name)
bc_id = common.set_id(configs_api, config_id, config_name)
bc = common.get_entity(configs_api, bc_id)
response = utils.checked_api_call(
sets_api,
'add_configuration',
id=config_set_id,
body=bc)
if response:
return response.content
@arg("-sid", "--set-id", help="ID of the BuildConfigurationSet to add to", type=types.existing_bc_set_id)
@arg("-sn", "--set-name", help="Name of the BuildConfigurationSet to add to", type=types.existing_bc_set_name)
@arg("-cid", "--config-id",
help="ID of the build configuration to add to the given set", type=types.existing_bc_id)
@arg("-cn", "--config-name",
help="Name of the build configuration to add to the given set", type=types.existing_bc_name)
def add_build_configuration_to_set(
set_id=None, set_name=None, config_id=None, config_name=None):
"""
Add a build configuration to an existing BuildConfigurationSet
"""
content = add_build_configuration_to_set_raw(set_id, set_name, config_id, config_name)
if content:
return utils.format_json(content)
def remove_build_configuration_from_set_raw(set_id=None, set_name=None, config_id=None, config_name=None):
config_set_id = common.set_id(sets_api, set_id, set_name)
bc_id = common.set_id(configs_api, config_id, config_name)
response = utils.checked_api_call(
sets_api,
'remove_configuration',
id=config_set_id,
config_id=bc_id)
if response:
return response.content
@arg("-sid", "--set-id", help="ID of the BuildConfigurationSet to remove from", type=types.existing_bc_set_id)
@arg("-sn", "--set-name", help="Name of the BuildConfigurationSet to remove from", type=types.existing_bc_set_name)
@arg("-cid", "--config-id", help="ID of the BuildConfiguration to remove from the set",
type=types.existing_bc_id)
@arg("-cn", "--config-name", help="Name of the BuildConfiguration to remove from the set",
type=types.existing_bc_name)
def remove_build_configuration_from_set(set_id=None, set_name=None, config_id=None, config_name=None):
content = remove_build_configuration_from_set_raw(set_id, set_name, config_id, config_name)
if content:
return utils.format_json(content)
def list_build_records_for_set_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
found_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'get_build_records', id=found_id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="ID of the BuildConfigurationSet", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet", type=types.existing_bc_set_name)
@arg("-p", "--page-size", help="Limit the amount of build records returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_records_for_set(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all build records for a BuildConfigurationSet
"""
content = list_build_records_for_set_raw(id, name, page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
def list_build_set_records_raw(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
found_id = common.set_id(sets_api, id, name)
response = utils.checked_api_call(sets_api, 'get_all_build_config_set_records', id=found_id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-i", "--id", help="ID of the BuildConfigurationSet", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet", type=types.existing_bc_set_name)
@arg("-p", "--page-size", help="Limit the amount of build records returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_build_set_records(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all build set records for a BuildConfigurationSet
"""
    content = list_build_set_records_raw(id, name, page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
@arg("-i", "--id", help="ID of the BuildConfigurationSet", type=types.existing_bc_set_id)
@arg("-n", "--name", help="Name of the BuildConfigurationSet", type=types.existing_bc_set_name)
def latest_build_set_records_status(id=None, name=None):
"""
List latest build set record status
"""
data = list_build_set_records(id, name)
data_json = json.loads(data)
if len(data_json) > 0:
data_json.sort(key=lambda obj: obj['id'], reverse=True)
return "Build Config Set Record #" + str(data_json[0]['id']) + ": " + data_json[0]['status']
| 42.543689
| 156
| 0.712156
|
d091e387b0465bdc4056a2be6c30784d3a8d28f0
| 2,863
|
py
|
Python
|
datas/models.py
|
ballon3/SUW-Demo
|
09c8793ab70de743b8f4484f6dc6120e2425570c
|
[
"MIT"
] | null | null | null |
datas/models.py
|
ballon3/SUW-Demo
|
09c8793ab70de743b8f4484f6dc6120e2425570c
|
[
"MIT"
] | null | null | null |
datas/models.py
|
ballon3/SUW-Demo
|
09c8793ab70de743b8f4484f6dc6120e2425570c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Datas
https://iothook.com/
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
from django.db import models
from django.template.defaultfilters import slugify as djslugify
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
import hashlib, random
from channels.models import Channel
class Data(models.Model):
"""
"""
owner = models.ForeignKey('auth.User', related_name='ownerdata')
channel = models.ForeignKey(Channel, related_name='channeldata')
value_1 = models.CharField(_('Deger 1'), max_length=10, null=True, blank=False)
value_2 = models.CharField(_('Deger 2'), max_length=10, null=True, blank=False)
value_3 = models.CharField(_('Deger 3'), max_length=10, null=True, blank=False)
value_4 = models.CharField(_('Deger 4'), max_length=10, null=True, blank=False)
value_5 = models.CharField(_('Deger 5'), max_length=10, null=True, blank=False)
value_6 = models.CharField(_('Deger 6'), max_length=10, null=True, blank=False)
value_7 = models.CharField(_('Deger 7'), max_length=10, null=True, blank=False)
value_8 = models.CharField(_('Deger 8'), max_length=10, null=True, blank=False)
value_9 = models.CharField(_('Deger 9'), max_length=10, null=True, blank=False)
value_10 = models.CharField(_('Deger 10'), max_length=10, null=True, blank=False)
enable = models.BooleanField(_('Aktif et'), default=True)
remote_address = models.CharField(_('Ip adres'), max_length=255)
pub_date = models.DateTimeField(_('Yayin tarihi'), auto_now=True)
def __str__(self):
return self.channel.channel_name
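# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Typical creation of a Data row from a view; `user`, `channel`, `ip` and
# `reading` are placeholders for objects/values available in the caller.
def _example_store_reading(user, channel, ip, reading):
    return Data.objects.create(
        owner=user,
        channel=channel,
        value_1=str(reading),
        remote_address=ip,
    )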
| 48.525424
| 93
| 0.7073
|
04cf83e657eae3d4dd8f461379ce5c0eb4f7ecd3
| 8,741
|
py
|
Python
|
eggs/sqlalchemy_migrate-0.7.2-py2.7.egg/migrate/versioning/schemadiff.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 2
|
2015-11-05T09:43:45.000Z
|
2017-05-31T14:22:02.000Z
|
eggs/sqlalchemy_migrate-0.7.2-py2.7.egg/migrate/versioning/schemadiff.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 1
|
2016-04-19T13:03:17.000Z
|
2016-04-19T13:03:17.000Z
|
eggs/sqlalchemy_migrate-0.7.2-py2.7.egg/migrate/versioning/schemadiff.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 1
|
2020-07-25T21:03:18.000Z
|
2020-07-25T21:03:18.000Z
|
"""
Schema differencing support.
"""
import logging
import sqlalchemy
from sqlalchemy.types import Float
log = logging.getLogger(__name__)
def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None):
"""
Return differences of model against database.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
db_metadata = sqlalchemy.MetaData(engine, reflect=True)
# sqlite will include a dynamically generated 'sqlite_sequence' table if
# there are autoincrement sequences in the database; this should not be
# compared.
if engine.dialect.name == 'sqlite':
if 'sqlite_sequence' in db_metadata.tables:
db_metadata.remove(db_metadata.tables['sqlite_sequence'])
return SchemaDiff(metadata, db_metadata,
labelA='model',
labelB='database',
excludeTables=excludeTables)
def getDiffOfModelAgainstModel(metadataA, metadataB, excludeTables=None):
"""
Return differences of model against another model.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
    return SchemaDiff(metadataA, metadataB, excludeTables=excludeTables)
class ColDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Column`
between two :class:`~sqlalchemy.schema.Table` instances, ``A``
and ``B``.
.. attribute:: col_A
The :class:`~sqlalchemy.schema.Column` object for A.
.. attribute:: col_B
The :class:`~sqlalchemy.schema.Column` object for B.
.. attribute:: type_A
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
.. attribute:: type_B
The most generic type of the :class:`~sqlalchemy.schema.Column`
      object in B.
"""
diff = False
def __init__(self,col_A,col_B):
self.col_A = col_A
self.col_B = col_B
self.type_A = col_A.type
self.type_B = col_B.type
self.affinity_A = self.type_A._type_affinity
self.affinity_B = self.type_B._type_affinity
if self.affinity_A is not self.affinity_B:
self.diff = True
return
if isinstance(self.type_A,Float) or isinstance(self.type_B,Float):
if not (isinstance(self.type_A,Float) and isinstance(self.type_B,Float)):
self.diff=True
return
for attr in ('precision','scale','length'):
A = getattr(self.type_A,attr,None)
B = getattr(self.type_B,attr,None)
if not (A is None or B is None) and A!=B:
self.diff=True
return
def __nonzero__(self):
return self.diff
class TableDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Table`
between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
and ``B``.
.. attribute:: columns_missing_from_A
A sequence of column names that were found in B but weren't in
A.
.. attribute:: columns_missing_from_B
A sequence of column names that were found in A but weren't in
B.
.. attribute:: columns_different
A dictionary containing information about columns that were
found to be different.
It maps column names to a :class:`ColDiff` objects describing the
differences found.
"""
__slots__ = (
'columns_missing_from_A',
'columns_missing_from_B',
'columns_different',
)
def __nonzero__(self):
return bool(
self.columns_missing_from_A or
self.columns_missing_from_B or
self.columns_different
)
class SchemaDiff(object):
"""
Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
objects.
The string representation of a :class:`SchemaDiff` will summarise
the changes found between the two
:class:`~sqlalchemy.schema.MetaData` objects.
The length of a :class:`SchemaDiff` will give the number of
changes found, enabling it to be used much like a boolean in
expressions.
:param metadataA:
First :class:`~sqlalchemy.schema.MetaData` to compare.
:param metadataB:
Second :class:`~sqlalchemy.schema.MetaData` to compare.
:param labelA:
The label to use in messages about the first
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
The label to use in messages about the second
:class:`~sqlalchemy.schema.MetaData`.
:param excludeTables:
A sequence of table names to exclude.
.. attribute:: tables_missing_from_A
A sequence of table names that were found in B but weren't in
A.
.. attribute:: tables_missing_from_B
A sequence of table names that were found in A but weren't in
B.
.. attribute:: tables_different
A dictionary containing information about tables that were found
to be different.
It maps table names to a :class:`TableDiff` objects describing the
differences found.
"""
def __init__(self,
metadataA, metadataB,
labelA='metadataA',
labelB='metadataB',
excludeTables=None):
self.metadataA, self.metadataB = metadataA, metadataB
self.labelA, self.labelB = labelA, labelB
self.label_width = max(len(labelA),len(labelB))
excludeTables = set(excludeTables or [])
A_table_names = set(metadataA.tables.keys())
B_table_names = set(metadataB.tables.keys())
self.tables_missing_from_A = sorted(
B_table_names - A_table_names - excludeTables
)
self.tables_missing_from_B = sorted(
A_table_names - B_table_names - excludeTables
)
self.tables_different = {}
for table_name in A_table_names.intersection(B_table_names):
td = TableDiff()
A_table = metadataA.tables[table_name]
B_table = metadataB.tables[table_name]
A_column_names = set(A_table.columns.keys())
B_column_names = set(B_table.columns.keys())
td.columns_missing_from_A = sorted(
B_column_names - A_column_names
)
td.columns_missing_from_B = sorted(
A_column_names - B_column_names
)
td.columns_different = {}
for col_name in A_column_names.intersection(B_column_names):
cd = ColDiff(
A_table.columns.get(col_name),
B_table.columns.get(col_name)
)
if cd:
td.columns_different[col_name]=cd
# XXX - index and constraint differences should
# be checked for here
if td:
self.tables_different[table_name]=td
def __str__(self):
''' Summarize differences. '''
out = []
column_template =' %%%is: %%r' % self.label_width
for names,label in (
(self.tables_missing_from_A,self.labelA),
(self.tables_missing_from_B,self.labelB),
):
if names:
out.append(
' tables missing from %s: %s' % (
label,', '.join(sorted(names))
)
)
for name,td in sorted(self.tables_different.items()):
out.append(
' table with differences: %s' % name
)
for names,label in (
(td.columns_missing_from_A,self.labelA),
(td.columns_missing_from_B,self.labelB),
):
if names:
out.append(
' %s missing these columns: %s' % (
label,', '.join(sorted(names))
)
)
for name,cd in td.columns_different.items():
out.append(' column with differences: %s' % name)
out.append(column_template % (self.labelA,cd.col_A))
out.append(column_template % (self.labelB,cd.col_B))
if out:
out.insert(0, 'Schema diffs:')
return '\n'.join(out)
else:
return 'No schema diffs'
def __len__(self):
"""
Used in bool evaluation, return of 0 means no diffs.
"""
return (
len(self.tables_missing_from_A) +
len(self.tables_missing_from_B) +
len(self.tables_different)
)
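# --- Hedged usage sketch (editorial addition, not part of sqlalchemy-migrate) ---
# Reports schema drift between declared models and a live database. The names
# `model_metadata` (a populated sqlalchemy.MetaData) and `engine` (a bound
# engine) are placeholders supplied by the caller.
def _example_report_schema_drift(model_metadata, engine):
    # exclude migrate's own bookkeeping table from the comparison
    diff = getDiffOfModelAgainstDatabase(model_metadata, engine,
                                         excludeTables=['migrate_version'])
    if diff:
        # __str__ summarises missing tables/columns and differing columns
        print(diff)
    # __len__ is the number of table-level differences found
    return len(diff)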
| 29.832765
| 85
| 0.590893
|
84c11a2bec9765cc4f991823a2b46c894c677043
| 3,912
|
py
|
Python
|
python/tink/prf/_prf_key_manager_test.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 12,366
|
2017-05-12T11:22:39.000Z
|
2022-03-31T13:40:46.000Z
|
python/tink/prf/_prf_key_manager_test.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 505
|
2017-05-18T20:54:30.000Z
|
2022-03-30T19:51:56.000Z
|
python/tink/prf/_prf_key_manager_test.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 1,179
|
2017-05-12T11:25:34.000Z
|
2022-03-31T14:31:15.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.prf.prf_set_key_manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from tink.proto import common_pb2
from tink.proto import hmac_prf_pb2
from tink.proto import tink_pb2
import tink
from tink import core
from tink import prf
from tink.testing import helper
def setUpModule():
prf.register()
class PrfKeyManagerTest(parameterized.TestCase):
@parameterized.parameters([
('AES_CMAC_PRF', prf.prf_key_templates.AES_CMAC),
('HMAC_PRF_SHA256', prf.prf_key_templates.HMAC_SHA256),
('HMAC_PRF_SHA512', prf.prf_key_templates.HMAC_SHA512),
('HKDF_PRF_SHA256', prf.prf_key_templates.HKDF_SHA256)
])
def test_template(self, template_name, template):
self.assertEqual(template,
helper.template_from_testdata(template_name, 'prf'))
def test_new_key_data_success(self):
key_template = prf.prf_key_templates._create_hmac_key_template(
key_size=32, hash_type=common_pb2.SHA256)
key_manager = core.Registry.key_manager(key_template.type_url)
key_data = key_manager.new_key_data(key_template)
self.assertEqual(key_data.type_url, key_template.type_url)
self.assertEqual(key_data.key_material_type, tink_pb2.KeyData.SYMMETRIC)
key = hmac_prf_pb2.HmacPrfKey.FromString(key_data.value)
self.assertEqual(key.version, 0)
self.assertEqual(key.params.hash, common_pb2.SHA256)
self.assertLen(key.key_value, 32)
def test_invalid_params_throw_exception(self):
template = prf.prf_key_templates._create_hmac_key_template(
key_size=7, hash_type=common_pb2.SHA256)
with self.assertRaises(tink.TinkError):
tink.new_keyset_handle(template)
@parameterized.parameters([
prf.prf_key_templates.AES_CMAC, prf.prf_key_templates.HMAC_SHA256,
prf.prf_key_templates.HMAC_SHA512, prf.prf_key_templates.HKDF_SHA256
])
def test_compute_success(self, template):
keyset_handle = tink.new_keyset_handle(template)
primitive = keyset_handle.primitive(prf.PrfSet)
output = primitive.primary().compute(b'input_data', output_length=15)
self.assertLen(output, 15)
self.assertEqual(
primitive.primary().compute(b'input_data', output_length=15), output)
self.assertNotEqual(
primitive.primary().compute(b'some_other_data', output_length=15),
output)
prfs = primitive.all()
self.assertLen(prfs, 1)
self.assertEqual(
prfs[primitive.primary_id()].compute(b'input_data', output_length=15),
output)
@parameterized.parameters([
prf.prf_key_templates.AES_CMAC, prf.prf_key_templates.HMAC_SHA256,
prf.prf_key_templates.HMAC_SHA512, prf.prf_key_templates.HKDF_SHA256
])
def test_output_too_long_raises_error(self, template):
keyset_handle = tink.new_keyset_handle(template)
primitive = keyset_handle.primitive(prf.PrfSet)
with self.assertRaises(tink.TinkError):
primitive.primary().compute(b'input_data', output_length=1234567)
prfs = primitive.all()
self.assertLen(prfs, 1)
p = prfs[primitive.primary_id()]
with self.assertRaises(tink.TinkError):
p.compute(b'input_data', output_length=1234567)
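# --- Hedged usage sketch (editorial addition, not part of the Tink tests) ---
# The same PrfSet calls exercised above, outside the test harness; assumes
# prf.register() has already been called (as in setUpModule).
def _example_prf_output(data: bytes) -> bytes:
  handle = tink.new_keyset_handle(prf.prf_key_templates.HMAC_SHA256)
  prf_set = handle.primitive(prf.PrfSet)
  # derive a 16-byte pseudorandom output for `data`
  return prf_set.primary().compute(data, output_length=16)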
if __name__ == '__main__':
absltest.main()
| 37.980583
| 78
| 0.758436
|
54b17d949444d6a7a25b4af491a6a1ac17657fd2
| 26,091
|
py
|
Python
|
numba/core/compiler.py
|
charlesbluca/numba
|
3131959c98e567d74ab6db402230cfea6ceecafe
|
[
"BSD-2-Clause"
] | 6,620
|
2015-01-04T08:51:04.000Z
|
2022-03-31T12:52:18.000Z
|
numba/core/compiler.py
|
numba/numba
|
8e6fa5690fbe4138abf69263363be85987891e8b
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 6,457
|
2015-01-04T03:18:41.000Z
|
2022-03-31T17:38:42.000Z
|
numba/core/compiler.py
|
charlesbluca/numba
|
3131959c98e567d74ab6db402230cfea6ceecafe
|
[
"BSD-2-Clause"
] | 930
|
2015-01-25T02:33:03.000Z
|
2022-03-30T14:10:32.000Z
|
from collections import namedtuple
import copy
import warnings
from numba.core.tracing import event
from numba.core import (utils, errors, typing, interpreter, bytecode, postproc,
config, callconv, cpu)
from numba.parfors.parfor import ParforDiagnostics
from numba.core.errors import CompilerError
from numba.core.environment import lookup_environment
from numba.core.compiler_machinery import PassManager
from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode,
FixupArgs, IRProcessing, DeadBranchPrune,
RewriteSemanticConstants,
InlineClosureLikes, GenericRewrites,
WithLifting, InlineInlinables,
FindLiterallyCalls,
MakeFunctionToJitFunction,
CanonicalizeLoopExit,
CanonicalizeLoopEntry, LiteralUnroll,
ReconstructSSA,
)
from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes,
NopythonRewrites, PreParforPass,
ParforPass, DumpParforDiagnostics,
IRLegalization, NoPythonBackend,
InlineOverloads, PreLowerStripPhis,
NativeLowering,
NoPythonSupportedFeatureValidation,
)
from numba.core.object_mode_passes import (ObjectModeFrontEnd,
ObjectModeBackEnd)
from numba.core.targetconfig import TargetConfig, Option
class Flags(TargetConfig):
enable_looplift = Option(
type=bool,
default=False,
doc="Enable loop-lifting",
)
enable_pyobject = Option(
type=bool,
default=False,
doc="Enable pyobject mode (in general)",
)
enable_pyobject_looplift = Option(
type=bool,
default=False,
doc="Enable pyobject mode inside lifted loops",
)
enable_ssa = Option(
type=bool,
default=True,
doc="Enable SSA",
)
force_pyobject = Option(
type=bool,
default=False,
doc="Force pyobject mode inside the whole function",
)
release_gil = Option(
type=bool,
default=False,
doc="Release GIL inside the native function",
)
no_compile = Option(
type=bool,
default=False,
doc="TODO",
)
debuginfo = Option(
type=bool,
default=False,
doc="TODO",
)
boundscheck = Option(
type=bool,
default=False,
doc="TODO",
)
forceinline = Option(
type=bool,
default=False,
doc="TODO",
)
no_cpython_wrapper = Option(
type=bool,
default=False,
doc="TODO",
)
no_cfunc_wrapper = Option(
type=bool,
default=False,
doc="TODO",
)
auto_parallel = Option(
type=cpu.ParallelOptions,
default=cpu.ParallelOptions(False),
doc="""Enable automatic parallel optimization, can be fine-tuned by
taking a dictionary of sub-options instead of a boolean, see parfor.py for
detail""",
)
nrt = Option(
type=bool,
default=False,
doc="TODO",
)
no_rewrites = Option(
type=bool,
default=False,
doc="TODO",
)
error_model = Option(
type=str,
default="python",
doc="TODO",
)
fastmath = Option(
type=cpu.FastMathOptions,
default=cpu.FastMathOptions(False),
doc="TODO",
)
noalias = Option(
type=bool,
default=False,
doc="TODO",
)
inline = Option(
type=cpu.InlineOptions,
default=cpu.InlineOptions("never"),
doc="TODO",
)
# Defines a new target option for tracking the "target backend".
# This will be the XYZ in @jit(_target=XYZ).
target_backend = Option(
type=str,
default="cpu", # if not set, default to CPU
doc="backend"
)
DEFAULT_FLAGS = Flags()
DEFAULT_FLAGS.nrt = True
CR_FIELDS = ["typing_context",
"target_context",
"entry_point",
"typing_error",
"type_annotation",
"signature",
"objectmode",
"lifted",
"fndesc",
"library",
"call_helper",
"environment",
"metadata",
# List of functions to call to initialize on unserialization
# (i.e cache load).
"reload_init",
"referenced_envs",
]
class CompileResult(namedtuple("_CompileResult", CR_FIELDS)):
"""
A structure holding results from the compilation of a function.
"""
__slots__ = ()
def _reduce(self):
"""
Reduce a CompileResult to picklable components.
"""
libdata = self.library.serialize_using_object_code()
# Make it (un)picklable efficiently
typeann = str(self.type_annotation)
fndesc = self.fndesc
# Those don't need to be pickled and may fail
fndesc.typemap = fndesc.calltypes = None
# Include all referenced environments
referenced_envs = self._find_referenced_environments()
return (libdata, self.fndesc, self.environment, self.signature,
self.objectmode, self.lifted, typeann, self.reload_init,
tuple(referenced_envs))
def _find_referenced_environments(self):
"""Returns a list of referenced environments
"""
mod = self.library._final_module
# Find environments
referenced_envs = []
for gv in mod.global_variables:
gvn = gv.name
if gvn.startswith("_ZN08NumbaEnv"):
env = lookup_environment(gvn)
if env is not None:
if env.can_cache():
referenced_envs.append(env)
return referenced_envs
@classmethod
def _rebuild(cls, target_context, libdata, fndesc, env,
signature, objectmode, lifted, typeann,
reload_init, referenced_envs):
if reload_init:
# Re-run all
for fn in reload_init:
fn()
library = target_context.codegen().unserialize_library(libdata)
cfunc = target_context.get_executable(library, fndesc, env)
cr = cls(target_context=target_context,
typing_context=target_context.typing_context,
library=library,
environment=env,
entry_point=cfunc,
fndesc=fndesc,
type_annotation=typeann,
signature=signature,
objectmode=objectmode,
lifted=lifted,
typing_error=None,
call_helper=None,
metadata=None, # Do not store, arbitrary & potentially large!
reload_init=reload_init,
referenced_envs=referenced_envs,
)
# Load Environments
for env in referenced_envs:
library.codegen.set_env(env.env_name, env)
return cr
def dump(self, tab=''):
print(f'{tab}DUMP {type(self).__name__} {self.entry_point}')
self.signature.dump(tab=tab + ' ')
print(f'{tab}END DUMP')
_LowerResult = namedtuple("_LowerResult", [
"fndesc",
"call_helper",
"cfunc",
"env",
])
def compile_result(**kws):
keys = set(kws.keys())
fieldset = set(CR_FIELDS)
badnames = keys - fieldset
if badnames:
raise NameError(*badnames)
missing = fieldset - keys
for k in missing:
kws[k] = None
# Avoid keeping alive traceback variables
err = kws['typing_error']
if err is not None:
kws['typing_error'] = err.with_traceback(None)
return CompileResult(**kws)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
locals={}):
"""
Compile the function in an isolated environment (typing and target
context).
Good for testing.
"""
from numba.core.registry import cpu_target
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx, target='cpu')
# Register the contexts in case for nested @jit or @overload calls
with cpu_target.nested_context(typingctx, targetctx):
return compile_extra(typingctx, targetctx, func, args, return_type,
flags, locals)
def run_frontend(func, inline_closures=False, emit_dels=False):
"""
Run the compiler frontend over the given Python function, and return
the function's canonical Numba IR.
If inline_closures is Truthy then closure inlining will be run
If emit_dels is Truthy the ir.Del nodes will be emitted appropriately
"""
# XXX make this a dedicated Pipeline?
func_id = bytecode.FunctionIdentity.from_function(func)
interp = interpreter.Interpreter(func_id)
bc = bytecode.ByteCode(func_id=func_id)
func_ir = interp.interpret(bc)
if inline_closures:
from numba.core.inline_closurecall import InlineClosureCallPass
inline_pass = InlineClosureCallPass(func_ir, cpu.ParallelOptions(False),
{}, False)
inline_pass.run()
post_proc = postproc.PostProcessor(func_ir)
post_proc.run(emit_dels)
return func_ir
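# --- Hedged usage sketch (editorial addition) ---
# Inspecting the canonical untyped Numba IR of a small function via
# run_frontend above.
def _example_dump_numba_ir():
    def add_one(x):
        return x + 1
    func_ir = run_frontend(add_one)
    func_ir.dump()  # print the IR blocks to stdout
    return func_ir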
class _CompileStatus(object):
"""
Describes the state of compilation. Used like a C record.
"""
__slots__ = ['fail_reason', 'can_fallback']
def __init__(self, can_fallback):
self.fail_reason = None
self.can_fallback = can_fallback
def __repr__(self):
vals = []
for k in self.__slots__:
vals.append("{k}={v}".format(k=k, v=getattr(self, k)))
return ', '.join(vals)
class _EarlyPipelineCompletion(Exception):
"""
Raised to indicate that a pipeline has completed early
"""
def __init__(self, result):
self.result = result
class StateDict(dict):
"""
A dictionary that has an overloaded getattr and setattr to permit getting
and setting key/values through the use of attributes.
"""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
self[attr] = value
def _make_subtarget(targetctx, flags):
"""
Make a new target context from the given target context and flags.
"""
subtargetoptions = {}
if flags.debuginfo:
subtargetoptions['enable_debuginfo'] = True
if flags.boundscheck:
subtargetoptions['enable_boundscheck'] = True
if flags.nrt:
subtargetoptions['enable_nrt'] = True
if flags.auto_parallel:
subtargetoptions['auto_parallel'] = flags.auto_parallel
if flags.fastmath:
subtargetoptions['fastmath'] = flags.fastmath
error_model = callconv.create_error_model(flags.error_model, targetctx)
subtargetoptions['error_model'] = error_model
return targetctx.subtarget(**subtargetoptions)
class CompilerBase(object):
"""
Stores and manages states for the compiler
"""
def __init__(self, typingctx, targetctx, library, args, return_type, flags,
locals):
# Make sure the environment is reloaded
config.reload_config()
typingctx.refresh()
targetctx.refresh()
self.state = StateDict()
self.state.typingctx = typingctx
self.state.targetctx = _make_subtarget(targetctx, flags)
self.state.library = library
self.state.args = args
self.state.return_type = return_type
self.state.flags = flags
self.state.locals = locals
# Results of various steps of the compilation pipeline
self.state.bc = None
self.state.func_id = None
self.state.func_ir = None
self.state.lifted = None
self.state.lifted_from = None
self.state.typemap = None
self.state.calltypes = None
self.state.type_annotation = None
# holds arbitrary inter-pipeline stage meta data
self.state.metadata = {}
self.state.reload_init = []
# hold this for e.g. with_lifting, null out on exit
self.state.pipeline = self
# parfor diagnostics info, add to metadata
self.state.parfor_diagnostics = ParforDiagnostics()
self.state.metadata['parfor_diagnostics'] = \
self.state.parfor_diagnostics
self.state.metadata['parfors'] = {}
self.state.status = _CompileStatus(
can_fallback=self.state.flags.enable_pyobject
)
def compile_extra(self, func):
self.state.func_id = bytecode.FunctionIdentity.from_function(func)
ExtractByteCode().run_pass(self.state)
self.state.lifted = ()
self.state.lifted_from = None
return self._compile_bytecode()
def compile_ir(self, func_ir, lifted=(), lifted_from=None):
self.state.func_id = func_ir.func_id
self.state.lifted = lifted
self.state.lifted_from = lifted_from
self.state.func_ir = func_ir
self.state.nargs = self.state.func_ir.arg_count
FixupArgs().run_pass(self.state)
return self._compile_ir()
def define_pipelines(self):
"""Child classes override this to customize the pipelines in use.
"""
raise NotImplementedError()
def _compile_core(self):
"""
Populate and run compiler pipeline
"""
with utils.ConfigStack().enter(self.state.flags.copy()):
pms = self.define_pipelines()
for pm in pms:
pipeline_name = pm.pipeline_name
func_name = "%s.%s" % (self.state.func_id.modname,
self.state.func_id.func_qualname)
event("Pipeline: %s for %s" % (pipeline_name, func_name))
self.state.metadata['pipeline_times'] = {pipeline_name:
pm.exec_times}
is_final_pipeline = pm == pms[-1]
res = None
try:
pm.run(self.state)
if self.state.cr is not None:
break
except _EarlyPipelineCompletion as e:
res = e.result
break
except Exception as e:
if (utils.use_new_style_errors() and not
isinstance(e, errors.NumbaError)):
raise e
self.state.status.fail_reason = e
if is_final_pipeline:
raise e
else:
raise CompilerError("All available pipelines exhausted")
# Pipeline is done, remove self reference to release refs to user
# code
self.state.pipeline = None
# organise a return
if res is not None:
# Early pipeline completion
return res
else:
assert self.state.cr is not None
return self.state.cr
def _compile_bytecode(self):
"""
Populate and run pipeline for bytecode input
"""
assert self.state.func_ir is None
return self._compile_core()
def _compile_ir(self):
"""
Populate and run pipeline for IR input
"""
assert self.state.func_ir is not None
return self._compile_core()
class Compiler(CompilerBase):
"""The default compiler
"""
def define_pipelines(self):
# this maintains the objmode fallback behaviour
pms = []
if not self.state.flags.force_pyobject:
pms.append(DefaultPassBuilder.define_nopython_pipeline(self.state))
if self.state.status.can_fallback or self.state.flags.force_pyobject:
pms.append(
DefaultPassBuilder.define_objectmode_pipeline(self.state)
)
return pms
class DefaultPassBuilder(object):
"""
This is the default pass builder, it contains the "classic" default
pipelines as pre-canned PassManager instances:
- nopython
- objectmode
- interpreted
- typed
- untyped
- nopython lowering
"""
@staticmethod
def define_nopython_pipeline(state, name='nopython'):
"""Returns an nopython mode pipeline based PassManager
"""
# compose pipeline from untyped, typed and lowering parts
dpb = DefaultPassBuilder
pm = PassManager(name)
untyped_passes = dpb.define_untyped_pipeline(state)
pm.passes.extend(untyped_passes.passes)
typed_passes = dpb.define_typed_pipeline(state)
pm.passes.extend(typed_passes.passes)
lowering_passes = dpb.define_nopython_lowering_pipeline(state)
pm.passes.extend(lowering_passes.passes)
pm.finalize()
return pm
@staticmethod
def define_nopython_lowering_pipeline(state, name='nopython_lowering'):
pm = PassManager(name)
# legalise
pm.add_pass(NoPythonSupportedFeatureValidation,
"ensure features that are in use are in a valid form")
pm.add_pass(IRLegalization,
"ensure IR is legal prior to lowering")
# lower
pm.add_pass(NativeLowering, "native lowering")
pm.add_pass(NoPythonBackend, "nopython mode backend")
pm.add_pass(DumpParforDiagnostics, "dump parfor diagnostics")
pm.finalize()
return pm
@staticmethod
def define_typed_pipeline(state, name="typed"):
"""Returns the typed part of the nopython pipeline"""
pm = PassManager(name)
# typing
pm.add_pass(NopythonTypeInference, "nopython frontend")
pm.add_pass(AnnotateTypes, "annotate types")
# strip phis
pm.add_pass(PreLowerStripPhis, "remove phis nodes")
# optimisation
pm.add_pass(InlineOverloads, "inline overloaded functions")
if state.flags.auto_parallel.enabled:
pm.add_pass(PreParforPass, "Preprocessing for parfors")
if not state.flags.no_rewrites:
pm.add_pass(NopythonRewrites, "nopython rewrites")
if state.flags.auto_parallel.enabled:
pm.add_pass(ParforPass, "convert to parfors")
pm.finalize()
return pm
@staticmethod
def define_untyped_pipeline(state, name='untyped'):
"""Returns an untyped part of the nopython pipeline"""
pm = PassManager(name)
if state.func_ir is None:
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
pm.add_pass(IRProcessing, "processing IR")
pm.add_pass(WithLifting, "Handle with contexts")
# inline closures early in case they are using nonlocal's
# see issue #6585.
pm.add_pass(InlineClosureLikes,
"inline calls to locally defined closures")
# pre typing
if not state.flags.no_rewrites:
pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
pm.add_pass(DeadBranchPrune, "dead branch pruning")
pm.add_pass(GenericRewrites, "nopython rewrites")
# convert any remaining closures into functions
pm.add_pass(MakeFunctionToJitFunction,
"convert make_function into JIT functions")
# inline functions that have been determined as inlinable and rerun
# branch pruning, this needs to be run after closures are inlined as
# the IR repr of a closure masks call sites if an inlinable is called
# inside a closure
pm.add_pass(InlineInlinables, "inline inlinable functions")
if not state.flags.no_rewrites:
pm.add_pass(DeadBranchPrune, "dead branch pruning")
pm.add_pass(FindLiterallyCalls, "find literally calls")
pm.add_pass(LiteralUnroll, "handles literal_unroll")
if state.flags.enable_ssa:
pm.add_pass(ReconstructSSA, "ssa")
pm.finalize()
return pm
@staticmethod
def define_objectmode_pipeline(state, name='object'):
"""Returns an object-mode pipeline based PassManager
"""
pm = PassManager(name)
if state.func_ir is None:
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
else:
# Reaches here if it's a fallback from nopython mode.
# Strip the phi nodes.
pm.add_pass(PreLowerStripPhis, "remove phis nodes")
pm.add_pass(IRProcessing, "processing IR")
if utils.PYVERSION >= (3, 7):
# The following passes are needed to adjust for looplifting
pm.add_pass(CanonicalizeLoopEntry, "canonicalize loop entry")
pm.add_pass(CanonicalizeLoopExit, "canonicalize loop exit")
pm.add_pass(ObjectModeFrontEnd, "object mode frontend")
pm.add_pass(InlineClosureLikes,
"inline calls to locally defined closures")
# convert any remaining closures into functions
pm.add_pass(MakeFunctionToJitFunction,
"convert make_function into JIT functions")
pm.add_pass(AnnotateTypes, "annotate types")
pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
pm.add_pass(ObjectModeBackEnd, "object mode backend")
pm.finalize()
return pm
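# --- Hedged sketch (editorial addition): a minimal custom compiler ---
# `define_pipelines` is the documented override point on CompilerBase; this
# subclass simply reuses the pre-canned nopython pipeline and skips the
# object-mode fallback.
class _ExampleNopythonOnlyCompiler(CompilerBase):
    def define_pipelines(self):
        return [DefaultPassBuilder.define_nopython_pipeline(self.state)]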
def compile_extra(typingctx, targetctx, func, args, return_type, flags,
locals, library=None, pipeline_class=Compiler):
"""Compiler entry point
Parameter
---------
typingctx :
typing context
targetctx :
target context
func : function
the python function to be compiled
args : tuple, list
argument types
return_type :
Use ``None`` to indicate void return
flags : numba.compiler.Flags
compiler flags
library : numba.codegen.CodeLibrary
Used to store the compiled code.
If it is ``None``, a new CodeLibrary is used.
pipeline_class : type like numba.compiler.CompilerBase
compiler pipeline
"""
pipeline = pipeline_class(typingctx, targetctx, library,
args, return_type, flags, locals)
return pipeline.compile_extra(func)
def compile_ir(typingctx, targetctx, func_ir, args, return_type, flags,
locals, lifted=(), lifted_from=None, is_lifted_loop=False,
library=None, pipeline_class=Compiler):
"""
Compile a function with the given IR.
For internal use only.
"""
# This is a special branch that should only run on IR from a lifted loop
if is_lifted_loop:
# This code is pessimistic and costly, but it is a not often trodden
# path and it will go away once IR is made immutable. The problem is
# that the rewrite passes can mutate the IR into a state that makes
# it possible for invalid tokens to be transmitted to lowering which
# then trickle through into LLVM IR and causes RuntimeErrors as LLVM
# cannot compile it. As a result the following approach is taken:
# 1. Create some new flags that copy the original ones but switch
# off rewrites.
# 2. Compile with 1. to get a compile result
# 3. Try and compile another compile result but this time with the
# original flags (and IR being rewritten).
# 4. If 3 was successful, use the result, else use 2.
# create flags with no rewrites
norw_flags = copy.deepcopy(flags)
norw_flags.no_rewrites = True
def compile_local(the_ir, the_flags):
pipeline = pipeline_class(typingctx, targetctx, library,
args, return_type, the_flags, locals)
return pipeline.compile_ir(func_ir=the_ir, lifted=lifted,
lifted_from=lifted_from)
# compile with rewrites off, IR shouldn't be mutated irreparably
norw_cres = compile_local(func_ir.copy(), norw_flags)
# try and compile with rewrites on if no_rewrites was not set in the
# original flags, IR might get broken but we've got a CompileResult
# that's usable from above.
rw_cres = None
if not flags.no_rewrites:
# Suppress warnings in compilation retry
with warnings.catch_warnings():
warnings.simplefilter("ignore", errors.NumbaWarning)
try:
rw_cres = compile_local(func_ir.copy(), flags)
except Exception:
pass
# if the rewrite variant of compilation worked, use it, else use
# the norewrites backup
if rw_cres is not None:
cres = rw_cres
else:
cres = norw_cres
return cres
else:
pipeline = pipeline_class(typingctx, targetctx, library,
args, return_type, flags, locals)
return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
lifted_from=lifted_from)
def compile_internal(typingctx, targetctx, library,
func, args, return_type, flags, locals):
"""
For internal use only.
"""
pipeline = Compiler(typingctx, targetctx, library,
args, return_type, flags, locals)
return pipeline.compile_extra(func)
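# --- Hedged usage sketch (editorial addition) ---
# compile_isolated above is intended for tests; `types` comes from
# numba.core, as used elsewhere in the code base.
def _example_compile_isolated():
    from numba.core import types
    def twice(x):
        return x * 2
    cres = compile_isolated(twice, (types.intp,))
    # entry_point is the compiled callable stored on the CompileResult
    return cres.entry_point(21)  # -> 42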
| 34.105882
| 80
| 0.601318
|
2a8a6da652a66a194b3fbf78a4f936fbd5aae01f
| 739
|
py
|
Python
|
hashkernel/tests/__init__.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
hashkernel/tests/__init__.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
hashkernel/tests/__init__.py
|
hashstore/hashkernel
|
4a0116b1872047626e87c5c350ffd65e311e618f
|
[
"Apache-2.0"
] | null | null | null |
from random import Random
from time import perf_counter
from hashkernel import Stringable
class BytesGen:
def __init__(self, seed=None):
self.random = Random()
if seed is None:
self.random.seed(perf_counter(), version=2)
else:
self.random.seed(seed, version=2)
def randint_repeat(self, start, end, repeat):
return (self.random.randint(start, end) for _ in range(repeat))
def get_bytes(self, length):
return bytes(self.randint_repeat(0, 255, int(length)))
def rand_bytes(seed, size):
return BytesGen(seed).get_bytes(size)
class StringableExample(Stringable):
def __init__(self, s):
self.s = s
def __str__(self):
return self.s
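# --- Hedged usage sketch (editorial addition) ---
# The same seed always yields the same pseudorandom payload.
def _example_deterministic_payload():
    assert rand_bytes(0, 8) == rand_bytes(0, 8)
    return BytesGen(seed=0).get_bytes(16)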
| 23.09375
| 71
| 0.654939
|
2b8c94bb42521c1d5aa5bb8ecba3c9d5d0e07d38
| 11,040
|
py
|
Python
|
tests/meerkat/columns/test_image_column.py
|
HazyResearch/meerkat
|
e3b437d47809ef8e856a5f732ac1e11a1176ba1f
|
[
"Apache-2.0"
] | null | null | null |
tests/meerkat/columns/test_image_column.py
|
HazyResearch/meerkat
|
e3b437d47809ef8e856a5f732ac1e11a1176ba1f
|
[
"Apache-2.0"
] | null | null | null |
tests/meerkat/columns/test_image_column.py
|
HazyResearch/meerkat
|
e3b437d47809ef8e856a5f732ac1e11a1176ba1f
|
[
"Apache-2.0"
] | null | null | null |
"""Unittests for NumpyColumn."""
from __future__ import annotations
import os
from typing import List, Union
import numpy as np
import pandas as pd
import pytest
import torch
import torchvision.datasets.folder as folder
from PIL import Image
from torchvision.transforms.functional import to_tensor
import meerkat
from meerkat import ImageColumn
from meerkat.columns.abstract import AbstractColumn
from meerkat.columns.file_column import FileCell
from meerkat.columns.lambda_column import LambdaCell
from meerkat.columns.list_column import ListColumn
from meerkat.columns.pandas_column import PandasSeriesColumn
from meerkat.columns.tensor_column import TensorColumn
from .abstract import AbstractColumnTestBed, TestAbstractColumn
class ImageColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"transform": [True, False],
"use_base_dir": [True, False],
}
def __init__(
self,
tmpdir: str,
length: int = 16,
transform: bool = False,
use_base_dir: bool = False,
seed: int = 123,
):
self.image_paths = []
self.image_arrays = []
self.ims = []
self.data = []
transform = to_tensor if transform else None
self.base_dir = tmpdir if use_base_dir else None
for i in range(0, length):
self.image_arrays.append((i * np.ones((4, 4, 3))).astype(np.uint8))
im = Image.fromarray(self.image_arrays[-1])
self.ims.append(im)
self.data.append(transform(im) if transform else im)
filename = "{}.png".format(i)
im.save(os.path.join(tmpdir, filename))
if use_base_dir:
self.image_paths.append(filename)
else:
self.image_paths.append(os.path.join(tmpdir, filename))
if transform is not None:
self.data = torch.stack(self.data)
self.transform = transform
self.col = ImageColumn.from_filepaths(
self.image_paths,
transform=transform,
loader=folder.default_loader,
base_dir=self.base_dir,
)
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
kwarg: int = 0,
salt: int = 1,
):
if not materialize:
if batched:
return {"fn": lambda x, k=0: x, "expected_result": self.col}
else:
                # can't check for cell column equivalence because the `fn` is a
                # bound method of different objects (we batch by converting
                # non-batched fns to batched functions), so we call get
if self.transform is None:
return {
"fn": lambda x, k=0: x.get().rotate(45 + salt + k),
"expected_result": ListColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
else:
return {
"fn": lambda x, k=0: x.get() + salt + k,
"expected_result": TensorColumn(
torch.stack([self.transform(im) for im in self.ims])
+ salt
+ kwarg
),
}
else:
if self.transform is None:
return {
"fn": (lambda x, k=0: [im.rotate(45 + salt + k) for im in x])
if batched
else (lambda x, k=0: x.rotate(45 + salt + k)),
"expected_result": ListColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
else:
return {
"fn": lambda x, k=0: x + salt + k,
"expected_result": TensorColumn(
torch.stack([self.transform(im) for im in self.ims])
+ salt
+ kwarg
),
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
if not materialize:
if batched:
return {
"fn": lambda x, k=0: [
int(os.path.splitext(os.path.basename(cell.data))[0])
< (4 + salt + k)
for cell in x.lz
],
"expected_result": self.col.lz[: 4 + salt + kwarg],
}
else:
return {
"fn": (
lambda x, k=0: int(
os.path.splitext(os.path.basename(x.data))[0]
)
< (4 + salt + k)
),
"expected_result": self.col.lz[: 4 + salt + kwarg],
}
else:
if self.transform is None:
return {
"fn": (lambda x, k=0: [im.rotate(45 + salt + k) for im in x])
if batched
else (lambda x, k=0: x.rotate(45 + salt + k)),
"expected_result": ListColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
else:
return {
"fn": lambda x, k=0: (
(x.mean(dim=[1, 2, 3]) if batched else x.mean()) > salt + k
).to(bool),
"expected_result": self.col.lz[
torch.stack([self.transform(im) for im in self.ims])
.mean(dim=[1, 2, 3])
.numpy()
> salt + kwarg
],
}
def get_data(self, index, materialize: bool = True):
if materialize:
if isinstance(index, int):
return self.data[index]
if self.transform is not None:
return self.data[index]
else:
index = np.arange(len(self.data))[index]
return [self.data[idx] for idx in index]
else:
if isinstance(index, int):
return FileCell(
data=self.image_paths[index],
loader=self.col.loader,
transform=self.col.transform,
base_dir=self.base_dir,
)
index = np.arange(len(self.data))[index]
return PandasSeriesColumn([self.image_paths[idx] for idx in index])
@staticmethod
def assert_data_equal(
data1: Union[Image.Image, AbstractColumn, List, torch.Tensor],
data2: Union[Image.Image, AbstractColumn, List, torch.Tensor],
):
if isinstance(data1, Image.Image) or isinstance(data1, List):
assert data1 == data2
elif isinstance(data1, AbstractColumn):
assert data1.is_equal(data2)
elif torch.is_tensor(data1):
assert (data1 == data2).all()
elif isinstance(data1, LambdaCell):
assert data1 == data2
else:
raise ValueError(
"Cannot assert data equal between objects type:"
f" {type(data1), type(data2)}"
)
@pytest.fixture
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
class TestImageColumn(TestAbstractColumn):
__test__ = True
testbed_class: type = ImageColumnTestBed
column_class: type = ImageColumn
def _get_data_to_set(self, testbed, data_index):
return np.zeros_like(testbed.get_data(data_index))
@ImageColumnTestBed.parametrize(single=True, params={"index_type": [np.ndarray]})
def test_set_item(self, testbed, index_type: type):
with pytest.raises(ValueError, match="Cannot setitem on a `LambdaColumn`."):
testbed.col[0] = 0
@ImageColumnTestBed.parametrize(params={"index_type": [np.array]})
def test_getitem(self, testbed, index_type: type):
return super().test_getitem(testbed, index_type=index_type)
@ImageColumnTestBed.parametrize(
config={"transform": [True]},
params={"batched": [True, False], "materialize": [True, False]},
)
def test_filter_1(
self, testbed: AbstractColumnTestBed, batched: bool, materialize: bool
):
return super().test_filter_1(testbed, batched, materialize=materialize)
@ImageColumnTestBed.parametrize(
params={"batched": [True, False], "materialize": [True, False]}
)
def test_map_return_multiple(
self, testbed: AbstractColumnTestBed, batched: bool, materialize: bool
):
return super().test_map_return_multiple(
testbed, batched, materialize=materialize
)
@ImageColumnTestBed.parametrize(
params={"batched": [True, False], "materialize": [True, False]}
)
def test_map_return_single(
self, testbed: AbstractColumnTestBed, batched: bool, materialize: bool
):
return super().test_map_return_single(testbed, batched, materialize)
@ImageColumnTestBed.parametrize(
params={"batched": [True, False], "materialize": [True, False]}
)
def test_map_return_single_w_kwarg(
self, testbed: AbstractColumnTestBed, batched: bool, materialize: bool
):
return super().test_map_return_single_w_kwarg(testbed, batched, materialize)
@ImageColumnTestBed.parametrize(params={"n": [1, 2, 3]})
def test_concat(self, testbed: AbstractColumnTestBed, n: int):
return super().test_concat(testbed, n=n)
@ImageColumnTestBed.parametrize()
def test_copy(self, testbed: AbstractColumnTestBed):
return super().test_copy(testbed)
@ImageColumnTestBed.parametrize()
def test_io(self, tmp_path, testbed):
# uses the tmp_path fixture which will provide a
# temporary directory unique to the test invocation,
# important for dataloader
col, _ = testbed.col, testbed.data
path = os.path.join(tmp_path, "test")
col.write(path)
new_col = self.column_class.read(path)
assert isinstance(new_col, self.column_class)
# can't check if the functions are the same since they point to different
# methods
assert col.data.is_equal(new_col.data)
@ImageColumnTestBed.parametrize()
def test_pickle(self, testbed):
super().test_pickle(testbed)
@ImageColumnTestBed.parametrize(params={"max_rows": [6, 16, 20]})
def test_repr_pandas(self, testbed, max_rows):
meerkat.config.DisplayOptions.max_rows = max_rows
series, _ = testbed.col._repr_pandas_()
assert isinstance(series, pd.Series)
assert len(series) == min(len(series), max_rows + 1)
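# --- Hedged usage sketch (editorial addition, mirrors the testbed above) ---
# Builds an ImageColumn over image files; `image_dir` and `filenames` are
# placeholders supplied by the caller.
def _example_image_column(image_dir, filenames):
    return ImageColumn.from_filepaths(
        filenames,
        loader=folder.default_loader,
        transform=to_tensor,
        base_dir=image_dir,
    )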
| 35.844156
| 86
| 0.546649
|
ec69c137116e6bb51e0dd23bdf6d7c07c746076a
| 1,073
|
py
|
Python
|
doc/gauss/listings/containers/Mersenne.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 25
|
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
doc/gauss/listings/containers/Mersenne.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 53
|
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
doc/gauss/listings/containers/Mersenne.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 12
|
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
import random
from PointCloud import PointCloud
class Mersenne(PointCloud):
"""
A point generator implemented using the Mersenne Twister random number generator that is
available as part of the python standard library
"""
# interface
def points(self, n, box):
"""
Generate {n} random points in the interior of {box}
"""
# create the point container using a nested list comprehension: the outer one builds
# the container of points, the inner one builds individual points as containers of
# random numbers within the interval of box along each coordinate axis
sample = [
[ random.uniform(*interval) for interval in box ]
for i in range(n)
]
# note the *interval notation in the call to uniform: it unpacks the interval and
# supplies uniform with as many arguments as there are entities in interval
return sample
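# --- Hedged standalone sketch (editorial addition) ---
# The same sampling pattern as Mersenne.points, without the pyre machinery:
# one uniform draw per coordinate interval, repeated n times.
def sample_box(n, box):
    return [[random.uniform(*interval) for interval in box] for _ in range(n)]
# e.g. sample_box(3, [(0, 1), (0, 1)]) gives three points in the unit square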
# end of file
| 29
| 92
| 0.657036
|
32f765bcc3fefa1db1d93d14f72687f16f84f453
| 10,652
|
py
|
Python
|
darknet.py
|
beric7/YOLOv4_infrastructure
|
d5c7ec0296dbe3db656ab6a0259bc709162539d4
|
[
"Apache-2.0"
] | null | null | null |
darknet.py
|
beric7/YOLOv4_infrastructure
|
d5c7ec0296dbe3db656ab6a0259bc709162539d4
|
[
"Apache-2.0"
] | null | null | null |
darknet.py
|
beric7/YOLOv4_infrastructure
|
d5c7ec0296dbe3db656ab6a0259bc709162539d4
|
[
"Apache-2.0"
] | null | null | null |
#!python3
"""
Python 3 wrapper for identifying objects in images
Requires DLL compilation
Both the GPU and no-GPU version should be compiled; the no-GPU version should be renamed "yolo_cpp_dll_nogpu.dll".
On a GPU system, you can force CPU evaluation by any of:
- Set global variable DARKNET_FORCE_CPU to True
- Set environment variable CUDA_VISIBLE_DEVICES to -1
- Set environment variable "FORCE_CPU" to "true"
- Set environment variable "DARKNET_PATH" to path darknet lib .so (for Linux)
Directly viewing or returning bounding-boxed images requires scikit-image to be installed (`pip install scikit-image`)
Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
@author: Philip Kahn
@date: 20180503
"""
from ctypes import *
import math
import random
import os
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int),
("uc", POINTER(c_float)),
("points", c_int),
("embeddings", POINTER(c_float)),
("embedding_size", c_int),
("sim", c_float),
("track_id", c_int)]
class DETNUMPAIR(Structure):
_fields_ = [("num", c_int),
("dets", POINTER(DETECTION))]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
def network_width(net):
return lib.network_width(net)
def network_height(net):
return lib.network_height(net)
def bbox2points(bbox):
"""
From bounding box yolo format
to corner points cv2 rectangle
"""
x, y, w, h = bbox
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def class_colors(names):
"""
Create a dict with one random BGR color for each
class name
"""
return {name: (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)) for name in names}
def load_network(config_file, data_file, weights, batch_size=1):
"""
load model description and weights from config files
args:
config_file (str): path to .cfg model file
data_file (str): path to .data model file
weights (str): path to weights
returns:
network: trained model
class_names
class_colors
"""
network = load_net_custom(
config_file.encode("ascii"),
weights.encode("ascii"), 0, batch_size)
metadata = load_meta(data_file.encode("ascii"))
class_names = [metadata.names[i].decode("ascii") for i in range(metadata.classes)]
colors = class_colors(class_names)
return network, class_names, colors
def print_detections(detections, coordinates=False):
print("\nObjects:")
for label, confidence, bbox in detections:
x, y, w, h = bbox
if coordinates:
print("{}: {}% (left_x: {:.0f} top_y: {:.0f} width: {:.0f} height: {:.0f})".format(label, confidence, x, y, w, h))
else:
print("{}: {}%".format(label, confidence))
def draw_boxes(detections, image, colors):
import cv2
for label, confidence, bbox in detections:
left, top, right, bottom = bbox2points(bbox)
cv2.rectangle(image, (left, top), (right, bottom), colors[label], 1)
cv2.putText(image, "{} [{:.2f}]".format(label, float(confidence)),
(left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
colors[label], 2)
return image
def decode_detection(detections):
decoded = []
for label, confidence, bbox in detections:
confidence = str(round(confidence * 100, 2))
decoded.append((str(label), confidence, bbox))
return decoded
def remove_negatives(detections, class_names, num):
"""
Remove all classes with 0% confidence within the detection
"""
predictions = []
for j in range(num):
for idx, name in enumerate(class_names):
if detections[j].prob[idx] > 0:
bbox = detections[j].bbox
bbox = (bbox.x, bbox.y, bbox.w, bbox.h)
predictions.append((name, detections[j].prob[idx], (bbox)))
return predictions
def detect_image(network, class_names, image, thresh=.5, hier_thresh=.5, nms=.45):
"""
Returns a list with highest confidence class and their bbox
"""
pnum = pointer(c_int(0))
predict_image(network, image)
detections = get_network_boxes(network, image.w, image.h,
thresh, hier_thresh, None, 0, pnum, 0)
num = pnum[0]
if nms:
do_nms_sort(detections, num, len(class_names), nms)
predictions = remove_negatives(detections, class_names, num)
predictions = decode_detection(predictions)
free_detections(detections, num)
return sorted(predictions, key=lambda x: x[1])
# lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
hasGPU = True
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
envKeys = list()
for k, v in os.environ.items():
envKeys.append(k)
try:
try:
tmp = os.environ["FORCE_CPU"].lower()
if tmp in ["1", "true", "yes", "on"]:
raise ValueError("ForceCPU")
else:
print("Flag value {} not forcing CPU mode".format(tmp))
except KeyError:
# We never set the flag
if 'CUDA_VISIBLE_DEVICES' in envKeys:
if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
raise ValueError("ForceCPU")
try:
global DARKNET_FORCE_CPU
if DARKNET_FORCE_CPU:
raise ValueError("ForceCPU")
except NameError as cpu_error:
print(cpu_error)
if not os.path.exists(winGPUdll):
raise ValueError("NoDLL")
lib = CDLL(winGPUdll, RTLD_GLOBAL)
except (KeyError, ValueError):
hasGPU = False
if os.path.exists(winNoGPUdll):
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
print("Notice: CPU-only mode")
else:
            # Try the other way, in case no_gpu was compiled but not renamed
lib = CDLL(winGPUdll, RTLD_GLOBAL)
print("Environment variables indicated a CPU run, but we didn't find {}. Trying a GPU run anyway.".format(winNoGPUdll))
else:
lib = CDLL(os.path.join(
os.environ.get('DARKNET_PATH', './'),
"libdarknet.so"), RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
init_cpu = lib.init_cpu
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_batch_detections = lib.free_batch_detections
free_batch_detections.argtypes = [POINTER(DETNUMPAIR), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
free_network_ptr = lib.free_network_ptr
free_network_ptr.argtypes = [c_void_p]
free_network_ptr.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)
network_predict_batch = lib.network_predict_batch
network_predict_batch.argtypes = [c_void_p, IMAGE, c_int, c_int, c_int,
c_float, c_float, POINTER(c_int), c_int, c_int]
network_predict_batch.restype = POINTER(DETNUMPAIR)
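# --- Hedged usage sketch (added for illustration; not part of the original wrapper) ---
# Minimal end-to-end use of the functions above. It assumes the darknet shared
# library loaded above is available and that the config/data/weights/image
# paths below exist; those file names are placeholders, not part of this module.
if __name__ == "__main__":
    import cv2  # only needed here for reading and annotating the image

    network, class_names, colors = load_network(
        "cfg/yolov4.cfg", "cfg/coco.data", "yolov4.weights", batch_size=1)
    width, height = network_width(network), network_height(network)

    # darknet expects an RGB image resized to the network input resolution
    frame = cv2.cvtColor(cv2.imread("dog.jpg"), cv2.COLOR_BGR2RGB)
    resized = cv2.resize(frame, (width, height), interpolation=cv2.INTER_LINEAR)
    darknet_image = make_image(width, height, 3)
    copy_image_from_bytes(darknet_image, resized.tobytes())

    detections = detect_image(network, class_names, darknet_image, thresh=0.25)
    free_image(darknet_image)

    print_detections(detections, coordinates=True)
    annotated = draw_boxes(detections, resized, colors)
    cv2.imwrite("prediction.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))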
| 33.39185
| 140
| 0.638847
|
cfd926738b1e09eee7c8dfb4b641e33f5422173e
| 2,161
|
py
|
Python
|
etl/parsers/etw/Microsoft_Windows_PerfNet.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 104
|
2020-03-04T14:31:31.000Z
|
2022-03-28T02:59:36.000Z
|
etl/parsers/etw/Microsoft_Windows_PerfNet.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 7
|
2020-04-20T09:18:39.000Z
|
2022-03-19T17:06:19.000Z
|
etl/parsers/etw/Microsoft_Windows_PerfNet.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 16
|
2020-03-05T18:55:59.000Z
|
2022-03-01T10:19:28.000Z
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-PerfNet
GUID : cab2b8a5-49b9-4eec-b1b0-fac21da05a3b
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=1000, version=1)
class Microsoft_Windows_PerfNet_1000_1(Etw):
pattern = Struct(
"Win32Error" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2000, version=1)
class Microsoft_Windows_PerfNet_2000_1(Etw):
pattern = Struct(
"Win32Error" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2001, version=1)
class Microsoft_Windows_PerfNet_2001_1(Etw):
pattern = Struct(
"Win32Error" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2001, version=2)
class Microsoft_Windows_PerfNet_2001_2(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2002, version=1)
class Microsoft_Windows_PerfNet_2002_1(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2003, version=1)
class Microsoft_Windows_PerfNet_2003_1(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2004, version=1)
class Microsoft_Windows_PerfNet_2004_1(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2005, version=1)
class Microsoft_Windows_PerfNet_2005_1(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul,
"IOCompletionNTSTATUS" / Int32ul
)
@declare(guid=guid("cab2b8a5-49b9-4eec-b1b0-fac21da05a3b"), event_id=2006, version=1)
class Microsoft_Windows_PerfNet_2006_1(Etw):
pattern = Struct(
"NTSTATUS" / Int32ul,
"IOCompletionNTSTATUS" / Int32ul
)
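# --- Hedged usage sketch (illustration only; not part of the generated parser module) ---
# Each class above exposes its payload layout as a construct Struct via the
# `pattern` attribute, so a raw user-data buffer can be decoded directly
# (assuming the etl package is importable). The sample bytes below are made up.
if __name__ == "__main__":
    payload = (5).to_bytes(4, "little")  # pretend the event carried Win32Error == 5
    parsed = Microsoft_Windows_PerfNet_1000_1.pattern.parse(payload)
    print(parsed.Win32Error)  # -> 5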
| 28.434211
| 123
| 0.716798
|
fdabdb156b624a4e3d69d72da3b004bcd3b3be82
| 18,330
|
py
|
Python
|
xarray/core/missing.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | 1
|
2019-10-05T18:20:27.000Z
|
2019-10-05T18:20:27.000Z
|
xarray/core/missing.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | null | null | null |
xarray/core/missing.py
|
martinResearch/xarray
|
e921d1bfa4785b10310f8b5d46a1efacba7e1cc9
|
[
"Apache-2.0"
] | null | null | null |
import warnings
from functools import partial
from typing import Any, Callable, Dict, Sequence
import numpy as np
import pandas as pd
from . import utils
from .common import _contains_datetime_like_objects
from .computation import apply_ufunc
from .duck_array_ops import dask_array_type
from .utils import OrderedSet, is_scalar
from .variable import Variable, broadcast_variables
class BaseInterpolator:
"""Generic interpolator class for normalizing interpolation methods
"""
cons_kwargs = None # type: Dict[str, Any]
call_kwargs = None # type: Dict[str, Any]
f = None # type: Callable
method = None # type: str
def __call__(self, x):
return self.f(x, **self.call_kwargs)
def __repr__(self):
return "{type}: method={method}".format(
type=self.__class__.__name__, method=self.method
)
class NumpyInterpolator(BaseInterpolator):
"""One-dimensional linear interpolation.
See Also
--------
numpy.interp
"""
def __init__(self, xi, yi, method="linear", fill_value=None, period=None):
if method != "linear":
raise ValueError("only method `linear` is valid for the NumpyInterpolator")
self.method = method
self.f = np.interp
self.cons_kwargs = {}
self.call_kwargs = {"period": period}
self._xi = xi
self._yi = yi
if fill_value is None:
self._left = np.nan
self._right = np.nan
elif isinstance(fill_value, Sequence) and len(fill_value) == 2:
self._left = fill_value[0]
self._right = fill_value[1]
elif is_scalar(fill_value):
self._left = fill_value
self._right = fill_value
else:
raise ValueError("%s is not a valid fill_value" % fill_value)
def __call__(self, x):
return self.f(
x,
self._xi,
self._yi,
left=self._left,
right=self._right,
**self.call_kwargs
)
class ScipyInterpolator(BaseInterpolator):
"""Interpolate a 1-D function using Scipy interp1d
See Also
--------
scipy.interpolate.interp1d
"""
def __init__(
self,
xi,
yi,
method=None,
fill_value=None,
assume_sorted=True,
copy=False,
bounds_error=False,
order=None,
**kwargs
):
from scipy.interpolate import interp1d
if method is None:
raise ValueError(
"method is a required argument, please supply a "
"valid scipy.inter1d method (kind)"
)
if method == "polynomial":
if order is None:
raise ValueError("order is required when method=polynomial")
method = order
self.method = method
self.cons_kwargs = kwargs
self.call_kwargs = {}
if fill_value is None and method == "linear":
fill_value = np.nan, np.nan
elif fill_value is None:
fill_value = np.nan
self.f = interp1d(
xi,
yi,
kind=self.method,
fill_value=fill_value,
bounds_error=False,
assume_sorted=assume_sorted,
copy=copy,
**self.cons_kwargs
)
class SplineInterpolator(BaseInterpolator):
"""One-dimensional smoothing spline fit to a given set of data points.
See Also
--------
scipy.interpolate.UnivariateSpline
"""
def __init__(
self,
xi,
yi,
method="spline",
fill_value=None,
order=3,
nu=0,
ext=None,
**kwargs
):
from scipy.interpolate import UnivariateSpline
if method != "spline":
raise ValueError("only method `spline` is valid for the SplineInterpolator")
self.method = method
self.cons_kwargs = kwargs
self.call_kwargs = {"nu": nu, "ext": ext}
if fill_value is not None:
raise ValueError("SplineInterpolator does not support fill_value")
self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs)
def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):
"""Wrapper for datasets
"""
ds = type(self)(coords=self.coords, attrs=self.attrs)
for name, var in self.data_vars.items():
if dim in var.dims:
ds[name] = func(var, dim=dim, **kwargs)
else:
ds[name] = var
return ds
def get_clean_interp_index(arr, dim, use_coordinate=True):
"""get index to use for x values in interpolation.
If use_coordinate is True, the coordinate that shares the name of the
dimension along which interpolation is being performed will be used as the
x values.
If use_coordinate is False, the x values are set as an equally spaced
sequence.
"""
if use_coordinate:
if use_coordinate is True:
index = arr.get_index(dim)
else:
index = arr.coords[use_coordinate]
if index.ndim != 1:
raise ValueError(
"Coordinates used for interpolation must be 1D, "
"%s is %dD." % (use_coordinate, index.ndim)
)
# raise if index cannot be cast to a float (e.g. MultiIndex)
try:
index = index.values.astype(np.float64)
except (TypeError, ValueError):
# pandas raises a TypeError
            # xarray/numpy raise a ValueError
raise TypeError(
"Index must be castable to float64 to support"
"interpolation, got: %s" % type(index)
)
# check index sorting now so we can skip it later
if not (np.diff(index) > 0).all():
raise ValueError("Index must be monotonicly increasing")
else:
axis = arr.get_axis_num(dim)
index = np.arange(arr.shape[axis], dtype=np.float64)
return index
def interp_na(
self, dim=None, use_coordinate=True, method="linear", limit=None, **kwargs
):
"""Interpolate values according to different methods.
"""
if dim is None:
raise NotImplementedError("dim is a required argument")
if limit is not None:
valids = _get_valid_fill_mask(self, dim, limit)
# method
index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate)
interp_class, kwargs = _get_interpolator(method, **kwargs)
interpolator = partial(func_interpolate_na, interp_class, **kwargs)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "overflow", RuntimeWarning)
warnings.filterwarnings("ignore", "invalid value", RuntimeWarning)
arr = apply_ufunc(
interpolator,
index,
self,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim]],
output_dtypes=[self.dtype],
dask="parallelized",
vectorize=True,
keep_attrs=True,
).transpose(*self.dims)
if limit is not None:
arr = arr.where(valids)
return arr
def func_interpolate_na(interpolator, x, y, **kwargs):
"""helper function to apply interpolation along 1 dimension"""
# it would be nice if this wasn't necessary, works around:
# "ValueError: assignment destination is read-only" in assignment below
out = y.copy()
nans = pd.isnull(y)
nonans = ~nans
# fast track for no-nans and all-nans cases
n_nans = nans.sum()
if n_nans == 0 or n_nans == len(y):
return y
f = interpolator(x[nonans], y[nonans], **kwargs)
out[nans] = f(x[nans])
return out
def _bfill(arr, n=None, axis=-1):
"""inverse of ffill"""
import bottleneck as bn
arr = np.flip(arr, axis=axis)
# fill
arr = bn.push(arr, axis=axis, n=n)
# reverse back to original
return np.flip(arr, axis=axis)
def ffill(arr, dim=None, limit=None):
"""forward fill missing values"""
import bottleneck as bn
axis = arr.get_axis_num(dim)
# work around for bottleneck 178
_limit = limit if limit is not None else arr.shape[axis]
return apply_ufunc(
bn.push,
arr,
dask="parallelized",
keep_attrs=True,
output_dtypes=[arr.dtype],
kwargs=dict(n=_limit, axis=axis),
).transpose(*arr.dims)
def bfill(arr, dim=None, limit=None):
"""backfill missing values"""
axis = arr.get_axis_num(dim)
# work around for bottleneck 178
_limit = limit if limit is not None else arr.shape[axis]
return apply_ufunc(
_bfill,
arr,
dask="parallelized",
keep_attrs=True,
output_dtypes=[arr.dtype],
kwargs=dict(n=_limit, axis=axis),
).transpose(*arr.dims)
def _get_interpolator(method, vectorizeable_only=False, **kwargs):
"""helper function to select the appropriate interpolator class
returns interpolator class and keyword arguments for the class
"""
interp1d_methods = [
"linear",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
valid_methods = interp1d_methods + [
"barycentric",
"krog",
"pchip",
"spline",
"akima",
]
has_scipy = True
try:
from scipy import interpolate
except ImportError:
has_scipy = False
# prioritize scipy.interpolate
if (
method == "linear"
and not kwargs.get("fill_value", None) == "extrapolate"
and not vectorizeable_only
):
kwargs.update(method=method)
interp_class = NumpyInterpolator
elif method in valid_methods:
if not has_scipy:
raise ImportError("Interpolation with method `%s` requires scipy" % method)
if method in interp1d_methods:
kwargs.update(method=method)
interp_class = ScipyInterpolator
elif vectorizeable_only:
raise ValueError(
"{} is not a vectorizeable interpolator. "
"Available methods are {}".format(method, interp1d_methods)
)
elif method == "barycentric":
interp_class = interpolate.BarycentricInterpolator
elif method == "krog":
interp_class = interpolate.KroghInterpolator
elif method == "pchip":
interp_class = interpolate.PchipInterpolator
elif method == "spline":
kwargs.update(method=method)
interp_class = SplineInterpolator
elif method == "akima":
interp_class = interpolate.Akima1DInterpolator
else:
raise ValueError("%s is not a valid scipy interpolator" % method)
else:
raise ValueError("%s is not a valid interpolator" % method)
return interp_class, kwargs
def _get_interpolator_nd(method, **kwargs):
"""helper function to select the appropriate interpolator class
returns interpolator class and keyword arguments for the class
"""
valid_methods = ["linear", "nearest"]
try:
from scipy import interpolate
except ImportError:
raise ImportError("Interpolation with method `%s` requires scipy" % method)
if method in valid_methods:
kwargs.update(method=method)
interp_class = interpolate.interpn
else:
raise ValueError(
"%s is not a valid interpolator for interpolating "
"over multiple dimensions." % method
)
return interp_class, kwargs
def _get_valid_fill_mask(arr, dim, limit):
"""helper function to determine values that can be filled when limit is not
None"""
kw = {dim: limit + 1}
# we explicitly use construct method to avoid copy.
new_dim = utils.get_temp_dimname(arr.dims, "_window")
return (
arr.isnull()
.rolling(min_periods=1, **kw)
.construct(new_dim, fill_value=False)
.sum(new_dim, skipna=False)
) <= limit
def _assert_single_chunk(var, axes):
for axis in axes:
if len(var.chunks[axis]) > 1 or var.chunks[axis][0] < var.shape[axis]:
raise NotImplementedError(
"Chunking along the dimension to be interpolated "
"({}) is not yet supported.".format(axis)
)
def _localize(var, indexes_coords):
""" Speed up for linear and nearest neighbor method.
Only consider a subspace that is needed for the interpolation
"""
indexes = {}
for dim, [x, new_x] in indexes_coords.items():
index = x.to_index()
imin = index.get_loc(np.min(new_x.values), method="nearest")
imax = index.get_loc(np.max(new_x.values), method="nearest")
indexes[dim] = slice(max(imin - 2, 0), imax + 2)
indexes_coords[dim] = (x[indexes[dim]], new_x)
return var.isel(**indexes), indexes_coords
def _floatize_x(x, new_x):
""" Make x and new_x float.
    This is particularly useful for datetime dtype.
x, new_x: tuple of np.ndarray
"""
x = list(x)
new_x = list(new_x)
for i in range(len(x)):
if _contains_datetime_like_objects(x[i]):
# Scipy casts coordinates to np.float64, which is not accurate
# enough for datetime64 (uses 64bit integer).
# We assume that the most of the bits are used to represent the
# offset (min(x)) and the variation (x - min(x)) can be
# represented by float.
xmin = x[i].values.min()
x[i] = x[i]._to_numeric(offset=xmin, dtype=np.float64)
new_x[i] = new_x[i]._to_numeric(offset=xmin, dtype=np.float64)
return x, new_x
def interp(var, indexes_coords, method, **kwargs):
""" Make an interpolation of Variable
Parameters
----------
var: Variable
index_coords:
Mapping from dimension name to a pair of original and new coordinates.
Original coordinates should be sorted in strictly ascending order.
Note that all the coordinates should be Variable objects.
method: string
One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic',
'cubic'}. For multidimensional interpolation, only
{'linear', 'nearest'} can be used.
**kwargs:
keyword arguments to be passed to scipy.interpolate
Returns
-------
Interpolated Variable
See Also
--------
DataArray.interp
Dataset.interp
"""
if not indexes_coords:
return var.copy()
# simple speed up for the local interpolation
if method in ["linear", "nearest"]:
var, indexes_coords = _localize(var, indexes_coords)
# default behavior
kwargs["bounds_error"] = kwargs.get("bounds_error", False)
# target dimensions
dims = list(indexes_coords)
x, new_x = zip(*[indexes_coords[d] for d in dims])
destination = broadcast_variables(*new_x)
# transpose to make the interpolated axis to the last position
broadcast_dims = [d for d in var.dims if d not in dims]
original_dims = broadcast_dims + dims
new_dims = broadcast_dims + list(destination[0].dims)
interped = interp_func(
var.transpose(*original_dims).data, x, destination, method, kwargs
)
result = Variable(new_dims, interped, attrs=var.attrs)
# dimension of the output array
out_dims = OrderedSet()
for d in var.dims:
if d in dims:
out_dims.update(indexes_coords[d][1].dims)
else:
out_dims.add(d)
return result.transpose(*tuple(out_dims))
def interp_func(var, x, new_x, method, kwargs):
"""
multi-dimensional interpolation for array-like. Interpolated axes should be
located in the last position.
Parameters
----------
var: np.ndarray or dask.array.Array
Array to be interpolated. The final dimension is interpolated.
x: a list of 1d array.
Original coordinates. Should not contain NaN.
new_x: a list of 1d array
New coordinates. Should not contain NaN.
method: string
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for
        1-dimensional interpolation.
{'linear', 'nearest'} for multidimensional interpolation
**kwargs:
Optional keyword arguments to be passed to scipy.interpolator
Returns
-------
interpolated: array
Interpolated array
Note
----
    This requires scipy to be installed.
See Also
--------
scipy.interpolate.interp1d
"""
if not x:
return var.copy()
if len(x) == 1:
func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs)
else:
func, kwargs = _get_interpolator_nd(method, **kwargs)
if isinstance(var, dask_array_type):
import dask.array as da
_assert_single_chunk(var, range(var.ndim - len(x), var.ndim))
chunks = var.chunks[: -len(x)] + new_x[0].shape
drop_axis = range(var.ndim - len(x), var.ndim)
new_axis = range(var.ndim - len(x), var.ndim - len(x) + new_x[0].ndim)
return da.map_blocks(
_interpnd,
var,
x,
new_x,
func,
kwargs,
dtype=var.dtype,
chunks=chunks,
new_axis=new_axis,
drop_axis=drop_axis,
)
return _interpnd(var, x, new_x, func, kwargs)
def _interp1d(var, x, new_x, func, kwargs):
# x, new_x are tuples of size 1.
x, new_x = x[0], new_x[0]
rslt = func(x, var, assume_sorted=True, **kwargs)(np.ravel(new_x))
if new_x.ndim > 1:
return rslt.reshape(var.shape[:-1] + new_x.shape)
if new_x.ndim == 0:
return rslt[..., -1]
return rslt
def _interpnd(var, x, new_x, func, kwargs):
x, new_x = _floatize_x(x, new_x)
if len(x) == 1:
return _interp1d(var, x, new_x, func, kwargs)
# move the interpolation axes to the start position
var = var.transpose(range(-len(x), var.ndim - len(x)))
# stack new_x to 1 vector, with reshape
xi = np.stack([x1.values.ravel() for x1 in new_x], axis=-1)
rslt = func(x, var, xi, **kwargs)
# move back the interpolation axes to the last position
rslt = rslt.transpose(range(-rslt.ndim + 1, 1))
return rslt.reshape(rslt.shape[:-1] + new_x[0].shape)
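# --- Hedged usage sketch (illustration only; not part of xarray itself) ---
# The interpolator classes above share one calling convention: construct with
# the known (xi, yi) samples, then call the instance with new x locations.
# A minimal check of NumpyInterpolator against plain numpy, plus the public
# entry point that normally reaches this module:
#
#     xi = np.array([0.0, 1.0, 2.0, 3.0])
#     yi = np.array([0.0, 10.0, 20.0, 30.0])
#     f = NumpyInterpolator(xi, yi, method="linear")
#     f(np.array([0.5, 1.5, 2.5]))            # -> array([ 5., 15., 25.])
#
#     import xarray as xr
#     da = xr.DataArray([0.0, np.nan, 2.0, np.nan, 4.0], dims="x")
#     da.interpolate_na(dim="x", method="linear").values   # -> [0. 1. 2. 3. 4.]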
| 29.23445
| 88
| 0.607965
|
43e3c2380103300ca4ed67ff7e55061988807a48
| 870
|
py
|
Python
|
phone_db/test.py
|
zhengwei5981/phone_db
|
a9e66f6064a1f971303d66c344fe8886e359c0cc
|
[
"MIT"
] | 105
|
2019-02-19T13:43:32.000Z
|
2022-03-10T06:55:42.000Z
|
phone_db/test.py
|
zhengwei5981/phone_db
|
a9e66f6064a1f971303d66c344fe8886e359c0cc
|
[
"MIT"
] | 2
|
2019-04-26T07:38:48.000Z
|
2019-05-30T09:34:20.000Z
|
phone_db/test.py
|
zhengwei5981/phone_db
|
a9e66f6064a1f971303d66c344fe8886e359c0cc
|
[
"MIT"
] | 26
|
2019-02-19T14:21:14.000Z
|
2021-06-18T12:10:15.000Z
|
# -*- coding: utf-8 -*-
import unittest
from sqlalchemy.orm.dynamic import AppenderQuery
from model import Session, Phone, Region
class TestModel(unittest.TestCase):
def setUp(self):
self.session = Session()
def tearDown(self):
pass
def test_phone(self):
p = self.session.query(Phone).filter_by(number=1761166).first()
self.assertEqual(p.number, 1761166)
self.assertEqual(p.type, 2)
self.assertIsInstance(p.region, Region)
def test_region(self):
r = self.session.query(Region).filter_by(zip_code='100000').first()
self.assertEqual(r.zip_code, '100000')
self.assertEqual(r.area_code, '010')
self.assertEqual(r.city, '北京')
self.assertEqual(r.province, '北京')
self.assertIsInstance(r.phones, AppenderQuery)
if __name__ == '__main__':
unittest.main()
| 27.1875
| 75
| 0.658621
|
d1fc540d8bf8fda8702fd4e2a0d85aabd692a78e
| 3,613
|
py
|
Python
|
docs/tools/blog.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 1
|
2022-03-25T03:10:20.000Z
|
2022-03-25T03:10:20.000Z
|
docs/tools/blog.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 2
|
2021-10-12T23:45:51.000Z
|
2022-02-05T23:27:52.000Z
|
docs/tools/blog.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import datetime
import logging
import os
import time
import nav # monkey patches mkdocs
import mkdocs.commands
from mkdocs import config
from mkdocs import exceptions
import mdx_clickhouse
import redirects
import util
def build_for_lang(lang, args):
logging.info(f"Building {lang} blog")
try:
theme_cfg = {
"name": None,
"custom_dir": os.path.join(os.path.dirname(__file__), "..", args.theme_dir),
"language": lang,
"direction": "ltr",
"static_templates": ["404.html"],
"extra": {
"now": int(
time.mktime(datetime.datetime.now().timetuple())
) # TODO better way to avoid caching
},
}
# the following list of languages is sorted according to
# https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers
languages = {"en": "English"}
site_names = {"en": "ClickHouse Blog"}
assert len(site_names) == len(languages)
site_dir = os.path.join(args.blog_output_dir, lang)
plugins = ["macros"]
if args.htmlproofer:
plugins.append("htmlproofer")
website_url = "https://clickhouse.com"
site_name = site_names.get(lang, site_names["en"])
blog_nav, post_meta = nav.build_blog_nav(lang, args)
raw_config = dict(
site_name=site_name,
site_url=f"{website_url}/blog/{lang}/",
docs_dir=os.path.join(args.blog_dir, lang),
site_dir=site_dir,
strict=True,
theme=theme_cfg,
nav=blog_nav,
copyright="©2016–2022 ClickHouse, Inc.",
use_directory_urls=True,
repo_name="ClickHouse/ClickHouse",
repo_url="https://github.com/ClickHouse/ClickHouse/",
edit_uri=f"edit/master/website/blog/{lang}",
markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS,
plugins=plugins,
extra=dict(
now=datetime.datetime.now().isoformat(),
rev=args.rev,
rev_short=args.rev_short,
rev_url=args.rev_url,
website_url=website_url,
events=args.events,
languages=languages,
includes_dir=os.path.join(os.path.dirname(__file__), "..", "_includes"),
is_amp=False,
is_blog=True,
post_meta=post_meta,
today=datetime.date.today().isoformat(),
),
)
cfg = config.load_config(**raw_config)
mkdocs.commands.build.build(cfg)
redirects.build_blog_redirects(args)
env = util.init_jinja2_env(args)
with open(
os.path.join(args.website_dir, "templates", "blog", "rss.xml"), "rb"
) as f:
rss_template_string = f.read().decode("utf-8").strip()
rss_template = env.from_string(rss_template_string)
with open(os.path.join(args.blog_output_dir, lang, "rss.xml"), "w") as f:
f.write(rss_template.render({"config": raw_config}))
logging.info(f"Finished building {lang} blog")
except exceptions.ConfigurationError as e:
raise SystemExit("\n" + str(e))
def build_blog(args):
tasks = []
for lang in args.blog_lang.split(","):
if lang:
tasks.append(
(
lang,
args,
)
)
util.run_function_in_parallel(build_for_lang, tasks, threads=False)
| 31.417391
| 88
| 0.569056
|
29d38a1061ef643d7b7bcfdc88b3a8d93b2e6008
| 280
|
py
|
Python
|
cloudmesh-exercises/e-cloudmesh-5.py
|
cybertraining-dsc/fa19-516-147
|
767e9e2e27ef48a3e8405093b9f105f334bd67d3
|
[
"Apache-2.0"
] | null | null | null |
cloudmesh-exercises/e-cloudmesh-5.py
|
cybertraining-dsc/fa19-516-147
|
767e9e2e27ef48a3e8405093b9f105f334bd67d3
|
[
"Apache-2.0"
] | 2
|
2019-09-25T00:58:50.000Z
|
2019-09-25T01:10:35.000Z
|
cloudmesh-exercises/e-cloudmesh-5.py
|
cybertraining-dsc/fa19-516-147
|
767e9e2e27ef48a3e8405093b9f105f334bd67d3
|
[
"Apache-2.0"
] | 1
|
2019-09-06T17:44:28.000Z
|
2019-09-06T17:44:28.000Z
|
# E.Cloudmesh.Common.5
# Develop a program that demonstrates the use of cloudmesh.common.StopWatch
from cloudmesh.common.StopWatch import StopWatch
from time import sleep
StopWatch.start("My stop watch")
sleep(2)
StopWatch.stop("My stop watch")
print(StopWatch.get("My stop watch"))
| 28
| 71
| 0.789286
|
a70fcbced3f5562f045a227ae38c26a805a8bbdc
| 10,112
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20170601/route_filter_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20170601/route_filter_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20170601/route_filter_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['RouteFilterRule']
class RouteFilterRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'Access']]] = None,
communities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
route_filter_rule_type: Optional[pulumi.Input[Union[str, 'RouteFilterRuleType']]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Route Filter Rule Resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'Access']] access: The access type of the rule. Valid values are: 'Allow', 'Deny'
:param pulumi.Input[Sequence[pulumi.Input[str]]] communities: The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Union[str, 'RouteFilterRuleType']] route_filter_rule_type: The rule type of the rule. Valid value is: 'Community'
:param pulumi.Input[str] rule_name: The name of the route filter rule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__['access'] = access
if communities is None and not opts.urn:
raise TypeError("Missing required property 'communities'")
__props__['communities'] = communities
__props__['id'] = id
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if route_filter_name is None and not opts.urn:
raise TypeError("Missing required property 'route_filter_name'")
__props__['route_filter_name'] = route_filter_name
if route_filter_rule_type is None and not opts.urn:
raise TypeError("Missing required property 'route_filter_rule_type'")
__props__['route_filter_rule_type'] = route_filter_rule_type
__props__['rule_name'] = rule_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/latest:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteFilterRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:RouteFilterRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilterRule, __self__).__init__(
'azure-nextgen:network/v20170601:RouteFilterRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilterRule':
"""
Get an existing RouteFilterRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return RouteFilterRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The access type of the rule. Valid values are: 'Allow', 'Deny'
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def communities(self) -> pulumi.Output[Sequence[str]]:
"""
The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
"""
return pulumi.get(self, "communities")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilterRuleType")
def route_filter_rule_type(self) -> pulumi.Output[str]:
"""
The rule type of the rule. Valid value is: 'Community'
"""
return pulumi.get(self, "route_filter_rule_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
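# --- Hedged usage sketch (illustration only; not part of the generated SDK) ---
# Inside a Pulumi program the resource above would be declared roughly as
# below; the resource group and route filter names are placeholders and must
# already exist or be created in the same program.
#
#     import pulumi
#     import pulumi_azure_nextgen.network.v20170601 as network
#
#     rule = network.RouteFilterRule(
#         "allow-exchange-communities",
#         access="Allow",
#         communities=["12076:5010", "12076:5020"],
#         resource_group_name="example-rg",
#         route_filter_name="example-route-filter",
#         route_filter_rule_type="Community",
#         rule_name="allow-exchange-communities",
#     )
#     pulumi.export("provisioning_state", rule.provisioning_state)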
| 55.256831
| 2,241
| 0.676919
|
aa52a9ef853bf6c47c116de46dc834c65bf85c7d
| 2,537
|
py
|
Python
|
union_find/kruskal_algorithm.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
union_find/kruskal_algorithm.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
union_find/kruskal_algorithm.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
class DisjointSet:
class Node:
def __init__(self, x):
self.parent = self
self.rank = 0
self.val = x
def __init__(self):
self.map = {}
def make_set(self, graph):
"""
Args:
graph: the adjacent list of the graph
"""
for v in graph:
self.map[v] = DisjointSet.Node(v)
def _find(self, node: 'DisjointSet.Node') -> 'DisjointSet.Node':
if node == node.parent:
return node
node.parent = self._find(node.parent)
return node.parent
def find(self, x):
return self._find(self.map[x]).val
def union(self, x, y):
n_x = self.map[x]
n_y = self.map[y]
root_x = self._find(n_x)
root_y = self._find(n_y)
if root_x is root_y:
# In the same set.
return
else:
if root_x.rank == root_y.rank:
root_x.rank += 1
root_y.parent = root_x
elif root_x.rank > root_y.rank:
root_y.parent = root_x
else:
root_x.parent = root_y
def kruskal(graph: dict, edges: list) -> tuple:
"""
Args:
graph: the adjacent list of the graph
edges: a list of edges in this format: (vertex_u, vertex_v, distance)
Return:
(mst, min_distance)
mst: A list of the edges in the MST
min_distance: the minimum distance
"""
djs = DisjointSet()
djs.make_set(graph)
edges = sorted(edges, key=lambda e: e[2])
mst = []
min_dist = 0
for e in edges:
        if djs.find(e[0]) != djs.find(e[1]):
djs.union(e[0], e[1])
mst.append((e[0], e[1]))
min_dist += e[2]
return mst, min_dist
if __name__ == "__main__":
edges = [('a', 'b', 4), ('b', 'c', 8), ('c', 'd', 7),
('d', 'e', 9), ('e', 'f', 10), ('d', 'f', 14),
('c', 'f', 4), ('i', 'c', 2), ('i', 'g', 6),
('g', 'f', 2), ('h', 'g', 1), ('h', 'i', 7),
('a', 'h', 8), ('b', 'h', 11)]
graph = {}
for e in edges:
if e[0] in graph:
graph[e[0]].append(e[1])
else:
graph[e[0]] = [e[1]]
if e[1] in graph:
graph[e[1]].append(e[0])
else:
graph[e[1]] = [e[0]]
mst, min_dist = kruskal(graph, edges)
print(mst)
print(min_dist)
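# --- Hedged illustration (not part of the original file) ---
# The DisjointSet can also be exercised on its own, independent of Kruskal;
# only the keys of the dict passed to make_set matter:
#
#     djs = DisjointSet()
#     djs.make_set({'a': None, 'b': None, 'c': None})
#     djs.union('a', 'b')
#     djs.find('a') == djs.find('b')   # -> True
#     djs.find('a') == djs.find('c')   # -> False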
| 23.490741
| 77
| 0.445014
|
6f0a091ff47bd095a328403484d4d59519143138
| 9,429
|
py
|
Python
|
Tools/peg_generator/pegen/build.py
|
Tech-Matt/cpython
|
ed524b4569b1e4a166886c880018418d46284378
|
[
"0BSD"
] | 2
|
2021-08-25T11:22:50.000Z
|
2021-08-28T05:35:44.000Z
|
Tools/peg_generator/pegen/build.py
|
Tech-Matt/cpython
|
ed524b4569b1e4a166886c880018418d46284378
|
[
"0BSD"
] | 16
|
2018-06-03T02:04:29.000Z
|
2022-03-01T00:00:50.000Z
|
Tools/peg_generator/pegen/build.py
|
zed/cpython
|
863154c9292e70c5a8a1a3f22ef4ee42e2304281
|
[
"0BSD"
] | 1
|
2021-09-04T09:56:10.000Z
|
2021-09-04T09:56:10.000Z
|
import pathlib
import shutil
import tokenize
import sysconfig
import tempfile
import itertools
from typing import Optional, Tuple, List, IO, Set, Dict
from pegen.c_generator import CParserGenerator
from pegen.grammar import Grammar
from pegen.grammar_parser import GeneratedParser as GrammarParser
from pegen.parser import Parser
from pegen.parser_generator import ParserGenerator
from pegen.python_generator import PythonParserGenerator
from pegen.tokenizer import Tokenizer
MOD_DIR = pathlib.Path(__file__).resolve().parent
TokenDefinitions = Tuple[Dict[int, str], Dict[str, int], Set[str]]
def get_extra_flags(compiler_flags: str, compiler_py_flags_nodist: str) -> List[str]:
flags = sysconfig.get_config_var(compiler_flags)
py_flags_nodist = sysconfig.get_config_var(compiler_py_flags_nodist)
if flags is None or py_flags_nodist is None:
return []
return f"{flags} {py_flags_nodist}".split()
def compile_c_extension(
generated_source_path: str,
build_dir: Optional[str] = None,
verbose: bool = False,
keep_asserts: bool = True,
) -> str:
"""Compile the generated source for a parser generator into an extension module.
The extension module will be generated in the same directory as the provided path
for the generated source, with the same basename (in addition to extension module
metadata). For example, for the source mydir/parser.c the generated extension
in a darwin system with python 3.8 will be mydir/parser.cpython-38-darwin.so.
If *build_dir* is provided, that path will be used as the temporary build directory
of distutils (this is useful in case you want to use a temporary directory).
"""
import distutils.log
from distutils.core import Distribution, Extension
from distutils.command.clean import clean # type: ignore
from distutils.command.build_ext import build_ext # type: ignore
from distutils.tests.support import fixup_build_ext # type: ignore
if verbose:
distutils.log.set_verbosity(distutils.log.DEBUG)
source_file_path = pathlib.Path(generated_source_path)
extension_name = source_file_path.stem
extra_compile_args = get_extra_flags("CFLAGS", "PY_CFLAGS_NODIST")
extra_compile_args.append("-DPy_BUILD_CORE_MODULE")
# Define _Py_TEST_PEGEN to not call PyAST_Validate() in Parser/pegen.c
extra_compile_args.append("-D_Py_TEST_PEGEN")
extra_link_args = get_extra_flags("LDFLAGS", "PY_LDFLAGS_NODIST")
if keep_asserts:
extra_compile_args.append("-UNDEBUG")
extension = [
Extension(
extension_name,
sources=[
str(MOD_DIR.parent.parent.parent / "Python" / "Python-ast.c"),
str(MOD_DIR.parent.parent.parent / "Python" / "asdl.c"),
str(MOD_DIR.parent.parent.parent / "Parser" / "tokenizer.c"),
str(MOD_DIR.parent.parent.parent / "Parser" / "pegen.c"),
str(MOD_DIR.parent.parent.parent / "Parser" / "string_parser.c"),
str(MOD_DIR.parent / "peg_extension" / "peg_extension.c"),
generated_source_path,
],
include_dirs=[
str(MOD_DIR.parent.parent.parent / "Include" / "internal"),
str(MOD_DIR.parent.parent.parent / "Parser"),
],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
]
dist = Distribution({"name": extension_name, "ext_modules": extension})
cmd = build_ext(dist)
fixup_build_ext(cmd)
cmd.inplace = True
if build_dir:
cmd.build_temp = build_dir
cmd.build_lib = build_dir
cmd.ensure_finalized()
cmd.run()
extension_path = source_file_path.parent / cmd.get_ext_filename(extension_name)
shutil.move(cmd.get_ext_fullpath(extension_name), extension_path)
cmd = clean(dist)
cmd.finalize_options()
cmd.run()
return extension_path
def build_parser(
grammar_file: str, verbose_tokenizer: bool = False, verbose_parser: bool = False
) -> Tuple[Grammar, Parser, Tokenizer]:
with open(grammar_file) as file:
tokenizer = Tokenizer(tokenize.generate_tokens(file.readline), verbose=verbose_tokenizer)
parser = GrammarParser(tokenizer, verbose=verbose_parser)
grammar = parser.start()
if not grammar:
raise parser.make_syntax_error(grammar_file)
return grammar, parser, tokenizer
def generate_token_definitions(tokens: IO[str]) -> TokenDefinitions:
all_tokens = {}
exact_tokens = {}
non_exact_tokens = set()
numbers = itertools.count(0)
for line in tokens:
line = line.strip()
if not line or line.startswith("#"):
continue
pieces = line.split()
index = next(numbers)
if len(pieces) == 1:
(token,) = pieces
non_exact_tokens.add(token)
all_tokens[index] = token
elif len(pieces) == 2:
token, op = pieces
exact_tokens[op.strip("'")] = index
all_tokens[index] = token
else:
raise ValueError(f"Unexpected line found in Tokens file: {line}")
return all_tokens, exact_tokens, non_exact_tokens
def build_c_generator(
grammar: Grammar,
grammar_file: str,
tokens_file: str,
output_file: str,
compile_extension: bool = False,
verbose_c_extension: bool = False,
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> ParserGenerator:
with open(tokens_file, "r") as tok_file:
all_tokens, exact_tok, non_exact_tok = generate_token_definitions(tok_file)
with open(output_file, "w") as file:
gen: ParserGenerator = CParserGenerator(
grammar, all_tokens, exact_tok, non_exact_tok, file, skip_actions=skip_actions
)
gen.generate(grammar_file)
if compile_extension:
with tempfile.TemporaryDirectory() as build_dir:
compile_c_extension(
output_file,
build_dir=build_dir,
verbose=verbose_c_extension,
keep_asserts=keep_asserts_in_extension,
)
return gen
def build_python_generator(
grammar: Grammar,
grammar_file: str,
output_file: str,
skip_actions: bool = False,
) -> ParserGenerator:
with open(output_file, "w") as file:
gen: ParserGenerator = PythonParserGenerator(grammar, file) # TODO: skip_actions
gen.generate(grammar_file)
return gen
def build_c_parser_and_generator(
grammar_file: str,
tokens_file: str,
output_file: str,
compile_extension: bool = False,
verbose_tokenizer: bool = False,
verbose_parser: bool = False,
verbose_c_extension: bool = False,
keep_asserts_in_extension: bool = True,
skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
"""Generate rules, C parser, tokenizer, parser generator for a given grammar
Args:
grammar_file (string): Path for the grammar file
tokens_file (string): Path for the tokens file
output_file (string): Path for the output file
compile_extension (bool, optional): Whether to compile the C extension.
Defaults to False.
verbose_tokenizer (bool, optional): Whether to display additional output
when generating the tokenizer. Defaults to False.
verbose_parser (bool, optional): Whether to display additional output
when generating the parser. Defaults to False.
verbose_c_extension (bool, optional): Whether to display additional
            output when compiling the C extension. Defaults to False.
keep_asserts_in_extension (bool, optional): Whether to keep the assert statements
when compiling the extension module. Defaults to True.
skip_actions (bool, optional): Whether to pretend no rule has any actions.
"""
grammar, parser, tokenizer = build_parser(grammar_file, verbose_tokenizer, verbose_parser)
gen = build_c_generator(
grammar,
grammar_file,
tokens_file,
output_file,
compile_extension,
verbose_c_extension,
keep_asserts_in_extension,
skip_actions=skip_actions,
)
return grammar, parser, tokenizer, gen
def build_python_parser_and_generator(
grammar_file: str,
output_file: str,
verbose_tokenizer: bool = False,
verbose_parser: bool = False,
skip_actions: bool = False,
) -> Tuple[Grammar, Parser, Tokenizer, ParserGenerator]:
"""Generate rules, python parser, tokenizer, parser generator for a given grammar
Args:
grammar_file (string): Path for the grammar file
output_file (string): Path for the output file
verbose_tokenizer (bool, optional): Whether to display additional output
when generating the tokenizer. Defaults to False.
verbose_parser (bool, optional): Whether to display additional output
when generating the parser. Defaults to False.
skip_actions (bool, optional): Whether to pretend no rule has any actions.
"""
grammar, parser, tokenizer = build_parser(grammar_file, verbose_tokenizer, verbose_parser)
gen = build_python_generator(
grammar,
grammar_file,
output_file,
skip_actions=skip_actions,
)
return grammar, parser, tokenizer, gen
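# --- Hedged usage sketch (illustration only; not part of pegen) ---
# Parsing a small toy grammar with the helpers above. The grammar text is a
# made-up example; real grammars live in Grammar/python.gram. Passing the same
# file to build_python_parser_and_generator() would additionally write a
# generated parser module to the chosen output path.
if __name__ == "__main__":
    toy_grammar = "start: NAME NEWLINE ENDMARKER\n"
    with tempfile.TemporaryDirectory() as tmp:
        grammar_path = pathlib.Path(tmp) / "toy.gram"
        grammar_path.write_text(toy_grammar)

        grammar, parser, tokenizer = build_parser(str(grammar_path))
        print(list(grammar.rules))  # -> ['start']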
| 36.405405
| 97
| 0.685651
|
c00b9fd7b44b75ce4c9ada8ba50aad4b5d12a64f
| 24,885
|
py
|
Python
|
cogs/roles.py
|
achueves/AceBot
|
30cce6206df49ddb18dc4af03c146a564f3594ea
|
[
"MIT"
] | 1
|
2021-09-04T04:52:13.000Z
|
2021-09-04T04:52:13.000Z
|
cogs/roles.py
|
achueves/AceBot
|
30cce6206df49ddb18dc4af03c146a564f3594ea
|
[
"MIT"
] | null | null | null |
cogs/roles.py
|
achueves/AceBot
|
30cce6206df49ddb18dc4af03c146a564f3594ea
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import disnake
from disnake.ext import commands
from cogs.mixins import AceMixin
from utils.configtable import ConfigTable
from utils.context import can_prompt
from utils.converters import EmojiConverter, MaxLengthConverter
from utils.string import po, shorten
log = logging.getLogger(__name__)
FOOTER_TEXT = 'Click a reaction to add/remove roles.'
RERUN_PROMPT = 'Re-run `roles spawn` for changes to take effect.'
UP_EMOJI = '🔼'
DOWN_EMOJI = '🔽'
MOVEUP_EMOJI = '⏫'
MOVEDOWN_EMOJI = '⏬'
ADD_ROLE_EMOJI = '🇷'
ADD_SEL_EMOJI = '🇸'
DEL_EMOJI = '➖'
EDIT_EMOJI = '✏️'
SAVE_EMOJI = '💾'
ABORT_EMOJI = '🚮'
EMBED_EMOJIS = (
ADD_SEL_EMOJI, ADD_ROLE_EMOJI, UP_EMOJI, DOWN_EMOJI,
MOVEUP_EMOJI, MOVEDOWN_EMOJI, EDIT_EMOJI, DEL_EMOJI, ABORT_EMOJI, SAVE_EMOJI
)
class SelectorEmojiConverter(EmojiConverter):
async def convert(self, ctx, argument):
argument = await super().convert(ctx, argument)
if argument in (role.emoji for role in ctx.head.selector.roles):
raise commands.CommandError('This emoji already exists in this selector.')
return argument
role_title_converter = MaxLengthConverter(199)
role_desc_converter = MaxLengthConverter(1024)
selector_title_converter = MaxLengthConverter(256)
selector_desc_converter = MaxLengthConverter(1024)
class SelectorInlineConverter(commands.Converter):
async def convert(self, ctx, argument):
lowered = argument.lower()
if lowered in ('yes', 'y', 'true', 't', '1', 'enable', 'on'):
return True
elif lowered in ('no', 'n', 'false', 'f', '0', 'disable', 'off'):
return False
else:
raise commands.CommandError('Input could not be interpreted as boolean.')
class CustomRoleConverter(commands.RoleConverter):
async def convert(self, ctx, argument):
try:
role = await super().convert(ctx, argument)
except commands.CommandError as exc:
raise commands.CommandError(str(exc))
if role == ctx.guild.default_role:
raise commands.CommandError('The *everyone* role is not allowed.')
if role.id in (other_role.role_id for selector in ctx.head.selectors for other_role in selector.roles):
raise commands.CommandError('This role already exists somewhere else.')
if ctx.author != ctx.guild.owner and role >= ctx.author.top_role:
raise commands.CommandError('Sorry, you can\'t add roles higher than your top role.')
config = await ctx.bot.config.get_entry(ctx.guild.id)
if role == config.mod_role:
raise commands.CommandError('Can\'t add moderation role to selector.')
return role.id
NEW_ROLE_PREDS = (
('What role do you want to add? (Send a role mention or just the role ID)', CustomRoleConverter()),
('What name should this role entry have?', role_title_converter),
('What emoji should be associated with this role?', SelectorEmojiConverter()),
('What description should this role have?', role_desc_converter),
)
NEW_SEL_PREDS = (
('What should the name of the selector be?', selector_title_converter),
)
EDIT_FOOTER = 'Send a message with your answer! Send \'exit\' to cancel.'
RETRY_MSG = 'Please try again, or send \'exit\' to cancel.'
class MaybeDirty:
dirty = False
def set_dirty(self):
self.dirty = True
def set_clean(self):
self.dirty = False
class MaybeNew:
@property
def is_new(self):
return self.id is None
class Role(MaybeDirty, MaybeNew):
def __init__(self, role_id, name, emoji, desc):
self.id = None
self.role_id = role_id
self.name = name
self.emoji = emoji
self.description = desc
@classmethod
def from_record(cls, record):
self = cls(record.get('role_id'), record.get('name'), record.get('emoji'), record.get('description'))
self.id = record.get('id')
return self
class Selector(MaybeDirty, MaybeNew):
def __init__(self, title, desc, roles: list):
self.id = None
self.title = title
self.description = desc
self.inline = True
self.roles = roles
@classmethod
def from_record(cls, record, roles):
self = cls(record.get('title'), record.get('description'), roles)
self.inline = record.get('inline')
self.id = record.get('id')
return self
def add_role(self, index, role):
self.set_dirty()
self.roles.insert(index, role)
class RoleHead(MaybeDirty):
front = '-> '
back = ' <-'
def __init__(self, conf, selectors: list):
self.conf = conf
self.selectors = selectors
self.selector_pos = 0
self.role_pos = None
@property
def selector(self):
return self.selectors[self.selector_pos]
@property
def role(self):
if self.role_pos is None:
return None
return self.selector.roles[self.role_pos]
@property
def selector_max(self):
return len(self.selectors) - 1
@property
def role_max(self):
return len(self.selector.roles) - 1
def add_selector(self, index, selector):
self.set_dirty()
self.selectors.insert(index, selector)
def move_selector(self, direction):
self.set_dirty()
swap_with = (self.selector_pos + direction) % (self.selector_max + 1)
self.selectors[self.selector_pos], self.selectors[swap_with] = self.selectors[swap_with], self.selectors[self.selector_pos]
self.selector_pos = swap_with
def move_role(self, direction):
sel = self.selector
sel.set_dirty()
new_sel_pos = (self.selector_pos + direction) % (self.selector_max + 1)
new_sel = self.selectors[new_sel_pos]
selector_count = len(self.selectors)
# if this is the last role in this selector and we're moving down
if selector_count > 1 and direction == 1 and self.role_pos == self.role_max:
# move the role to the first role slot in the selector below
new_sel.add_role(0, sel.roles.pop(self.role_pos))
self.selector_pos = new_sel_pos
self.role_pos = 0
# if this is the first role in this selector and we're moving up
elif selector_count > 1 and direction == -1 and self.role_pos == 0:
# move the role to the last role slot in the selector above
new_role_pos = len(new_sel.roles)
new_sel.add_role(new_role_pos, sel.roles.pop(self.role_pos))
self.selector_pos = new_sel_pos
self.role_pos = new_role_pos
# otherwise, just swap the two roles in this selector
elif len(self.selector.roles) > 1:
swap_with = (self.role_pos + direction) % len(sel.roles)
sel.roles[self.role_pos], sel.roles[swap_with] = sel.roles[swap_with], sel.roles[self.role_pos]
self.role_pos = swap_with
def up(self):
if self.role_pos is None:
# get the above selector
self.selector_pos = (self.selector_pos - 1) % (self.selector_max + 1)
role_count = len(self.selector.roles)
# if it has items, select the last item in that selector
if role_count:
self.role_pos = role_count - 1
else:
self.role_pos = None
# in a selector
else:
if self.role_pos > 0:
self.role_pos -= 1
else:
self.role_pos = None
def down(self):
# selector is currently selected
if self.role_pos is None:
# check if there's a role in the selector we can select
if len(self.selector.roles) > 0:
self.role_pos = 0
else:
# otherwise go to the selector below
self.selector_pos = (self.selector_pos + 1) % (self.selector_max + 1)
# role is currently selected
else:
# if there's a role below to select...
if self.role_pos != self.role_max:
self.role_pos += 1
# otherwise, select next selector
else:
self.role_pos = None
self.selector_pos = (self.selector_pos + 1) % (self.selector_max + 1)
def embed(self, footer=''):
e = disnake.Embed(
description=(
f'{ADD_SEL_EMOJI} Add selector\n{ADD_ROLE_EMOJI} Add role\n{UP_EMOJI} {DOWN_EMOJI} Move up/down\n'
f'{MOVEUP_EMOJI} {MOVEDOWN_EMOJI} Move item up/down\n{EDIT_EMOJI} Edit item\n'
f'{DEL_EMOJI} Delete item\n{ABORT_EMOJI} Discard changes\n{SAVE_EMOJI} Save changes\n\nEditor:'
)
)
if not self.selectors:
e.description = 'Click {} to create your first role selector!'.format(ADD_SEL_EMOJI)
return e
e.set_footer(text=footer)
def wrap(to_wrap):
return self.front + to_wrap + self.back
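        # wrap() marks the currently selected selector/role with the '-> ... <-' arrows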
for sel_idx, selector in enumerate(self.selectors):
rls = list()
            for role_idx, role in enumerate(selector.roles):
string = '{} {}'.format(role.emoji, shorten(role.name, 64))
rls.append(wrap(string) if sel_idx == self.selector_pos and role_idx == self.role_pos else string)
e.add_field(
name=wrap(selector.title) if self.role_pos is None and sel_idx == self.selector_pos else selector.title,
value='\n'.join(rls) if rls else 'Select the selector and press {} to add a role!'.format(ADD_ROLE_EMOJI),
inline=False
)
return e
async def store(self, ctx):
db = ctx.bot.db
        # collect the ids of selectors and roles that still exist after editing
selector_ids = list(selector.id for selector in self.selectors if selector.id is not None)
role_ids = list(role.id for selector in self.selectors for role in selector.roles if role.id is not None)
# delete role entries that don't exist anymore
await db.execute(
'DELETE FROM role_entry WHERE guild_id=$1 AND id!=ALL($2::INTEGER[])',
ctx.guild.id, role_ids
)
# delete role selectors that don't exist anymore
await db.execute(
'DELETE FROM role_selector WHERE guild_id=$1 AND id!=ALL($2::INTEGER[])',
ctx.guild.id, selector_ids
)
sel_ids = list()
for selector in self.selectors:
ids = list()
for role in selector.roles:
if role.is_new:
ids.append(await db.fetchval(
'INSERT INTO role_entry (guild_id, role_id, name, emoji, description) values ($1, $2, $3, $4, $5) RETURNING id',
ctx.guild.id, role.role_id, role.name, role.emoji, role.description
))
else:
if role.dirty:
await db.execute(
'UPDATE role_entry SET name=$2, emoji=$3, description=$4 WHERE id=$1',
role.id, role.name, role.emoji, role.description
)
ids.append(role.id)
if selector.is_new:
sel_ids.append(await db.fetchval(
'INSERT INTO role_selector (guild_id, title, description, inline, roles) VALUES ($1, $2, $3, $4, $5) RETURNING id',
ctx.guild.id, selector.title, selector.description, selector.inline, ids
))
else:
if selector.dirty:
await db.execute(
'UPDATE role_selector SET title=$2, description=$3, inline=$4, roles=$5 WHERE id=$1',
selector.id, selector.title, selector.description, selector.inline, ids
)
sel_ids.append(selector.id)
await self.conf.update(selectors=sel_ids)
class Roles(AceMixin, commands.Cog):
'''Create role selection menu(s).'''
def __init__(self, bot):
super().__init__(bot)
self.editing = set()
self.messages = dict()
self.footer_tasks = dict()
self.footer_lock = asyncio.Lock()
self.config = ConfigTable(bot, table='role', primary='guild_id')
async def bot_check(self, ctx):
return (ctx.channel.id, ctx.author.id) not in self.editing
async def cog_check(self, ctx):
return await ctx.is_mod()
def set_editing(self, ctx):
self.editing.add((ctx.channel.id, ctx.author.id))
def unset_editing(self, ctx):
try:
self.editing.remove((ctx.channel.id, ctx.author.id))
except KeyError:
pass
@commands.group(hidden=True, invoke_without_command=True)
async def roles(self, ctx):
await ctx.send_help(self.roles)
@roles.command()
@can_prompt()
@commands.bot_has_permissions(manage_messages=True)
async def editor(self, ctx):
'''Editor for selectors and roles.'''
# ignore command input from user while editor is open
self.set_editing(ctx)
conf = await self.config.get_entry(ctx.guild.id)
slcs = await self.db.fetch(
'''
SELECT rs.*
FROM role_selector as rs
JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
WHERE id=ANY($1::INTEGER[])
ORDER BY t.ord
''',
conf.selectors
)
selectors = list()
for slc in slcs:
roles = await self.db.fetch(
'''
SELECT re.*
FROM role_entry as re
JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
WHERE id=ANY($1::INTEGER[])
ORDER BY t.ord
''',
slc.get('roles')
)
selector = Selector.from_record(slc, list(Role.from_record(role) for role in roles))
selectors.append(selector)
head = RoleHead(conf, selectors)
# so converters can access the head for data integrity tests...
ctx.head = head
msg = await ctx.send(embed=disnake.Embed(description='Please wait while reactions are being added...'))
self.messages[ctx.guild.id] = msg
for emoji in EMBED_EMOJIS:
await msg.add_reaction(emoji)
def pred(reaction, user):
return reaction.message.id == msg.id and user.id == ctx.author.id
async def close():
self.unset_editing(ctx)
try:
await msg.delete()
self.messages.pop(ctx.guild.id)
except disnake.HTTPException:
pass
while True:
await msg.edit(embed=head.embed())
try:
reaction, user = await self.bot.wait_for('reaction_add', check=pred, timeout=300.0)
except asyncio.TimeoutError:
await close()
raise commands.CommandError('Role editor closed after 5 minutes of inactivity.')
else:
await msg.remove_reaction(reaction.emoji, user)
reac = str(reaction)
if reac == ADD_SEL_EMOJI:
if len(head.selectors) > 7:
await ctx.send(
embed=disnake.Embed(description='No more than 8 selectors, sorry!'),
delete_after=6
)
continue
selector_data = await self._multiprompt(ctx, msg, NEW_SEL_PREDS)
if selector_data is None:
continue
selector = Selector(selector_data[0], None, list())
selector.set_dirty()
new_pos = 0 if not head.selectors else head.selector_pos + 1
head.add_selector(new_pos, selector)
head.selector_pos = new_pos
head.role_pos = None
if reac == ABORT_EMOJI:
await close()
raise commands.CommandError('Editing aborted, no changes saved.')
if reac == SAVE_EMOJI:
await head.store(ctx)
await close()
await ctx.send('New role selectors saved. Do `roles spawn` to see!')
break
# rest of the actions assume at least one item (selector) is present
if not head.selectors:
continue
if reac == ADD_ROLE_EMOJI:
if len(head.selector.roles) > 24:
await ctx.send(
embed=disnake.Embed(description='No more than 25 roles in one selector, sorry!'),
delete_after=6
)
continue
role_data = await self._multiprompt(ctx, msg, NEW_ROLE_PREDS)
if role_data is None:
continue
role = Role(*role_data)
new_pos = 0 if head.role_pos is None else head.role_pos + 1
head.selector.add_role(new_pos, role)
head.role_pos = new_pos
if reac == DOWN_EMOJI:
head.down()
if reac == UP_EMOJI:
head.up()
if reac in (MOVEUP_EMOJI, MOVEDOWN_EMOJI):
direction = -1 if reac == MOVEUP_EMOJI else 1
if head.role_pos is None:
head.move_selector(direction)
else:
head.move_role(direction)
if reac == DEL_EMOJI:
if head.role_pos is None:
if len(head.selector.roles):
p = ctx.prompt(
'Delete selector?',
'The selector you\'re trying to delete has {} roles inside it.'.format(
len(head.selector.roles)
)
)
if not await p:
continue
head.selectors.pop(head.selector_pos)
if head.selector_pos > head.selector_max:
head.selector_pos = head.selector_max
head.role_pos = None
else:
head.selector.roles.pop(head.role_pos)
if len(head.selector.roles) == 0:
head.role_pos = None
elif head.role_pos > head.role_max:
head.role_pos = head.role_max
if reac == EDIT_EMOJI:
await self._edit_item(
ctx, msg,
head.selector if head.role_pos is None else head.selector.roles[head.role_pos]
)
# similarly to 'tag make', unset editing if an error occurs to not lock the users from using the bot
@editor.error
async def editor_error(self, ctx, error):
self.unset_editing(ctx)
# try to delete the embed message if it exists
try:
msg = self.messages.pop(ctx.guild.id)
await msg.delete()
except (KeyError, disnake.HTTPException):
pass
async def _multiprompt(self, ctx, msg, preds):
outs = list()
def pred(message):
            return message.author.id == ctx.author.id and message.channel.id == ctx.channel.id
def new_embed(question):
e = disnake.Embed(description=question)
e.set_footer(text=EDIT_FOOTER)
return e
for question, conv in preds:
try:
await msg.edit(embed=new_embed(question))
except disnake.HTTPException:
raise commands.CommandError('Could not replace the message embed. Did the message get deleted?')
while True:
try:
message = await self.bot.wait_for('message', check=pred, timeout=60.0)
await message.delete()
except asyncio.TimeoutError:
return None
if message.content.lower() == 'exit':
return None
try:
value = await conv.convert(ctx, message.content)
except commands.CommandError as exc:
if not msg.embeds:
try:
await msg.delete()
except disnake.HTTPException:
pass
raise commands.CommandError('Embed seems to have been removed, aborting.')
e = msg.embeds[0]
e.set_footer(text='NOTE: ' + str(exc) + ' ' + RETRY_MSG)
await msg.edit(embed=e)
continue
outs.append(value)
break
return outs
async def _edit_item(self, ctx, msg, item):
if isinstance(item, Selector):
questions = dict(
title=selector_title_converter,
description=selector_desc_converter,
inline=SelectorInlineConverter(),
)
elif isinstance(item, Role):
questions = dict(
name=role_title_converter,
description=role_desc_converter,
emoji=SelectorEmojiConverter(),
)
else:
raise TypeError('Unknown item type: ' + str(type(item)))
opts = {emoji: q for emoji, q in zip(EMBED_EMOJIS, questions.keys())}
opt_string = '\n'.join('{} {}'.format(key, value) for key, value in opts.items())
e = disnake.Embed(
description='What would you like to edit?\n\n' + opt_string
)
e.set_footer(text=ABORT_EMOJI + ' to abort.')
await msg.edit(embed=e)
def reac_pred(reaction, user):
return reaction.message.id == msg.id and user.id == ctx.author.id
while True:
try:
reaction, user = await self.bot.wait_for('reaction_add', check=reac_pred, timeout=300.0)
except asyncio.TimeoutError:
return
else:
await msg.remove_reaction(reaction.emoji, user)
reac = str(reaction)
if reac == ABORT_EMOJI:
return
elif reac in opts.keys():
attr = opts[reac]
conv = questions[attr]
break
else:
continue
e.description = 'Please input a new value for \'{}\'.'.format(attr)
e.set_footer(text='Send \'exit\' to abort.')
await msg.edit(embed=e)
def msg_pred(message):
return message.channel.id == msg.channel.id and message.author.id == ctx.author.id
while True:
try:
message = await self.bot.wait_for('message', check=msg_pred, timeout=60.0)
except asyncio.TimeoutError:
return
await message.delete()
if message.content.lower() == 'exit':
return
try:
value = await conv.convert(ctx, message.content)
except commands.CommandError as exc:
if not msg.embeds:
try:
await msg.delete()
except disnake.HTTPException:
pass
raise commands.CommandError('Embed seems to have been removed, aborting.')
e = msg.embeds[0]
e.set_footer(text='NOTE: ' + str(exc) + ' ' + RETRY_MSG)
await msg.edit(embed=e)
continue
setattr(item, attr, value)
item.set_dirty()
return
@roles.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True, manage_messages=True)
async def spawn(self, ctx):
'''Spawn role selectors.'''
await ctx.message.delete()
conf = await self.config.get_entry(ctx.guild.id)
selectors = await self.db.fetch(
'''SELECT rs.*
FROM role_selector as rs
JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
WHERE id=ANY($1::INTEGER[])
ORDER BY t.ord
''',
conf.selectors
)
if not selectors:
raise commands.CommandError('No selectors configured. Do `roles editor` to set one up.')
if any(not selector.get('roles') for selector in selectors):
raise commands.CommandError('You have empty selectors. Delete these or add roles to them before spawning.')
if conf.message_ids:
channel = ctx.guild.get_channel(conf.channel_id)
if channel:
for message_id in conf.message_ids:
try:
msg = await channel.fetch_message(message_id)
if msg:
await msg.delete()
except disnake.HTTPException:
pass
msgs = list()
async def delete_all():
for m in msgs:
try:
await m.delete()
except disnake.HTTPException:
pass
self.cancel_footer(ctx.guild.id)
for selector in selectors:
# https://stackoverflow.com/questions/866465/order-by-the-in-value-list
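            # unnest(...) WITH ORDINALITY pairs every id with its position in the array, so
            # ORDER BY t.ord returns the rows in the same order as the stored id list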
roles = await self.db.fetch(
'''
SELECT re.*
FROM role_entry as re
JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
WHERE id=ANY($1::INTEGER[])
ORDER BY t.ord
''',
selector.get('roles')
)
if not roles:
continue
e = disnake.Embed()
description = selector.get('description')
if description is not None:
e.description = selector.get('description')
e.set_footer(text=FOOTER_TEXT)
icon = selector.get('icon')
e.set_author(
name=selector.get('title') or 'Role Selector',
icon_url=icon if icon else (ctx.guild.icon or disnake.Embed.Empty)
)
for role in roles:
e.add_field(
name='{} {}'.format(role.get('emoji'), role.get('name')),
value=role.get('description'),
inline=selector.get('inline')
)
msg = await ctx.send(embed=e)
msgs.append(msg)
try:
for role in roles:
emoj = role.get('emoji')
await msg.add_reaction(emoj)
except disnake.HTTPException:
await delete_all()
raise commands.CommandError(
'Failed adding the emoji {}.\nIf the emoji has been deleted, change it in the editor.'.format(
emoj
)
)
await conf.update(channel_id=ctx.channel.id, message_ids=list(msg.id for msg in msgs))
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
guild_id = payload.guild_id
if guild_id is None:
return
channel_id = payload.channel_id
message_id = payload.message_id
user_id = payload.user_id
emoji = payload.emoji
conf = await self.config.get_entry(guild_id, construct=False)
if conf is None:
return
if channel_id != conf.channel_id or message_id not in conf.message_ids:
return
guild = self.bot.get_guild(guild_id)
if guild is None:
return
channel = guild.get_channel(channel_id)
if channel is None:
return
message = await channel.fetch_message(message_id)
if message is None:
return
member = guild.get_member(user_id)
if member is None:
return
if member.bot:
return
try:
await message.remove_reaction(emoji, member)
except disnake.HTTPException:
pass
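        # message_ids and selectors are stored in the same order, so the index of the reacted
        # message maps directly to its selector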
selector_id = conf.selectors[conf.message_ids.index(message_id)]
selector = await self.db.fetchrow('SELECT * FROM role_selector WHERE id=$1', selector_id)
if selector is None:
return
role_row = await self.db.fetchrow(
'SELECT * FROM role_entry WHERE emoji=$1 AND id=ANY($2::INTEGER[])',
str(emoji), selector.get('roles')
)
if role_row is None:
return
role = guild.get_role(role_row.get('role_id'))
if role is None:
await channel.send(
embed=disnake.Embed(
description='Could not find role with ID {}. Has it been deleted?'.format(role_row.get('role_id'))
),
delete_after=30
)
return
do_add = role not in member.roles
try:
if do_add:
await member.add_roles(role, reason='Added through role selector')
desc = '{}: added role {}'.format(member.display_name, role.name)
else:
await member.remove_roles(role, reason='Removed through role selector')
desc = '{}: removed role {}'.format(member.display_name, role.name)
except disnake.HTTPException:
desc = 'Unable to toggle role {}. Does the bot have Manage Roles permissions?'.format(role.name)
await self.set_footer(message, desc)
log.info(
'%s %s %s %s in %s',
'Added' if do_add else 'Removed',
po(role),
'to' if do_add else 'from',
po(member),
po(guild)
)
def cancel_footer(self, guild_id):
task = self.footer_tasks.pop(guild_id, None)
if task is not None:
task.cancel()
async def _set_footer_in(self, message, text=FOOTER_TEXT, wait=None):
if wait is not None:
await asyncio.sleep(wait)
embed = message.embeds[0]
embed.set_footer(text=text)
try:
await message.edit(embed=embed)
except disnake.HTTPException:
pass
async def set_footer(self, message, text, clear_after=4.0):
async with self.footer_lock:
guild_id = message.guild.id
self.cancel_footer(message.guild.id)
await self._set_footer_in(message, text)
self.footer_tasks[guild_id] = asyncio.create_task(self._set_footer_in(message, wait=clear_after))
def setup(bot):
bot.add_cog(Roles(bot))
| 26.758065
| 125
| 0.687884
|
8f9b5884737dbaf11254f6fe4f1fbd5c85750048
| 1,913
|
py
|
Python
|
zvt/domain/fundamental/dividend_financing.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
zvt/domain/fundamental/dividend_financing.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
zvt/domain/fundamental/dividend_financing.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, DateTime, Float
from sqlalchemy.orm import declarative_base
from zvt.contract import Mixin
from zvt.contract.register import register_schema
DividendFinancingBase = declarative_base()
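# each model below maps to a table in the 'dividend_financing' database; they are registered
# for the 'eastmoney' provider via register_schema() at the bottom of this file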
class DividendFinancing(DividendFinancingBase, Mixin):
"""
    Dividends, rights issues, seasoned offerings and similar corporate actions.
"""
__tablename__ = 'dividend_financing'
provider = Column(String(length=32))
code = Column(String(length=32))
    # total dividend amount
dividend_money = Column(Float)
    # IPO (new share issuance)
ipo_issues = Column(Float)
ipo_raising_fund = Column(Float)
    # seasoned public offering (SPO)
spo_issues = Column(Float)
spo_raising_fund = Column(Float)
    # rights issue
rights_issues = Column(Float)
rights_raising_fund = Column(Float)
class DividendDetail(DividendFinancingBase, Mixin):
__tablename__ = "dividend_detail"
provider = Column(String(length=32))
code = Column(String(length=32))
    # announcement date
announce_date = Column(DateTime)
    # record date
record_date = Column(DateTime)
    # ex-dividend / ex-rights date
dividend_date = Column(DateTime)
    # dividend plan
dividend = Column(String(length=128))
class SpoDetail(DividendFinancingBase, Mixin):
__tablename__ = "spo_detail"
provider = Column(String(length=32))
code = Column(String(length=32))
spo_issues = Column(Float)
spo_price = Column(Float)
spo_raising_fund = Column(Float)
class RightsIssueDetail(DividendFinancingBase, Mixin):
__tablename__ = "rights_issue_detail"
provider = Column(String(length=32))
code = Column(String(length=32))
    # rights issue
rights_issues = Column(Float)
rights_issue_price = Column(Float)
rights_raising_fund = Column(Float)
register_schema(providers=['eastmoney'], db_name='dividend_financing', schema_base=DividendFinancingBase, entity_type='stock')
# the __all__ is generated
__all__ = ['DividendFinancing', 'DividendDetail', 'SpoDetail', 'RightsIssueDetail']
| 24.525641
| 126
| 0.713539
|
bc47832b0bd6b21188a6def528db9f0d8982de73
| 633
|
py
|
Python
|
python/src/comandos/uptime.py
|
devRMA/scriptsBotDiscord
|
917ab6e5d4b369f319ad886c4f8e0a150afa2de1
|
[
"MIT"
] | 6
|
2021-12-10T13:17:34.000Z
|
2022-03-14T17:47:55.000Z
|
python/src/comandos/uptime.py
|
devRMA/scriptsBotDiscord
|
917ab6e5d4b369f319ad886c4f8e0a150afa2de1
|
[
"MIT"
] | null | null | null |
python/src/comandos/uptime.py
|
devRMA/scriptsBotDiscord
|
917ab6e5d4b369f319ad886c4f8e0a150afa2de1
|
[
"MIT"
] | 2
|
2022-03-14T12:30:16.000Z
|
2022-03-15T18:04:17.000Z
|
# Command uptime
# Example: !uptime
# Language used: Python 3.10
# Author: Sl4ker#1985
# Gets how long the bot has been online
# NOTE: command written for discord.py v2
import discord
from discord.utils import format_dt, utcnow
from discord.ext import commands
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_ready():
print('Bot online')
if not hasattr(bot, 'started_at'):
setattr(bot, 'started_at', utcnow())
@bot.command()
async def uptime(ctx: commands.Context[commands.Bot]) -> discord.Message:
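    # format_dt(..., "R") renders a Discord relative timestamp (e.g. "3 hours ago")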
    return await ctx.send(f'I have been online since {format_dt(bot.started_at, "R")}')
bot.run('token')
| 24.346154
| 76
| 0.71722
|
ea590c99e1e5457013f3676fc92684e01cedb4bf
| 1,035
|
py
|
Python
|
app/app7_sigma49/h.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | 1
|
2020-05-30T06:01:50.000Z
|
2020-05-30T06:01:50.000Z
|
app/app7_sigma49/h.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | null | null | null |
app/app7_sigma49/h.py
|
ameenetemady/DeepPep
|
121826309667f1290fa1121746a2992943d0927b
|
[
"Apache-2.0"
] | 1
|
2019-10-20T21:11:48.000Z
|
2019-10-20T21:11:48.000Z
|
import sys
import csv
import os
sys.path.append('../../')
import h_lib
in_strFastaFilename = '{!s}/data/protein/sigma_49/Sigma_49_sequence.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/sigma_49/Sigma_49.txt'.format(os.environ.get('HOME'))
in_strProtRefsDir = '../app4_sigma49/protRefs' # for reuse, maybe should copy it here
out_strOutputBaseDir = './sparseData_h'
strXMatchProb_filename = out_strOutputBaseDir + '/' + 'XMatchProb.marshal'
YInfo = h_lib.getPeptides(in_strPeptideFilename)
# assuming the proteins have already been split into individual files under in_strProtRefsDir
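# the match probabilities are written to a marshal file, presumably so repeated runs can skip
# the expensive matching step (assumption based on the filename; h_lib is not shown here)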
XMatchProb = h_lib.getXInfo(YInfo, in_strProtRefsDir, strXMatchProb_filename, False)
YMatchProbCount = h_lib.getPeptideProteinMatches(YInfo, XMatchProb)
h_lib.updateXMatchingProbabilities(XMatchProb, YMatchProbCount)
XPred = h_lib.getAccumulatedXMatchingProbabilities(XMatchProb)
with open(out_strOutputBaseDir + "/h.csv", "w") as bfFile:
for row in XPred:
bfFile.write('{!s},{:.6f}\n'.format(row[0], row[1]))
| 41.4
| 105
| 0.777778
|
2d25ef58a590de9d57d82ee57a69acfe5bf21368
| 1,544
|
py
|
Python
|
discussion_forum/accounts/views.py
|
SUTHARRAM/discussion_forum
|
84cc10f4118eb22e5e42a3acc7e564d0f85b8607
|
[
"MIT"
] | null | null | null |
discussion_forum/accounts/views.py
|
SUTHARRAM/discussion_forum
|
84cc10f4118eb22e5e42a3acc7e564d0f85b8607
|
[
"MIT"
] | null | null | null |
discussion_forum/accounts/views.py
|
SUTHARRAM/discussion_forum
|
84cc10f4118eb22e5e42a3acc7e564d0f85b8607
|
[
"MIT"
] | null | null | null |
# Create your views here.
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from django.shortcuts import render, redirect
from .forms import UserLoginForm, UserRegisterForm
def login_view(request):
#print(request.user.is_authenticated())
next = request.GET.get('next')
title = "Login"
form = UserLoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
login(request, user)
if next:
return redirect(next)
return redirect("/")
return render(request, "form.html", {"form":form, "title": title})
def register_view(request):
#print(request.user.is_authenticated())
next = request.GET.get('next')
title = "Register"
form = UserRegisterForm(request.POST or None)
if form.is_valid():
user = form.save(commit=False)
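        # commit=False returns an unsaved User instance so the raw password can be hashed
        # with set_password() before the row is written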
password = form.cleaned_data.get('password')
user.set_password(password)
user.save()
new_user = authenticate(username=user.username, password=password)
login(request, new_user)
if next:
return redirect(next)
return redirect("/")
context = {
"form": form,
"title": title
}
return render(request, "form.html", context)
def logout_view(request):
logout(request)
return redirect("/")
| 26.62069
| 74
| 0.645078
|
98b6322b2042079eb73edf5ee5e38e32ab3f0007
| 1,714
|
py
|
Python
|
insectcrop.py
|
erasta/insectcrop
|
5e0d1a3865c03983b2ecfd72a8fe1d295cdeba8c
|
[
"MIT"
] | null | null | null |
insectcrop.py
|
erasta/insectcrop
|
5e0d1a3865c03983b2ecfd72a8fe1d295cdeba8c
|
[
"MIT"
] | null | null | null |
insectcrop.py
|
erasta/insectcrop
|
5e0d1a3865c03983b2ecfd72a8fe1d295cdeba8c
|
[
"MIT"
] | null | null | null |
import numpy as np
import urllib.request
import cv2
import os
os.makedirs("out", exist_ok=True)
# def url_to_image(url):
# resp = urllib.request.urlopen(url)
# image = np.asarray(bytearray(resp.read()), dtype="uint8")
# image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
# return image
# img = url_to_image("https://uc5204b69423c30e717ff6a61658.previews.dropboxusercontent.com/p/thumb/AAsy9MYUdbGP5iAo0-Mb-8TaZKpoyZMeGu6ctYkHRH9VG8P6rbVeADFOtqLDiuvWmd_kuBMTTCqAMCTDfJn_sIHZj2nHyiyO234exDVUpyGbTNpqtkGZigbNSNbfodaX8qn4kOdGPurkNl84ybtLloaM_VTmncfs0kVK7NUyfdJ88m-u7Vz133zP4X3BOOeBB_WGkJrCxoTVzpQIcmYr6mhothTvSTpL89eVCOotU_eVfSy5eJ7v9UF0ULgHYFhbqmxLNvUKhlP259_q8RKTmx5nzwEcgidguO80hycVN1Nl_U7aVjt5zeWj0rZzvxq3Qy55LTvClSWU4cvDy_bnbWKOE3XpA1TTyVWw9ZpHnzLHPSmlmNvAYh7bOS5lLuP95vGuH46TizZHL_CQ80lEFUx1tkPbd1ifRG-y7cPZwQAAtebkCn66BS9GbgzUvS7C_zN5kZJyalHCpG86w6jCzxYd/p.jpeg?fv_content=true&size_mode=5")
files = os.listdir("input")
index = 0
compIndex = 0
for f in files:
index = index + 1
if f.endswith(".jpg") or f.endswith(".jpeg"):
print ("working on " + str(index) + "/" + str(len(files)) + ": " + f)
img = cv2.imread('input/' + f, cv2.IMREAD_COLOR)
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, bw_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
bw_img = cv2.bitwise_not(bw_img[:, :, 1])
[num_labels, labels, stats, centroids] = cv2.connectedComponentsWithStats(bw_img, 4, cv2.CV_32S)
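        # each stats row is [x, y, w, h, area] for one connected component; label 0 is the background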
for stat in stats:
[x, y, w, h, area] = stat
if w > 200 and h > 200 and w < 1000 and h < 1000:
compIndex = compIndex + 1
cv2.imwrite("out/" + str(compIndex) + ".jpg", img[y:y+h, x:x+w])
| 48.971429
| 608
| 0.723454
|
629799b33db5c8cf2cb8c0c47e14b57cbbd2babb
| 2,333
|
py
|
Python
|
lib/datasets/factory.py
|
opencvfun/faster-rcnn-pedestrian-detection
|
182a55095619042b70716b718087e05937567b46
|
[
"MIT"
] | 1
|
2018-01-18T06:55:13.000Z
|
2018-01-18T06:55:13.000Z
|
lib/datasets/factory.py
|
opencvfun/faster-rcnn-pedestrian-detection
|
182a55095619042b70716b718087e05937567b46
|
[
"MIT"
] | null | null | null |
lib/datasets/factory.py
|
opencvfun/faster-rcnn-pedestrian-detection
|
182a55095619042b70716b718087e05937567b46
|
[
"MIT"
] | 1
|
2021-02-22T21:25:42.000Z
|
2021-02-22T21:25:42.000Z
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
__sets = {}
from datasets.inria import inria
from datasets.eth import eth
from datasets.caltech import caltech
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
import numpy as np
''' add other dataset '''
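# NOTE: split/version (and year below) are bound as lambda default arguments so each factory
# captures the current loop values; a plain closure would late-bind to the last iteration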
for version in ["all", "reasonable", "person"]:
for split in ["train", "val", "trainval", "test"]:
name = 'eth_{}_{}'.format(version, split)
__sets[name] = (
lambda split=split, version=version: eth(version, split))
for version in ["all", "reasonable", "person"]:
for split in ["train", "val", "trainval", "test"]:
name = 'inria_{}_{}'.format(version, split)
__sets[name] = (
lambda split=split, version=version: inria(version, split))
# Set up caltech_<version>_<split>
for version in ["all", "reasonable", "person"]:
for split in ["train", "val", "trainval", "test"]:
name = 'caltech_{}_{}'.format(version, split)
__sets[name] = (
lambda split=split, version=version: caltech(version, split))
# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012', '0712']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
def get_imdb(name):
"""Get an imdb (image database) by name."""
    if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return __sets.keys()
| 33.811594
| 79
| 0.6018
|
12233f5837105f7b91c6d2b22d4eae4797778869
| 5,578
|
py
|
Python
|
litex_boards/targets/camlink_4k.py
|
pftbest/litex-boards
|
7525b8772f5b2e17ee4803d27863788ba381d7a1
|
[
"BSD-2-Clause"
] | 1
|
2021-05-29T21:57:17.000Z
|
2021-05-29T21:57:17.000Z
|
litex_boards/targets/camlink_4k.py
|
pftbest/litex-boards
|
7525b8772f5b2e17ee4803d27863788ba381d7a1
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/targets/camlink_4k.py
|
pftbest/litex-boards
|
7525b8772f5b2e17ee4803d27863788ba381d7a1
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import os
import sys
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import camlink_4k
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT41K64M16
from litedram.phy import ECP5DDRPHY
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_init = ClockDomain()
self.clock_domains.cd_por = ClockDomain(reset_less=True)
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys2x = ClockDomain()
self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
# # #
self.stop = Signal()
# clk / rst
clk27 = platform.request("clk27")
# power on reset
por_count = Signal(16, reset=2**16-1)
por_done = Signal()
self.comb += self.cd_por.clk.eq(ClockSignal())
self.comb += por_done.eq(por_count == 0)
self.sync.por += If(~por_done, por_count.eq(por_count - 1))
# pll
self.submodules.pll = pll = ECP5PLL()
self.comb += pll.reset.eq(~por_done | self.rst)
pll.register_clkin(clk27, 27e6)
pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
pll.create_clkout(self.cd_init, 27e6)
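        # ECLKSYNCB gates the 2x edge clock with the DDR PHY's 'stop' signal and CLKDIVF divides
        # it by two to derive the system clock (intent inferred from the ECP5 primitive names)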
self.specials += [
Instance("ECLKSYNCB",
i_ECLKI = self.cd_sys2x_i.clk,
i_STOP = self.stop,
o_ECLKO = self.cd_sys2x.clk),
Instance("CLKDIVF",
p_DIV = "2.0",
i_ALIGNWD = 0,
i_CLKI = self.cd_sys2x.clk,
i_RST = self.cd_sys2x.rst,
o_CDIVX = self.cd_sys.clk),
AsyncResetSynchronizer(self.cd_sys, ~pll.locked)
]
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, toolchain="trellis", **kwargs):
platform = camlink_4k.Platform(toolchain=toolchain)
sys_clk_freq = int(81e6)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Cam Link 4K",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = ECP5DDRPHY(
platform.request("ddram"),
sys_clk_freq=sys_clk_freq)
self.add_csr("ddrphy")
self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41K64M16(sys_clk_freq, "1:2"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# Leds -------------------------------------------------------------------------------------
if platform.lookup_request("serial", loose=True) is None: # Disable leds when serial is used.
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
self.add_csr("leds")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Cam Link 4K")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--sys-clk-freq", default=81e6, help="System clock frequency (default: 81MHz)")
parser.add_argument("--toolchain", default="trellis", help="FPGA toolchain: trellis (default) or diamond")
builder_args(parser)
soc_sdram_args(parser)
trellis_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
toolchain = args.toolchain,
**soc_sdram_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder_kargs = trellis_argdict(args) if args.toolchain == "trellis" else {}
builder.build(**builder_kargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".svf"))
if __name__ == "__main__":
main()
| 39.560284
| 115
| 0.539978
|
8509df436b8bd0a6e02716799e9c80927410f327
| 784
|
py
|
Python
|
Medium/12. Integer to Roman/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 3
|
2020-05-09T12:55:09.000Z
|
2022-03-11T18:56:05.000Z
|
Medium/12. Integer to Roman/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | null | null | null |
Medium/12. Integer to Roman/solution (1).py
|
czs108/LeetCode-Solutions
|
889f5b6a573769ad077a6283c058ed925d52c9ec
|
[
"MIT"
] | 1
|
2022-03-11T18:56:16.000Z
|
2022-03-11T18:56:16.000Z
|
# 12. Integer to Roman
# Runtime: 56 ms, faster than 40.29% of Python3 online submissions for Integer to Roman.
# Memory Usage: 13.8 MB, less than 6.15% of Python3 online submissions for Integer to Roman.
class Solution:
def intToRoman(self, num: int) -> str:
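        # Greedy conversion: repeatedly take the largest value that still fits. Relies on
        # dicts preserving insertion order (Python 3.7+).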
map = {1000: "M",
900: "CM",
500: "D",
400: "CD",
100: "C",
90: "XC",
50: "L",
40: "XL",
10: "X",
9: "IX",
5: "V",
4: "IV",
1: "I"}
ret = ""
remainder = num
for k, sym in map.items():
quotient, remainder = divmod(remainder, k)
ret += sym * quotient
return ret
| 27.034483
| 92
| 0.422194
|
826bebaa9410f509b8414980169d3d75e9e39fa2
| 13,615
|
py
|
Python
|
test/devices_tests/switch_test.py
|
magicbear/xknx
|
e6fe7bbd292e0fee29b2c4f210aff3031d76539d
|
[
"MIT"
] | null | null | null |
test/devices_tests/switch_test.py
|
magicbear/xknx
|
e6fe7bbd292e0fee29b2c4f210aff3031d76539d
|
[
"MIT"
] | null | null | null |
test/devices_tests/switch_test.py
|
magicbear/xknx
|
e6fe7bbd292e0fee29b2c4f210aff3031d76539d
|
[
"MIT"
] | null | null | null |
"""Unit test for Switch objects."""
import asyncio
from unittest.mock import AsyncMock, Mock
import pytest
from xknx import XKNX
from xknx.devices import Switch
from xknx.dpt import DPTBinary
from xknx.telegram import GroupAddress, Telegram
from xknx.telegram.apci import GroupValueRead, GroupValueResponse, GroupValueWrite
@pytest.mark.asyncio
class TestSwitch:
"""Test class for Switch object."""
#
# SYNC
#
async def test_sync(self):
"""Test sync function / sending group reads to KNX bus."""
xknx = XKNX()
switch = Switch(
xknx, "TestOutlet", group_address_state="1/2/3", group_address="1/2/4"
)
await switch.sync()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"), payload=GroupValueRead()
)
async def test_sync_state_address(self):
"""Test sync function / sending group reads to KNX bus. Test with Switch with explicit state address."""
xknx = XKNX()
switch = Switch(
xknx, "TestOutlet", group_address="1/2/3", group_address_state="1/2/4"
)
await switch.sync()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/4"), payload=GroupValueRead()
)
#
# TEST PROCESS
#
async def test_process(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx, "TestOutlet", group_address="1/2/3", device_updated_cb=callback_mock
)
switch2 = Switch(
xknx, "TestOutlet", group_address="1/2/3", device_updated_cb=callback_mock
)
assert switch1.state is None
assert switch2.state is None
callback_mock.assert_not_called()
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
telegram_off = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch1.process(telegram_on)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
# test setting switch2 to False with first telegram
await switch2.process(telegram_off)
assert switch2.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch2.process(telegram_on)
assert switch2.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
async def test_process_state(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx,
"TestOutlet",
group_address="1/2/3",
group_address_state="1/2/4",
device_updated_cb=callback_mock,
)
switch2 = Switch(
xknx,
"TestOutlet",
group_address="1/2/3",
group_address_state="1/2/4",
device_updated_cb=callback_mock,
)
assert switch1.state is None
assert switch2.state is None
callback_mock.assert_not_called()
telegram_on = Telegram(
destination_address=GroupAddress("1/2/4"),
payload=GroupValueResponse(DPTBinary(1)),
)
telegram_off = Telegram(
destination_address=GroupAddress("1/2/4"),
payload=GroupValueResponse(DPTBinary(0)),
)
await switch1.process(telegram_on)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
# test setting switch2 to False with first telegram
await switch2.process(telegram_off)
assert switch2.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch2.process(telegram_on)
assert switch2.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
async def test_process_invert(self):
"""Test process / reading telegrams from telegram queue with inverted switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3", invert=True)
assert switch.state is None
telegram_inv_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
telegram_inv_off = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram_inv_on)
assert switch.state is True
await switch.process(telegram_inv_off)
assert switch.state is False
async def test_process_reset_after(self):
"""Test process reset_after."""
xknx = XKNX()
reset_after_sec = 0.001
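        # reset_after should switch the device back off after the given number of seconds;
        # sleeping 2x below gives the (presumably internal) reset task time to queue the 'off' telegram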
switch = Switch(
xknx, "TestInput", group_address="1/2/3", reset_after=reset_after_sec
)
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram_on)
assert switch.state
assert xknx.telegrams.qsize() == 0
await asyncio.sleep(reset_after_sec * 2)
assert xknx.telegrams.qsize() == 1
await switch.process(xknx.telegrams.get_nowait())
assert not switch.state
async def test_process_reset_after_cancel_existing(self):
"""Test process reset_after cancels existing reset tasks."""
xknx = XKNX()
reset_after_sec = 0.01
switch = Switch(
xknx, "TestInput", group_address="1/2/3", reset_after=reset_after_sec
)
telegram_on = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueResponse(DPTBinary(1)),
)
await switch.process(telegram_on)
assert switch.state
assert xknx.telegrams.qsize() == 0
await asyncio.sleep(reset_after_sec / 2)
# half way through the reset timer
await switch.process(telegram_on)
assert switch.state
await asyncio.sleep(reset_after_sec / 2)
assert xknx.telegrams.qsize() == 0
async def test_process_callback(self):
"""Test process / reading telegrams from telegram queue. Test if callback was called."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
after_update_callback = Mock()
async def async_after_update_callback(device):
"""Async callback."""
after_update_callback(device)
switch.register_device_updated_cb(async_after_update_callback)
telegram = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
await switch.process(telegram)
after_update_callback.assert_called_with(switch)
#
# TEST RESPOND
#
async def test_respond_to_read(self):
"""Test respond_to_read function."""
xknx = XKNX()
responding = Switch(
xknx,
"TestSensor1",
group_address="1/1/1",
respond_to_read=True,
)
non_responding = Switch(
xknx,
"TestSensor2",
group_address="1/1/1",
respond_to_read=False,
)
responding_multiple = Switch(
xknx,
"TestSensor3",
group_address=["1/1/1", "3/3/3"],
group_address_state="2/2/2",
respond_to_read=True,
)
# set initial payload of Switch
responding.switch.value = True
non_responding.switch.value = True
responding_multiple.switch.value = True
read_telegram = Telegram(
destination_address=GroupAddress("1/1/1"), payload=GroupValueRead()
)
# verify no response when respond is False
await non_responding.process(read_telegram)
assert xknx.telegrams.qsize() == 0
# verify response when respond is True
await responding.process(read_telegram)
assert xknx.telegrams.qsize() == 1
response = xknx.telegrams.get_nowait()
assert response == Telegram(
destination_address=GroupAddress("1/1/1"),
payload=GroupValueResponse(DPTBinary(True)),
)
# verify no response when GroupValueRead request is not for group_address
await responding_multiple.process(read_telegram)
assert xknx.telegrams.qsize() == 1
response = xknx.telegrams.get_nowait()
assert response == Telegram(
destination_address=GroupAddress("1/1/1"),
payload=GroupValueResponse(DPTBinary(True)),
)
await responding_multiple.process(
Telegram(
destination_address=GroupAddress("2/2/2"), payload=GroupValueRead()
)
)
await responding_multiple.process(
Telegram(
destination_address=GroupAddress("3/3/3"), payload=GroupValueRead()
)
)
assert xknx.telegrams.qsize() == 0
#
# TEST SET ON
#
async def test_set_on(self):
"""Test switching on switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
await switch.set_on()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
#
# TEST SET OFF
#
async def test_set_off(self):
"""Test switching off switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
await switch.set_off()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
#
# TEST SET INVERT
#
async def test_set_invert(self):
"""Test switching on/off inverted switch."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3", invert=True)
await switch.set_on()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch.set_off()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTBinary(1)),
)
#
# TEST has_group_address
#
def test_has_group_address(self):
"""Test has_group_address."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address="1/2/3")
assert switch.has_group_address(GroupAddress("1/2/3"))
assert not switch.has_group_address(GroupAddress("2/2/2"))
#
# TEST passive group addresses
#
def test_has_group_address_passive(self):
"""Test has_group_address with passive group address."""
xknx = XKNX()
switch = Switch(xknx, "TestOutlet", group_address=["1/2/3", "4/4/4"])
assert switch.has_group_address(GroupAddress("1/2/3"))
assert switch.has_group_address(GroupAddress("4/4/4"))
assert not switch.has_group_address(GroupAddress("2/2/2"))
async def test_process_passive(self):
"""Test process / reading telegrams from telegram queue. Test if device was updated."""
xknx = XKNX()
callback_mock = AsyncMock()
switch1 = Switch(
xknx,
"TestOutlet",
group_address=["1/2/3", "4/4/4"],
group_address_state=["1/2/30", "5/5/5"],
device_updated_cb=callback_mock,
)
assert switch1.state is None
callback_mock.assert_not_called()
telegram_on_passive = Telegram(
destination_address=GroupAddress("4/4/4"),
payload=GroupValueWrite(DPTBinary(1)),
)
telegram_off_passive = Telegram(
destination_address=GroupAddress("5/5/5"),
payload=GroupValueWrite(DPTBinary(0)),
)
await switch1.process(telegram_on_passive)
assert switch1.state is True
callback_mock.assert_called_once()
callback_mock.reset_mock()
await switch1.process(telegram_off_passive)
assert switch1.state is False
callback_mock.assert_called_once()
callback_mock.reset_mock()
| 34.208543
| 112
| 0.614249
|
5d9b5d57df7d44e644eab9cd29abd9040fa5d7cd
| 11,422
|
py
|
Python
|
venv/Lib/site-packages/nipype/utils/tests/test_config.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 585
|
2015-01-12T16:06:47.000Z
|
2022-03-26T14:51:08.000Z
|
nipype/utils/tests/test_config.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 2,329
|
2015-01-01T09:56:41.000Z
|
2022-03-30T14:24:49.000Z
|
nipype/utils/tests/test_config.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 487
|
2015-01-20T01:04:52.000Z
|
2022-03-21T21:22:47.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import sys
import pytest
from nipype import config
from unittest.mock import MagicMock
try:
import xvfbwrapper
has_Xvfb = True
except ImportError:
has_Xvfb = False
# Define mocks for xvfbwrapper. Do not forget the spec to ensure that
# hasattr() checks return False with missing attributes.
xvfbpatch = MagicMock(spec=["Xvfb"])
xvfbpatch.Xvfb.return_value = MagicMock(
spec=["new_display", "start", "stop"], new_display=2010
)
# Mock the legacy xvfbwrapper.Xvfb class (changed display attribute name)
xvfbpatch_old = MagicMock(spec=["Xvfb"])
xvfbpatch_old.Xvfb.return_value = MagicMock(
spec=["vdisplay_num", "start", "stop"], vdisplay_num=2010
)
@pytest.mark.parametrize("dispvar", [":12", "localhost:12", "localhost:12.1"])
def test_display_parse(monkeypatch, dispvar):
"""Check that when $DISPLAY is defined, the display is correctly parsed"""
config._display = None
config._config.remove_option("execution", "display_variable")
monkeypatch.setenv("DISPLAY", dispvar)
assert config.get_display() == ":12"
# Test that it was correctly cached
assert config.get_display() == ":12"
@pytest.mark.parametrize("dispnum", range(5))
def test_display_config(monkeypatch, dispnum):
"""Check that the display_variable option is used ($DISPLAY not set)"""
config._display = None
dispstr = ":%d" % dispnum
config.set("execution", "display_variable", dispstr)
monkeypatch.delitem(os.environ, "DISPLAY", raising=False)
assert config.get_display() == config.get("execution", "display_variable")
# Test that it was correctly cached
assert config.get_display() == config.get("execution", "display_variable")
@pytest.mark.parametrize("dispnum", range(5))
def test_display_system(monkeypatch, dispnum):
"""Check that when only a $DISPLAY is defined, it is used"""
config._display = None
config._config.remove_option("execution", "display_variable")
dispstr = ":%d" % dispnum
monkeypatch.setenv("DISPLAY", dispstr)
assert config.get_display() == dispstr
# Test that it was correctly cached
assert config.get_display() == dispstr
def test_display_config_and_system(monkeypatch):
"""Check that when only both config and $DISPLAY are defined, the config
takes precedence"""
config._display = None
dispstr = ":10"
config.set("execution", "display_variable", dispstr)
monkeypatch.setenv("DISPLAY", ":0")
assert config.get_display() == dispstr
# Test that it was correctly cached
assert config.get_display() == dispstr
def test_display_noconfig_nosystem_patched(monkeypatch):
"""Check that when no $DISPLAY nor option are specified, a virtual Xvfb is
used"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.delitem(os.environ, "DISPLAY", raising=False)
monkeypatch.setitem(sys.modules, "xvfbwrapper", xvfbpatch)
monkeypatch.setattr(sys, "platform", value="linux")
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ":2010"
# Check that raises in Mac
config._display = None
monkeypatch.setattr(sys, "platform", value="darwin")
with pytest.raises(RuntimeError):
config.get_display()
def test_display_empty_patched(monkeypatch):
"""
Check that when $DISPLAY is empty string and no option is specified,
a virtual Xvfb is used
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.setenv("DISPLAY", "")
monkeypatch.setitem(sys.modules, "xvfbwrapper", xvfbpatch)
monkeypatch.setattr(sys, "platform", value="linux")
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ":2010"
# Check that raises in Mac
config._display = None
monkeypatch.setattr(sys, "platform", value="darwin")
with pytest.raises(RuntimeError):
config.get_display()
def test_display_noconfig_nosystem_patched_oldxvfbwrapper(monkeypatch):
"""
Check that when no $DISPLAY nor option are specified,
a virtual Xvfb is used (with a legacy version of xvfbwrapper).
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.delitem(os.environ, "DISPLAY", raising=False)
monkeypatch.setitem(sys.modules, "xvfbwrapper", xvfbpatch_old)
monkeypatch.setattr(sys, "platform", value="linux")
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ":2010"
# Check that raises in Mac
config._display = None
monkeypatch.setattr(sys, "platform", value="darwin")
with pytest.raises(RuntimeError):
config.get_display()
def test_display_empty_patched_oldxvfbwrapper(monkeypatch):
"""
Check that when $DISPLAY is empty string and no option is specified,
a virtual Xvfb is used (with a legacy version of xvfbwrapper).
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.setenv("DISPLAY", "")
monkeypatch.setitem(sys.modules, "xvfbwrapper", xvfbpatch_old)
monkeypatch.setattr(sys, "platform", value="linux")
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ":2010"
# Check that raises in Mac
config._display = None
monkeypatch.setattr(sys, "platform", value="darwin")
with pytest.raises(RuntimeError):
config.get_display()
def test_display_noconfig_nosystem_notinstalled(monkeypatch):
"""
Check that an exception is raised if xvfbwrapper is not installed
but necessary (no config and $DISPLAY unset)
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.delenv("DISPLAY", raising=False)
monkeypatch.setitem(sys.modules, "xvfbwrapper", None)
with pytest.raises(RuntimeError):
config.get_display()
def test_display_empty_notinstalled(monkeypatch):
"""
Check that an exception is raised if xvfbwrapper is not installed
but necessary (no config and $DISPLAY empty)
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.setenv("DISPLAY", "")
monkeypatch.setitem(sys.modules, "xvfbwrapper", None)
with pytest.raises(RuntimeError):
config.get_display()
@pytest.mark.skipif(not has_Xvfb, reason="xvfbwrapper not installed")
@pytest.mark.skipif("darwin" in sys.platform, reason="macosx requires root for Xvfb")
def test_display_noconfig_nosystem_installed(monkeypatch):
"""
Check that actually uses xvfbwrapper when installed (not mocked)
and necessary (no config and $DISPLAY unset)
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.delenv("DISPLAY", raising=False)
newdisp = config.get_display()
assert int(newdisp.split(":")[-1]) > 1000
# Test that it was correctly cached
assert config.get_display() == newdisp
@pytest.mark.skipif(not has_Xvfb, reason="xvfbwrapper not installed")
@pytest.mark.skipif("darwin" in sys.platform, reason="macosx requires root for Xvfb")
def test_display_empty_installed(monkeypatch):
"""
Check that actually uses xvfbwrapper when installed (not mocked)
and necessary (no config and $DISPLAY empty)
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.setenv("DISPLAY", "")
newdisp = config.get_display()
assert int(newdisp.split(":")[-1]) > 1000
# Test that it was correctly cached
assert config.get_display() == newdisp
def test_display_empty_macosx(monkeypatch):
"""
Check that an exception is raised if xvfbwrapper is necessary
(no config and $DISPLAY unset) but platform is OSX. See
https://github.com/nipy/nipype/issues/1400
"""
config._display = None
if config.has_option("execution", "display_variable"):
config._config.remove_option("execution", "display_variable")
monkeypatch.delenv("DISPLAY", "")
monkeypatch.setattr(sys, "platform", "darwin")
with pytest.raises(RuntimeError):
config.get_display()
def test_cwd_cached(tmpdir):
"""Check that changing dirs does not change nipype's cwd"""
oldcwd = config.cwd
tmpdir.chdir()
assert config.cwd == oldcwd
def test_debug_mode():
from ... import logging
sofc_config = config.get("execution", "stop_on_first_crash")
ruo_config = config.get("execution", "remove_unnecessary_outputs")
ki_config = config.get("execution", "keep_inputs")
wf_config = config.get("logging", "workflow_level")
if_config = config.get("logging", "interface_level")
ut_config = config.get("logging", "utils_level")
wf_level = logging.getLogger("nipype.workflow").level
if_level = logging.getLogger("nipype.interface").level
ut_level = logging.getLogger("nipype.utils").level
config.enable_debug_mode()
# Check config is updated and logging levels, too
assert config.get("execution", "stop_on_first_crash") == "true"
assert config.get("execution", "remove_unnecessary_outputs") == "false"
assert config.get("execution", "keep_inputs") == "true"
assert config.get("logging", "workflow_level") == "DEBUG"
assert config.get("logging", "interface_level") == "DEBUG"
assert config.get("logging", "utils_level") == "DEBUG"
assert logging.getLogger("nipype.workflow").level == 10
assert logging.getLogger("nipype.interface").level == 10
assert logging.getLogger("nipype.utils").level == 10
# Restore config and levels
config.set("execution", "stop_on_first_crash", sofc_config)
config.set("execution", "remove_unnecessary_outputs", ruo_config)
config.set("execution", "keep_inputs", ki_config)
config.set("logging", "workflow_level", wf_config)
config.set("logging", "interface_level", if_config)
config.set("logging", "utils_level", ut_config)
logging.update_logging(config)
assert config.get("execution", "stop_on_first_crash") == sofc_config
assert config.get("execution", "remove_unnecessary_outputs") == ruo_config
assert config.get("execution", "keep_inputs") == ki_config
assert config.get("logging", "workflow_level") == wf_config
assert config.get("logging", "interface_level") == if_config
assert config.get("logging", "utils_level") == ut_config
assert logging.getLogger("nipype.workflow").level == wf_level
assert logging.getLogger("nipype.interface").level == if_level
assert logging.getLogger("nipype.utils").level == ut_level
| 38.587838
| 85
| 0.708983
|