| hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5193b6866821222ab3feb44d150c5f9a722f9434
| 7,698
|
py
|
Python
|
Lib/site-packages/dask/dataframe/io/sql.py
|
jsturtz/env
|
d523b0be3345f883a727679d58ff29efb4389d16
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/dask/dataframe/io/sql.py
|
jsturtz/env
|
d523b0be3345f883a727679d58ff29efb4389d16
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/dask/dataframe/io/sql.py
|
jsturtz/env
|
d523b0be3345f883a727679d58ff29efb4389d16
|
[
"bzip2-1.0.6"
] | null | null | null |
import numpy as np
import pandas as pd
from ... import delayed
from .io import from_delayed, from_pandas
def read_sql_table(
table,
uri,
index_col,
divisions=None,
npartitions=None,
limits=None,
columns=None,
bytes_per_chunk=256 * 2 ** 20,
head_rows=5,
schema=None,
meta=None,
engine_kwargs=None,
**kwargs
):
"""
Create dataframe from an SQL table.
If neither divisions nor npartitions is given, the memory footprint of the
first few rows will be determined, and partitions of size ~256MB will
be used.
Parameters
----------
table : string or sqlalchemy expression
Select columns from here.
uri : string
Full sqlalchemy URI for the database connection
index_col : string
Column which becomes the index, and defines the partitioning. Should
be an indexed column in the SQL server, and of any orderable type. If the
type is number or time, then partition boundaries can be inferred from
npartitions or bytes_per_chunk; otherwise you must supply explicit
``divisions=``.
``index_col`` could be a function to return a value, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
required.
divisions: sequence
Values of the index column to split the table by. If given, this will
override npartitions and bytes_per_chunk. The divisions are the value
boundaries of the index column used to define the partitions. For
example, ``divisions=list('acegikmoqsuwz')`` could be used to partition
a string column lexicographically into 12 partitions, with the implicit
assumption that each partition contains similar numbers of records.
npartitions : int
Number of partitions, if divisions is not given. Will split the values
of the index column linearly between limits, if given, or the column
max/min. The index column must be numeric or time for this to work.
limits: 2-tuple or None
Manually give upper and lower range of values for use with npartitions;
if None, first fetches max/min from the DB. Upper limit, if
given, is inclusive.
columns : list of strings or None
Which columns to select; if None, gets all; can include sqlalchemy
functions, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
recommended.
bytes_per_chunk : int
If both divisions and npartitions are None, this is the target size of
each partition, in bytes
head_rows : int
How many rows to load for inferring the data-types, unless passing meta
meta : empty DataFrame or None
If provided, do not attempt to infer dtypes, but use these, coercing
all chunks on load
schema : str or None
If using a table name, pass this to sqlalchemy to select which DB
schema to use within the URI connection
engine_kwargs : dict or None
Specific db engine parameters for sqlalchemy
kwargs : dict
Additional parameters to pass to `pd.read_sql()`
Returns
-------
dask.dataframe
Examples
--------
>>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
... npartitions=10, index_col='id') # doctest: +SKIP
"""
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy.sql import elements
if index_col is None:
raise ValueError("Must specify index column to partition on")
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
if isinstance(table, str):
table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)
index = table.columns[index_col] if isinstance(index_col, str) else index_col
if not isinstance(index_col, (str, elements.Label)):
raise ValueError(
"Use label when passing an SQLAlchemy instance as the index (%s)" % index
)
if divisions and npartitions:
raise TypeError("Must supply either divisions or npartitions, not both")
columns = (
[(table.columns[c] if isinstance(c, str) else c) for c in columns]
if columns
else list(table.columns)
)
if index_col not in columns:
columns.append(
table.columns[index_col] if isinstance(index_col, str) else index_col
)
if isinstance(index_col, str):
kwargs["index_col"] = index_col
else:
# function names get pandas auto-named
kwargs["index_col"] = index_col.name
if meta is None:
# derive metadata from the first few rows
q = sql.select(columns).limit(head_rows).select_from(table)
head = pd.read_sql(q, engine, **kwargs)
if head.empty:
# no results at all
name = table.name
schema = table.schema
head = pd.read_sql_table(name, uri, schema=schema, index_col=index_col)
return from_pandas(head, npartitions=1)
bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows
meta = head[:0]
else:
if divisions is None and npartitions is None:
raise ValueError(
"Must provide divisions or npartitions when using explicit meta."
)
if divisions is None:
if limits is None:
# calculate max and min for given index
q = sql.select([sql.func.max(index), sql.func.min(index)]).select_from(
table
)
minmax = pd.read_sql(q, engine)
maxi, mini = minmax.iloc[0]
dtype = minmax.dtypes["max_1"]
else:
mini, maxi = limits
dtype = pd.Series(limits).dtype
if npartitions is None:
q = sql.select([sql.func.count(index)]).select_from(table)
count = pd.read_sql(q, engine)["count_1"][0]
npartitions = int(round(count * bytes_per_row / bytes_per_chunk)) or 1
if dtype.kind == "M":
divisions = pd.date_range(
start=mini,
end=maxi,
freq="%iS" % ((maxi - mini).total_seconds() / npartitions),
).tolist()
divisions[0] = mini
divisions[-1] = maxi
elif dtype.kind in ["i", "u", "f"]:
divisions = np.linspace(mini, maxi, npartitions + 1).tolist()
else:
raise TypeError(
'Provided index column is of type "{}". If divisions is not provided the '
"index column type must be numeric or datetime.".format(dtype)
)
parts = []
lowers, uppers = divisions[:-1], divisions[1:]
for i, (lower, upper) in enumerate(zip(lowers, uppers)):
cond = index <= upper if i == len(lowers) - 1 else index < upper
q = sql.select(columns).where(sql.and_(index >= lower, cond)).select_from(table)
parts.append(
delayed(_read_sql_chunk)(
q, uri, meta, engine_kwargs=engine_kwargs, **kwargs
)
)
return from_delayed(parts, meta, divisions=divisions)
def _read_sql_chunk(q, uri, meta, engine_kwargs=None, **kwargs):
import sqlalchemy as sa
engine_kwargs = engine_kwargs or {}
conn = sa.create_engine(uri, **engine_kwargs)
df = pd.read_sql(q, conn, **kwargs)
if df.empty:
return meta
else:
return df.astype(meta.dtypes.to_dict(), copy=False)
| 37.55122
| 91
| 0.627566
|
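The read_sql_table file above derives one query per partition from the division boundaries: every range is half-open except the last, which also includes the upper bound. A minimal standalone sketch of that step, using made-up integer boundaries instead of real SQLAlchemy column objects:

# Sketch of how divisions become per-partition index ranges, mirroring the
# lowers/uppers zip near the end of read_sql_table. Boundary values are made up.
divisions = [0, 100, 200, 300]

lowers, uppers = divisions[:-1], divisions[1:]
for i, (lower, upper) in enumerate(zip(lowers, uppers)):
    # Only the final partition closes the range on the right, so every index
    # value falls into exactly one partition.
    op = "<=" if i == len(lowers) - 1 else "<"
    print(f"partition {i}: WHERE index >= {lower} AND index {op} {upper}")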
f3748ea1a69fa607cdf046624bf72c1374d9e063
| 118
|
py
|
Python
|
_legacy/_ast_score/exceptions.py
|
StepicOrg/submissions-clustering
|
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
|
[
"MIT"
] | 1
|
2017-11-20T02:28:07.000Z
|
2017-11-20T02:28:07.000Z
|
_legacy/_ast_score/exceptions.py
|
StepicOrg/submissions-clustering
|
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
|
[
"MIT"
] | 6
|
2017-08-22T10:34:26.000Z
|
2017-08-25T14:29:38.000Z
|
_legacy/_ast_score/exceptions.py
|
StepicOrg/submissions-clustering
|
d61f4cd24ff165ed9b0cdde79d9dcd1ffae47387
|
[
"MIT"
] | null | null | null |
__all__ = []
class AmorphException(Exception):
pass
class InvalidArgumentException(AmorphException):
pass
| 11.8
| 48
| 0.745763
|
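A short usage sketch for the exception hierarchy above; the validating function and its argument are invented for illustration:

# Hypothetical caller showing how the base class catches the derived exception.
def check_argument(value):
    if value < 0:
        raise InvalidArgumentException("value must be non-negative")
    return value

try:
    check_argument(-1)
except AmorphException as exc:  # also catches InvalidArgumentException
    print("rejected:", exc)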
3edbc2d43d3e79c64cbe1f72fec3e98af079ecf6
| 9,563
|
py
|
Python
|
tests/test_chromosome.py
|
olendorf/coop_evolve
|
147b987088bfd6b8da23c3775e8b871ee19518b3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_chromosome.py
|
olendorf/coop_evolve
|
147b987088bfd6b8da23c3775e8b871ee19518b3
|
[
"Apache-2.0"
] | 5
|
2017-06-16T17:39:50.000Z
|
2019-11-13T14:49:44.000Z
|
tests/test_chromosome.py
|
olendorf/coop_evolve
|
147b987088bfd6b8da23c3775e8b871ee19518b3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `coop_evolve.chromosome` class."""
import math
import pytest
import re
from app_settings import AppSettings
from scipy.stats import binom
from scipy.stats import nbinom
from scipy.stats import poisson
from coop_evolve.chromosome import Chromosome
class TestChromosomeCreation:
"""Tests chromosome creation."""
def test_defined_sequence(self):
"""Tests creation of a chromosome from a specified sequence."""
chrom = Chromosome("abcd")
assert chrom.sequence == "abcd"
def test_random_chromosome_composition(self):
"""Tests creation of a chromosome of random length and sequence. Length
is drawn from a negative binomial distribution with a mean
of the expected dna length."""
chrom = Chromosome()
assert type(chrom.sequence) is str
assert re.match('[abcd*:/?+]+', chrom.sequence)
def test_random_chromosome_length(self):
"""Ensures that random chromosomes are created at the correct average
length."""
reps = 1000
cfg = AppSettings()
lengths = []
for _ in range(0, reps):
chrom = Chromosome()
lengths.append(len(chrom.sequence))
mean_length = float(sum(lengths))/len(lengths)
expected_length = cfg.chromosome_length
p = 1 - (expected_length/(1 + expected_length))
conf_99 =(nbinom.var(1, p)/reps)**(1/2) * 4
assert (
expected_length- conf_99
) <= mean_length <= (
expected_length + conf_99
)
class TestChromosomeHelperMethods:
""" Tests various class methods form chromosomes"""
def test_nucleotides(self):
"""Tests nucleotide method returns correct value."""
assert Chromosome.nucleotides() == "abcd/:*+?"
def test_default_behavior(self):
"""Tests that the last nucleotide is default"""
cfg = AppSettings()
assert Chromosome.default_behavior() == cfg.behaviors[-1]
class TestSubstitutions:
"""Test substitution mutations."""
def test_substitutions_length(self):
"""Ensure the substitions don't change sequence length."""
dna = Chromosome("a"*100)
dna.substitutions()
assert len(dna.sequence) == 100
def test_substitutions_changes(self):
"""Test that substitions occur at the expected rate."""
cfg = AppSettings()
reps = 1000
deltas = []
for _ in range(0, reps):
seq = "a"*100
dna = Chromosome(seq)
dna.substitutions()
deltas.append( sum(1 for a, b in zip(seq, dna.sequence) if a != b) )
# Expand the conf_99 to compensate for repeated mutations in the same place
expected_delta = cfg.mutation_rate * 100 * \
(1 - 1/len(Chromosome.nucleotides()))
# Because there is a little slop around synonymous substitutions I multiply
# the confidence by 10 just to limit the number of failing tests.
conf_99 = ((poisson.var(cfg.mutation_rate * 100)/1000)**(1/2)) * 10
observed_delta = sum(deltas)/reps
assert (expected_delta - conf_99) < observed_delta < (expected_delta + conf_99)
class TestDeletion:
"""Tests of the deletion method in chromosomes"""
def test_deletion_length(self):
"""Test that deletions return the correct averge length"""
cfg = AppSettings()
reps = 1000
deltas = []
for _ in range(0, reps):
dna = Chromosome()
init_length = len(dna.sequence)
dna.deletion()
deltas.append(init_length - len(dna.sequence))
expected_delta = cfg.mutation_length
var = nbinom.var(1, cfg.mutation_length/(1 + cfg.mutation_length))
# Because there is a little slop around short strings or positions near the
# end of the string, I multiply
# the confidence by 10 just to limit the number of failing tests.
conf_99 = ((var/reps)**(1/2)) * 10
observed_delta = sum(deltas)/reps
assert (expected_delta - conf_99) < observed_delta < (expected_delta + conf_99)
class TestInsertion:
"""Tests the insertion method in chromosomes"""
def test_insertion_length(self):
"""Tests that insertion mutations are of the correct length"""
cfg = AppSettings()
reps = 1000
deltas = []
for _ in range(0, reps):
dna = Chromosome()
init_length = len(dna.sequence)
dna.insertion()
deltas.append(len(dna.sequence) - init_length)
expected_delta = cfg.mutation_length
var = nbinom.var(1, cfg.mutation_length/(1 + cfg.mutation_length))
conf_99 = ((var/reps)**(1/2)) * 4
observed_delta = (sum(deltas)/reps)
assert (expected_delta - conf_99) < observed_delta < (expected_delta + conf_99)
class TestInversion:
"""Tests inversion method in chromosome"""
def test_inversion_diffs(self):
cfg = AppSettings()
reps = 1000
deltas = [] # observed number of differences
for _ in range(0, reps):
dna = Chromosome()
old_seq = dna.sequence
dna.inversion()
deltas.append( sum(1 for a, b in zip(old_seq, dna.sequence) if a != b) )
pmfs = []
expected_deltas = [] # expected differences
# Assumes the length of an inversion is drawn from a negative binomial
# distribution. Calculates the probability of each length until
# 99.99% of the distribution is accounted for. The expected number of
# differences for each length is multiplied by the probability of that length
# and the sum of that gives the expected differences overall.
k = 0
while sum(pmfs) <= 0.9999:
pmf = nbinom.pmf(
k, 1,
(1 - cfg.mutation_length/(1 + cfg.mutation_length))
)
pmfs.append(pmf)
diffs = math.floor(k/2) * (1 - 1/len(Chromosome.nucleotides())) * 2
expected_deltas.append(pmf * diffs)
k += 1
expected_delta = sum(expected_deltas)
# Since we are multiplying the binomial distribution (probability of differences at
# a given length) by a negative binomial distribution (probability of a length)
# we must compute the variance of the product of two independent random variables, which
# is Var(X * Y) = var(x) * var(y) + var(x) * mean(y) + mean(x) * var(y)
# http://www.odelama.com/data-analysis/Commonly-Used-Math-Formulas/
mean_binom = cfg.mutation_length
var_binom = binom.var(
mean_binom, 1/(len(Chromosome.nucleotides()))
)
mean_nbinom = cfg.mutation_length
var_nbinom = nbinom.var(
cfg.mutation_length,
mean_nbinom/(1 + mean_nbinom)
)
var = var_binom * var_nbinom + \
var_binom * mean_nbinom + \
mean_binom * var_nbinom
observed_delta = sum(deltas)/reps
conf_99 = ((var/reps)**(1/2)) * 5
assert expected_delta - conf_99 < observed_delta < expected_delta + conf_99
class TestCrossingOver:
"""Test crossing over method as Class method"""
def test_crossovers_freq(self):
"""Tests that the number of swaps is as expected"""
cfg = AppSettings()
reps = 1000
deltas = []
diffs = [] # Differences between two deltas, should be zero
for _ in range(0, reps):
dna1 = Chromosome("a"*100)
dna2 = Chromosome("b"*100)
Chromosome.crossover(dna1, dna2)
delta1 = len(re.findall(r"ab", dna1.sequence)) + \
len(re.findall(r"ab", dna1.sequence))
delta2 = len(re.findall(r"ab", dna2.sequence)) + \
len(re.findall(r"ab", dna2.sequence))
deltas.append(delta1)
diffs.append(abs(delta1 - delta2))
min_len = len(min([dna1.sequence, dna2.sequence], key=len))
# Expected delta is the per-base crossover rate, multiplied by the
# probability of the same position getting chosen twice times the
# probability of either end getting chosen. This still ignores the effect
# of the same position getting chosen four times. (Only even numbers of
# hits cause differences.)
expected_delta = cfg.crossover_rate * min_len * ( 1 - 1/min_len ) * (1 - 2/min_len)
var = poisson.var(expected_delta)
conf_99 = ((var/reps)**(1/2)) * 6
observed_delta = sum(deltas)/reps
assert expected_delta - conf_99 < observed_delta < expected_delta + conf_99
class TestMutate:
""" Ensures agents go through their genetic mutations """
def test_genetic_operators(self):
dna = Chromosome("a"*100)
old_dna = dna.sequence
dna.mutate()
assert dna.sequence != old_dna
| 36.223485
| 91
| 0.574506
|
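The random-length test above targets a mean length m by drawing from scipy's nbinom with n=1 and p = 1 - m/(1 + m). A small check of that relationship (the target mean of 10 is arbitrary):

from scipy.stats import nbinom

m = 10                     # arbitrary target mean length
p = 1 - (m / (1 + m))      # the parameterisation used in the test, i.e. 1/(1+m)

# nbinom(n, p) has mean n*(1-p)/p, so with n=1 and p=1/(1+m) the mean is m
# and the variance is m*(1+m).
assert abs(nbinom.mean(1, p) - m) < 1e-9
assert abs(nbinom.var(1, p) - m * (1 + m)) < 1e-9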
e40ebcdf391dd4776b816c91741a38955d4b8e08
| 953
|
py
|
Python
|
stats/citylist.py
|
private-octopus/diana
|
af8ffaac4a8fad26bf78104e5238e358a5ad8b21
|
[
"MIT"
] | 3
|
2018-02-15T20:00:44.000Z
|
2020-07-16T06:08:20.000Z
|
stats/citylist.py
|
private-octopus/diana
|
af8ffaac4a8fad26bf78104e5238e358a5ad8b21
|
[
"MIT"
] | 111
|
2017-11-04T23:13:38.000Z
|
2021-05-20T00:12:28.000Z
|
stats/citylist.py
|
private-octopus/diana
|
af8ffaac4a8fad26bf78104e5238e358a5ad8b21
|
[
"MIT"
] | 5
|
2018-06-21T08:18:26.000Z
|
2021-10-11T02:02:54.000Z
|
#!/usr/bin/python
# coding=utf-8
#
# This script tries to estimate the variance of a few M3 test metrics.
import codecs
import sys
# Main program
# Load a list of files from argv[1], and for each file compute the
# list of metrics and their contribution to variances. Then,
# compute the final values of the metrics and variances.
if len(sys.argv) != 3:
print("Usage: " + argv[0] + " smummary_m3_file.csv city_file.csv")
exit(-1)
sum_m3 = codecs.open(sys.argv[1], "r", "UTF-8")
city_out = codecs.open(sys.argv[2], "w", "UTF-8")
city_list = []
for line in sum_m3:
cells = line.split(",")
if len(cells) > 3 and (cells[1] != "CC" or cells[2] != "City"):
city_name = cells[1] + "-" + cells[2]
city_list.append(city_name)
sum_m3.close()
city_list.sort()
i = 0
while i < len(city_list):
if i == 0 or city_list[i] != city_list[i-1]:
city_out.write(city_list[i] + "\n")
i += 1
city_out.close()
| 23.825
| 71
| 0.632739
|
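The final loop of the script is a manual de-duplication of the sorted city list; the same result can be had with a set, as in this sketch (the sample entries are made up):

# Equivalent de-duplication of "CC-City" strings using a set.
city_list = ["FR-Paris", "US-Boston", "FR-Paris", "JP-Tokyo"]  # made-up sample

for city in sorted(set(city_list)):
    print(city)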
87d4ae8a089b9b65307eb7f6dac6055b8f71418c
| 10,792
|
py
|
Python
|
contrib/testgen/gen_key_io_test_vectors.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 3
|
2020-04-24T08:03:09.000Z
|
2020-06-24T00:53:03.000Z
|
contrib/testgen/gen_key_io_test_vectors.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 8
|
2021-02-06T16:15:10.000Z
|
2022-02-20T20:08:45.000Z
|
contrib/testgen/gen_key_io_test_vectors.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 7
|
2020-02-26T22:08:49.000Z
|
2021-02-06T12:35:40.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58/bech32(m) address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET, Encoding
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_3 = 0x53
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
p2tr_prefix = (OP_1, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'signet', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'signet', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, encoding, output_prefix
('ec', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix),
('ec', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix),
('ec', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix),
('ec', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)),
('te', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix),
('te', 0, 32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix),
('te', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix),
('te', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)),
('te', 0, 20, (False, 'signet', None, True), Encoding.BECH32, p2wpkh_prefix),
('te', 0, 32, (False, 'signet', None, True), Encoding.BECH32, p2wsh_prefix),
('te', 1, 32, (False, 'signet', None, True), Encoding.BECH32M, p2tr_prefix),
('te', 3, 32, (False, 'signet', None, True), Encoding.BECH32M, (OP_3, 32)),
('ecrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix),
('ecrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix),
('ecrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix),
('ecrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char
('te', 17, 32, Encoding.BECH32M, False, False, False),
('ecrt', 3, 1, Encoding.BECH32M, False, False, False),
('ec', 15, 41, Encoding.BECH32M, False, False, False),
('te', 0, 16, Encoding.BECH32, False, False, False),
('ecrt', 0, 32, Encoding.BECH32, True, False, False),
('ec', 0, 16, Encoding.BECH32, True, False, False),
('te', 0, 32, Encoding.BECH32, False, True, False),
('ecrt', 0, 20, Encoding.BECH32, False, False, True),
('ec', 0, 20, Encoding.BECH32M, False, False, False),
('te', 0, 32, Encoding.BECH32M, False, False, False),
('ecrt', 0, 20, Encoding.BECH32M, False, False, False),
('ec', 1, 32, Encoding.BECH32, False, False, False),
('te', 2, 16, Encoding.BECH32, False, False, False),
('ecrt', 16, 20, Encoding.BECH32, False, False, False),
]
def is_valid(v):
'''Check vector v for validity'''
if len(set(v) - set(b58chars)) > 0:
return is_valid_bech32(v)
result = b58decode_chk(v)
if result is None:
return is_valid_bech32(v)
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return is_valid_bech32(v)
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
if decode_segwit_address(hrp, v) != (None, None):
return True
return False
def gen_valid_base58_vector(template):
'''Generate valid base58 vector'''
prefix = bytearray(template[0])
payload = bytearray(os.urandom(template[1]))
suffix = bytearray(template[2])
dst_prefix = bytearray(template[4])
dst_suffix = bytearray(template[5])
rv = b58encode_chk(prefix + payload + suffix)
return rv, dst_prefix + payload + dst_suffix
def gen_valid_bech32_vector(template):
'''Generate valid bech32 vector'''
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
encoding = template[4]
dst_prefix = bytearray(template[5])
rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
return rv, dst_prefix + witprog
def gen_valid_vectors():
'''Generate valid test vectors'''
glist = [gen_valid_base58_vector, gen_valid_bech32_vector]
tlist = [templates, bech32_templates]
while True:
for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = payload.hex()
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
'''Generate possibly invalid vector'''
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
corrupt_prefix = randbool(0.2)
randomize_payload_size = randbool(0.2)
corrupt_suffix = randbool(0.2)
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = bytearray(template[2])
val = b58encode_chk(prefix + payload + suffix)
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
encoding = template[3]
if no_data:
rv = bech32_encode(encoding, hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
if template[4] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(encoding, hrp, data)
if template[5]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]
if template[6]:
i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]
if to_upper:
rv = rv.swapcase()
return rv
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector]
tlist = [templates, bech32_ng_templates]
while True:
for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
val = invalid_vector_generator(template)
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| 40.419476
| 133
| 0.618143
|
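When the generated files are consumed, each valid entry is an [address, payload_hex, metadata] triple and each invalid entry is a one-element list, since json.dump serialises the yielded tuples as lists. A reading sketch with a hypothetical file path:

import json

# Hypothetical local path; the real files are written under src/test/data/ as
# shown in the usage string at the top of the script.
with open("key_io_valid.json") as fh:
    vectors = json.load(fh)

for address, payload_hex, metadata in vectors:
    # metadata only carries the template fields that were not None,
    # e.g. isPrivkey, chain, isCompressed, tryCaseFlip.
    print(address, payload_hex, metadata.get("chain"))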
c136bf1d154368b9faebb0195c579450e8ee2a2f
| 1,362
|
py
|
Python
|
bookstore/tests.py
|
bassamalasadi/bookstore
|
9b80044efd9179704770422cc34b55059c6fef21
|
[
"MIT"
] | null | null | null |
bookstore/tests.py
|
bassamalasadi/bookstore
|
9b80044efd9179704770422cc34b55059c6fef21
|
[
"MIT"
] | null | null | null |
bookstore/tests.py
|
bassamalasadi/bookstore
|
9b80044efd9179704770422cc34b55059c6fef21
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Book, Writer
class BookStoreTest(TestCase):
def setUp(self):
""" Setup a new writer and book """
self.writer = Writer.objects.create(name='writertest')
self.book = Book.objects.create(
book_name = 'booktest',
synopsis='this is a test',
genre='fantasy',
release_date='2022-09-01',
price="15.15"
)
self.book.writer.set([self.writer.pk])
self.book.save()
def test_create_writer(self):
""" Test create a new writer """
writer = Writer.objects.get(name="writertest")
self.assertEqual(writer, self.writer)
def test_create_book_with_one_writer(self):
""" Test create a new book with one writer """
book = Book.objects.get(book_name="booktest")
self.assertEqual(book, self.book)
def test_create_book_with_two_writer(self):
""" Test create a new book with two writer """
self.secondWriter = Writer.objects.create(name='secondeWriterTest')
self.book.writer.set([self.writer.pk, self.secondWriter.pk])
book = Book.objects.get(book_name="booktest")
created_book = list(self.book.writer.all())
retrieved_book = list(book.writer.all())
self.assertEqual(retrieved_book, created_book)
| 30.954545
| 75
| 0.629222
|
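For context, a sketch of Writer and Book models that would satisfy the test case above; the field types and lengths are assumptions, not the project's actual models.py:

# Hypothetical models matching what the tests exercise: a named Writer and a
# Book with a many-to-many `writer` relation plus the fields set in setUp().
from django.db import models


class Writer(models.Model):
    name = models.CharField(max_length=255)


class Book(models.Model):
    book_name = models.CharField(max_length=255)
    synopsis = models.TextField()
    genre = models.CharField(max_length=100)
    release_date = models.DateField()
    price = models.DecimalField(max_digits=6, decimal_places=2)
    writer = models.ManyToManyField(Writer)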
a5661e62ccd52ec407ea63070439d249a3330e9b
| 3,036
|
py
|
Python
|
src/pytree/binarytree/tree.py
|
RESPULTE/PyTree
|
fea511d1414f536a1a523b8b01645ae763083027
|
[
"MIT"
] | null | null | null |
src/pytree/binarytree/tree.py
|
RESPULTE/PyTree
|
fea511d1414f536a1a523b8b01645ae763083027
|
[
"MIT"
] | null | null | null |
src/pytree/binarytree/tree.py
|
RESPULTE/PyTree
|
fea511d1414f536a1a523b8b01645ae763083027
|
[
"MIT"
] | null | null | null |
from pytree.Binarytree.Node import RBT_Node, AVL_Node, Splay_Node, BST_Node
from pytree.Binarytree._tree import BinaryTree
__all__ = ['RBTree', 'BSTree', 'AVLTree', 'SplayTree']
class RBTree(BinaryTree):
'''
- a type of Balanced Binary Search Tree that
does not maintain a strict height level for every node, but still
remains balanced (somehow)
- Pros:
* faster deletion & insertion
- Cons:
* slower traversal time due to not being height balanced
P.S: Even though it is slower in traversing,
the difference is not that big unless time is critical
'''
_node_type = RBT_Node
def __init__(self):
super().__init__()
class BSTree(BinaryTree):
'''
- a type of tree that stores values in nodes, based on the values
- each node in a BST tree will have a reference to 2 other nodes:
- left-node : the node that holds value lesser than the node
- right-node: the node that holds value larger than the node
- in my case, I added a reference to the parent's node too
because this is my project and i do whatever the heck i want >:3
'''
_node_type = BST_Node
def __init__(self):
super().__init__()
class AVLTree(BinaryTree):
'''
- a type of Balanced Binary Search Tree that
maintains a strict height level for every node
- Pros:
* faster traversal of the tree
- Cons:
* slower deletion & insertion due to the rebalancing for each node
P.S: Even though it is slower in insertion & deletion,
the difference is not that big unless time is critical
'''
_node_type = AVL_Node
def __init__(self):
super().__init__()
class SplayTree(BinaryTree):
'''
- a type of self-adjusting Binary Search Tree
that depends on the number of search of an item
- Pros:
* faster traversal of the tree for items that's used frequently
- Cons:
* not balanced :/
'''
_node_type = Splay_Node
def __init__(self):
super().__init__()
def __getattribute__(self, attr_name):
'''
reroute all attr access to here an check if any 'find' method is called
if so, splay the intended node up to the root with the '_update' method
-> if the node that is search is invalid,
get the closest node available in the tree and splay that node
'''
attr = super().__getattribute__(attr_name)
if 'find' not in attr_name or not callable(attr) or self.root.value is None:
return attr
def node_splayer(*args, **kwargs):
# set the node to True to get the node for the splaying process
new_attr_name = f"{attr_name}_node"
attr = self.root.__getattribute__(new_attr_name)
found_node = attr(*args, **kwargs)
# splaying process
if found_node:
self.root = found_node._update_node()
return found_node
return node_splayer
| 28.111111
| 84
| 0.640316
|
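A usage sketch of the splaying hook in SplayTree above: any callable attribute whose name contains 'find' is intercepted, and the located (or nearest) node is rotated up to the root. The insert()/find() method names are assumptions about the BinaryTree base class, which is not shown here:

# Hypothetical usage; assumes BinaryTree exposes insert() and find().
tree = SplayTree()
for value in (8, 3, 10, 1, 6):
    tree.insert(value)

tree.find(6)            # rerouted through __getattribute__ to the *_node variant
print(tree.root.value)  # the found node has been splayed to the root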
c4dba711d6ae041bc564b687d47bd5e0ecd7131f
| 545
|
py
|
Python
|
database.py
|
yuukinokai/MCHacks2018
|
86dd2b5db37353365c5dd49c9622f6fdda6f8bfb
|
[
"MIT"
] | null | null | null |
database.py
|
yuukinokai/MCHacks2018
|
86dd2b5db37353365c5dd49c9622f6fdda6f8bfb
|
[
"MIT"
] | null | null | null |
database.py
|
yuukinokai/MCHacks2018
|
86dd2b5db37353365c5dd49c9622f6fdda6f8bfb
|
[
"MIT"
] | null | null | null |
import pymysql.cursors
host = "localhost"
user = "root"
password = ""
dbName = "test"
def insertMessage(email, message):
connection = pymysql.connect(host = host, user = user, password = password, db = dbName, charset = 'utf8mb4', cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql = "INSERT INTO `messages` (`msgID`, `email`, `message`) VALUES (uuid(), %s, %s)"
cursor.execute(sql, (email, message))
connection.commit()
finally:
connection.close()
| 32.058824
| 153
| 0.636697
|
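A minimal caller for the helper above. The key detail is that email and message are passed as parameters to cursor.execute rather than formatted into the SQL string, which is what keeps the insert safe from injection; the example values are made up:

# Assumes a local MySQL server with the `test` database and a `messages`
# table matching the INSERT statement above.
if __name__ == "__main__":
    insertMessage("alice@example.com", "Hello from MCHacks!")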
99591ad39a225a1bc4239b9f1bf754479dfee8c5
| 20,887
|
py
|
Python
|
gpflow/likelihoods.py
|
a5a/GPflow
|
5fbff1955f4e81d5ec9b44947a9f409314dc2b91
|
[
"Apache-2.0"
] | null | null | null |
gpflow/likelihoods.py
|
a5a/GPflow
|
5fbff1955f4e81d5ec9b44947a9f409314dc2b91
|
[
"Apache-2.0"
] | null | null | null |
gpflow/likelihoods.py
|
a5a/GPflow
|
5fbff1955f4e81d5ec9b44947a9f409314dc2b91
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Valentine Svensson, James Hensman, alexggmatthews, Alexis Boukouvalas
# Copyright 2017 Artem Artemev @awav
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
from . import settings
from . import densities
from . import transforms
from .decors import params_as_tensors
from .decors import params_as_tensors_for
from .params import Parameter
from .params import Parameterized
from .params import ParamList
from .quadrature import hermgauss
class Likelihood(Parameterized):
def __init__(self, name=None):
super(Likelihood, self).__init__(name)
self.num_gauss_hermite_points = 20
def predict_mean_and_var(self, Fmu, Fvar):
"""
Given a Normal distribution for the latent function,
return the mean of Y
if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive mean
\int\int y p(y|f)q(f) df dy
and the predictive variance
\int\int y^2 p(y|f)q(f) df dy - [ \int\int y p(y|f)q(f) df dy ]^2
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (e.g. Gaussian) will implement specific cases.
"""
gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)
gh_w /= np.sqrt(np.pi)
gh_w = gh_w.reshape(-1, 1)
shape = tf.shape(Fmu)
Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]
X = gh_x[None, :] * tf.sqrt(2.0 * Fvar) + Fmu
# here's the quadrature for the mean
E_y = tf.reshape(tf.matmul(self.conditional_mean(X), gh_w), shape)
# here's the quadrature for the variance
integrand = self.conditional_variance(X) \
+ tf.square(self.conditional_mean(X))
V_y = tf.reshape(tf.matmul(integrand, gh_w), shape) - tf.square(E_y)
return E_y, V_y
def predict_density(self, Fmu, Fvar, Y):
"""
Given a Normal distribution for the latent function, and a datum Y,
compute the (log) predictive density of Y.
i.e. if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes the predictive density
\int p(y=Y|f)q(f) df
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (Gaussian, Poisson) will implement specific cases.
"""
gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)
gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)
shape = tf.shape(Fmu)
Fmu, Fvar, Y = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar, Y)]
X = gh_x[None, :] * tf.sqrt(2.0 * Fvar) + Fmu
Y = tf.tile(Y, [1, self.num_gauss_hermite_points]) # broadcast Y to match X
logp = self.logp(X, Y)
return tf.reshape(tf.log(tf.matmul(tf.exp(logp), gh_w)), shape)
def variational_expectations(self, Fmu, Fvar, Y):
"""
Compute the expected log density of the data, given a Gaussian
distribution for the function values.
if
q(f) = N(Fmu, Fvar)
and this object represents
p(y|f)
then this method computes
\int (\log p(y|f)) q(f) df.
Here, we implement a default Gauss-Hermite quadrature routine, but some
likelihoods (Gaussian, Poisson) will implement specific cases.
"""
gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)
gh_x = gh_x.reshape(1, -1)
gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)
shape = tf.shape(Fmu)
Fmu, Fvar, Y = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar, Y)]
X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu
Y = tf.tile(Y, [1, self.num_gauss_hermite_points]) # broadcast Y to match X
logp = self.logp(X, Y)
return tf.reshape(tf.matmul(logp, gh_w), shape)
class Gaussian(Likelihood):
def __init__(self, var=1.0):
super().__init__()
self.variance = Parameter(
var, transform=transforms.positive, dtype=settings.float_type)
@params_as_tensors
def logp(self, F, Y):
return densities.gaussian(F, Y, self.variance)
@params_as_tensors
def conditional_mean(self, F): # pylint: disable=R0201
return tf.identity(F)
@params_as_tensors
def conditional_variance(self, F):
return tf.fill(tf.shape(F), tf.squeeze(self.variance))
@params_as_tensors
def predict_mean_and_var(self, Fmu, Fvar):
return tf.identity(Fmu), Fvar + self.variance
@params_as_tensors
def predict_density(self, Fmu, Fvar, Y):
return densities.gaussian(Fmu, Y, Fvar + self.variance)
@params_as_tensors
def variational_expectations(self, Fmu, Fvar, Y):
return -0.5 * np.log(2 * np.pi) - 0.5 * tf.log(self.variance) \
- 0.5 * (tf.square(Y - Fmu) + Fvar) / self.variance
class Poisson(Likelihood):
"""
Poisson likelihood for use with count data, where the rate is given by the (transformed) GP.
let g(.) be the inverse-link function, then this likelihood represents
p(y_i | f_i) = Poisson(y_i | g(f_i) * binsize)
Note: binsize
For use in a Log Gaussian Cox process (doubly stochastic model) where the
rate function of an inhomogeneous Poisson process is given by a GP. The
intractable likelihood can be approximated by gridding the space (into bins
of size 'binsize') and using this Poisson likelihood.
"""
def __init__(self, invlink=tf.exp, binsize=1.):
Likelihood.__init__(self)
self.invlink = invlink
self.binsize = np.double(binsize)
def logp(self, F, Y):
return densities.poisson(self.invlink(F) * self.binsize, Y)
def conditional_variance(self, F):
return self.invlink(F) * self.binsize
def conditional_mean(self, F):
return self.invlink(F) * self.binsize
def variational_expectations(self, Fmu, Fvar, Y):
if self.invlink is tf.exp:
return Y * Fmu - tf.exp(Fmu + Fvar / 2) * self.binsize \
- tf.lgamma(Y + 1) + Y * tf.log(self.binsize)
return super(Poisson, self).variational_expectations(Fmu, Fvar, Y)
class Exponential(Likelihood):
def __init__(self, invlink=tf.exp):
super().__init__()
self.invlink = invlink
def logp(self, F, Y):
return densities.exponential(self.invlink(F), Y)
def conditional_mean(self, F):
return self.invlink(F)
def conditional_variance(self, F):
return tf.square(self.invlink(F))
def variational_expectations(self, Fmu, Fvar, Y):
if self.invlink is tf.exp:
return - tf.exp(-Fmu + Fvar / 2) * Y - Fmu
return super().variational_expectations(Fmu, Fvar, Y)
class StudentT(Likelihood):
def __init__(self, deg_free=3.0):
Likelihood.__init__(self)
self.deg_free = deg_free
self.scale = Parameter(1.0, transform=transforms.positive)
@params_as_tensors
def logp(self, F, Y):
return densities.student_t(Y, F, self.scale, self.deg_free)
@params_as_tensors
def conditional_mean(self, F):
return tf.identity(F)
@params_as_tensors
def conditional_variance(self, F):
return F * 0.0 + (self.deg_free / (self.deg_free - 2.0))
def probit(x):
return 0.5 * (1.0 + tf.erf(x / np.sqrt(2.0))) * (1 - 2e-3) + 1e-3
class Bernoulli(Likelihood):
def __init__(self, invlink=probit):
Likelihood.__init__(self)
self.invlink = invlink
def logp(self, F, Y):
return densities.bernoulli(self.invlink(F), Y)
def predict_mean_and_var(self, Fmu, Fvar):
if self.invlink is probit:
p = probit(Fmu / tf.sqrt(1 + Fvar))
return p, p - tf.square(p)
else:
# for other invlink, use quadrature
return Likelihood.predict_mean_and_var(self, Fmu, Fvar)
def predict_density(self, Fmu, Fvar, Y):
p = self.predict_mean_and_var(Fmu, Fvar)[0]
return densities.bernoulli(p, Y)
def conditional_mean(self, F):
return self.invlink(F)
def conditional_variance(self, F):
p = self.invlink(F)
return p - tf.square(p)
class Gamma(Likelihood):
"""
Use the transformed GP to give the *scale* (inverse rate) of the Gamma
"""
def __init__(self, invlink=tf.exp):
Likelihood.__init__(self)
self.invlink = invlink
self.shape = Parameter(1.0, transform=transforms.positive)
@params_as_tensors
def logp(self, F, Y):
return densities.gamma(self.shape, self.invlink(F), Y)
@params_as_tensors
def conditional_mean(self, F):
return self.shape * self.invlink(F)
@params_as_tensors
def conditional_variance(self, F):
scale = self.invlink(F)
return self.shape * tf.square(scale)
@params_as_tensors
def variational_expectations(self, Fmu, Fvar, Y):
if self.invlink is tf.exp:
return -self.shape * Fmu - tf.lgamma(self.shape) \
+ (self.shape - 1.) * tf.log(Y) - Y * tf.exp(-Fmu + Fvar / 2.)
else:
return Likelihood.variational_expectations(self, Fmu, Fvar, Y)
class Beta(Likelihood):
"""
This uses a reparameterisation of the Beta density. We have the mean of the
Beta distribution given by the transformed process:
m = sigma(f)
and a scale parameter. The familiar alpha, beta parameters are given by
m = alpha / (alpha + beta)
scale = alpha + beta
so:
alpha = scale * m
beta = scale * (1-m)
"""
def __init__(self, invlink=probit, scale=1.0):
Likelihood.__init__(self)
self.scale = Parameter(scale, transform=transforms.positive)
self.invlink = invlink
@params_as_tensors
def logp(self, F, Y):
mean = self.invlink(F)
alpha = mean * self.scale
beta = self.scale - alpha
return densities.beta(alpha, beta, Y)
@params_as_tensors
def conditional_mean(self, F):
return self.invlink(F)
@params_as_tensors
def conditional_variance(self, F):
mean = self.invlink(F)
return (mean - tf.square(mean)) / (self.scale + 1.)
class RobustMax(object):
"""
This class represents a multi-class inverse-link function. Given a vector
f=[f_1, f_2, ... f_k], the result of the mapping is
y = [y_1 ... y_k]
with
y_i = (1 - eps)   if i == argmax(f)
y_i = eps / (k-1) otherwise.
"""
def __init__(self, num_classes, epsilon=1e-3):
self.epsilon = epsilon
self.num_classes = num_classes
self._eps_K1 = self.epsilon / (self.num_classes - 1.)
def __call__(self, F):
i = tf.argmax(F, 1)
return tf.one_hot(i, self.num_classes, 1. - self.epsilon, self._eps_K1)
def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
Y = tf.cast(Y, tf.int64)
# work out what the mean and variance is of the indicated latent function.
oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1., 0.), settings.float_type)
mu_selected = tf.reduce_sum(oh_on * mu, 1)
var_selected = tf.reduce_sum(oh_on * var, 1)
# generate Gauss Hermite grid
X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
tf.sqrt(tf.clip_by_value(2. * var_selected, 1e-10, np.inf)), (-1, 1))
# compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2)
cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))
cdfs = cdfs * (1 - 2e-4) + 1e-4
# blank out all the distances on the selected latent function
oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0., 1.), settings.float_type)
cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)
# take the product over the latent functions, and the sum over the GH grid.
return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
class MultiClass(Likelihood):
def __init__(self, num_classes, invlink=None):
"""
A likelihood that can do multi-way classification.
Currently the only valid choice
of inverse-link function (invlink) is an instance of RobustMax.
"""
Likelihood.__init__(self)
self.num_classes = num_classes
if invlink is None:
invlink = RobustMax(self.num_classes)
elif not isinstance(invlink, RobustMax):
raise NotImplementedError
self.invlink = invlink
def logp(self, F, Y):
if isinstance(self.invlink, RobustMax):
hits = tf.equal(tf.expand_dims(tf.argmax(F, 1), 1), tf.cast(Y, tf.int64))
yes = tf.ones(tf.shape(Y), dtype=settings.float_type) - self.invlink.epsilon
no = tf.zeros(tf.shape(Y), dtype=settings.float_type) + self.invlink._eps_K1
p = tf.where(hits, yes, no)
return tf.log(p)
else:
raise NotImplementedError
def variational_expectations(self, Fmu, Fvar, Y):
if isinstance(self.invlink, RobustMax):
gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)
p = self.invlink.prob_is_largest(Y, Fmu, Fvar, gh_x, gh_w)
return p * np.log(1 - self.invlink.epsilon) + (1. - p) * np.log(self.invlink._eps_K1)
else:
raise NotImplementedError
def predict_mean_and_var(self, Fmu, Fvar):
if isinstance(self.invlink, RobustMax):
# To compute this, we'll compute the density for each possible output
possible_outputs = [tf.fill(tf.stack([tf.shape(Fmu)[0], 1]), np.array(i, dtype=np.int64)) for i in
range(self.num_classes)]
ps = [self._predict_non_logged_density(Fmu, Fvar, po) for po in possible_outputs]
ps = tf.transpose(tf.stack([tf.reshape(p, (-1,)) for p in ps]))
return ps, ps - tf.square(ps)
else:
raise NotImplementedError
def predict_density(self, Fmu, Fvar, Y):
return tf.log(self._predict_non_logged_density(Fmu, Fvar, Y))
def _predict_non_logged_density(self, Fmu, Fvar, Y):
if isinstance(self.invlink, RobustMax):
gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)
p = self.invlink.prob_is_largest(Y, Fmu, Fvar, gh_x, gh_w)
return p * (1 - self.invlink.epsilon) + (1. - p) * (self.invlink._eps_K1)
else:
raise NotImplementedError
def conditional_mean(self, F):
return self.invlink(F)
def conditional_variance(self, F):
p = self.conditional_mean(F)
return p - tf.square(p)
class SwitchedLikelihood(Likelihood):
def __init__(self, likelihood_list):
"""
In this likelihood, we assume an extra column of Y, which contains
integers that specify a likelihood from the list of likelihoods.
"""
Likelihood.__init__(self)
for l in likelihood_list:
assert isinstance(l, Likelihood)
self.likelihood_list = ParamList(likelihood_list)
self.num_likelihoods = len(self.likelihood_list)
def _partition_and_stitch(self, args, func_name):
"""
args is a list of tensors, to be passed to self.likelihoods.<func_name>
args[-1] is the 'Y' argument, which contains the indexes to self.likelihoods.
This function splits up the args using dynamic_partition, calls the
relevant function on the likelihoods, and re-combines the result.
"""
# get the index from Y
Y = args[-1]
ind = Y[:, -1]
ind = tf.cast(ind, tf.int32)
Y = Y[:, :-1]
args[-1] = Y
# split up the arguments into chunks corresponding to the relevant likelihoods
args = zip(*[tf.dynamic_partition(X, ind, self.num_likelihoods) for X in args])
# apply the likelihood-function to each section of the data
with params_as_tensors_for(self, convert=False):
funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
results = [f(*args_i) for f, args_i in zip(funcs, args)]
# stitch the results back together
partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_likelihoods)
results = tf.dynamic_stitch(partitions, results)
return results
def logp(self, F, Y):
return self._partition_and_stitch([F, Y], 'logp')
def predict_density(self, Fmu, Fvar, Y):
return self._partition_and_stitch([Fmu, Fvar, Y], 'predict_density')
def variational_expectations(self, Fmu, Fvar, Y):
return self._partition_and_stitch([Fmu, Fvar, Y], 'variational_expectations')
def predict_mean_and_var(self, Fmu, Fvar):
mvs = [lik.predict_mean_and_var(Fmu, Fvar) for lik in self.likelihood_list]
mu_list, var_list = zip(*mvs)
mu = tf.concat(mu_list, 1)
var = tf.concat(var_list, 1)
return mu, var
class Ordinal(Likelihood):
"""
A likelihood for doing ordinal regression.
The data are integer values from 0 to K, and the user must specify (K-1)
'bin edges' which define the points at which the labels switch. Let the bin
edges be [a_0, a_1, ... a_{K-1}], then the likelihood is
p(Y=0|F) = phi((a_0 - F) / sigma)
p(Y=1|F) = phi((a_1 - F) / sigma) - phi((a_0 - F) / sigma)
p(Y=2|F) = phi((a_2 - F) / sigma) - phi((a_1 - F) / sigma)
...
p(Y=K|F) = 1 - phi((a_{K-1} - F) / sigma)
where phi is the cumulative density function of a Gaussian (the probit
function) and sigma is a parameter to be learned. A reference is:
@article{chu2005gaussian,
title={Gaussian processes for ordinal regression},
author={Chu, Wei and Ghahramani, Zoubin},
journal={Journal of Machine Learning Research},
volume={6},
number={Jul},
pages={1019--1041},
year={2005}
}
"""
def __init__(self, bin_edges):
"""
bin_edges is a numpy array specifying at which function value the
output label should switch. If the possible Y values are 0...K, then
the size of bin_edges should be (K-1).
"""
Likelihood.__init__(self)
self.bin_edges = bin_edges
self.num_bins = bin_edges.size + 1
self.sigma = Parameter(1.0, transform=transforms.positive)
@params_as_tensors
def logp(self, F, Y):
Y = tf.cast(Y, tf.int64)
scaled_bins_left = tf.concat([self.bin_edges/self.sigma, np.array([np.inf])], 0)
scaled_bins_right = tf.concat([np.array([-np.inf]), self.bin_edges/self.sigma], 0)
selected_bins_left = tf.gather(scaled_bins_left, Y)
selected_bins_right = tf.gather(scaled_bins_right, Y)
return tf.log(probit(selected_bins_left - F / self.sigma) -
probit(selected_bins_right - F / self.sigma) + 1e-6)
@params_as_tensors
def _make_phi(self, F):
"""
A helper function for making predictions. Constructs a probability
matrix where each row outputs the probability of the corresponding
label, and the rows match the entries of F.
Note that a matrix of F values is flattened.
"""
scaled_bins_left = tf.concat([self.bin_edges / self.sigma, np.array([np.inf])], 0)
scaled_bins_right = tf.concat([np.array([-np.inf]), self.bin_edges/self.sigma], 0)
return probit(scaled_bins_left - tf.reshape(F, (-1, 1)) / self.sigma)\
- probit(scaled_bins_right - tf.reshape(F, (-1, 1)) / self.sigma)
def conditional_mean(self, F):
phi = self._make_phi(F)
Ys = tf.reshape(np.arange(self.num_bins, dtype=np.float64), (-1, 1))
return tf.reshape(tf.matmul(phi, Ys), tf.shape(F))
def conditional_variance(self, F):
phi = self._make_phi(F)
Ys = tf.reshape(np.arange(self.num_bins, dtype=np.float64), (-1, 1))
E_y = tf.matmul(phi, Ys)
E_y2 = tf.matmul(phi, tf.square(Ys))
return tf.reshape(E_y2 - tf.square(E_y), tf.shape(F))
| 34.928094
| 117
| 0.626562
|
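The Gauss-Hermite quadrature used throughout the Likelihood base class has a convenient sanity check: for a Bernoulli likelihood with the probit inverse link, Bernoulli.predict_mean_and_var uses the closed form probit(Fmu / sqrt(1 + Fvar)), so a NumPy-only sketch can compare the quadrature scheme against that identity (the Fmu, Fvar values are arbitrary):

import numpy as np
from scipy.stats import norm

Fmu, Fvar = 0.3, 0.8                      # arbitrary parameters of q(f) = N(Fmu, Fvar)
gh_x, gh_w = np.polynomial.hermite.hermgauss(20)

# E[Phi(f)] under q(f): substitute f = sqrt(2*Fvar)*x + Fmu and divide the
# Gauss-Hermite weights by sqrt(pi), exactly as in predict_mean_and_var above.
f = np.sqrt(2.0 * Fvar) * gh_x + Fmu
quad_mean = np.sum(gh_w * norm.cdf(f)) / np.sqrt(np.pi)

closed_form = norm.cdf(Fmu / np.sqrt(1.0 + Fvar))  # analytic value of the same integral
print(quad_mean, closed_form)                      # agree to high precision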
52669cb5ade97c04141340534d9eeeb88a09282b
| 2,949
|
py
|
Python
|
bfxapi/utils/custom_logger.py
|
VictorInacio/bitfinex-api-py
|
6883406da3891898b6be9caffb45f2cc3c696c8a
|
[
"Apache-2.0"
] | null | null | null |
bfxapi/utils/custom_logger.py
|
VictorInacio/bitfinex-api-py
|
6883406da3891898b6be9caffb45f2cc3c696c8a
|
[
"Apache-2.0"
] | null | null | null |
bfxapi/utils/custom_logger.py
|
VictorInacio/bitfinex-api-py
|
6883406da3891898b6be9caffb45f2cc3c696c8a
|
[
"Apache-2.0"
] | 1
|
2020-12-03T01:43:48.000Z
|
2020-12-03T01:43:48.000Z
|
"""
Module providing a colour-formatting custom logger and log highlighting helpers
"""
import logging
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
UNDERLINE_SEQ = "\033[04m"
YELLOW = '\033[93m'
WHITE = '\33[37m'
BLUE = '\033[34m'
LIGHT_BLUE = '\033[94m'
RED = '\033[91m'
GREY = '\33[90m'
KEYWORD_COLORS = {
'WARNING': YELLOW,
'INFO': LIGHT_BLUE,
'DEBUG': WHITE,
'CRITICAL': YELLOW,
'ERROR': RED,
'TRADE': '\33[102m\33[30m'
}
def formatter_message(message, use_color = True):
"""
Syntax highlight certain keywords
"""
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
def format_word(message, word, color_seq, bold=False, underline=False):
"""
Surround the given word with a sequence
"""
replacer = color_seq + word + RESET_SEQ
if underline:
replacer = UNDERLINE_SEQ + replacer
if bold:
replacer = BOLD_SEQ + replacer
return message.replace(word, replacer)
class Formatter(logging.Formatter):
'''
This Formatter simply colors in the levelname, i.e. 'INFO', 'DEBUG'
'''
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
"""
Format and highlight certain keywords
"""
levelname = record.levelname
if self.use_color and levelname in KEYWORD_COLORS:
levelname_color = KEYWORD_COLORS[levelname] + levelname + RESET_SEQ
record.levelname = levelname_color
record.name = GREY + record.name + RESET_SEQ
return logging.Formatter.format(self, record)
class CustomLogger(logging.Logger):
'''
This adds extra logging functions such as logger.trade and also
sets the logger to use the custom formatter
'''
FORMAT = "[$BOLD%(name)s$RESET] [%(levelname)s] %(message)s"
COLOR_FORMAT = formatter_message(FORMAT, True)
TRADE = 50
def __init__(self, name, logLevel='DEBUG'):
logging.Logger.__init__(self, name, logLevel)
color_formatter = Formatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
logging.addLevelName(self.TRADE, "TRADE")
return
def trade(self, message, *args, **kws):
"""
Print a syntax highlighted trade signal
"""
if self.isEnabledFor(self.TRADE):
message = format_word(message, 'CLOSED ', YELLOW, bold=True)
message = format_word(message, 'OPENED ', LIGHT_BLUE, bold=True)
message = format_word(message, 'UPDATED ', BLUE, bold=True)
message = format_word(message, 'CLOSED_ALL ', RED, bold=True)
# Yes, logger takes its '*args' as 'args'.
self._log(self.TRADE, message, args, **kws)
| 30.402062
| 81
| 0.640217
|
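A small usage sketch for the logger above; the logger name and messages are invented, and registering the class globally via logging.setLoggerClass is just one way it might be wired in:

import logging

logging.setLoggerClass(CustomLogger)        # optional global registration (assumption)
log = CustomLogger("bfx-example")           # handler and color formatter set in __init__
log.info("connected to websocket")
log.trade("OPENED LONG 0.1 BTC @ 43,000")   # made-up trade message, highlighted by trade()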
a0264cf0167ab3434e9a00b83d9c3d381a5db415
| 10,607
|
py
|
Python
|
oslo_messaging/tests/drivers/zmq/test_zmq_ack_manager.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
oslo_messaging/tests/drivers/zmq/test_zmq_ack_manager.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
oslo_messaging/tests/drivers/zmq/test_zmq_ack_manager.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import mock
import testtools
import time
import oslo_messaging
from oslo_messaging._drivers.zmq_driver.client import zmq_receivers
from oslo_messaging._drivers.zmq_driver.client import zmq_senders
from oslo_messaging._drivers.zmq_driver.proxy import zmq_proxy
from oslo_messaging._drivers.zmq_driver.server.consumers.zmq_dealer_consumer \
import DealerConsumerWithAcks
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_options
from oslo_messaging.tests.drivers.zmq import zmq_common
from oslo_messaging.tests import utils as test_utils
zmq = zmq_async.import_zmq()
class TestZmqAckManager(test_utils.BaseTestCase):
@testtools.skipIf(zmq is None, "zmq not available")
def setUp(self):
super(TestZmqAckManager, self).setUp()
# register and set necessary config opts
self.messaging_conf.transport_driver = 'zmq'
zmq_options.register_opts(self.conf, mock.MagicMock())
kwargs = {'rpc_zmq_matchmaker': 'dummy',
'use_pub_sub': False,
'use_router_proxy': True,
'rpc_thread_pool_size': 1,
'rpc_use_acks': True,
'rpc_ack_timeout_base': 5,
'rpc_ack_timeout_multiplier': 1,
'rpc_retry_attempts': 2}
self.config(group='oslo_messaging_zmq', **kwargs)
self.conf.register_opts(zmq_proxy.zmq_proxy_opts,
group='zmq_proxy_opts')
# mock set_result method of futures
self.set_result_patcher = mock.patch.object(
zmq_receivers.futurist.Future, 'set_result',
side_effect=zmq_receivers.futurist.Future.set_result, autospec=True
)
self.set_result = self.set_result_patcher.start()
# mock send method of senders
self.send_patcher = mock.patch.object(
zmq_senders.RequestSenderProxy, 'send',
side_effect=zmq_senders.RequestSenderProxy.send, autospec=True
)
self.send = self.send_patcher.start()
# get driver
transport = oslo_messaging.get_transport(self.conf)
self.driver = transport._driver
# prepare and launch proxy
self.proxy = zmq_proxy.ZmqProxy(self.conf)
vars(self.driver.matchmaker).update(vars(self.proxy.matchmaker))
self.executor = zmq_async.get_executor(self.proxy.run)
self.executor.execute()
# create listener
self.listener = zmq_common.TestServerListener(self.driver)
# create target and message
self.target = oslo_messaging.Target(topic='topic', server='server')
self.message = {'method': 'xyz', 'args': {'x': 1, 'y': 2, 'z': 3}}
# start listening to target
self.listener.listen(self.target)
# get ack manager
self.ack_manager = self.driver.client.get().publishers['default']
self.addCleanup(
zmq_common.StopRpc(
self, [('listener', 'stop'), ('executor', 'stop'),
('proxy', 'close'), ('driver', 'cleanup'),
('send_patcher', 'stop'),
('set_result_patcher', 'stop')]
)
)
# wait for all connections to be established
# and all parties to be ready for messaging
time.sleep(1)
@mock.patch.object(DealerConsumerWithAcks, '_acknowledge',
side_effect=DealerConsumerWithAcks._acknowledge,
autospec=True)
def test_cast_success_without_retries(self, received_ack_mock):
result = self.driver.send(
self.target, {}, self.message, wait_for_reply=False
)
self.assertIsNone(result)
self.ack_manager.pool.shutdown(wait=True)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(1, self.send.call_count)
self.assertEqual(1, received_ack_mock.call_count)
self.assertEqual(2, self.set_result.call_count)
def test_cast_success_with_one_retry(self):
with mock.patch.object(DealerConsumerWithAcks,
'_acknowledge') as lost_ack_mock:
result = self.driver.send(
self.target, {}, self.message, wait_for_reply=False
)
self.assertIsNone(result)
self.listener._received.wait(5)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(1, self.send.call_count)
self.assertEqual(1, lost_ack_mock.call_count)
self.assertEqual(0, self.set_result.call_count)
self.listener._received.clear()
with mock.patch.object(DealerConsumerWithAcks, '_acknowledge',
side_effect=DealerConsumerWithAcks._acknowledge,
autospec=True) as received_ack_mock:
self.ack_manager.pool.shutdown(wait=True)
self.assertFalse(self.listener._received.isSet())
self.assertEqual(2, self.send.call_count)
self.assertEqual(1, received_ack_mock.call_count)
self.assertEqual(2, self.set_result.call_count)
def test_cast_success_with_two_retries(self):
with mock.patch.object(DealerConsumerWithAcks,
'_acknowledge') as lost_ack_mock:
result = self.driver.send(
self.target, {}, self.message, wait_for_reply=False
)
self.assertIsNone(result)
self.listener._received.wait(5)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(1, self.send.call_count)
self.assertEqual(1, lost_ack_mock.call_count)
self.assertEqual(0, self.set_result.call_count)
self.listener._received.clear()
self.listener._received.wait(7.5)
self.assertFalse(self.listener._received.isSet())
self.assertEqual(2, self.send.call_count)
self.assertEqual(2, lost_ack_mock.call_count)
self.assertEqual(0, self.set_result.call_count)
with mock.patch.object(DealerConsumerWithAcks, '_acknowledge',
side_effect=DealerConsumerWithAcks._acknowledge,
autospec=True) as received_ack_mock:
self.ack_manager.pool.shutdown(wait=True)
self.assertFalse(self.listener._received.isSet())
self.assertEqual(3, self.send.call_count)
self.assertEqual(1, received_ack_mock.call_count)
self.assertEqual(2, self.set_result.call_count)
@mock.patch.object(DealerConsumerWithAcks, '_acknowledge')
def test_cast_failure_exhausted_retries(self, lost_ack_mock):
result = self.driver.send(
self.target, {}, self.message, wait_for_reply=False
)
self.assertIsNone(result)
self.ack_manager.pool.shutdown(wait=True)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(3, self.send.call_count)
self.assertEqual(3, lost_ack_mock.call_count)
self.assertEqual(1, self.set_result.call_count)
@mock.patch.object(DealerConsumerWithAcks, '_acknowledge',
side_effect=DealerConsumerWithAcks._acknowledge,
autospec=True)
@mock.patch.object(DealerConsumerWithAcks, '_reply',
side_effect=DealerConsumerWithAcks._reply,
autospec=True)
@mock.patch.object(DealerConsumerWithAcks, '_reply_from_cache',
side_effect=DealerConsumerWithAcks._reply_from_cache,
autospec=True)
def test_call_success_without_retries(self, unused_reply_from_cache_mock,
received_reply_mock,
received_ack_mock):
result = self.driver.send(
self.target, {}, self.message, wait_for_reply=True, timeout=10
)
self.assertIsNotNone(result)
self.ack_manager.pool.shutdown(wait=True)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(1, self.send.call_count)
self.assertEqual(1, received_ack_mock.call_count)
self.assertEqual(3, self.set_result.call_count)
received_reply_mock.assert_called_once_with(mock.ANY, mock.ANY,
reply=True, failure=None)
self.assertEqual(0, unused_reply_from_cache_mock.call_count)
@mock.patch.object(DealerConsumerWithAcks, '_acknowledge')
@mock.patch.object(DealerConsumerWithAcks, '_reply')
@mock.patch.object(DealerConsumerWithAcks, '_reply_from_cache')
def test_call_failure_exhausted_retries(self, lost_reply_from_cache_mock,
lost_reply_mock, lost_ack_mock):
self.assertRaises(oslo_messaging.MessagingTimeout,
self.driver.send,
self.target, {}, self.message,
wait_for_reply=True, timeout=20)
self.ack_manager.pool.shutdown(wait=True)
self.assertTrue(self.listener._received.isSet())
self.assertEqual(self.message, self.listener.message.message)
self.assertEqual(3, self.send.call_count)
self.assertEqual(3, lost_ack_mock.call_count)
self.assertEqual(2, self.set_result.call_count)
lost_reply_mock.assert_called_once_with(mock.ANY,
reply=True, failure=None)
self.assertEqual(2, lost_reply_from_cache_mock.call_count)
| 46.726872
| 79
| 0.645894
|
6cc84c385a88d6056cc3eacf0b1474bb27dea577
| 2,654
|
py
|
Python
|
Problema3/sobres/sobres/settings.py
|
danic96/Problemes
|
d53a14f14000cc95f9c4d00d1554501b2b361f54
|
[
"MIT"
] | null | null | null |
Problema3/sobres/sobres/settings.py
|
danic96/Problemes
|
d53a14f14000cc95f9c4d00d1554501b2b361f54
|
[
"MIT"
] | null | null | null |
Problema3/sobres/sobres/settings.py
|
danic96/Problemes
|
d53a14f14000cc95f9c4d00d1554501b2b361f54
|
[
"MIT"
] | null | null | null |
"""
Django settings for sobres project.
Generated by 'django-admin startproject' using Django 1.8.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q+qwsqpu&3^)ph9_%68)2ohd-@-t4rr6pnr+$2hcyo9(r2=dab'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'isobres',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sobres.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sobres.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| 25.519231
| 71
| 0.699699
|
9f0959dc88b7ec47fc752e14776120964334f574
| 1,551
|
py
|
Python
|
tests/components/mazda/test_lock.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/mazda/test_lock.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/mazda/test_lock.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The lock tests for the Mazda Connected Services integration."""
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
STATE_LOCKED,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME
from homeassistant.helpers import entity_registry as er
from . import init_integration
async def test_lock_setup(hass):
"""Test locking and unlocking the vehicle."""
await init_integration(hass)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get("lock.my_mazda3_lock")
assert entry
assert entry.unique_id == "JM000000000000000"
state = hass.states.get("lock.my_mazda3_lock")
assert state
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Lock"
assert state.state == STATE_LOCKED
async def test_locking(hass):
"""Test locking the vehicle."""
client_mock = await init_integration(hass)
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_LOCK,
{ATTR_ENTITY_ID: "lock.my_mazda3_lock"},
blocking=True,
)
await hass.async_block_till_done()
client_mock.lock_doors.assert_called_once()
async def test_unlocking(hass):
"""Test unlocking the vehicle."""
client_mock = await init_integration(hass)
await hass.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{ATTR_ENTITY_ID: "lock.my_mazda3_lock"},
blocking=True,
)
await hass.async_block_till_done()
client_mock.unlock_doors.assert_called_once()
| 26.288136
| 71
| 0.718891
|
4c162094548b68d9dca90e6842ebdf9da9739c17
| 1,394
|
py
|
Python
|
bco_api/api/scripts/method_specific/POST_check_object_permissions.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
bco_api/api/scripts/method_specific/POST_check_object_permissions.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
bco_api/api/scripts/method_specific/POST_check_object_permissions.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
import json
from .. import JsonUtils
# For server information.
from django.conf import settings
# User info
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User, Group
# Permissions
from guardian.shortcuts import get_group_perms
# Responses
from rest_framework.response import Response
from rest_framework import status
def POST_check_object_permissions(incoming, objct):
print('POST_check_object_permissions')
print(incoming)
# Get the user's groups, then get the permissions of
# each group.
user_id = Token.objects.get(key = incoming.META.get('HTTP_AUTHORIZATION').split(' ')[1]).user_id
username = User.objects.get(id = user_id)
# Create a dictionary to hold the return information
# which includes the server information.
returnable = {
'hostname': settings.PUBLIC_HOSTNAME,
'human_readable_hostname': settings.HUMAN_READABLE_HOSTNAME,
'groups': {}
}
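    # Illustrative example of the fully populated payload (hostnames, group
    # names and permission codenames below are hypothetical):
    #     {
    #         'hostname': 'bco.example.org',
    #         'human_readable_hostname': 'Example BCO Server',
    #         'groups': {
    #             'bco_drafters': ['view_bco', 'change_bco'],
    #             'bco_publishers': ['view_bco']
    #         }
    #     }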
# Get object permissions by group.
for group in username.groups.all():
# Get the group name.
g_name = group.name
# Get the permissions.
# Source: https://django-guardian.readthedocs.io/en/stable/api/guardian.shortcuts.html#get-group-perms
g_permissions = list(get_group_perms(group, objct))
# Append.
returnable['groups'][g_name] = g_permissions
return(
Response(
status = status.HTTP_200_OK,
data = returnable
)
)
| 24.45614
| 104
| 0.746772
|
411eef8e29ac47d9f9fce20aede6b4ab3933954d
| 68,063
|
py
|
Python
|
salt/modules/dockerio.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/dockerio.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/dockerio.py
|
vamshi98/salt-formulas
|
30edeadafd5d173efe4e1f767a8d562547ad128a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Management of Docker Containers
.. versionadded:: 2014.1.0
.. note::
The DockerIO integration is still in beta; the API is subject to change
General Notes
-------------
As we use states, we don't want to be continuously popping dockers, so we
will map each container id (or image) with a grain whenever it is relevant.
As a corollary, we will resolve a container id either directly by the id
or try to find a container id matching something stored in a grain.
Installation Prerequisites
--------------------------
- You will need the ``docker-py`` python package in your python installation
path that is running salt. Its version should support `Docker Remote API
v1.12 <http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_.
Currently, ``docker-py 0.6.0`` is known to support `Docker Remote API v1.12
<http://docs.docker.io/en/latest/reference/api/docker_remote_api_v1.12>`_
.. code-block:: bash
pip install docker-py==0.6.0
Prerequisite Pillar Configuration for Authentication
----------------------------------------------------
- To push or pull you will need to be authenticated as the ``docker-py`` bindings
require it
- For this to happen, you will need to configure a mapping in the pillar
representing your per URL authentication bits:
.. code-block:: yaml
docker-registries:
registry_url:
email: foo@foo.com
password: s3cr3t
username: foo
- You need at least an entry to the default docker index:
.. code-block:: yaml
docker-registries:
https://index.docker.io/v1/:
email: foo@foo.com
password: s3cr3t
username: foo
- You can define multiple registry blocks for them to be aggregated. The only thing to keep
in mind is that their ID must finish with ``-docker-registries``:
.. code-block:: yaml
ac-docker-registries:
https://index.bar.io/v1/:
email: foo@foo.com
password: s3cr3t
username: foo
ab-docker-registries:
https://index.foo.io/v1/:
email: foo@foo.com
password: s3cr3t
username: foo
This could be also written as:
.. code-block:: yaml
docker-registries:
https://index.bar.io/v1/:
email: foo@foo.com
password: s3cr3t
username: foo
https://index.foo.io/v1/:
email: foo@foo.com
password: s3cr3t
username: foo
Methods
_______
- Registry Dialog
- :py:func:`login<salt.modules.dockerio.login>`
- :py:func:`push<salt.modules.dockerio.push>`
- :py:func:`pull<salt.modules.dockerio.pull>`
- Docker Management
- :py:func:`version<salt.modules.dockerio.version>`
- :py:func:`info<salt.modules.dockerio.info>`
- Image Management
- :py:func:`search<salt.modules.dockerio.search>`
- :py:func:`inspect_image<salt.modules.dockerio.inspect_image>`
- :py:func:`get_images<salt.modules.dockerio.get_images>`
- :py:func:`remove_image<salt.modules.dockerio.remove_image>`
- :py:func:`import_image<salt.modules.dockerio.import_image>`
- :py:func:`build<salt.modules.dockerio.build>`
- :py:func:`tag<salt.modules.dockerio.tag>`
- :py:func:`save<salt.modules.dockerio.save>`
- :py:func:`load<salt.modules.dockerio.load>`
- Container Management
- :py:func:`start<salt.modules.dockerio.start>`
- :py:func:`stop<salt.modules.dockerio.stop>`
- :py:func:`restart<salt.modules.dockerio.restart>`
- :py:func:`kill<salt.modules.dockerio.kill>`
- :py:func:`wait<salt.modules.dockerio.wait>`
- :py:func:`get_containers<salt.modules.dockerio.get_containers>`
- :py:func:`inspect_container<salt.modules.dockerio.inspect_container>`
- :py:func:`remove_container<salt.modules.dockerio.remove_container>`
- :py:func:`is_running<salt.modules.dockerio.is_running>`
- :py:func:`top<salt.modules.dockerio.top>`
- :py:func:`port<salt.modules.dockerio.port>`
- :py:func:`logs<salt.modules.dockerio.logs>`
- :py:func:`diff<salt.modules.dockerio.diff>`
- :py:func:`commit<salt.modules.dockerio.commit>`
- :py:func:`create_container<salt.modules.dockerio.create_container>`
- :py:func:`export<salt.modules.dockerio.export>`
- :py:func:`get_container_root<salt.modules.dockerio.get_container_root>`
Runtime Execution within a specific, already existing/running container
--------------------------------------------------------------------------
The idea is to use `lxc-attach <http://linux.die.net/man/1/lxc-attach>`_ to execute
inside the container context.
We do not want to use ``docker run`` but want to execute something inside a
running container.
These are the available methods:
- :py:func:`retcode<salt.modules.dockerio.retcode>`
- :py:func:`run<salt.modules.dockerio.run>`
- :py:func:`run_all<salt.modules.dockerio.run_all>`
- :py:func:`run_stderr<salt.modules.dockerio.run_stderr>`
- :py:func:`run_stdout<salt.modules.dockerio.run_stdout>`
- :py:func:`script<salt.modules.dockerio.script>`
- :py:func:`script_retcode<salt.modules.dockerio.script_retcode>`
'''
from __future__ import absolute_import
from salt.ext.six.moves import range
__docformat__ = 'restructuredtext en'
import datetime
import json
import logging
import os
import re
import traceback
import shutil
import types
from salt.modules import cmdmod
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.ext.six import string_types
import salt.utils
import salt.utils.odict
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
HAS_NSENTER = bool(salt.utils.which('nsenter'))
log = logging.getLogger(__name__)
INVALID_RESPONSE = 'We did not get any expected answer from docker'
VALID_RESPONSE = ''
NOTSET = object()
base_status = {
'status': None,
'id': None,
'comment': '',
'out': None
}
# Define the module's virtual name
__virtualname__ = 'docker'
def __virtual__():
'''
Only load if docker libs are present
'''
if HAS_DOCKER:
return __virtualname__
return False
def _sizeof_fmt(num):
'''
Return disk format size data
'''
for unit in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']:
if num < 1024.0:
return '{0:3.1f} {1}'.format(num, unit)
num /= 1024.0
def _set_status(m,
id_=NOTSET,
comment=INVALID_RESPONSE,
status=False,
out=None):
'''
Assign status data to a dict
'''
m['comment'] = comment
m['status'] = status
m['out'] = out
if id_ is not NOTSET:
m['id'] = id_
return m
def _invalid(m, id_=NOTSET, comment=INVALID_RESPONSE, out=None):
'''
Return invalid status
'''
return _set_status(m, status=False, id_=id_, comment=comment, out=out)
def _valid(m, id_=NOTSET, comment=VALID_RESPONSE, out=None):
'''
Return valid status
'''
return _set_status(m, status=True, id_=id_, comment=comment, out=out)
def _get_client(version=None, timeout=None):
'''
Get a connection to a docker API (socket or URL)
based on config.get mechanism (pillar -> grains)
By default it will use the base docker-py defaults which
at the time of writing are using the local socket and
the 1.4 API
Set those keys in your configuration tree somehow:
- docker.url: URL to the docker service
- docker.version: API version to use
'''
kwargs = {}
get = __salt__['config.get']
for k, p in (('base_url', 'docker.url'),
('version', 'docker.version')):
param = get(p, NOTSET)
if param is not NOTSET:
kwargs[k] = param
if timeout is not None:
# make sure we override default timeout of docker-py
# only if defined by user.
kwargs['timeout'] = timeout
if 'base_url' not in kwargs and 'DOCKER_HOST' in os.environ:
#Check if the DOCKER_HOST environment variable has been set
kwargs['base_url'] = os.environ.get('DOCKER_HOST')
client = docker.Client(**kwargs)
if not version:
# set version that match docker daemon
client._version = client.version()['ApiVersion']
# try to authenticate the client using credentials
# found in pillars
registry_auth_config = __pillar__.get('docker-registries', {})
for k, data in __pillar__.items():
if k.endswith('-docker-registries'):
registry_auth_config.update(data)
for registry, creds in registry_auth_config.items():
client.login(creds['username'], password=creds['password'],
email=creds.get('email'), registry=registry)
return client
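# Illustrative minion configuration consumed by _get_client(); the values
# below are examples only, not defaults shipped with this module:
#     docker.url: unix://var/run/docker.sock
#     docker.version: '1.12'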
def _get_image_infos(image):
'''
Verify that the image exists
We will try to resolve either by:
- name
- image_id
- tag
image
Image Name / Image Id / Image Tag
Returns the image id
'''
status = base_status.copy()
client = _get_client()
try:
infos = client.inspect_image(image)
if infos:
_valid(status,
id_=infos['Id'],
out=infos,
comment='found')
except Exception:
pass
if not status['id']:
_invalid(status)
raise CommandExecutionError(
'ImageID {0!r} could not be resolved to '
'an existing Image'.format(image)
)
return status['out']
def _get_container_infos(container):
'''
Get container infos
We will try to resolve either by:
- the mapping grain->docker id or directly
- dockerid
container
Image Id / grain name
'''
status = base_status.copy()
client = _get_client()
try:
container_info = client.inspect_container(container)
if container_info:
_valid(status,
id_=container_info['Id'],
out=container_info)
except Exception:
pass
if not status['id']:
raise CommandExecutionError(
'Container_id {0} could not be resolved to '
'an existing container'.format(
container)
)
if 'id' not in status['out'] and 'Id' in status['out']:
status['out']['id'] = status['out']['Id']
return status['out']
def get_containers(all=True,
trunc=False,
since=None,
before=None,
limit=-1,
host=False,
inspect=False):
'''
Get a list of mappings representing all containers
all
return all containers, Default is ``True``
trunc
set it to True to have the short ID, Default is ``False``
host
include the Docker host's ipv4 and ipv6 address in return, Default is ``False``
inspect
Get more granular information about each container by running a docker inspect
CLI Example:
.. code-block:: bash
salt '*' docker.get_containers
salt '*' docker.get_containers host=True
salt '*' docker.get_containers host=True inspect=True
'''
client = _get_client()
status = base_status.copy()
if host:
status['host'] = {}
status['host']['interfaces'] = __salt__['network.interfaces']()
containers = client.containers(all=all,
trunc=trunc,
since=since,
before=before,
limit=limit)
# Optionally for each container get more granular information from them
# by inspecting the container
if inspect:
for container in containers:
container_id = container.get('Id')
if container_id:
inspect = _get_container_infos(container_id)
container['detail'] = inspect.copy()
_valid(status, comment='All containers in out', out=containers)
return status
def logs(container):
'''
Return logs for a specified container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.logs <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container_logs = client.logs(_get_container_infos(container)['Id'])
_valid(status, id_=container, out=container_logs)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def commit(container,
repository=None,
tag=None,
message=None,
author=None,
conf=None):
'''
Commit a container (promotes it to an image)
container
container id
repository
repository/image to commit to
tag
tag of the image (Optional)
message
commit message (Optional)
author
author name (Optional)
conf
conf (Optional)
CLI Example:
.. code-block:: bash
salt '*' docker.commit <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container = _get_container_infos(container)['Id']
commit_info = client.commit(
container,
repository=repository,
tag=tag,
message=message,
author=author,
conf=conf)
found = False
for k in ('Id', 'id', 'ID'):
if k in commit_info:
found = True
image_id = commit_info[k]
if not found:
raise Exception('Invalid commit return')
image = _get_image_infos(image_id)['Id']
comment = 'Image {0} created from {1}'.format(image, container)
_valid(status, id_=image, out=commit_info, comment=comment)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def diff(container):
'''
Get container diffs
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.diff <container id>
'''
status = base_status.copy()
client = _get_client()
try:
container_diff = client.diff(_get_container_infos(container)['Id'])
_valid(status, id_=container, out=container_diff)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def export(container, path):
'''
Export a container to a file
container
container id
path
path to which file is to be exported
CLI Example:
.. code-block:: bash
salt '*' docker.export <container id>
'''
    # initialise status/client before the try block so the except clause
    # below can still build an error status if opening the file fails
    status = base_status.copy()
    client = _get_client()
    try:
        ppath = os.path.abspath(path)
        with salt.utils.fopen(ppath, 'w') as fic:
response = client.export(_get_container_infos(container)['Id'])
byte = response.read(4096)
fic.write(byte)
while byte != '':
# Do stuff with byte.
byte = response.read(4096)
fic.write(byte)
fic.flush()
_valid(status,
id_=container, out=ppath,
comment='Exported to {0}'.format(ppath))
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def create_container(image,
command=None,
hostname=None,
user=None,
detach=True,
stdin_open=False,
tty=False,
mem_limit=0,
ports=None,
environment=None,
dns=None,
volumes=None,
volumes_from=None,
name=None,
cpu_shares=None,
cpuset=None,
binds=None):
'''
Create a new container
image
image to create the container from
command
command to execute while starting
hostname
hostname of the container
user
user to run docker as
detach
daemon mode, Default is ``True``
environment
environment variable mapping ``({'foo':'BAR'})``
ports
port redirections ``({'222': {}})``
volumes
list of volume mappings in either local volume, bound volume, or read-only
bound volume form::
(['/var/lib/mysql/', '/usr/local/etc/ssl:/etc/ssl', '/etc/passwd:/etc/passwd:ro'])
binds
complete dictionary of bound volume mappings::
{ '/usr/local/etc/ssl/certs/internal.crt': {
'bind': '/etc/ssl/certs/com.example.internal.crt',
'ro': True
},
'/var/lib/mysql': {
'bind': '/var/lib/mysql/',
'ro': False
}
}
This dictionary is suitable for feeding directly into the Docker API, and all
keys are required.
(see http://docker-py.readthedocs.org/en/latest/volumes/)
tty
attach ttys, Default is ``False``
stdin_open
let stdin open, Default is ``False``
name
name given to container
cpu_shares
CPU shares (relative weight)
cpuset
CPUs in which to allow execution ('0-3' or '0,1')
CLI Example:
.. code-block:: bash
salt '*' docker.create_container o/ubuntu volumes="['/s','/m:/f']"
'''
log.trace("modules.dockerio.create_container() called for image " + image)
status = base_status.copy()
client = _get_client()
# In order to permit specification of bind volumes in the volumes field,
# we'll look through it for bind-style specs and move them. This is purely
# for CLI convenience and backwards-compatibility, as states.dockerio
# should parse volumes before this, and the binds argument duplicates this.
# N.B. this duplicates code in states.dockerio._parse_volumes()
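    # e.g. an entry such as '/usr/local/etc/ssl:/etc/ssl:ro' (illustrative
    # path) is removed from ``volumes`` and becomes
    #     binds['/usr/local/etc/ssl'] = {'bind': '/etc/ssl', 'ro': True}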
    if isinstance(volumes, list):
        # iterate over a copy so bind-style entries can be removed from
        # ``volumes`` without skipping the element that follows them
        for volume in list(volumes):
            if ':' in volume:
                volspec = volume.split(':')
                source = volspec[0]
                target = volspec[1]
                ro = False
                try:
                    if len(volspec) > 2:
                        ro = volspec[2] == "ro"
                except IndexError:
                    pass
                if binds is None:
                    binds = {}
                binds[source] = {'bind': target, 'ro': ro}
                volumes.remove(volume)
try:
container_info = client.create_container(
image=image,
command=command,
hostname=hostname,
user=user,
detach=detach,
stdin_open=stdin_open,
tty=tty,
mem_limit=mem_limit,
ports=ports,
environment=environment,
dns=dns,
volumes=volumes,
volumes_from=volumes_from,
name=name,
cpu_shares=cpu_shares,
cpuset=cpuset,
host_config=docker.utils.create_host_config(binds=binds)
)
log.trace("docker.client.create_container returned: " + str(container_info))
container = container_info['Id']
callback = _valid
comment = 'Container created'
out = {
'info': _get_container_infos(container),
'out': container_info
}
__salt__['mine.send']('docker.get_containers', host=True)
return callback(status, id_=container, comment=comment, out=out)
    except Exception as e:
_invalid(status, id_=image, out=traceback.format_exc())
raise e
__salt__['mine.send']('docker.get_containers', host=True)
return status
def version():
'''
Get docker version
CLI Example:
.. code-block:: bash
salt '*' docker.version
'''
status = base_status.copy()
client = _get_client()
try:
docker_version = client.version()
_valid(status, out=docker_version)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def info():
'''
Get the version information about docker. This is similar to ``docker info`` command
CLI Example:
.. code-block:: bash
salt '*' docker.info
'''
status = base_status.copy()
client = _get_client()
try:
version_info = client.info()
_valid(status, out=version_info)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def port(container, private_port):
'''
    Private port mapping allocation information. This method is broken on the
    docker-py side; use the result of ``inspect_container`` instead to work out
    the port allocation.
container
container id
private_port
private port on the container to query for
CLI Example:
.. code-block:: bash
salt '*' docker.port <container id> <private port>
'''
status = base_status.copy()
client = _get_client()
try:
port_info = client.port(
_get_container_infos(container)['Id'],
private_port)
_valid(status, id_=container, out=port_info)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def stop(container, timeout=10):
'''
Stop a running container
container
container id
timeout
timeout for container to exit gracefully before killing it, Default is ``10`` seconds
CLI Example:
.. code-block:: bash
salt '*' docker.stop <container id> [timeout=20]
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.stop(dcontainer, timeout=timeout)
if not is_running(dcontainer):
_valid(
status,
comment='Container {0} was stopped'.format(
container),
id_=container)
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already stopped'.format(
container),
id_=container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while stopping '
'your container {0}').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
def kill(container):
'''
Kill a running container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.kill <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.kill(dcontainer)
if not is_running(dcontainer):
_valid(status,
comment='Container {0} was killed'.format(
container),
id_=container)
else:
_invalid(status,
comment='Container {0} was not killed'.format(
container))
else:
_valid(status,
comment='Container {0} was already stopped'.format(
container),
id_=container)
except Exception:
_invalid(status,
id_=container,
out=traceback.format_exc(),
comment=(
'An exception occurred while killing '
'your container {0}').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
def restart(container, timeout=10):
'''
Restart a running container
container
container id
timeout
timeout for container to exit gracefully before killing it, Default is ``10`` seconds
CLI Example:
.. code-block:: bash
salt '*' docker.restart <container id> [timeout=20]
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
client.restart(dcontainer, timeout=timeout)
if is_running(dcontainer):
_valid(status,
comment='Container {0} was restarted'.format(container),
id_=container)
else:
_invalid(status)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while restarting '
'your container {0}').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
def start(container,
binds=None,
port_bindings=None,
lxc_conf=None,
publish_all_ports=None,
links=None,
privileged=False,
dns=None,
volumes_from=None,
network_mode=None,
restart_policy=None,
cap_add=None,
cap_drop=None):
'''
Start the specified container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.start <container id>
'''
if binds:
if not isinstance(binds, dict):
raise SaltInvocationError('binds must be formatted as a dictionary')
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if not is_running(container):
bindings = None
if port_bindings is not None:
try:
bindings = {}
for k, v in port_bindings.items():
bindings[k] = (v.get('HostIp', ''), v['HostPort'])
except AttributeError:
raise SaltInvocationError(
'port_bindings must be formatted as a dictionary of '
'dictionaries'
)
client.start(dcontainer,
binds=binds,
port_bindings=bindings,
lxc_conf=lxc_conf,
publish_all_ports=publish_all_ports,
links=links,
privileged=privileged,
dns=dns,
volumes_from=volumes_from,
network_mode=network_mode,
restart_policy=restart_policy,
cap_add=cap_add,
cap_drop=cap_drop)
if is_running(dcontainer):
_valid(status,
comment='Container {0} was started'.format(container),
id_=container)
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already started'.format(container),
id_=container)
except Exception:
_invalid(status,
id_=container,
out=traceback.format_exc(),
comment=(
'An exception occurred while starting '
'your container {0}').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
def wait(container):
'''
Wait for a container to exit gracefully
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.wait <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
client.wait(dcontainer)
if not is_running(container):
_valid(status,
id_=container,
comment='Container waited for stop')
else:
_invalid(status)
else:
_valid(status,
comment='Container {0} was already stopped'.format(container),
id_=container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
comment=(
'An exception occurred while waiting '
'your container {0}').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
def exists(container):
'''
Check if a given container exists
container
container id
Returns ``True`` if container exists otherwise returns ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.exists <container id>
'''
try:
_get_container_infos(container)
return True
except Exception:
return False
def is_running(container):
'''
Check if the specified container is running
container
container id
Returns ``True`` if container is running otherwise returns ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.is_running <container id>
'''
try:
infos = _get_container_infos(container)
return infos.get('State', {}).get('Running')
except Exception:
return False
def remove_container(container, force=False, v=False):
'''
Remove a container from a docker installation
container
container id
force
remove a running container, Default is ``False``
v
remove the volumes associated to the container, Default is ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.remove_container <container id> [force=True|False] [v=True|False]
'''
client = _get_client()
status = base_status.copy()
status['id'] = container
dcontainer = None
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
if not force:
_invalid(status, id_=container, out=None,
comment=(
'Container {0} is running, '
'won\'t remove it').format(container))
__salt__['mine.send']('docker.get_containers', host=True)
return status
else:
kill(dcontainer)
client.remove_container(dcontainer, v=v)
try:
_get_container_infos(dcontainer)
_invalid(status,
comment='Container was not removed: {0}'.format(container))
except Exception:
status['status'] = True
status['comment'] = 'Container {0} was removed'.format(container)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
__salt__['mine.send']('docker.get_containers', host=True)
return status
def top(container):
'''
Run the docker top command on a specific container
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.top <container id>
'''
client = _get_client()
status = base_status.copy()
try:
dcontainer = _get_container_infos(container)['Id']
if is_running(dcontainer):
ret = client.top(dcontainer)
if ret:
ret['mprocesses'] = []
titles = ret['Titles']
for i in ret['Processes']:
data = salt.utils.odict.OrderedDict()
for k, j in enumerate(titles):
data[j] = i[k]
ret['mprocesses'].append(data)
_valid(status,
out=ret,
id_=container,
comment='Current top for container')
if not status['id']:
_invalid(status)
else:
_invalid(status,
comment='Container {0} is not running'.format(container))
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def inspect_container(container):
'''
Get container information. This is similar to ``docker inspect`` command but only for containers
container
container id
CLI Example:
.. code-block:: bash
salt '*' docker.inspect_container <container id>
'''
status = base_status.copy()
status['id'] = container
try:
infos = _get_container_infos(container)
_valid(status, id_=container, out=infos)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc(),
                 comment='Container does not exist: {0}'.format(container))
return status
def login(url=None, username=None, password=None, email=None):
'''
Wrapper to the ``docker.py`` login method (does not do much yet)
url
registry url to authenticate to
username
username to authenticate
password
password to authenticate
email
email to authenticate
CLI Example:
.. code-block:: bash
salt '*' docker.login <url> <username> <password> <email>
'''
client = _get_client()
return client.login(username, password, email, url)
def search(term):
'''
Search for an image on the registry
term
search keyword
CLI Example:
.. code-block:: bash
salt '*' docker.search <term>
'''
client = _get_client()
status = base_status.copy()
ret = client.search(term)
if ret:
_valid(status, out=ret, id_=term)
else:
_invalid(status)
return status
def _create_image_assemble_error_status(status, ret, image_logs):
'''
Given input in this form::
[{u'error': u'Get file:///r.tar.gz: unsupported protocol scheme "file"',
u'errorDetail': {
u'message':u'Get file:///r.tar.gz:unsupported protocol scheme "file"'}},
{u'status': u'Downloading from file:///r.tar.gz'}]
'''
comment = 'An error occurred while importing your image'
out = None
is_invalid = True
status['out'] = ''
try:
is_invalid = False
status['out'] += '\n' + ret
for err_log in image_logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
is_invalid = True
trace = traceback.format_exc()
out = (
'An error occurred while '
'parsing error output:\n{0}'
).format(trace)
if is_invalid:
_invalid(status, out=out, comment=comment)
return status
def import_image(src, repo, tag=None):
'''
Import content from a local tarball or a URL to a docker image
src
content to import (URL or absolute path to a tarball)
repo
repository to import to
tag
set tag of the image (Optional)
CLI Example:
.. code-block:: bash
salt '*' docker.import_image <src> <repo> [tag]
'''
client = _get_client()
status = base_status.copy()
try:
ret = client.import_image(src, repository=repo, tag=tag)
if ret:
image_logs, _info = _parse_image_multilogs_string(ret)
_create_image_assemble_error_status(status, ret, image_logs)
if status['status'] is not False:
infos = _get_image_infos(image_logs[0]['status'])
_valid(status,
comment='Image {0} was created'.format(infos['Id']),
id_=infos['Id'],
out=ret)
else:
_invalid(status)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def tag(image, repository, tag=None, force=False):
'''
Tag an image into a repository
image
name of image
repository
name of repository
tag
tag to apply (Optional)
force
force apply tag, Default is ``False``
CLI Example:
.. code-block:: bash
salt '*' docker.tag <image> <repository> [tag] [force=True|False]
'''
client = _get_client()
status = base_status.copy()
try:
dimage = _get_image_infos(image)['Id']
ret = client.tag(dimage, repository, tag=tag, force=force)
except Exception:
_invalid(status,
out=traceback.format_exc(),
                 comment='Cannot tag image {0} {1}{2}'.format(
image, repository,
tag and (':' + tag) or '').strip())
return status
if ret:
_valid(status,
id_=image,
comment='Image was tagged: {0}{1}'.format(
repository,
tag and (':' + tag) or '').strip())
else:
_invalid(status)
return status
def get_images(name=None, quiet=False, all=True):
'''
List docker images
name
repository name
quiet
only show image id, Default is ``False``
all
show all images, Default is ``True``
CLI Example:
.. code-block:: bash
salt '*' docker.get_images <name> [quiet=True|False] [all=True|False]
'''
client = _get_client()
status = base_status.copy()
try:
infos = client.images(name=name, quiet=quiet, all=all)
for i in range(len(infos)):
inf = infos[i]
try:
inf['Human_Size'] = _sizeof_fmt(int(inf['Size']))
except ValueError:
pass
try:
ts = int(inf['Created'])
dts = datetime.datetime.fromtimestamp(ts)
inf['Human_IsoCreated'] = dts.isoformat()
inf['Human_Created'] = dts.strftime(
'%Y-%m-%d %H:%M:%S')
except Exception:
pass
try:
inf['Human_VirtualSize'] = (
_sizeof_fmt(int(inf['VirtualSize'])))
except ValueError:
pass
_valid(status, out=infos)
except Exception:
_invalid(status, out=traceback.format_exc())
return status
def build(path=None,
tag=None,
quiet=False,
fileobj=None,
nocache=False,
rm=True,
timeout=None):
'''
Build a docker image from a dockerfile or an URL
path
url/branch/docker_dir or path on the filesystem to the dockerfile
tag
tag of the image
quiet
quiet mode, Default is ``False``
nocache
do not use docker image cache, Default is ``False``
rm
remove intermediate commits, Default is ``True``
timeout
timeout value before aborting (in seconds)
CLI Example:
.. code-block:: bash
salt '*' docker.build vieux/apache
salt '*' docker.build github.com/creack/docker-firefox
'''
client = _get_client(timeout=timeout)
status = base_status.copy()
if path or fileobj:
try:
ret = client.build(path=path,
tag=tag,
quiet=quiet,
fileobj=fileobj,
rm=rm,
nocache=nocache)
if isinstance(ret, types.GeneratorType):
message = json.loads(list(ret)[-1])
if 'stream' in message:
if 'Successfully built' in message['stream']:
_valid(status, out=message['stream'])
if 'errorDetail' in message:
_invalid(status, out=message['errorDetail']['message'])
elif isinstance(ret, tuple):
id_, out = ret[0], ret[1]
if id_:
_valid(status, id_=id_, out=out, comment='Image built')
else:
_invalid(status, id_=id_, out=out)
except Exception:
_invalid(status,
out=traceback.format_exc(),
comment='Unexpected error while building an image')
return status
return status
def remove_image(image):
'''
Remove an image from a system.
image
name of image
CLI Example:
.. code-block:: bash
salt '*' docker.remove_image <image>
'''
client = _get_client()
status = base_status.copy()
# will raise an error if no deletion
try:
infos = _get_image_infos(image)
if infos:
status['id'] = infos['Id']
try:
client.remove_image(infos['Id'])
except Exception:
_invalid(status,
id_=image,
out=traceback.format_exc(),
comment='Image could not be deleted')
try:
infos = _get_image_infos(image)
_invalid(status,
comment=(
'Image marked to be deleted but not deleted yet'))
except Exception:
_valid(status, id_=image, comment='Image deleted')
else:
_invalid(status)
except Exception:
_invalid(status,
out=traceback.format_exc(),
comment='Image does not exist: {0}'.format(image))
return status
def inspect_image(image):
'''
Inspect the status of an image and return relative data. This is similar to
``docker inspect`` command but only for images
image
name of the image
CLI Example:
.. code-block:: bash
salt '*' docker.inspect_image <image>
'''
status = base_status.copy()
try:
infos = _get_image_infos(image)
try:
for k in ['Size']:
infos[
'Human_{0}'.format(k)
] = _sizeof_fmt(int(infos[k]))
except Exception:
pass
_valid(status, id_=image, out=infos)
except Exception:
_invalid(status, id_=image, out=traceback.format_exc(),
comment='Image does not exist')
return status
def _parse_image_multilogs_string(ret):
'''
Parse image log strings into grokable data
'''
image_logs, infos = [], None
if ret and ret.strip().startswith('{') and ret.strip().endswith('}'):
pushd = 0
buf = ''
for char in ret:
buf += char
if char == '{':
pushd += 1
if char == '}':
pushd -= 1
if pushd == 0:
try:
buf = json.loads(buf)
except Exception:
pass
else:
image_logs.append(buf)
buf = ''
image_logs.reverse()
    # Valid states when pulling an image from the docker registry
valid_states = [
'Download complete',
'Already exists',
]
# search last layer grabbed
for l in image_logs:
if isinstance(l, dict):
if l.get('status') in valid_states and l.get('id'):
infos = _get_image_infos(l['id'])
break
return image_logs, infos
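# Illustrative shape of what _parse_image_multilogs_string() returns for a
# successful pull (ids and messages below are made up):
#     image_logs ~ [{'status': 'Download complete', 'id': '2c80228370c9'}, ...]
#     infos      ~ the dict returned by _get_image_infos('2c80228370c9')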
def _pull_assemble_error_status(status, ret, logs):
'''
Given input in this form::
u'{"status":"Pulling repository foo/ubuntubox"}:
"image (latest) from foo/ ...
rogress":"complete","id":"2c80228370c9"}'
construct something like that (load JSON data is possible)::
[u'{"status":"Pulling repository foo/ubuntubox"',
{"status":"Download","progress":"complete","id":"2c80228370c9"}]
'''
comment = 'An error occurred pulling your image'
out = ''
try:
out = '\n' + ret
for err_log in logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
out = traceback.format_exc()
_invalid(status, out=out, comment=comment)
return status
def pull(repo, tag=None, insecure_registry=False):
'''
Pulls an image from any registry. See documentation at top of this page to
configure authenticated access
repo
name of repository
tag
specific tag to pull (Optional)
insecure_registry
set as ``True`` to use insecure (non HTTPS) registry. Default is ``False``
(only available if using docker-py >= 0.5.0)
CLI Example:
.. code-block:: bash
salt '*' docker.pull <repository> [tag]
'''
client = _get_client()
status = base_status.copy()
try:
kwargs = {'tag': tag}
# if docker-py version is greater than 0.5.0 use the
# insecure_registry parameter
if salt.utils.compare_versions(ver1=docker.__version__,
oper='>=',
ver2='0.5.0'):
kwargs['insecure_registry'] = insecure_registry
ret = client.pull(repo, **kwargs)
if ret:
image_logs, infos = _parse_image_multilogs_string(ret)
if infos and infos.get('Id', None):
repotag = repo
if tag:
repotag = '{0}:{1}'.format(repo, tag)
_valid(status,
out=image_logs if image_logs else ret,
id_=infos['Id'],
comment='Image {0} was pulled ({1})'.format(
repotag, infos['Id']))
else:
_pull_assemble_error_status(status, ret, image_logs)
else:
_invalid(status)
except Exception:
_invalid(status, id_=repo, out=traceback.format_exc())
return status
def _push_assemble_error_status(status, ret, logs):
'''
Given input in this form::
u'{"status":"Pulling repository foo/ubuntubox"}:
"image (latest) from foo/ ...
rogress":"complete","id":"2c80228370c9"}'
construct something like that (load json data is possible)::
[u'{"status":"Pulling repository foo/ubuntubox"',
{"status":"Download","progress":"complete","id":"2c80228370c9"}]
'''
comment = 'An error occurred pushing your image'
status['out'] = ''
try:
status['out'] += '\n' + ret
for err_log in logs:
if isinstance(err_log, dict):
if 'errorDetail' in err_log:
if 'code' in err_log['errorDetail']:
msg = '\n{0}\n{1}: {2}'.format(
err_log['error'],
err_log['errorDetail']['code'],
err_log['errorDetail']['message']
)
else:
msg = '\n{0}\n{1}'.format(
err_log['error'],
err_log['errorDetail']['message'],
)
comment += msg
except Exception:
trace = traceback.format_exc()
status['out'] = (
'An error occurred while '
'parsing error output:\n{0}'
).format(trace)
_invalid(status, comment=comment)
return status
def push(repo, tag=None, quiet=False, insecure_registry=False):
'''
Pushes an image to any registry. See documentation at top of this page to
configure authenticated access
repo
name of repository
tag
specific tag to push (Optional)
quiet
set as ``True`` to quiet output, Default is ``False``
insecure_registry
set as ``True`` to use insecure (non HTTPS) registry. Default is ``False``
(only available if using docker-py >= 0.5.0)
CLI Example:
.. code-block:: bash
salt '*' docker.push <repository> [tag] [quiet=True|False]
'''
client = _get_client()
status = base_status.copy()
registry, repo_name = docker.auth.resolve_repository_name(repo)
try:
kwargs = {'tag': tag}
# if docker-py version is greater than 0.5.0 use the
# insecure_registry parameter
if salt.utils.compare_versions(ver1=docker.__version__,
oper='>=',
ver2='0.5.0'):
kwargs['insecure_registry'] = insecure_registry
ret = client.push(repo, **kwargs)
if ret:
image_logs, infos = _parse_image_multilogs_string(ret)
if image_logs:
repotag = repo_name
if tag:
repotag = '{0}:{1}'.format(repo, tag)
if not quiet:
status['out'] = image_logs
else:
status['out'] = None
laststatus = image_logs[2].get('status', None)
if laststatus and (
('already pushed' in laststatus)
or ('Pushing tags for rev' in laststatus)
or ('Pushing tag for rev' in laststatus)
):
status['status'] = True
status['id'] = _get_image_infos(repo)['Id']
status['comment'] = 'Image {0}({1}) was pushed'.format(
repotag, status['id'])
else:
_push_assemble_error_status(status, ret, image_logs)
else:
status['out'] = ret
_push_assemble_error_status(status, ret, image_logs)
else:
_invalid(status)
except Exception:
_invalid(status, id_=repo, out=traceback.format_exc())
return status
def _run_wrapper(status, container, func, cmd, *args, **kwargs):
'''
Wrapper to a cmdmod function
Idea is to prefix the call to cmdrun with the relevant driver to
execute inside a container context
.. note::
Only lxc and native drivers are implemented.
status
status object
container
container id to execute in
func
cmd function to execute
cmd
command to execute in the container
'''
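    # For example (illustrative container id and pid), 'ls -l /etc' becomes:
    #     lxc-attach -n <container_id> -- ls -l /etc               (lxc driver)
    #     nsenter --target <pid> --mount --uts --ipc --net --pid -- ls -l /etc
    #                                                           (native driver)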
client = _get_client()
    # For old versions of docker, lxc was the only supported driver,
    # so it is used as the safe default here.
driver = client.info().get('ExecutionDriver', 'lxc-')
container_info = _get_container_infos(container)
container_id = container_info['Id']
if driver.startswith('lxc-'):
full_cmd = 'lxc-attach -n {0} -- {1}'.format(container_id, cmd)
elif driver.startswith('native-'):
if HAS_NSENTER:
# http://jpetazzo.github.io/2014/03/23/lxc-attach-nsinit-nsenter-docker-0-9/
container_pid = container_info['State']['Pid']
if container_pid == 0:
_invalid(status, id_=container,
comment='Container is not running')
return status
full_cmd = (
'nsenter --target {pid} --mount --uts --ipc --net --pid'
' -- {cmd}'.format(pid=container_pid, cmd=cmd)
)
else:
raise CommandExecutionError(
'nsenter is not installed on the minion, cannot run command'
)
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command'
' to attach to the container'.format(driver))
# now execute the command
comment = 'Executed {0}'.format(full_cmd)
try:
f = __salt__[func]
ret = f(full_cmd, *args, **kwargs)
if ((isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0))
or (func == 'cmd.retcode' and ret != 0)):
_invalid(status, id_=container, out=ret, comment=comment)
else:
_valid(status, id_=container, out=ret, comment=comment)
except Exception:
_invalid(status, id_=container, comment=comment, out=traceback.format_exc())
return status
def load(imagepath):
'''
    Load into docker the image file at ``imagepath`` that was generated by
    a ``docker save`` command
e.g. `docker load < imagepath`
imagepath
imagepath to docker tar file
CLI Example:
.. code-block:: bash
salt '*' docker.load /path/to/image
'''
status = base_status.copy()
if os.path.isfile(imagepath):
try:
dockercmd = ['docker', 'load', '-i', imagepath]
ret = __salt__['cmd.run'](dockercmd, python_shell=False)
if isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0):
return _invalid(status, id_=None,
out=ret,
comment='Command to load image {0} failed.'.format(imagepath))
_valid(status, id_=None, out=ret, comment='Image load success')
except Exception:
_invalid(status, id_=None,
comment="Image not loaded.",
out=traceback.format_exc())
else:
_invalid(status, id_=None,
comment='Image file {0} could not be found.'.format(imagepath),
out=traceback.format_exc())
return status
def save(image, filename):
'''
.. versionadded:: 2015.5.0
Save the specified image to filename from docker
e.g. `docker save image > filename`
image
name of image
filename
The filename of the saved docker image
CLI Example:
.. code-block:: bash
salt '*' docker.save arch_image /path/to/save/image
'''
status = base_status.copy()
ok = False
try:
_info = _get_image_infos(image)
ok = True
except Exception:
_invalid(status, id_=image,
comment="docker image {0} could not be found.".format(image),
out=traceback.format_exc())
if ok:
try:
dockercmd = ['docker', 'save', '-o', filename, image]
ret = __salt__['cmd.run'](dockercmd)
if isinstance(ret, dict) and ('retcode' in ret) and (ret['retcode'] != 0):
return _invalid(status,
id_=image,
out=ret,
comment='Command to save image {0} to {1} failed.'.format(image, filename))
_valid(status, id_=image, out=ret, comment='Image save success')
except Exception:
_invalid(status, id_=image, comment="Image not saved.", out=traceback.format_exc())
return status
def run(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run<salt.modules.cmdmod.run>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run', cmd)
def run_all(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_all<salt.modules.cmdmod.run_all>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is ``False`` if
command failed to execute.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_all <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_all', cmd)
def run_stderr(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_stderr<salt.modules.cmdmod.run_stderr>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_stderr <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_stderr', cmd)
def run_stdout(container, cmd):
'''
Wrapper for :py:func:`cmdmod.run_stdout<salt.modules.cmdmod.run_stdout>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
The return is a bit different as we use the docker struct.
Output of the command is in 'out' and result is always ``True``.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.run_stdout <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.run_stdout', cmd)
def retcode(container, cmd):
'''
Wrapper for :py:func:`cmdmod.retcode<salt.modules.cmdmod.retcode>` inside a container context
container
container id (or grain)
cmd
command to execute
.. note::
        The return is ``True`` or ``False`` depending on the command's success.
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.retcode <container id> 'ls -l /etc'
'''
status = base_status.copy()
return _run_wrapper(
status, container, 'cmd.retcode', cmd)['status']
def get_container_root(container):
'''
Get the container rootfs path
container
container id or grain
CLI Example:
.. code-block:: bash
salt '*' docker.get_container_root <container id>
'''
default_path = os.path.join(
'/var/lib/docker',
'containers',
_get_container_infos(container)['Id'],
)
default_rootfs = os.path.join(default_path, 'rootfs')
rootfs_re = re.compile(r'^lxc.rootfs\s*=\s*(.*)\s*$', re.U)
    rootfs = default_rootfs
    try:
lxcconfig = os.path.join(default_path, 'config.lxc')
with salt.utils.fopen(lxcconfig) as fhr:
lines = fhr.readlines()
rlines = lines[:]
rlines.reverse()
for rl in rlines:
robj = rootfs_re.search(rl)
if robj:
rootfs = robj.groups()[0]
break
except Exception:
rootfs = default_rootfs
return rootfs
def _script(status,
container,
source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
run_func_=None,
no_clean=False,
saltenv='base',
output_loglevel='info',
quiet=False,
**kwargs):
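    # Added summary (not in the original module): this helper caches/renders the
    # script on the minion, copies it into <container rootfs>/tmp, marks it
    # executable, runs it inside the container via ``run_func_`` (cmd.run_all by
    # default), and removes it again afterwards unless ``no_clean`` is set.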
try:
if not run_func_:
run_func_ = run_all
rpath = get_container_root(container)
tpath = os.path.join(rpath, 'tmp')
if isinstance(env, string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
path = salt.utils.mkstemp(dir=tpath)
if template:
__salt__['cp.get_template'](
source, path, template, saltenv, **kwargs)
else:
fn_ = __salt__['cp.cache_file'](source, saltenv)
if not fn_:
return {'pid': 0,
'retcode': 1,
'stdout': '',
'stderr': '',
'cache_error': True}
shutil.copyfile(fn_, path)
in_path = os.path.join('/', os.path.relpath(path, rpath))
os.chmod(path, 0o755)
command = in_path + ' ' + str(args) if args else in_path
status = run_func_(container,
command,
cwd=cwd,
stdin=stdin,
output_loglevel=output_loglevel,
quiet=quiet,
runas=runas,
shell=shell,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale)
if not no_clean:
os.remove(path)
except Exception:
_invalid(status, id_=container, out=traceback.format_exc())
return status
def script(container,
source,
args=None,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
no_clean=False,
saltenv='base'):
'''
Wrapper for :py:func:`cmdmod.script<salt.modules.cmdmod.script>` inside a container context
container
container id (or grain)
additional parameters
See :py:func:`cmd.script <salt.modules.cmdmod.script>`
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
Download a script from a remote location and execute the script in the container.
The script can be located on the salt master file server or on an HTTP/FTP server.
The script will be executed directly, so it can be written in any available programming
language.
    The script can also be formatted as a template; the default is jinja. Arguments for the
script can be specified as well.
CLI Example:
.. code-block:: bash
salt '*' docker.script <container id> salt://docker_script.py
salt '*' docker.script <container id> salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt '*' docker.script <container id> salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
A string of standard input can be specified for the command to be run using the stdin
parameter. This can be useful in cases where sensitive information must be read from
standard input:
CLI Example:
.. code-block:: bash
salt '*' docker.script <container id> salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
'''
status = base_status.copy()
if isinstance(env, string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
return _script(status,
container,
source,
args=args,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
no_clean=no_clean,
saltenv=saltenv)
def script_retcode(container,
source,
cwd=None,
stdin=None,
runas=None,
shell=cmdmod.DEFAULT_SHELL,
env=None,
template='jinja',
umask=None,
timeout=None,
reset_system_locale=True,
no_clean=False,
saltenv='base'):
'''
Wrapper for :py:func:`cmdmod.script_retcode<salt.modules.cmdmod.script_retcode>` inside a container context
container
container id (or grain)
additional parameters
See :py:func:`cmd.script_retcode <salt.modules.cmdmod.script_retcode>`
.. warning::
Be advised that this function allows for raw shell access to the named
container! If allowing users to execute this directly it may allow more
rights than intended!
CLI Example:
.. code-block:: bash
salt '*' docker.script_retcode <container id> salt://docker_script.py
'''
if isinstance(env, string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
status = base_status.copy()
return _script(status,
container,
source=source,
cwd=cwd,
stdin=stdin,
runas=runas,
shell=shell,
template=template,
umask=umask,
timeout=timeout,
reset_system_locale=reset_system_locale,
run_func_=retcode,
no_clean=no_clean,
saltenv=saltenv)
| 29.400864 | 131 | 0.555368 |
e08b51033bd93a87cff74605d2f0ef0cfbdb3257 | 287 | py | Python | cxxheaderparser/errors.py | michelp/cxxheaderparser | 83bb2903790cf448bf838cdb8a93ca96e758bd1a | ["BSD-3-Clause"] | 12 | 2020-12-28T09:40:53.000Z | 2022-03-13T15:36:21.000Z | cxxheaderparser/errors.py | michelp/cxxheaderparser | 83bb2903790cf448bf838cdb8a93ca96e758bd1a | ["BSD-3-Clause"] | 28 | 2021-01-04T14:58:59.000Z | 2022-01-03T03:00:16.000Z | cxxheaderparser/errors.py | michelp/cxxheaderparser | 83bb2903790cf448bf838cdb8a93ca96e758bd1a | ["BSD-3-Clause"] | 1 | 2021-11-06T03:44:53.000Z | 2021-11-06T03:44:53.000Z |
import typing
from .lexer import LexToken
class CxxParseError(Exception):
"""
Exception raised when a parsing error occurs
"""
def __init__(self, msg: str, tok: typing.Optional["LexToken"] = None) -> None:
Exception.__init__(self, msg)
self.tok = tok
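# Illustrative usage sketch (added; not part of the original file):
#   raise CxxParseError("unexpected token", tok=current_token)
# where ``current_token`` would be the LexToken being processed when the error occurred.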
| 20.5 | 82 | 0.655052 |
22620d601d5d82377ec03afaa3746cbb24862bff | 1,302 | py | Python | cogs/general.py | Gumbachi/HallBot | eb779507ffd2f17b7f9776c3dda2a55de34cae6e | ["MIT"] | null | null | null | cogs/general.py | Gumbachi/HallBot | eb779507ffd2f17b7f9776c3dda2a55de34cae6e | ["MIT"] | null | null | null | cogs/general.py | Gumbachi/HallBot | eb779507ffd2f17b7f9776c3dda2a55de34cae6e | ["MIT"] | null | null | null |
import discord
import random
import asyncio
from common.cfg import devguilds
from discord.commands import slash_command
class GeneralCommands(discord.Cog):
"""Handles simple commands and listeners."""
def __init__(self, bot):
self.bot = bot
@slash_command(name="howdy", guild_ids=devguilds)
async def howdy(self, ctx):
"""Command to check if bot is alive or if you need a friend."""
print("HOWDY")
await ctx.respond(f"Howdy {ctx.author.mention}!")
@discord.Cog.listener()
async def on_message(self, message):
"""Called for every message sent that the bot can see"""
# Ignore bot messages
if message.author.bot:
return
if message.content.lower() == "brian":
await message.reply("hall")
if message.content.lower() == "hall":
await message.reply("brian")
@discord.Cog.listener()
async def on_guild_join(self, guild):
"""Bot has joined a guild."""
print(f"Joined {guild.name}")
@discord.Cog.listener()
async def on_guild_remove(self, guild):
"""Bot is kicked/removed."""
print(f"Left {guild.name}")
def setup(bot):
"""Entry point for loading cogs. Required for all cogs"""
bot.add_cog(GeneralCommands(bot))
| 27.702128 | 71 | 0.632873 |
4bde73a9882de4feb721f2b4a076a1dbef24803a | 10,445 | py | Python | Sketches/THF/3D/Util3D.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/THF/3D/Util3D.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/THF/3D/Util3D.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | ["Apache-2.0"] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
3D Utility library
=====================
TODO
"""
from math import *
# =========================
# Control3D contains movement commands
# =========================
class Control3D:
POSITION, REL_POSITION, ROTATION, REL_ROTATION, SCALING, REL_SCALING = range(6)
def __init__(self, type, amount):
# Command types
self.type = type
self.amount = amount
# =====================
# Vector: used for handling 3D Vectors
# =====================
class Vector:
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def zero(self):
self.x = 0.0
self.y = 0.0
self.z = 0.0
return self
def invert(self):
self.x = -self.x
self.y = -self.y
self.z = -self.z
return self
def __str__(self):
return str([self.x,self.y,self.z])
def __eq__(self, other):
if self.x == other.x and self.y == other.y and self.z == other.z:
return True
return False
def __ne__(self, other):
if self.x == other.x and self.y == other.y and self.z == other.z:
return False
return True
def copy(self):
return Vector(self.x,self.y,self.z)
def length(self):
return sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def dot(self, other):
return self.x*other.x + self.y*other.y + self.z*other.z
def cross(self, other):
return Vector(self.y*other.z - self.z*other.y, self.z*other.x - self.x*other.z, self.x*other.y - self.y*other.x)
def norm(self):
l = sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
self.x /= l
self.y /= l
self.z /= l
return self
def __mul__(self, factor):
return Vector(self.x * factor, self.y * factor, self.z * factor)
def __div__(self, factor):
return Vector(self.x / factor, self.y / factor, self.z / factor)
def __mod__(self, factor):
return Vector(self.x % factor, self.y % factor, self.z % factor)
def __add__(self, other):
return Vector(self.x +other.x, self.y +other.y, self.z +other.z)
def __sub__(self, other):
return Vector(self.x -other.x, self.y -other.y, self.z-other.z)
def __imul__(self, factor):
return Vector(self.x * factor, self.y * factor, self.z * factor)
def __idiv__(self, factor):
return Vector(self.x / factor, self.y / factor, self.z / factor)
def __imod__(self, factor):
return Vector(self.x % factor, self.y % factor, self.z % factor)
def __iadd__(self, other):
return Vector(self.x +other.x, self.y +other.y, self.z +other.z)
def __isub__(self, other):
return Vector(self.x -other.x, self.y -other.y, self.z-other.z)
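# Brief usage sketch for Vector (added; illustrative only):
#   v = Vector(1, 2, 2)
#   v.length()                                  # 3.0
#   Vector(1, 0, 0).cross(Vector(0, 1, 0))      # Vector(0, 0, 1)
#   Vector(1, 0, 0).dot(Vector(0, 1, 0))        # 0.0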
# =============================
# Transform: for generating transformation matrices
# =============================
class Transform:
def __init__(self):
# load identity
self.m = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
# return transformation matrix
def getMatrix(self):
return self.m
# reset to identity matrix
def reset(self):
self.m = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
def applyRotation(self, xyzangle):
global pi
t = Transform()
        # convert degrees to radians
xyzangle *= pi/180.0
#rotation around x axis
if xyzangle.x != 0:
t.m[5] = cos(xyzangle.x)
t.m[6] = sin(xyzangle.x)
t.m[9] = -sin(xyzangle.x)
t.m[10] = cos(xyzangle.x)
self.m = (self*t).m
#rotation around y axis
t.reset()
if xyzangle.y != 0:
t.m[0] = cos(xyzangle.y)
t.m[2] = -sin(xyzangle.y)
t.m[8] = sin(xyzangle.y)
t.m[10] = cos(xyzangle.y)
self.m = (self*t).m
#rotation around z axis
t.reset()
if xyzangle.z != 0:
t.m[0] = cos(xyzangle.z)
t.m[1] = sin(xyzangle.z)
t.m[4] = -sin(xyzangle.z)
t.m[5] = cos(xyzangle.z)
self.m = (self*t).m
def applyTranslation(self, vector):
t = Transform()
if (vector.x != 0 or vector.y != 0 or vector.z != 0):
t.m[12] = vector.x
t.m[13] = vector.y
t.m[14] = vector.z
self.m = (self*t).m
def applyScaling(self, vector):
t = Transform()
if (vector.x != 0 or vector.y != 0 or vector.z != 0):
t.m[0] = vector.x
t.m[5] = vector.y
t.m[10] = vector.z
self.m = (self*t).m
# Vector multiplication
def transformVector(self, v):
return Vector(self.m[0]*v.x + self.m[4]*v.y + self.m[8]*v.z + self.m[12],
self.m[1]*v.x + self.m[5]*v.y + self.m[9]*v.z + self.m[13],
self.m[2]*v.x + self.m[6]*v.y + self.m[10]*v.z + self.m[14])
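    # Added note: the 16-element list appears to be stored column-major in the
    # OpenGL style (translation in m[12..14]); transformVector above therefore
    # computes M * v for a point v with an implicit w = 1.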
# Matrix multiplication
def __mul__(self,other):
x = Transform()
x.m[0] = self.m[0]*other.m[0] + self.m[1]*other.m[4] + self.m[2]*other.m[8] + self.m[3]*other.m[12];
x.m[1] = self.m[0]*other.m[1] + self.m[1]*other.m[5] + self.m[2]*other.m[9] + self.m[3]*other.m[13];
x.m[2] = self.m[0]*other.m[2] + self.m[1]*other.m[6] + self.m[2]*other.m[10] + self.m[3]*other.m[14];
x.m[3] = self.m[0]*other.m[3] + self.m[1]*other.m[7] + self.m[2]*other.m[11] + self.m[3]*other.m[15];
x.m[4] = self.m[4]*other.m[0] + self.m[5]*other.m[4] + self.m[6]*other.m[8] + self.m[7]*other.m[12];
x.m[5] = self.m[4]*other.m[1] + self.m[5]*other.m[5] + self.m[6]*other.m[9] + self.m[7]*other.m[13];
x.m[6] = self.m[4]*other.m[2] + self.m[5]*other.m[6] + self.m[6]*other.m[10] + self.m[7]*other.m[14];
x.m[7] = self.m[4]*other.m[3] + self.m[5]*other.m[7] + self.m[6]*other.m[11] + self.m[7]*other.m[15];
x.m[8] = self.m[8]*other.m[0] + self.m[9]*other.m[4] + self.m[10]*other.m[8] + self.m[11]*other.m[12];
x.m[9] = self.m[8]*other.m[1] + self.m[9]*other.m[5] + self.m[10]*other.m[9] + self.m[11]*other.m[13];
x.m[10] = self.m[8]*other.m[2] + self.m[9]*other.m[6] + self.m[10]*other.m[10] + self.m[11]*other.m[14];
x.m[11] = self.m[8]*other.m[3] + self.m[9]*other.m[7] + self.m[10]*other.m[11] + self.m[11]*other.m[15];
x.m[12] = self.m[12]*other.m[0] + self.m[13]*other.m[4] + self.m[14]*other.m[8] + self.m[15]*other.m[12];
x.m[13] = self.m[12]*other.m[1] + self.m[13]*other.m[5] + self.m[14]*other.m[9] + self.m[15]*other.m[13];
x.m[14] = self.m[12]*other.m[2] + self.m[13]*other.m[6] + self.m[14]*other.m[10] + self.m[15]*other.m[14];
x.m[15] = self.m[12]*other.m[3] + self.m[13]*other.m[7] + self.m[14]*other.m[11] + self.m[15]*other.m[15];
return x
# Immediate matrix multiplication
def __imul__(self,other):
x = Transform()
x.m[0] = self.m[0]*other.m[0] + self.m[1]*other.m[4] + self.m[2]*other.m[8] + self.m[3]*other.m[12];
x.m[1] = self.m[0]*other.m[1] + self.m[1]*other.m[5] + self.m[2]*other.m[9] + self.m[3]*other.m[13];
x.m[2] = self.m[0]*other.m[2] + self.m[1]*other.m[6] + self.m[2]*other.m[10] + self.m[3]*other.m[14];
x.m[3] = self.m[0]*other.m[3] + self.m[1]*other.m[7] + self.m[2]*other.m[11] + self.m[3]*other.m[15];
x.m[4] = self.m[4]*other.m[0] + self.m[5]*other.m[4] + self.m[6]*other.m[8] + self.m[7]*other.m[12];
x.m[5] = self.m[4]*other.m[1] + self.m[5]*other.m[5] + self.m[6]*other.m[9] + self.m[7]*other.m[13];
x.m[6] = self.m[4]*other.m[2] + self.m[5]*other.m[6] + self.m[6]*other.m[10] + self.m[7]*other.m[14];
x.m[7] = self.m[4]*other.m[3] + self.m[5]*other.m[7] + self.m[6]*other.m[11] + self.m[7]*other.m[15];
x.m[8] = self.m[8]*other.m[0] + self.m[9]*other.m[4] + self.m[10]*other.m[8] + self.m[11]*other.m[12];
x.m[9] = self.m[8]*other.m[1] + self.m[9]*other.m[5] + self.m[10]*other.m[9] + self.m[11]*other.m[13];
x.m[10] = self.m[8]*other.m[2] + self.m[9]*other.m[6] + self.m[10]*other.m[10] + self.m[11]*other.m[14];
x.m[11] = self.m[8]*other.m[3] + self.m[9]*other.m[7] + self.m[10]*other.m[11] + self.m[11]*other.m[15];
x.m[12] = self.m[12]*other.m[0] + self.m[13]*other.m[4] + self.m[14]*other.m[8] + self.m[15]*other.m[12];
x.m[13] = self.m[12]*other.m[1] + self.m[13]*other.m[5] + self.m[14]*other.m[9] + self.m[15]*other.m[13];
x.m[14] = self.m[12]*other.m[2] + self.m[13]*other.m[6] + self.m[14]*other.m[10] + self.m[15]*other.m[14];
x.m[15] = self.m[12]*other.m[3] + self.m[13]*other.m[7] + self.m[14]*other.m[11] + self.m[15]*other.m[15];
        self.m = x.m
        return self
if __name__=='__main__':
# Test for Transform (not very exhaustive :)
print "Testing transform..."
t = Transform()
v = Vector(0,0,0)
t.applyTranslation(Vector(1,2,3))
vt = t.transformVector(v)
print str(vt), "(1,2,3 expected)"
t.reset();
t.applyRotation(Vector(90,0,0))
print str(t.transformVector(vt)), "(1,-3,2 expected)"
t.reset();
v1 = Vector(1,0,0)
t.applyRotation(Vector(0,0,90))
print str(t.transformVector(v1)), "(0,1,0 expected)"
t.reset();
v2 = Vector(1,-2,3)
t.applyScaling(Vector(2,3,1))
print str(t.transformVector(v2)), "(2,-6,3 expected)"
print
| 39.415094 | 121 | 0.530589 |
2e40dd0712b53e988528391c836df15e30467a3c | 317 | py | Python | 3/max_prod_word_len.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | ["Apache-2.0"] | 4 | 2018-03-07T02:56:03.000Z | 2021-06-15T05:43:31.000Z | 3/max_prod_word_len.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | ["Apache-2.0"] | null | null | null | 3/max_prod_word_len.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | ["Apache-2.0"] | 1 | 2021-09-02T12:05:15.000Z | 2021-09-02T12:05:15.000Z |
class Solution(object):
def maxProduct(self, words):
d = {}
for w in words:
mask = 0
for c in set(w):
mask |= (1 << (ord(c) - 97))
d[mask] = max(d.get(mask, 0), len(w))
return max([d[x] * d[y] for x in d for y in d if not x & y] or [0])
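# Added explanation (not in the original file): each word is reduced to a 26-bit
# mask of the letters it contains, so two words share no letters exactly when the
# bitwise AND of their masks is zero; the answer is the largest len(w1)*len(w2)
# over such pairs. Illustrative usage:
#   Solution().maxProduct(["abcw", "baz", "foo", "bar", "xtfn", "abcdef"])  # -> 16 ("abcw" * "xtfn")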
| 31.7 | 75 | 0.432177 |
f79ad1ef860269c0bc696d5f75a3d65ea6b68661 | 2,545 | py | Python | openpyxl/worksheet/tests/test_controls.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | ["MIT"] | 12 | 2019-08-07T16:48:21.000Z | 2021-12-13T02:47:22.000Z | openpyxl/worksheet/tests/test_controls.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | ["MIT"] | 19 | 2019-12-29T05:07:36.000Z | 2021-04-22T18:09:49.000Z | openpyxl/worksheet/tests/test_controls.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | ["MIT"] | 1 | 2020-05-26T20:33:10.000Z | 2020-05-26T20:33:10.000Z |
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from openpyxl.drawing.spreadsheet_drawing import AnchorMarker
from openpyxl.worksheet.ole import ObjectAnchor
@pytest.fixture
def ControlProperty():
from ..controls import ControlProperty
return ControlProperty
class TestControlProperty:
def test_ctor(self, ControlProperty):
_from = AnchorMarker()
to = AnchorMarker()
anchor = ObjectAnchor(_from=_from, to=to)
prop = ControlProperty(anchor=anchor)
xml = tostring(prop.to_tree())
expected = """
<controlPr autoFill="1" autoLine="1" autoPict="1" cf="pict" defaultSize="1" disabled="0" locked="1" print="1"
recalcAlways="0" uiObject="0"
xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing">
<anchor moveWithCells="0" sizeWithCells="0">
<xdr:from>
<xdr:col>0</xdr:col>
<xdr:colOff>0</xdr:colOff>
<xdr:row>0</xdr:row>
<xdr:rowOff>0</xdr:rowOff>
</xdr:from>
<xdr:to>
<xdr:col>0</xdr:col>
<xdr:colOff>0</xdr:colOff>
<xdr:row>0</xdr:row>
<xdr:rowOff>0</xdr:rowOff>
</xdr:to>
</anchor>
</controlPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, ControlProperty):
src = """
<controlPr
xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing"
autoLine="0">
<anchor moveWithCells="1">
<from>
<xdr:col>4</xdr:col>
<xdr:colOff>704850</xdr:colOff>
<xdr:row>59</xdr:row>
<xdr:rowOff>114300</xdr:rowOff>
</from>
<to>
<xdr:col>4</xdr:col>
<xdr:colOff>1190625</xdr:colOff>
<xdr:row>61</xdr:row>
<xdr:rowOff>47625</xdr:rowOff>
</to>
</anchor>
</controlPr>
"""
node = fromstring(src)
prop = ControlProperty.from_tree(node)
_from = AnchorMarker(col=4, colOff=704850, row=59, rowOff=114300)
to = AnchorMarker(col=4, colOff=1190625, row=61, rowOff=47625)
anchor = ObjectAnchor(_from=_from, to=to, moveWithCells=True)
assert prop == ControlProperty(anchor=anchor, autoLine=False)
| 33.051948 | 117 | 0.589391 |
0f38ddcf4b3d4c615c8dab9c7f1a16c4e1bc0509 | 548 | py | Python | pytest_splunk_env/splunk/helmut/manager/__init__.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | 1 | 2021-03-18T23:35:08.000Z | 2021-03-18T23:35:08.000Z | pytest_splunk_env/splunk/helmut/manager/__init__.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | null | null | null | pytest_splunk_env/splunk/helmut/manager/__init__.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | 1 | 2022-03-27T16:55:33.000Z | 2022-03-27T16:55:33.000Z |
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""
@author: Nicklas Ansman-Giertz
@contact: U{ngiertz@splunk.com<mailto:ngiertz@splunk.com>}
@since: 2011-11-23
"""
from abc import ABCMeta
from future.utils import with_metaclass
from pytest_splunk_env.splunk.helmut.log import Logging
class Manager(with_metaclass(ABCMeta, Logging)):
def __init__(self, connector):
self._connector = connector
Logging.__init__(self)
@property
def connector(self):
return self._connector
| 21.076923 | 58 | 0.731752 |
ccdfd5f00570c7867b88540af76e1a18b5c22a89 | 7,203 | py | Python | train.py | gouezec/aipnd-project | d1aaf1866d0db80b6c8560aac128a927de9957f1 | ["MIT"] | null | null | null | train.py | gouezec/aipnd-project | d1aaf1866d0db80b6c8560aac128a927de9957f1 | ["MIT"] | null | null | null | train.py | gouezec/aipnd-project | d1aaf1866d0db80b6c8560aac128a927de9957f1 | ["MIT"] | null | null | null |
# coding: ascii
import os
import time
import copy
import argparse
import json
import numpy as np
import torch
from torch import nn, optim
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from flowerclassifier import create_model
def train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs=25, device='cpu'):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'valid']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
dataset_size = 0
for inputs, labels in dataloaders[phase]:
batch_size = inputs.size()[0]
dataset_size = dataset_size + batch_size
print( '|' * batch_size, end='', flush=True)
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
print()
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_size
epoch_acc = running_corrects.double() / dataset_size
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def main():
# Define accepted arguments
parser = argparse.ArgumentParser(description='Train flower classifier')
parser.add_argument('data_dir', help='directory path where training and validation datasets are stored')
parser.add_argument('--save_dir', type=str, action='store', help='directory path where to store checkpoint')
parser.add_argument('--gpu', action='store_true', help='enable gpu usage')
parser.add_argument('--learning_rate', type=float, default=0.001, action='store', help='set learning rate')
parser.add_argument('--hidden_units', type=int, default=4096, action='store', help='set number of hidden units in classifier')
parser.add_argument('--epochs', type=int, default=20, action='store', help='set number of epochs')
parser.add_argument('--arch', type=str, choices=['vgg16', 'resnet18'], default='vgg16', action='store', help='set pretrained model architecture')
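    # Example invocation (added; the data directory path is hypothetical):
    #   python train.py flowers/ --save_dir checkpoints --arch vgg16 \
    #       --hidden_units 4096 --learning_rate 0.001 --epochs 20 --gpu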
# Parse arguments
args = parser.parse_args()
checkpoint_path = os.path.join(args.save_dir, 'model.pkl') if args.save_dir is not None else None
train_dir = os.path.join(args.data_dir, 'train')
valid_dir = os.path.join(args.data_dir, 'valid')
hidden_units = args.hidden_units
device = "cuda" if args.gpu else "cpu"
epochs = args.epochs
learning_rate = args.learning_rate
arch = args.arch
# Define your transforms for the training, validation, and testing sets
data_transforms = { 'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]) }
# Load the datasets with ImageFolder
image_datasets = {'train': ImageFolder(train_dir, data_transforms['train']),
'valid': ImageFolder(valid_dir, data_transforms['valid']) }
    # Using the image datasets and the transforms, define the dataloaders
dataloaders = { 'train': DataLoader(image_datasets['train'], batch_size=8,shuffle=True, num_workers=2),
'valid': DataLoader(image_datasets['valid'], batch_size=8,shuffle=True, num_workers=2)}
    # Instantiate model according to arch and hidden units
model = create_model(arch, hidden_units)
model.class_to_idx = image_datasets['train'].class_to_idx
# Move it to the requested device, gpu or cpu
model = model.to(device)
    # Choose cross-entropy loss since this is a multi-class classification problem
criterion = nn.CrossEntropyLoss()
    # Only the classifier parameters are passed to the optimizer
optimizer = optim.SGD(model.classifier.parameters(), lr=learning_rate, momentum=0.9)
# Decay LR by a factor of 0.1 every 5 epochs
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# Train the model
model = train_model(model, dataloaders, criterion, optimizer, exp_lr_scheduler, num_epochs=epochs, device=device)
# Save the checkpoint
if checkpoint_path is not None:
model.to('cpu')
torch.save({
'class_to_idx': model.class_to_idx,
'classifier_state_dict': model.classifier.state_dict(),
'hidden_units' : model.classifier.hidden_units,
'arch' : arch
}, checkpoint_path)
return
if __name__ == "__main__":
# Execute only if run as a script
main()
| 42.875 | 150 | 0.563376 |
9b9df6ece268143e49664d2ef0e6a0424879eeec | 16,294 | py | Python | models/segmentation.py | ver0z/Deformable-DETR- | f37ef3e73675ee4be1d8a0649901b1402f907f1a | ["Apache-2.0"] | 5 | 2021-12-09T02:47:24.000Z | 2022-03-08T17:37:26.000Z | models/segmentation.py | ver0z/Deformable-DETR- | f37ef3e73675ee4be1d8a0649901b1402f907f1a | ["Apache-2.0"] | null | null | null | models/segmentation.py | ver0z/Deformable-DETR- | f37ef3e73675ee4be1d8a0649901b1402f907f1a | ["Apache-2.0"] | 1 | 2022-03-31T07:15:18.000Z | 2022-03-31T07:15:18.000Z |
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
import io
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import util.box_ops as box_ops
from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
def forward(self, samples: NestedTensor):
if not isinstance(samples, NestedTensor):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.detr.backbone(samples)
bs = features[-1].tensors.shape[0]
src, mask = features[-1].decompose()
src_proj = self.detr.input_proj(src)
hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
outputs_class = self.detr.class_embed(hs)
outputs_coord = self.detr.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.detr.aux_loss:
out["aux_outputs"] = [
{"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
# FIXME h_boxes takes the last one computed, keep this in mind
bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
out["pred_masks"] = outputs_seg_masks
return out
class MaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
def expand(tensor, length):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
x = torch.cat([expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask=None):
q = self.q_linear(q)
k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
weights = self.dropout(weights)
return weights
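# Added note: the attention weights returned above have shape
# [batch, num_queries, num_heads, H, W]; in DETRsegm they are consumed by
# MaskHeadSmallConv as the per-query attention maps (bbox_mask).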
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
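# Added note: this is the soft Dice loss, 1 - (2*sum(p*t) + 1) / (sum(p) + sum(t) + 1),
# computed per box on sigmoid probabilities and averaged over num_boxes; the +1
# terms smooth the ratio for empty masks.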
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default: 0.25; a negative value disables weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
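# Minimal usage sketch (added; shapes are illustrative only):
#   logits = torch.randn(2, 100, 92)                     # raw predictions
#   labels = torch.randint(0, 2, (2, 100, 92)).float()   # binary targets
#   loss = sigmoid_focal_loss(logits, labels, num_boxes=10)
# The modulating factor (1 - p_t)**gamma down-weights well-classified examples,
# and alpha balances positive vs. negative targets.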
class PostProcessSegm(nn.Module):
def __init__(self, threshold=0.5):
super().__init__()
self.threshold = threshold
@torch.no_grad()
def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
assert len(orig_target_sizes) == len(max_target_sizes)
max_h, max_w = max_target_sizes.max(0)[0].tolist()
outputs_masks = outputs["pred_masks"].squeeze(2)
outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()
for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
img_h, img_w = t[0], t[1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
results[i]["masks"] = F.interpolate(
results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
).byte()
return results
class PostProcessPanoptic(nn.Module):
"""This class converts the output of the model to the final panoptic result, in the format expected by the
coco panoptic API """
def __init__(self, is_thing_map, threshold=0.85):
"""
Parameters:
            is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
"""
super().__init__()
self.threshold = threshold
self.is_thing_map = is_thing_map
def forward(self, outputs, processed_sizes, target_sizes=None):
""" This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, ie the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
"""
if target_sizes is None:
target_sizes = processed_sizes
assert len(processed_sizes) == len(target_sizes)
out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
assert len(out_logits) == len(raw_masks) == len(target_sizes)
preds = []
def to_tuple(tup):
if isinstance(tup, tuple):
return tup
return tuple(tup.cpu().tolist())
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
):
# we filter empty queries and detection below threshold
scores, labels = cur_logits.softmax(-1).max(-1)
keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
cur_scores = cur_scores[keep]
cur_classes = cur_classes[keep]
cur_masks = cur_masks[keep]
cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0)
cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
h, w = cur_masks.shape[-2:]
assert len(cur_boxes) == len(cur_classes)
# It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of masks ids for each stuff class (they are merged later on)
cur_masks = cur_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(cur_classes):
if not self.is_thing_map[label.item()]:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
final_h, final_w = to_tuple(target_size)
seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
np_seg_img = (
torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
)
m_id = torch.from_numpy(rgb2id(np_seg_img))
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
)
if filtered_small.any().item():
cur_scores = cur_scores[~filtered_small]
cur_classes = cur_classes[~filtered_small]
cur_masks = cur_masks[~filtered_small]
area, seg_img = get_ids_area(cur_masks, cur_scores)
else:
break
else:
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
segments_info = []
for i, a in enumerate(area):
cat = cur_classes[i].item()
segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
del cur_classes
with io.BytesIO() as out:
seg_img.save(out, format="PNG")
predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
preds.append(predictions)
return preds
| 44.037838 | 121 | 0.585185 |
d74f7474088306cf89656792e14e64cec7944e22 | 29,469 | py | Python | fudge/reactions/base.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | ["BSD-3-Clause"] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | fudge/reactions/base.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | ["BSD-3-Clause"] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | fudge/reactions/base.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | ["BSD-3-Clause"] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z |
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from pqu import PQU as PQUModule
from PoPs import IDs as IDsPoPsModule
from PoPs.groups import misc as chemicalElementMiscPoPsModule
from PoPs.families import nuclide as nuclideModule
import xData.ancestry as ancestryModule
from xData import standards as standardsModule
import fudge
from .. import outputChannel as outputChannelModule
from ..reactionData.doubleDifferentialCrossSection import doubleDifferentialCrossSection as doubleDifferentialCrossSectionModule
from ..reactionData import crossSection as crossSectionModule
from ..reactionData import availableEnergy as availableEnergyModule
from ..reactionData import availableMomentum as availableMomentumModule
__metaclass__ = type
class FissionGenre :
total = 'total'
firstChance = 'firstChance'
secondChance = 'secondChance'
thirdChance = 'thirdChance'
fourthChance = 'fourthChance'
allowed = ( None, total, firstChance, secondChance, thirdChance, fourthChance )
class base_reaction( ancestryModule.ancestry ) :
"""Base class for all types of reaction."""
ancestryMembers = ( 'crossSection', 'doubleDifferentialCrossSection', 'outputChannel' )
def __init__( self, genre, ENDF_MT, fissionGenre = None, documentation = None, label = None ) :
ancestryModule.ancestry.__init__( self )
self.__label = label
self.__doubleDifferentialCrossSection = doubleDifferentialCrossSectionModule.component( )
self.__doubleDifferentialCrossSection.setAncestor( self )
self.__crossSection = crossSectionModule.component( )
self.__crossSection.setAncestor( self )
self.__outputChannel = outputChannelModule.outputChannel( genre )
self.__outputChannel.setAncestor( self )
self.fissionGenre = fissionGenre
self.ENDF_MT = int( ENDF_MT )
self.documentation = {}
if( not( documentation is None ) ) : self.addDocumentation( documentation )
def __str__( self ) :
return( self.label )
@property
def doubleDifferentialCrossSection( self ) :
return( self.__doubleDifferentialCrossSection )
@property
def crossSection( self ) :
return( self.__crossSection )
@property
def outputChannel( self ) :
return( self.__outputChannel )
@property
def fissionGenre( self ) :
return( self.__fissionGenre )
@fissionGenre.setter
def fissionGenre( self, value ) :
if( value not in FissionGenre.allowed ) : raise Exception( 'Invalid fission genre "%s".' % value )
self.__fissionGenre = value
@property
def label( self ) :
"""Returns the reaction's label."""
if( self.__label is None ) : self.updateLabel( )
return( self.__label )
def updateLabel( self ) :
"""Sets the reaction's label from outputChannel products."""
label = self.__outputChannel.toString( MT = self.ENDF_MT )
if self.fissionGenre is not None:
if '[' in label: raise Exception('Label already contains a process: "%s"' % label)
if len(label) == 0:
label = self.fissionGenre + ' fission'
else:
label += ' [%s fission]' % self.fissionGenre
self.__label = label
def findLinks( self, links ) :
for ancestryMember in self.ancestryMembers :
if( ancestryMember in ( 'doubleDifferentialCrossSection' ) ) : continue
getattr( self, ancestryMember ).findLinks( links )
def isBasicReaction( self ) :
return( False )
def isCompleteReaction( self ):
return( False )
def isFission( self ):
return self.__fissionGenre is not None
def isThermalNeutronScatteringLaw( self ) :
for form in self.__doubleDifferentialCrossSection :
if( form.isThermalNeutronScatteringLaw( ) ) : return( True )
return( False )
def check( self, info ):
"""
This method is usually not called directly. Use reactionSuite.check() instead.
Checks cross section and outputChannel (and differential cross sections if available). Checks include:
        does Z/A balance? do Q-value and thresholds agree?
        Does the cross section domain agree with each product distribution/multiplicity domain?
        Does energy balance?
        :param info: dict
        :return: list of warnings
"""
from fudge import warning
from . import production as productionModule
from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic.CoulombPlusNuclearElastic \
import CoulombDepositionNotSupported
warnings = []
reactionSuite = self.getRootAncestor( )
def particleZA( particleID ) :
particle = reactionSuite.PoPs[particleID]
if hasattr(particle, 'id') and particle.id in reactionSuite.PoPs.aliases:
particle = reactionSuite.PoPs[ particle.pid ]
return( chemicalElementMiscPoPsModule.ZA( particle ) )
try:
# BRB6 hardwired
info['Q'] = self.getQ('eV', final=False)
except ValueError:
pass
cpcount = sum( [ ( particleZA( prod.id ) // 1000 ) > 0 for prod in self.__outputChannel ] )
info['CoulombOutputChannel'] = cpcount > 1
differentialCrossSectionWarnings = self.doubleDifferentialCrossSection.check( info )
if differentialCrossSectionWarnings:
warnings.append( warning.context("doubleDifferentialCrossSection:", differentialCrossSectionWarnings) )
crossSectionWarnings = self.crossSection.check( info )
if crossSectionWarnings:
warnings.append( warning.context("Cross section:", crossSectionWarnings) )
if 'Q' in info: del info['Q']
del info['CoulombOutputChannel']
if info['crossSectionOnly']:
return warnings # otherwise continue to check outputs
# compare calculated and listed Q-values:
if not isinstance(self, productionModule.production): # can't reliably compute Q for production reactions
try:
Q = self.getQ('eV', final=False)
Qcalc = info['availableEnergy_eV']
for prod in self.__outputChannel:
try:
Qcalc -= prod.getMass('eV/c**2') * prod.multiplicity.getConstant()
except Exception: # multiplicity is not constant
if( prod.id == IDsPoPsModule.photon ) : continue
raise ValueError("Non-constant multiplicity")
if abs(Q-Qcalc) > PQUModule.PQU(info['dQ']).getValueAs('eV'):
if self.__outputChannel.process != outputChannelModule.processes.continuum:
warnings.append( warning.Q_mismatch( PQUModule.PQU(Qcalc,'eV'), PQUModule.PQU(Q,'eV'), self ) )
except ValueError:
pass # this test only works if multiplicity and Q are both constant for all non-gamma products
if not (self.__outputChannel.genre == outputChannelModule.Genre.sumOfRemainingOutputChannels or
self.isFission( ) or isinstance( self, productionModule.production) ):
# check that ZA balances:
ZAsum = 0
for product in self.__outputChannel:
if( product.id == IDsPoPsModule.photon ) : continue
ZAsum += particleZA( product.id ) * product.multiplicity.getConstant()
if ZAsum != info['compoundZA']:
warnings.append( warning.ZAbalanceWarning( self ) )
# disabling for now: only complain if distributions are missing for transportables:
"""
if (not any( [product.distributions.components for product in self.__outputChannel] ) and not any(
[dProd.distributions.components for prod in [p for p in self.__outputChannel
if p.outputChannel is not None] for dProd in prod.outputChannel] ) ):
# no distributions found for any reaction product or subsequent decay product
warnings.append( warning.noDistributions( self ) )
return warnings """
info['crossSectionDomain'] = self.crossSection.domainMin, self.crossSection.domainMax
info['isTwoBody'] = self.__outputChannel.genre == outputChannelModule.Genre.twoBody
for product in self.__outputChannel:
productWarnings = product.check( info )
if productWarnings:
warnings.append( warning.context("Product: %s" % product.label, productWarnings) )
fissionFragmentWarnings = self.__outputChannel.fissionFragmentData.check( info )
if fissionFragmentWarnings:
warnings.append( warning.context("Fission fragment info:", fissionFragmentWarnings) )
del info['crossSectionDomain']
del info['isTwoBody']
if info['checkEnergyBalance'] and not isinstance( self, productionModule.production ):
# Calculate energy deposition data for all products, and for decay products.
# Then recursively check the product list for energy balance at each step of the reaction/decay
# At each step, if we have energy deposition for *every* product in the list, we can rigorously check
# energy balance. Otherwise, can only check that we don't exceed available energy.
try:
self.calculateAverageProductData( style=info['averageProductDataStyle'], **info['averageProductDataArgs'] )
except CoulombDepositionNotSupported:
warnings.append( warning.SkippedCoulombElasticEnergyDeposition( self ) )
except Exception as e:
warnings.append( warning.EnergyDepositionExceptionRaised( str(e), self ) )
if info['failOnException']: raise
return warnings
def checkProductsForEnergyBalance( products, Qs, fission = False, decay = False ):
# sample usage: for the reaction n + F19 -> n + (F19_c -> F19 + gamma), this function
# should be called twice, to test energy balance at each step of the reaction.
# First call: products = [n, F19_c] and Qs = [Q_reaction],
# second call: products = [n, F19, gamma] and Qs = [Q_reaction, Q_decay].
edepWarnings = []
averageProductDataLabel = info['averageProductDataStyle'].label
energyDep = []
for prod in products:
if averageProductDataLabel in prod.energyDeposition:
energyDep.append( [ prod.label, prod.energyDeposition[ averageProductDataLabel ] ] )
if energyDep:
totalEDep = energyDep[0][1]
for idx in range(1,len(energyDep)):
if( ( totalEDep.domainMin != energyDep[idx][1].domainMin ) or
( totalEDep.domainMax != energyDep[idx][1].domainMax ) ) :
upperEps = 0
if totalEDep.domainMax != energyDep[idx][1].domainMax:
upperEps = 1e-8
try:
totalEDep, energyDep[idx][1] = totalEDep.mutualify(
1e-8,upperEps,0, energyDep[idx][1], 1e-8,upperEps,0)
except Exception as e:
edepWarnings.append( warning.EnergyDepositionExceptionRaised( str(e), self ) )
if info['failOnException']: raise
return warnings
totalEDep = totalEDep + energyDep[idx][1]
else: totalEDep = []
Qsum = sum(Qs)
def energyDepositedPerProduct( energyDep, ein ):
""" if energy doesn't balance, determine how much is deposited in each product """
result = []
availableEnergy = ein + Qsum
if availableEnergy == 0: availableEnergy = sum( [edep.evaluate(ein) for p,edep in energyDep] )
for prod, edep in energyDep:
edep = edep.evaluate( ein )
if edep is None: result.append( (prod, 0) )
else: result.append( ( prod, 100.0 * edep/availableEnergy ) )
return sorted(result, key=lambda foo: foo[1])[::-1]
# now we have total energy deposition for all particles, use to check energy balance.
# a few special cases to consider:
if fission:
# fission products aren't listed (so far, anyway), so about 85% of available energy should be missing:
for i,(ein,edep) in enumerate(totalEDep):
if edep > abs((ein + Qsum) * info['fissionEnergyBalanceLimit']):
edepWarnings.append( warning.fissionEnergyImbalance( PQUModule.PQU(ein, totalEDep.axes[0].unit),
i, ein+Qsum, energyDepositedPerProduct(energyDep, ein), self ) )
elif len(products) == len(energyDep):
# have full energy dep. data for all products, so we can rigorously check energy balance:
for i,(ein,edep) in enumerate(totalEDep):
if ( abs(edep - (ein+Qsum)) > abs((ein+Qsum) * info['dEnergyBalanceRelative'])
and abs(edep - (ein+Qsum)) > PQUModule.PQU( info['dEnergyBalanceAbsolute'] )
.getValueAs(totalEDep.axes[0].unit) ):
edepWarnings.append( warning.energyImbalance( PQUModule.PQU(ein, totalEDep.axes[0].unit),
i, ein+Qsum, energyDepositedPerProduct(energyDep, ein), self ) )
else:
# missing some products, so just check that outgoing energy doesn't exceed incoming:
for i,(ein,edep) in enumerate(totalEDep):
if ( (edep - (ein+Qsum)) > ((ein+Qsum) * info['dEnergyBalanceRelative'])
and (edep - (ein+Qsum)) > PQUModule.PQU( info['dEnergyBalanceAbsolute'] )
.getValueAs(totalEDep.axes[0].unit) ):
edepWarnings.append( warning.energyImbalance( PQUModule.PQU(ein, totalEDep.axes[0].unit),
i, ein+Qsum, energyDepositedPerProduct(energyDep, ein), self ) )
if edepWarnings:
context = "Energy balance"
if decay: context += " (after decay)"
context += " for products: " + ', '.join( [prod.id for prod in products] )
warnings.append( warning.context( context, edepWarnings ) )
# now recursively check decay products, if any:
for pidx, currentProd in enumerate(products):
if currentProd.outputChannel is not None:
checkProductsForEnergyBalance(
products[:pidx] + [p for p in currentProd.outputChannel] + products[pidx+1:],
Qs + [currentProd.outputChannel.Q.getConstant()],
decay = True, fission = False # FIXME what about spontaneous fission decay?
)
# end of helper function checkProductsForEnergyBalance
try:
Q = self.__outputChannel.Q.getConstant()
checkProductsForEnergyBalance( products = [p1 for p1 in self.__outputChannel], Qs = [Q],
fission = self.isFission(), decay=False )
except ValueError:
pass # FIXME this test currently disabled when Q non-constant
return warnings
@property
def domainMin( self ) :
return( self.crossSection.domainMin )
@property
def domainMax( self ) :
return( self.crossSection.domainMax )
@property
def domainUnit( self ) :
return( self.crossSection.domainUnit )
def domainUnitConversionFactor( self, unitTo ) :
return( self.crossSection.domainUnitConversionFactor( unitTo ) )
def convertUnits( self, unitMap ) :
"""See documentation for reactionSuite.convertUnits."""
from . import reaction as reactionModule
self.__doubleDifferentialCrossSection.convertUnits( unitMap )
self.__crossSection.convertUnits( unitMap )
self.__outputChannel.convertUnits( unitMap )
if( isinstance( self, reactionModule.reaction ) ) :
self.availableEnergy.convertUnits( unitMap )
self.availableMomentum.convertUnits( unitMap )
def amendForPatch( self, fromLabel, toLabel ) :
from . import reaction as reactionModule
self.__doubleDifferentialCrossSection.amendForPatch( fromLabel, toLabel )
self.__crossSection.amendForPatch( fromLabel, toLabel )
self.__outputChannel.amendForPatch( fromLabel, toLabel )
if( isinstance( self, reactionModule.reaction ) ) :
self.availableEnergy.amendForPatch( fromLabel, toLabel )
self.availableMomentum.amendForPatch( fromLabel, toLabel )
def diff( self, other, diffResults ) :
self.__crossSection.diff( other.crossSection, diffResults )
self.__outputChannel.diff( other.outputChannel, diffResults )
def heatCrossSection( self, temperature, EMin, lowerlimit = None, upperlimit = None, interpolationAccuracy = 0.001, heatAllPoints = False,
doNotThin = True, heatBelowThreshold = True, heatAllEDomain = True, setThresholdToZero = False, verbose = 0 ) :
if( len( self.doubleDifferentialCrossSection ) > 0 ) :
if( verbose > 0 ) : print( " Skipping doubleDifferentialCrossSection reaction " )
return
crossSection = self.crossSection.heat( temperature, EMin, lowerlimit, upperlimit, interpolationAccuracy, heatAllPoints, doNotThin,
heatBelowThreshold, heatAllEDomain, setThresholdToZero = setThresholdToZero, addToSuite = True )
return( crossSection )
def addDocumentation( self, documentation ) :
self.documentation[documentation.name] = documentation
def thresholdQAs( self, unit, final = True ) :
return( self.__outputChannel.thresholdQAs( unit, final = final ) )
def getDocumentation( self, name ) :
return( self.documentation[name] )
def getQ( self, unit, final = True ) :
"""Returns the Q-value for this reaction. Converted to float if possible, otherwise a string value is returned."""
return( self.thresholdQAs( unit, final = final ) )
def getReactionSuite( self ) :
from .. import reactionSuite as reactionSuiteModule
return( self.findClassInAncestry( reactionSuiteModule.reactionSuite ) )
def cullStyles( self, styleList ) :
from . import reaction as reactionModule
# self.__doubleDifferentialCrossSection.cullStyles( styleList )
self.__crossSection.cullStyles( styleList )
self.__outputChannel.cullStyles( styleList )
if( isinstance( self, reactionModule.reaction ) ) :
self.availableEnergy.cullStyles( styleList )
self.availableMomentum.cullStyles( styleList )
def calculateAverageProductData( self, style, indent = '', **kwargs ) :
"""
Calculate average product data.
:param style: The style to use.
        :param indent: string; The indentation prefix used for verbose output.
        :param kwargs: dict; All other keyword parameters.
:return:
"""
verbosity = kwargs['verbosity']
indent2 = indent + kwargs['incrementalIndent']
if( verbosity > 0 ) : print( '%s%s' % (indent, self.__outputChannel.toString( simpleString = True, MT = self.ENDF_MT ) ) )
kwargs['reaction'] = self
kwargs['EMin'] = self.domainMin
kwargs['EMax'] = self.domainMax
self.__outputChannel.calculateAverageProductData( style, indent2, **kwargs )
if( hasattr( self, 'availableEnergy' ) ) :
axes = availableEnergyModule.defaultAxes( self.domainUnit )
QToPointwiseLinear = self.__outputChannel.QToPointwiseLinear( final = True )
availableEnergy = availableEnergyModule.XYs1d( data = [ [ QToPointwiseLinear.domainMin, QToPointwiseLinear.domainMin ], [ self.domainMax, self.domainMax ] ], axes = axes )
availableEnergy += QToPointwiseLinear
self.availableEnergy.add( availableEnergyModule.XYs1d( data = availableEnergy, axes = axes, label = style.label ) )
if( hasattr( self, 'availableMomentum' ) ) :
massInE = kwargs['projectileMass']
availableMomentum = availableMomentumModule.calculateMomentumPoints( style, massInE, self.domainMin, self.domainMax, self.domainUnit )
self.availableMomentum.add( availableMomentum )
def partialProductionIntegral( self, reaction_suite, productID, energyIn, energyOut = None, muOut = None, phiOut = None,
frame = standardsModule.frames.labToken, LegendreOrder = 0, **kwargs ) :
if( isinstance( self.crossSection[0], crossSectionModule.CoulombPlusNuclearElastic ) ) :
issueCounters = kwargs.get( 'issueCounters', None )
if( issueCounters is not None ) : issueCounter = issueCounters.get( 'partialProductionIntegral:CoulombPlusNuclearElastic', 1, True )
status = issueCounter.increment( )
if( status ) : print( ' WARNING: partialProductionIntegral: skipping elastic Coulomb reaction %s.' % self )
return( 0.0 )
crossSection = self.crossSection.evaluate( energyIn )
if( crossSection == 0.0 ) : return( 0.0 )
_partialProductionIntegral = self.__outputChannel.partialProductionIntegral( reaction_suite, productID, energyIn, energyOut = energyOut, muOut = muOut,
phiOut = phiOut, frame = frame, LegendreOrder = LegendreOrder, **kwargs )
return( crossSection * _partialProductionIntegral )
def processMC_cdf( self, style, tempInfo, indent = '', incrementalIndent = ' ' ) :
indent2 = indent + tempInfo['incrementalIndent']
verbosity = tempInfo['verbosity']
if( verbosity > 0 ) : print( '%s%s' % (indent, self.__outputChannel.toString( simpleString = True, MT = self.ENDF_MT ) ) )
self.__outputChannel.processMC_cdf( style, tempInfo, indent2 )
def processGriddedCrossSections( self, style, verbosity = 0, indent = '', incrementalIndent = ' ', isPhotoAtomic = False ) :
self.crossSection.processGriddedCrossSections( style, verbosity = verbosity, indent = indent, incrementalIndent = incrementalIndent, isPhotoAtomic = isPhotoAtomic )
def processMultiGroup( self, style, tempInfo, indent ) :
from . import reaction as reactionModule
tempInfo['workFile'].append( 'r%s' % tempInfo['reactionIndex'] )
tempInfo['transferMatrixComment'] = tempInfo['reactionSuite'].inputParticlesToReactionString( suffix = " --> " ) + self.toString( )
indent2 = indent + tempInfo['incrementalIndent']
verbosity = tempInfo['verbosity']
if( verbosity > 0 ) : print( '%s%s' % (indent, self.__outputChannel.toString( simpleString = True, MT = self.ENDF_MT ) ) )
tempInfo['reaction'] = self
norm = tempInfo['groupedFlux']
tempInfo['groupedFlux'] = None
crossSection = style.findFormMatchingDerivedStyle( self.crossSection )
# BRB FIXME The next line is a kludge, see note on crossSection.resonancesWithBackground.processMultiGroup.
if( isinstance( crossSection, crossSectionModule.reference ) ) :
crossSection = crossSection.crossSection
if( isinstance( crossSection, crossSectionModule.resonancesWithBackground ) ) :
crossSection = crossSection.ancestor['recon']
if( not( isinstance( crossSection, crossSectionModule.XYs1d ) ) ) :
crossSection = crossSection.toPointwise_withLinearXYs( accuracy = 1e-5, upperEps = 1e-8 )
tempInfo['crossSection'] = crossSection
tempInfo['multiGroupCrossSection'] = self.crossSection.processMultiGroup( style, tempInfo, indent2 )
self.crossSection.remove( style.label ) # Not normalized by tempInfo['groupedFlux'] so remove.
tempInfo['groupedFlux'] = norm
tempInfo['multiGroupCrossSectionNormed'] = self.crossSection.processMultiGroup( style, tempInfo, indent2 ) # Normalized by tempInfo['groupedFlux'].
if( isinstance( self, reactionModule.reaction ) ) :
self.availableEnergy.processMultiGroup( style, tempInfo, indent2 )
self.availableMomentum.processMultiGroup( style, tempInfo, indent2 )
self.__outputChannel.processMultiGroup( style, tempInfo, indent2 )
del tempInfo['workFile'][-1]
def removeStyles( self, styleLabels ) :
from . import reaction as reactionModule
self.__doubleDifferentialCrossSection.removeStyles( styleLabels )
self.__crossSection.removeStyles( styleLabels )
self.__outputChannel.removeStyles( styleLabels )
if( isinstance( self, reactionModule.reaction ) ) :
self.availableEnergy.removeStyles( styleLabels )
self.availableMomentum.removeStyles( styleLabels )
def toString( self, indent = '' ) :
return( self.__outputChannel.toString( indent = indent, MT = self.ENDF_MT ) )
def toXML( self, indent = '', **kwargs ) :
return( '\n'.join( self.toXMLList( indent, **kwargs ) ) )
def toXMLList( self, indent = '', **kwargs ) :
from . import reaction as reactionModule
incrementalIndent = kwargs.get( 'incrementalIndent', ' ' )
indent2 = indent + incrementalIndent
indent3 = indent2 + incrementalIndent
attributeString = ""
attributeString += ' ENDF_MT="%s"' % self.ENDF_MT
if( self.fissionGenre is not None ) : attributeString += ' fissionGenre="%s"' % str( self.fissionGenre )
xmlString = [ '%s<%s label="%s"' % ( indent, self.moniker, self.label ) ]
xmlString[-1] += attributeString + '>'
if self.documentation:
# BRB6 What is this
xmlString.append( '%s<documentations>' % indent2 )
for doc in self.documentation: xmlString += self.documentation[doc].toXMLList( indent3, **kwargs )
xmlString[-1] += '</documentations>'
xmlString += self.__doubleDifferentialCrossSection.toXMLList( indent2, **kwargs )
xmlString += self.__crossSection.toXMLList( indent2, **kwargs )
xmlString += self.__outputChannel.toXMLList( indent2, **kwargs )
if( isinstance( self, reactionModule.reaction ) ) :
xmlString += self.availableEnergy.toXMLList( indent2, **kwargs )
xmlString += self.availableMomentum.toXMLList( indent2, **kwargs )
xmlString[-1] += '</%s>' % self.moniker
return xmlString
def parseNode( self, node, xPath, linkData, **kwargs ) :
xPath.append( node.tag )
if( node.find( 'documentations' ) ) :
for doc in node.find( 'documentations' ) :
self.addDocumentation(fudge.documentation.documentation.parseXMLNode(doc, xPath, linkData))
self.doubleDifferentialCrossSection.parseXMLNode( node.find( doubleDifferentialCrossSectionModule.component.moniker ), xPath, linkData )
self.crossSection.parseXMLNode( node.find( crossSectionModule.component.moniker ), xPath, linkData )
if( node.find( outputChannelModule.outputChannel.moniker ) ) :
self.outputChannel.parseXMLNode( node.find( outputChannelModule.outputChannel.moniker ), xPath, linkData )
if( node.find( availableEnergyModule.component.moniker ) ) :
self.availableEnergy = availableEnergyModule.component( )
self.availableEnergy.setAncestor( self )
self.availableEnergy.parseXMLNode( node.find( availableEnergyModule.component.moniker ), xPath, linkData )
if( node.find( availableMomentumModule.component.moniker ) ) :
self.availableMomentum = availableMomentumModule.component( )
self.availableMomentum.setAncestor( self )
self.availableMomentum.parseXMLNode( node.find( availableMomentumModule.component.moniker ), xPath, linkData )
xPath.pop( )
@classmethod
def parseXMLNode( cls, element, xPath, linkData ) :
"""Translate a <reaction> element from xml into a reaction class instance."""
xPath.append( '%s[@label="%s"]' % ( element.tag, element.get( 'label' ) ) )
reaction = cls( outputChannelModule.Genre.NBody, label = element.get( 'label' ), ENDF_MT = int( element.get( 'ENDF_MT' ) ),
fissionGenre = element.get( 'fissionGenre' ) )
xPath.pop( )
reaction.parseNode( element, xPath, linkData )
return( reaction )
def isGNDSReaction( o ) :
"""Returns True if o is an instance of base_reaction or of a subclass thereof. """
return isinstance(o, base_reaction)
| 46.925159
| 183
| 0.63521
|
3c4b16c98b97ff465b3b22fd588c426a7d37e754
| 1,572
|
py
|
Python
|
app.py
|
andyalky/facebook-mailchimp-webhook
|
337a1fbafb5bf45a5cf05f31cb7c2d2771ee5da3
|
[
"MIT"
] | 2
|
2017-04-05T00:40:46.000Z
|
2018-08-14T18:06:27.000Z
|
app.py
|
andyalky/facebook-mailchimp-webhook
|
337a1fbafb5bf45a5cf05f31cb7c2d2771ee5da3
|
[
"MIT"
] | null | null | null |
app.py
|
andyalky/facebook-mailchimp-webhook
|
337a1fbafb5bf45a5cf05f31cb7c2d2771ee5da3
|
[
"MIT"
] | 2
|
2019-12-22T19:17:53.000Z
|
2021-02-06T20:57:49.000Z
|
from flask import Flask, request
from facebookads.adobjects.lead import Lead
from facebookads.api import FacebookAdsApi
import mailchimp
import os
import json
FACEBOOK_APP_ID = os.environ.get('FACEBOOK_APP_ID')
FACEBOOK_APP_SECRET = os.environ.get('FACEBOOK_APP_SECRET')
FACEBOOK_ACCESS_TOKEN = os.environ.get('FACEBOOK_ACCESS_TOKEN')
FB_VERIFY_TOKEN = os.environ.get('FB_VERIFY_TOKEN')
MAILCHIMP_API_KEY = os.environ.get('MAILCHIMP_API_KEY')
MAILCHIMP_LIST_ID = os.environ.get('MAILCHIMP_LIST_ID')
app = Flask(__name__)
def processLead(lead_data):
subscriber_info = {}
for fields in lead_data['field_data']:
subscriber_info[fields['name']] = fields['values'][0]
mailchimp_api = mailchimp.Mailchimp(MAILCHIMP_API_KEY)
mailchimp_api.lists.subscribe(MAILCHIMP_LIST_ID, subscriber_info)
@app.route('/')
def index():
return "Hello"
@app.route('/webhook/', methods=['GET', 'POST'])
def webhook():
if request.method == 'GET':
#https://developers.facebook.com/docs/graph-api/webhooks#setupget
if request.args.get('hub.verify_token') == FB_VERIFY_TOKEN:
return request.args.get('hub.challenge')
else:
return "Token Verification Failed"
else:
FacebookAdsApi.init(FACEBOOK_APP_ID, FACEBOOK_APP_SECRET, FACEBOOK_ACCESS_TOKEN)
leadgen_info = json.loads(request.data)
lead_id = leadgen_info['entry'][0]['changes'][0]['value']['leadgen_id']
lead = Lead(lead_id)
lead_data = lead.remote_read()
processLead(lead_data)
return "Success"
| 30.230769
| 88
| 0.714377
|
90577dba9639c37391de3e2064fe2eea23be814e
| 4,308
|
py
|
Python
|
var/spack/repos/builtin/packages/libfuse/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-10-04T20:05:45.000Z
|
2021-10-04T20:05:45.000Z
|
var/spack/repos/builtin/packages/libfuse/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 23
|
2021-01-25T15:13:45.000Z
|
2022-03-28T20:19:04.000Z
|
var/spack/repos/builtin/packages/libfuse/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7
|
2018-09-13T18:04:56.000Z
|
2020-03-18T20:52:06.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack import *
class Libfuse(MesonPackage):
"""The reference implementation of the Linux FUSE (Filesystem in
Userspace) interface"""
homepage = "https://github.com/libfuse/libfuse"
url = "https://github.com/libfuse/libfuse/releases/download/fuse-2.9.9/fuse-2.9.9.tar.gz"
version('3.10.5', sha256='e73f75e58da59a0e333d337c105093c496c0fd7356ef3a5a540f560697c9c4e6')
version('3.10.4', sha256='bfcb2520fd83db29e9fefd57d3abd5285f38ad484739aeee8e03fbec9b2d984a')
version('3.10.3', sha256='c32527782cef620df58b162aa29901d1fb13253b029375d5860a2253a810344e')
version('3.10.2', sha256='a16f93cc083264afd0d2958a0dc88f24c6c5d40a9f3842c645b1909e13edb75f')
version('3.10.1', sha256='d8954e7b4c022c651aa80db3bb4a161437dd285cd5f1a23d0e25f055dcebe00d')
version('3.10.0', sha256='52bbb52035f7eeaa54d139e21805d357f848f6e02ac956831d04988165a92c7b')
version('3.9.4', sha256='9e076ae757a09cac9ce1beb50b3361ae83a831e5abc0f1bf5cdf771cd1320338')
version('3.9.3', sha256='0f8f7ad9cc6667c6751efa425dd0a665dcc9d75f0b7fc0cb5b85141a514110e9')
version('3.9.2', sha256='b4409255cbda6f6975ca330f5b04cb335b823a95ddd8c812c3d224ec53478fc0')
version('2.9.9', sha256='d0e69d5d608cc22ff4843791ad097f554dd32540ddc9bed7638cc6fea7c1b4b5')
def url_for_version(self, version):
if version < Version("3.0.0"):
return "https://github.com/libfuse/libfuse/releases/download/fuse-{0}/fuse-{1}.tar.gz".format(version, version)
return "https://github.com/libfuse/libfuse/archive/refs/tags/fuse-{0}.tar.gz".format(version)
variant('useroot', default=False, description="Use root privileges to make fusermount a setuid binary after installation")
variant('system_install', default=False, description=(
"Do not run the post-install script "
"which typically sets up udev rules and "
"and init script in /etc/init.d"))
provides('fuse')
conflicts("+useroot", when='~system_install', msg="useroot requires system_install")
conflicts('platform=darwin', msg='libfuse does not support OS-X, use macfuse instead')
# Drops the install script which does system configuration
patch('0001-Do-not-run-install-script.patch', when='@3: ~system_install')
patch('https://src.fedoraproject.org/rpms/fuse3/raw/0519b7bf17c4dd1b31ee704d49f8ed94aa5ba6ab/f/fuse3-gcc11.patch', sha256='3ad6719d2393b46615b5787e71778917a7a6aaa189ba3c3e0fc16d110a8414ec', when='@3: %gcc@11:')
executables = ['^fusermount3?$']
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
match = re.search(r'^fusermount.*version: (\S+)', output)
return match.group(1) if match else None
def meson_args(self):
args = []
if '+useroot' in self.spec:
args.append('-Duseroot=true')
else:
args.append('-Duseroot=false')
if '~system_install' in self.spec:
# Fix meson's setup if meson does not have the host system's udev package:
args.append('-Dudevrulesdir={0}'.format(self.prefix.etc.rules.d))
else:
# Likewise, but with +system_install, it may install to /lib/udev/rules.d:
args.append('-Dudevrulesdir={0}'.format('/lib/udev/rules.d'))
return args
# Before libfuse 3.x this was an autotools package
@when('@:2')
def meson(self, spec, prefix):
args = [
"--prefix={0}".format(prefix),
"MOUNT_FUSE_PATH={0}".format(self.prefix.sbin),
"UDEV_RULES_PATH={0}".format(self.prefix.etc),
"INIT_D_PATH={0}".format(self.prefix.etc),
]
args.append('--enable-static' if 'default_library=static' in self.spec
else '--disable-static')
args.append('--enable-shared' if 'default_library=shared' in self.spec
else '--disable-shared')
configure(*args)
@when('@:2')
def build(self, spec, prefix):
make()
@when('@:2')
def install(self, spec, prefix):
make("install")
| 44.412371
| 214
| 0.687558
|
48a0001243f60c6548532eef60105210ad82b634
| 1,422
|
py
|
Python
|
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20200706/StartLoggingRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20200706/StartLoggingRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20200706/StartLoggingRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkactiontrail.endpoint import endpoint_data
class StartLoggingRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Actiontrail', '2020-07-06', 'StartLogging','actiontrail')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
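# Illustrative sketch: sending this request with the core SDK client. The access key,
# secret, region and trail name below are placeholders, not real values.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = StartLoggingRequest()
    request.set_Name('<trail-name>')
    print(client.do_action_with_exception(request))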
| 37.421053
| 87
| 0.76512
|
1db9666fbae42216a03e01a6bcce5fb526c9eeda
| 63
|
py
|
Python
|
commands.py
|
timurdyutin/udarenie
|
0f388b8f88b799b8d462a0cc3320beb82d8597c4
|
[
"MIT"
] | null | null | null |
commands.py
|
timurdyutin/udarenie
|
0f388b8f88b799b8d462a0cc3320beb82d8597c4
|
[
"MIT"
] | null | null | null |
commands.py
|
timurdyutin/udarenie
|
0f388b8f88b799b8d462a0cc3320beb82d8597c4
|
[
"MIT"
] | null | null | null |
availableCommands = ["Начать", "Начать игру", "Выйти из игры"]
| 31.5
| 62
| 0.698413
|
3f2f1969b784a9354e5a905ddf12411e2e206355
| 1,692
|
py
|
Python
|
tests.py
|
bmacphee/wordle
|
de4d5c96680f09a7e023f9f67e70b930f957eb5f
|
[
"MIT"
] | null | null | null |
tests.py
|
bmacphee/wordle
|
de4d5c96680f09a7e023f9f67e70b930f957eb5f
|
[
"MIT"
] | null | null | null |
tests.py
|
bmacphee/wordle
|
de4d5c96680f09a7e023f9f67e70b930f957eb5f
|
[
"MIT"
] | null | null | null |
from pguess import Guess
from server import Color, compute_result
def test_a():
result = compute_result('index', 'steep')
assert (Color.BLACK, Color.BLACK, Color.BLACK, Color.GREEN, Color.BLACK) == result
def test_b():
result = compute_result('index', 'panda')
assert (Color.BLACK, Color.BLACK, Color.ORANGE, Color.ORANGE, Color.BLACK) == result
def test_c():
result = compute_result('abbey', 'about')
assert (Color.GREEN, Color.GREEN, Color.BLACK, Color.BLACK, Color.BLACK) == result
def test_d():
result = compute_result('crust','trust')
assert (Color.BLACK, Color.GREEN, Color.GREEN, Color.GREEN, Color.GREEN) == result
def test_e():
result = compute_result('trust', 'crust')
assert (Color.BLACK, Color.GREEN, Color.GREEN, Color.GREEN, Color.GREEN) == result
def test_f():
result = compute_result('trust', 'outdo')
assert (Color.BLACK, Color.ORANGE, Color.ORANGE, Color.BLACK, Color.BLACK) == result
def test_g():
result = compute_result('batty', 'treat')
assert (Color.ORANGE, Color.BLACK, Color.BLACK, Color.ORANGE, Color.ORANGE) == result
def test_h():
result = compute_result('abyss', 'hissy')
assert (Color.BLACK, Color.BLACK, Color.ORANGE, Color.GREEN, Color.ORANGE) == result
def test_i():
result = compute_result('bloat', 'abbey')
assert (Color.ORANGE, Color.ORANGE, Color.BLACK, Color.BLACK, Color.BLACK) == result
def test_j():
possible = ['waste', 'asset', 'beset', 'roate', 'raise']
g = Guess(possible, possible)
g.next_guess = 'sissy'
g.notify_result((Color.BLACK, Color.BLACK, Color.GREEN, Color.BLACK, Color.BLACK))
assert g.possible_words == {'waste', 'beset'}
| 30.214286
| 89
| 0.68026
|
7e06f640274a5941730f8a2dcdefec0e0e7e6451
| 639
|
py
|
Python
|
indico_hub/crawler.py
|
hassanTiger11/indico-hub
|
25c57673a56fac5b5a61814102b8543057df7640
|
[
"MIT"
] | null | null | null |
indico_hub/crawler.py
|
hassanTiger11/indico-hub
|
25c57673a56fac5b5a61814102b8543057df7640
|
[
"MIT"
] | null | null | null |
indico_hub/crawler.py
|
hassanTiger11/indico-hub
|
25c57673a56fac5b5a61814102b8543057df7640
|
[
"MIT"
] | null | null | null |
import socket
from urllib.parse import urlparse
from .db import db
from .es_conf import es, index_name
def geolocate(instance):
url = urlparse(instance.url).hostname
try:
ip_address = socket.gethostbyname(url)
except socket.gaierror:
return
new_doc = es.get(index=index_name, id=instance.uuid)['_source']
new_doc['ip'] = ip_address
es.index(index=index_name, id=instance.uuid, body=new_doc, pipeline='geoip')
doc = es.get(index=index_name, id=instance.uuid)['_source']
instance.geolocation = doc['geoip']
db.session.commit()
return f"reside at {doc['geoip']['continent_name']}"
| 27.782609
| 80
| 0.696401
|
a001192f4aafe78ef8da2541eab79bbf1bf2816e
| 512
|
py
|
Python
|
SfM/Traditional/BuildVisibilityMatrix.py
|
akathpal/UMD-CMSC733-ComputerVision
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | 1
|
2022-03-30T05:03:10.000Z
|
2022-03-30T05:03:10.000Z
|
SfM/Traditional/BuildVisibilityMatrix.py
|
akathpal/UMD-CMSC733-ComputerVision
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | null | null | null |
SfM/Traditional/BuildVisibilityMatrix.py
|
akathpal/UMD-CMSC733-ComputerVision
|
f5fa21a0ada8ab8ea08a6c558f6df9676570a2df
|
[
"MIT"
] | 1
|
2022-03-30T05:03:09.000Z
|
2022-03-30T05:03:09.000Z
|
""" File to return Visibility matrix
"""
import sys
sys.dont_write_bytecode = True
def BuildVisibilityMatrix(Visibility, r_indx, print_enable=False):
"""To return Visibility matrix element
Args:
Visibility (array): Visibility Matrix
        r_indx (int or array): Index of the image(s) whose column(s) to select
print_enable (bool, optional): To print the returning element
Returns:
        array: The selected column(s) of the visibility matrix
"""
if (print_enable):
print(Visibility[:, r_indx])
return Visibility[:, r_indx]
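# Illustrative sketch: the same column selection on a small made-up visibility matrix
# (rows are 3D points, columns are images); the values are arbitrary.
if __name__ == '__main__':
    import numpy as np
    V = np.array([[1, 0, 1],
                  [1, 1, 0]])
    print(BuildVisibilityMatrix(V, r_indx=[0, 2]))  # columns of V for images 0 and 2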
| 22.26087
| 69
| 0.671875
|
f2e70341d975fb06bce7f2ce6cba7d8c3bc9826c
| 13,431
|
py
|
Python
|
tensorflow/python/eager/ops_test.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 24
|
2018-02-01T15:49:22.000Z
|
2021-01-11T16:31:18.000Z
|
tensorflow/python/eager/ops_test.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 2
|
2017-08-01T21:11:06.000Z
|
2017-08-01T23:07:02.000Z
|
tensorflow/python/eager/ops_test.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 4
|
2018-10-29T18:43:22.000Z
|
2020-09-28T07:19:52.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operations in eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
class OpsTest(test_util.TensorFlowTestCase):
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = math_ops.matmul(three, five)
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
three = constant_op.constant(3.0)
checked_three = array_ops.check_numerics(three,
message='just checking')
self.assertEqual([[3]], checked_three.numpy())
def testExecuteFloatAttr(self):
three = constant_op.constant(3.0)
almost_three = constant_op.constant(2.8)
almost_equal = math_ops.approximate_equal(
three, almost_three, tolerance=0.3)
self.assertTrue(almost_equal)
def testExecuteIntAttr(self):
three = constant_op.constant(3)
four = constant_op.constant(4)
total = math_ops.add_n([three, four])
self.assertAllEqual(7, total)
def testExecuteBoolAttr(self):
three = constant_op.constant([[3]])
five = constant_op.constant([[5]])
product = math_ops.matmul(three, five, transpose_a=True)
self.assertAllEqual([[15]], product)
def testExecuteOneListOutput(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testGraphMode(self):
graph = ops.Graph()
with graph.as_default(), context.graph_mode():
array_ops.placeholder(dtypes.int32)
self.assertEqual(1, len(graph.get_operations()))
# See comments on handling of int32 tensors on GPU in
# EagerTensor.__init__.
def testInt32CPUDefault(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
r = constant_op.constant(1) + constant_op.constant(2)
self.assertAllEqual(r, 3)
def testExecuteListOutputLen1(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen0(self):
empty = constant_op.constant([], dtype=dtypes.int32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteMultipleNonListOutput(self):
x = constant_op.constant([1, 2, 3, 4, 5, 6])
y = constant_op.constant([1, 3, 5])
result = array_ops.listdiff(x, y)
out, idx = result
self.assertTrue(out is result.out)
self.assertTrue(idx is result.idx)
self.assertAllEqual([2, 4, 6], out)
self.assertAllEqual([1, 3, 5], idx)
def testExecuteMultipleListOutput(self):
split_dim = constant_op.constant(1, dtype=dtypes.int64)
indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
dtype=dtypes.int64)
values = constant_op.constant([2, 3, 5, 7, 11])
shape = constant_op.constant([2, 7], dtype=dtypes.int64)
result = sparse_ops.gen_sparse_ops._sparse_split( # pylint: disable=protected-access
split_dim, indices, values, shape, num_split=2)
output_indices, output_values, output_shape = result
self.assertEqual(2, len(output_indices))
self.assertEqual(2, len(output_values))
self.assertEqual(2, len(output_shape))
self.assertEqual(output_indices, result.output_indices)
self.assertEqual(output_values, result.output_values)
self.assertEqual(output_shape, result.output_shape)
self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])
self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])
self.assertAllEqual([2, 7, 11], output_values[0])
self.assertAllEqual([3, 5], output_values[1])
self.assertAllEqual([2, 4], output_shape[0])
self.assertAllEqual([2, 3], output_shape[1])
# TODO(josh11b): Test an op that has multiple outputs, some but not
# all of which are lists. Examples: barrier_take_many (currently
# unsupported since it uses a type list) or sdca_optimizer (I don't
# have an example of legal inputs & outputs).
def testComposition(self):
x = constant_op.constant(1, dtype=dtypes.int32)
three_x = x + x + x
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperatorOverrides(self):
# TODO(henrytan): test with negative number.
a = constant_op.constant([1])
b = constant_op.constant([2])
self.assertAllEqual((-a), [-1])
self.assertAllEqual(abs(b), [2])
self.assertAllEqual((a + b), [3])
self.assertAllEqual((a - b), [-1])
self.assertAllEqual((a * b), [2])
self.assertAllEqual((a * a), [1])
self.assertAllEqual((a**b), [1])
self.assertAllEqual((a / b), [1 / 2])
self.assertAllEqual((a / a), [1])
self.assertAllEqual((a % b), [1])
self.assertAllEqual((a < b), [True])
self.assertAllEqual((a <= b), [True])
self.assertAllEqual((a > b), [False])
self.assertAllEqual((a >= b), [False])
self.assertAllEqual((a == b), False)
self.assertAllEqual((a != b), True)
self.assertAllEqual(1, a[constant_op.constant(0)])
def test_basic_slice(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :], t[:, :, :])
self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])
self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])
self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])
self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])
self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])
self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])
self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])
self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])
def testDegenerateSlices(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testEllipsis(self):
npt = np.array(
[[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[0:], t[0:])
# implicit ellipsis
self.assertAllEqual(npt[0:, ...], t[0:, ...])
# ellipsis alone
self.assertAllEqual(npt[...], t[...])
# ellipsis at end
self.assertAllEqual(npt[0:1, ...], t[0:1, ...])
# ellipsis at begin
self.assertAllEqual(npt[..., 0:1], t[..., 0:1])
# ellipsis at middle
self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])
def testShrink(self):
npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])
self.assertAllEqual(npt[..., 3], t[..., 3])
self.assertAllEqual(npt[:, 0], t[:, 0])
self.assertAllEqual(npt[:, :, 0], t[:, :, 0])
def testOpWithInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The GPU kernel for the Reshape op requires that the
# shape input be on CPU.
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = array_ops.reshape(value, shape)
self.assertAllEqual([[1], [2]], reshaped.cpu())
def testInt64(self):
# Fill requires the first input to be an int32 tensor.
self.assertAllEqual(
[1.0, 1.0],
array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),
constant_op.constant(1)))
def testOutputOnHostMemory(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Shape op kernel on GPU places the output in host memory.
value = constant_op.constant([1.]).gpu()
shape = array_ops.shape(value)
self.assertEqual([1], shape.numpy())
def testSilentCopy(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
try:
context._context = context.Context(
device_policy=context.DEVICE_PLACEMENT_SILENT)
cpu_tensor = constant_op.constant(1.0)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testRandomUniform(self):
scalar_shape = constant_op.constant([], dtype=dtypes.int32)
x = random_ops.random_uniform(scalar_shape)
self.assertEquals(0, x.shape.ndims)
self.assertEquals(dtypes.float32, x.dtype)
x = random_ops.random_uniform(
scalar_shape, minval=constant_op.constant(5.),
maxval=constant_op.constant(6.))
self.assertLess(x, 6)
self.assertGreaterEqual(x, 5)
def testArgsToMatchingEagerDefault(self):
# Uses default
ctx = context.context()
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)
self.assertEquals(t, dtypes.int32)
self.assertEquals(r[0].dtype, dtypes.int32)
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)
self.assertEquals(t, dtypes.int64)
self.assertEquals(r[0].dtype, dtypes.int64)
# Doesn't use default
t, r = execute.args_to_matching_eager(
[['string', 'arg']], ctx, dtypes.int32)
self.assertEquals(t, dtypes.string)
self.assertEquals(r[0].dtype, dtypes.string)
def testFlattenLayer(self):
flatten_layer = core.Flatten()
x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])
y = flatten_layer(x)
self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)
def testIdentity(self):
self.assertAllEqual(2, array_ops.identity(2))
def testIdentityOnVariable(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
v = resource_variable_ops.ResourceVariable(True)
self.assertAllEqual(True, array_ops.identity(v))
def testIncompatibleSetShape(self):
x = constant_op.constant(1)
with self.assertRaises(ValueError):
x.set_shape((1, 2))
def testCompatibleSetShape(self):
x = constant_op.constant([[1, 2]])
x.set_shape(tensor_shape.TensorShape([None, 2]))
self.assertEqual(x.get_shape(), (1, 2))
def testCastScalarToPrimitiveTypes(self):
x = constant_op.constant(1.3)
self.assertIsInstance(int(x), int)
self.assertEqual(int(x), 1)
self.assertIsInstance(float(x), float)
self.assertAllClose(float(x), 1.3)
def testCastNonScalarToPrimitiveTypesFails(self):
x = constant_op.constant([1.3, 2])
with self.assertRaises(TypeError):
int(x)
with self.assertRaises(TypeError):
float(x)
def testFormatString(self):
x = constant_op.constant(3.1415)
self.assertEqual('3.14', '{:.2f}'.format(x))
def testNoOpIsNone(self):
self.assertTrue(control_flow_ops.no_op() is None)
if __name__ == '__main__':
test.main()
| 37.51676
| 89
| 0.657509
|
5252e26508a94414499885601fbdadfe3c75c57f
| 636
|
py
|
Python
|
tests/messages/message_interface01.py
|
pnarvor/nephelae_paparazzi
|
1c000444c39b342e90f39f432737ef06be762f56
|
[
"BSD-3-Clause"
] | null | null | null |
tests/messages/message_interface01.py
|
pnarvor/nephelae_paparazzi
|
1c000444c39b342e90f39f432737ef06be762f56
|
[
"BSD-3-Clause"
] | null | null | null |
tests/messages/message_interface01.py
|
pnarvor/nephelae_paparazzi
|
1c000444c39b342e90f39f432737ef06be762f56
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/python3
import signal
from nephelae_paparazzi import MessageInterface
messageInterface = MessageInterface()
def callback(msg):
# print(msg)
print(msg.fieldvalues)
# bindId = messageInterface.bind(callback, '(.* GPS .*)')
# bindId = messageInterface.bind(callback, '(.* FLIGHT_PARAM .*)')
# bindId = messageInterface.bind(callback, '(.* NAVIGATION_REF .*)')
# bindId = messageInterface.bind(callback, '(.* AP_STATUS .*)')
# bindId = messageInterface.bind(callback, '(.* NAV_STATUS .*)')
# bindId = messageInterface.bind(callback, '(.* MISSION_STATUS .*)')
bindId = messageInterface.bind(callback, '(.* BAT .*)')
| 31.8
| 68
| 0.699686
|
77d7e366aaa58a6189406ee2aca72fa98c61b2c4
| 2,736
|
py
|
Python
|
ucsmsdk/mometa/aaa/AaaSshAuth.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/aaa/AaaSshAuth.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/aaa/AaaSshAuth.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for AaaSshAuth ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AaaSshAuthConsts:
OLD_STR_TYPE_KEY = "key"
OLD_STR_TYPE_NONE = "none"
STR_TYPE_KEY = "key"
STR_TYPE_NONE = "none"
class AaaSshAuth(ManagedObject):
"""This is AaaSshAuth class."""
consts = AaaSshAuthConsts()
naming_props = set([])
mo_meta = MoMeta("AaaSshAuth", "aaaSshAuth", "sshauth", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["aaa", "admin"], [u'aaaUser'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"data": MoPropertyMeta("data", "data", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[\n\r \+\-\./=@_a-zA-Z0-9]{0,16384}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"old_str_type": MoPropertyMeta("old_str_type", "oldStrType", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["key", "none"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"str_type": MoPropertyMeta("str_type", "strType", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["key", "none"], []),
}
prop_map = {
"childAction": "child_action",
"data": "data",
"dn": "dn",
"oldStrType": "old_str_type",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"strType": "str_type",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.data = None
self.old_str_type = None
self.sacl = None
self.status = None
self.str_type = None
ManagedObject.__init__(self, "AaaSshAuth", parent_mo_or_dn, **kwargs)
| 49.745455
| 248
| 0.635599
|
8a2258cf6eb652b98bcf86c60fad11535e253e5d
| 2,318
|
py
|
Python
|
tests/modules/extra/rabbitmq/mother/rabbitmq_message_consumer_mother.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 19
|
2019-11-01T09:27:17.000Z
|
2021-12-15T10:52:31.000Z
|
tests/modules/extra/rabbitmq/mother/rabbitmq_message_consumer_mother.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 68
|
2020-01-15T06:55:00.000Z
|
2022-02-22T15:57:24.000Z
|
tests/modules/extra/rabbitmq/mother/rabbitmq_message_consumer_mother.py
|
alice-biometrics/petisco
|
b96e697cc875f67a28e60b4fc0d9ed9fc646cd86
|
[
"MIT"
] | 2
|
2019-11-19T10:40:25.000Z
|
2019-11-28T07:12:07.000Z
|
from petisco.base.domain.message.chaos.message_chaos import MessageChaos
from petisco.extra.rabbitmq import RabbitMqConnector, RabbitMqMessageConsumer
from petisco.legacy.logger.interface_logger import ILogger
from petisco.legacy.logger.not_implemented_logger import NotImplementedLogger
from tests.modules.extra.rabbitmq.mother.defaults import (
DEFAULT_MAX_RETRIES,
DEFAULT_ORGANIZATION,
DEFAULT_SERVICE,
DEFAULT_VERBOSE,
)
class RabbitMqMessageConsumerMother:
@staticmethod
def default(connector: RabbitMqConnector = None):
connector = RabbitMqConnector() if not connector else connector
return RabbitMqMessageConsumer(
DEFAULT_ORGANIZATION,
DEFAULT_SERVICE,
DEFAULT_MAX_RETRIES,
connector,
DEFAULT_VERBOSE,
)
@staticmethod
def with_service(service: str, connector: RabbitMqConnector = None):
connector = RabbitMqConnector() if not connector else connector
return RabbitMqMessageConsumer(
DEFAULT_ORGANIZATION,
service,
DEFAULT_MAX_RETRIES,
connector,
DEFAULT_VERBOSE,
)
@staticmethod
def with_max_retries(max_retries: int, connector: RabbitMqConnector = None):
connector = RabbitMqConnector() if not connector else connector
return RabbitMqMessageConsumer(
DEFAULT_ORGANIZATION,
DEFAULT_SERVICE,
max_retries,
connector,
DEFAULT_VERBOSE,
)
@staticmethod
def without_retry(connector: RabbitMqConnector = None):
connector = RabbitMqConnector() if not connector else connector
return RabbitMqMessageConsumer(
DEFAULT_ORGANIZATION, DEFAULT_SERVICE, 0, connector, DEFAULT_VERBOSE
)
@staticmethod
def with_chaos(
chaos: MessageChaos,
max_retries: int,
logger: ILogger = NotImplementedLogger(),
connector: RabbitMqConnector = None,
):
connector = RabbitMqConnector() if not connector else connector
return RabbitMqMessageConsumer(
DEFAULT_ORGANIZATION,
DEFAULT_SERVICE,
max_retries,
connector,
DEFAULT_VERBOSE,
chaos,
logger,
)
| 32.647887
| 80
| 0.668248
|
9fbfa28c4478776554b95ad5bbc86cf8879752eb
| 769
|
py
|
Python
|
ymir/backend/src/ymir_app/tests/api/test_upload.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 64
|
2021-11-15T03:48:00.000Z
|
2022-03-25T07:08:46.000Z
|
ymir/backend/src/ymir_app/tests/api/test_upload.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 35
|
2021-11-23T04:14:35.000Z
|
2022-03-26T09:03:43.000Z
|
ymir/backend/src/ymir_app/tests/api/test_upload.py
|
Aryalfrat/ymir
|
d4617ed00ef67a77ab4e1944763f608bface4be6
|
[
"Apache-2.0"
] | 57
|
2021-11-11T10:15:40.000Z
|
2022-03-29T07:27:54.000Z
|
from pathlib import Path
from typing import Union
from fastapi.testclient import TestClient
from pydantic import AnyHttpUrl, BaseModel
from app.config import settings
class Resp(BaseModel):
code: int
message: str
result: Union[AnyHttpUrl, str]
def test_upload_file(client: TestClient, tmp_path, normal_user_token_headers) -> None:
p = tmp_path / "uploaded_stuff.doc"
with open(p, "wb") as tmp:
tmp.write(b"content")
with open(p, "rb") as tmp:
files = {"file": tmp}
r = client.post(
f"{settings.API_V1_STR}/uploadfile/",
headers=normal_user_token_headers,
files=files,
)
assert r.status_code == 200
j = r.json()
Resp.validate(j)
Path(j["result"]).unlink()
| 24.806452
| 86
| 0.648895
|
c1dd26958bb7311d6822b60a3e3e185e002fbc38
| 4,130
|
py
|
Python
|
SocialNPHS/language/tweet.py
|
controversial/SocialNPHS
|
ec2d51e7801e62be68b8ed712b1f33ad58a0404c
|
[
"MIT"
] | 1
|
2016-07-23T19:39:46.000Z
|
2016-07-23T19:39:46.000Z
|
SocialNPHS/language/tweet.py
|
controversial/SocialNPHS
|
ec2d51e7801e62be68b8ed712b1f33ad58a0404c
|
[
"MIT"
] | 4
|
2016-09-14T23:27:06.000Z
|
2016-12-17T19:47:11.000Z
|
SocialNPHS/language/tweet.py
|
controversial/SocialNPHS
|
ec2d51e7801e62be68b8ed712b1f33ad58a0404c
|
[
"MIT"
] | 2
|
2016-08-10T12:27:30.000Z
|
2019-06-27T17:00:53.000Z
|
"""
Given a tweet, tokenize it and shit.
"""
import nltk
from nltk.tag import util
from nltk.tokenize import sent_tokenize, TweetTokenizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from SocialNPHS.sources.twitter.auth import api
from SocialNPHS.sources.twitter import user
def get_tweet_tags(tweet):
""" Break up a tweet into individual word parts """
tknzr = TweetTokenizer()
tokens = tknzr.tokenize(tweet)
# replace handles with real names
for n, tok in enumerate(tokens):
if tok.startswith('@'):
handle = tok.strip("@")
if handle in user.students:
# If we have a database entry for the mentioned user, we can
# easily substitute a full name.
usr = user.NPUser(handle)
tokens[n] = usr.fullname
else:
# If there is no database entry, we use the user's alias. While
# this is the full name in many cases, it is often not reliable
usr = api.get_user(handle)
tokens[n] = usr.name
tagged = nltk.pos_tag(tokens)
# In nltk, if a teacher's name is written with a period after an
# abbreviated prefix, it is awkwardly broken up into 3 tags
for n, tag in enumerate(tagged):
# If there is the weird period after the prefix,
if tag[1] == '.':
# and it is in fact splitting up a person's name,
if tagged[n - 1][1] == 'NNP' and tagged[n + 1][1] == 'NNP':
if tagged[n - 1][0] in ['Mr', 'Ms', 'Mrs', 'Mx']:
# combine it into the actual name,
tagged[n - 1] = ('{}. {}'.format(tagged[n - 1][0],
tagged[n + 1][0]), 'NNP')
# and then remove the extra tags.
del tagged[n + 1]
del tagged[n]
return tagged
def tweet_connotation(tweet):
""" Decide whether a tweet is generally positive or negative """
anlyzr = SentimentIntensityAnalyzer()
    # break tweet up into sentences and analyze each separately
twtcontent = sent_tokenize(tweet)
overall = {'compound': 0, 'neg': 0, 'neu': 0, 'pos': 0}
for s in twtcontent:
scores = anlyzr.polarity_scores(s)
# tally up each sentence's overall tone
for i, z in enumerate(scores):
overall[z] += scores[z]
# average it all together for the tweet as a whole
for v in overall:
overall[v] = round(overall[v] / len(twtcontent), 3)
return overall
def person_connotation(tweet, name):
"""
Decide whether a person is talked favorably about or not, based on the
tone of the sentences in which their name appears
"""
twtcontent = sent_tokenize(tweet)
overall = {'compound': 0, 'neg': 0, 'neu': 0, 'pos': 0}
mentions = 0
# analyze each sentence talking about `name` person
for s in twtcontent:
tags = get_tweet_tags(s)
# if the name appears in the tagged sentence, get its tone
if (name, 'NNP') in tags:
sentence = util.untag(tags)
scores = tweet_connotation(' '.join(sentence))
# add it up to the overall tweet's tone
for i, z in enumerate(scores):
overall[z] += scores[z]
mentions += 1
# averaging all sentences' scores. don't wanna divide by zero now do we
if mentions != 0:
for v in overall:
overall[v] = round(overall[v] / mentions, 3)
return overall
def person_multi_connotation(tweets, name):
""" Analyze many tweets (a list of tweets in fact) about a person """
mentioned = 0
overall = {'compound': 0, 'neg': 0, 'neu': 0, 'pos': 0}
for t in tweets:
score = person_connotation(t, name)
for i, z in enumerate(score):
            if score[z] != 0:
mentioned += 1
break
for i, z in enumerate(score):
overall[z] += score[z]
if mentioned != 0:
for v in overall:
overall[v] = round(overall[v] / mentioned, 3)
return overall
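# Illustrative sketch: the sentence-level averaging done by tweet_connotation(), run on
# an assumed example text (requires the NLTK 'punkt' and 'vader_lexicon' data locally).
if __name__ == '__main__':
    example = "The concert was great. The parking was awful."
    print(tweet_connotation(example))  # averaged compound/neg/neu/pos scores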
| 37.889908
| 79
| 0.577966
|
eaad9af8c2cabdb3d69178cf3693b380c2a3f76d
| 946
|
py
|
Python
|
zetafold/util/wrapped_array.py
|
rhiju/myfold
|
d789c4c8b1cff0a3b860a049fb00f3e02de7d8d1
|
[
"MIT"
] | 8
|
2018-11-14T05:18:56.000Z
|
2018-12-03T14:21:56.000Z
|
zetafold/util/wrapped_array.py
|
rhiju/myfold
|
d789c4c8b1cff0a3b860a049fb00f3e02de7d8d1
|
[
"MIT"
] | 9
|
2019-01-02T22:17:33.000Z
|
2019-03-29T23:15:50.000Z
|
zetafold/util/wrapped_array.py
|
rhiju/myfold
|
d789c4c8b1cff0a3b860a049fb00f3e02de7d8d1
|
[
"MIT"
] | 3
|
2018-10-27T21:28:42.000Z
|
2018-12-03T14:21:57.000Z
|
class WrappedArray:
'''
    For all the various cross-checks, like equality of the partition function starting at any
    i, wrapping around to N and then back through 1 to i-1, indices need to keep being reduced modulo N.
'''
def __init__( self, N, val = None ):
self.data = [val] * N
self.N = N
def __getitem__( self, idx ):
return self.data[idx % self.N]
def __setitem__( self, idx, item ):
self.data[idx % self.N] = item
def __len__( self ):
return self.N
##################################################################################################
def initialize_matrix( N, val = None, wrapped = True ):
assert( not isinstance(val,list ) ) # causes issue with references to same list.
X = WrappedArray( N ) if wrapped else [None]*N
for i in range( N ):
X[ i ] = WrappedArray( N ) if wrapped else [None]*N
for j in range( N ): X[ i ][ j ] = val
return X
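# Illustrative sketch: indices wrap modulo N, so index N aliases 0 and -1 aliases N-1.
# The sizes and values below are made up.
if __name__ == '__main__':
    w = WrappedArray(4, val=0)
    w[5] = 7                 # stored at index 5 % 4 == 1
    print(w[1], w[-3])       # both read index 1 -> prints: 7 7
    X = initialize_matrix(3, val=0.0)
    X[3][4] = 1.0            # same cell as X[0][1]
    print(X[0][1])           # prints: 1.0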
| 39.416667
| 98
| 0.540169
|
afad33f4159554357c2efa70ff71a7e9735bc682
| 489
|
py
|
Python
|
world challenges 3/dictionary/df90.py
|
T-Terra/Exercises-of-Python
|
65dfb9ca32faff13f9d1160c07425330a68eef1a
|
[
"MIT"
] | null | null | null |
world challenges 3/dictionary/df90.py
|
T-Terra/Exercises-of-Python
|
65dfb9ca32faff13f9d1160c07425330a68eef1a
|
[
"MIT"
] | null | null | null |
world challenges 3/dictionary/df90.py
|
T-Terra/Exercises-of-Python
|
65dfb9ca32faff13f9d1160c07425330a68eef1a
|
[
"MIT"
] | null | null | null |
main_dic = {}
main_dic['name'] = str(input(f'nome: '))
main_dic['media'] = float(input(f'media de {main_dic["name"]}: '))
print(25 * '-=')
print(f'O nome é igual a {main_dic["name"]}')
print(f'Média igual a {main_dic["media"]}')
if main_dic['media'] >= 7:
main_dic['situation'] = 'Aprovado!'
elif 5 <= main_dic['media'] < 7:
main_dic['situation'] = 'Recuperação!'
else:
main_dic['situation'] = 'Reprovado!'
print(f'Situação é igual a {main_dic["situation"]}')
print(25 * '-=')
| 32.6
| 66
| 0.625767
|
dceb26a1645aeeb71e260dae5a110d1d74cfcd97
| 4,014
|
py
|
Python
|
data/train/python/dceb26a1645aeeb71e260dae5a110d1d74cfcd97make_char_img.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/dceb26a1645aeeb71e260dae5a110d1d74cfcd97make_char_img.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/dceb26a1645aeeb71e260dae5a110d1d74cfcd97make_char_img.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
#!/home/kohei/.pyenv/shims/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append('..')
import kdebug
from kimage import *
from kfont import get_word_image
if(len(sys.argv) <= 1):
print "no save object: all, ascii"
sys.exit(1)
SAVE_OBJECT = sys.argv[1]
if(len(sys.argv) <= 2):
print "no save directory"
sys.exit(1)
SAVE_DIR = sys.argv[2]
def cut(image):
l = -1
r = -1
    u = -1
d = -1
for i in range(image.size[0]):
if(not is_vline(image, i, 0)):
l = i
break
for i in range(image.size[0]-1, -1, -1):
if(not is_vline(image, i, 0)):
r = i+1
break
for i in range(image.size[1]):
if(not is_hline(image, i, 0)):
u = i
break
for i in range(image.size[1]-1, -1, -1):
if(not is_hline(image, i, 0)):
d = i+1
break
if(l == -1):
return image
return image.crop((l,u,r,d))
def save_at(i):
ch = unichr(i)
print(u"{0}:[{1}]".format(hex(i), ch))
image = get_word_image(ch)
barray = g_to_barray(image.getdata())
gimg = Image.new("L", image.size)
gimg.putdata(b_to_garray(barray))
gimg = cut(gimg).resize((20,20))
name = "{0:0>4}".format(hex(i)[2:].upper())
gimg.save(SAVE_DIR + "/{0}.png".format(name))
def save_range(start, end):
for i in range(start, end + 1):
save_at(i)
zenkaku_symbol_list = [
u"、",
u"。",
u"?",
# u"・",
u"「",
u"」",
u"『",
u"』",
u"○",
u"ー",
u"&",
u"%",
u"#",
u"$",
u"!",
u"*",
u"=",
u"+",
]
zenkaku_kanji_list = [
u"門",
u"鮎",
u"安",
u"井",
u"戸",
u"右", u"左", u"上", u"下",
u"鳥",
u"白", u"赤", u"青", u"黒", u"黄",
u"色",
u"永",
u"駅",
u"王",
u"化",
u"口", u"因", u"国",
u"日", u"年", u"月",
u"花", u"草",
u"海", u"湖",
u"外",
u"本",
u"学",
u"甘",
u"辛",
u"丸",
u"二", u"三", u"四", u"五", u"六", u"七", u"八", u"九", u"十",
u"百", u"千", u"万", u"億",
u"曲",
u"犬", u"猫",
u"野", u"球",
u"見",
u"工",
u"作",
u"子",
u"親",
u"次",
u"人",
u"中",
u"何",
u"夏", u"秋", u"冬", u"春",
u"朝", u"昼", u"夜",
u"東", u"西", u"南", u"北",
u"文",
u"漫", u"画", u"映",
u"英", u"語",
u"呼",
u"表",
u"動", u"虫", u"物",
]
zenkaku_kana_list = [
u"あ", u"い", u"う", u"え", u"お",
u"か", u"き", u"く", u"け", u"こ",
u"さ", u"し", u"す", u"せ", u"そ",
u"た", u"ち", u"つ", u"て", u"と",
u"な", u"に", u"ぬ", u"ね", u"の",
u"は", u"ひ", u"ふ", u"へ", u"ほ",
u"ま", u"み", u"む", u"め", u"も",
u"や", u"ゆ", u"よ",
u"ゃ", u"ゅ", u"ょ",
u"ら", u"り", u"る", u"れ", u"ろ",
u"わ", u"を", u"ん",
u"ア", u"イ", u"ウ", u"エ", u"オ",
u"カ", u"キ", u"ク", u"ケ", u"コ",
u"サ", u"シ", u"ス", u"セ", u"ソ",
u"タ", u"チ", u"ツ", u"テ", u"ト",
u"ナ", u"ニ", u"ヌ", u"ネ", u"ノ",
u"ハ", u"ヒ", u"フ", u"ヘ", u"ホ",
u"マ", u"ミ", u"ム", u"メ", u"モ",
u"ヤ", u"ユ", u"ヨ",
u"ャ", u"ュ", u"ョ",
u"ラ", u"リ", u"ル", u"レ", u"ロ",
u"ワ", u"ヲ", u"ン",
]
def save_zenkaku():
for ch in zenkaku_symbol_list:
save_at(ord(ch))
save_range(0x3041, 0x3093) # save hiragana
save_range(0x30A1, 0x30F6) # save katakana
for ch in zenkaku_kanji_list:
save_at(ord(ch))
def save_ascii():
# save_range(0x0020, 0x007D) # all ascii codes
save_range(0x0030, 0x0039) # number
save_range(0x0041, 0x005A) # alphabets upper
save_range(0x0061, 0x007A) # alphabets lower
if(SAVE_OBJECT == "ascii"):
save_ascii()
if(SAVE_OBJECT == "zenkaku"):
save_zenkaku()
if(SAVE_OBJECT == "all"):
save_zenkaku()
# save_ascii()
| 21.815217
| 61
| 0.397857
|
93913ae4ebf4f5817a238e23f6b1a0315f3f2088
| 15,915
|
py
|
Python
|
main.py
|
odemasi/simpletod
|
8d694bc1b09c12497488be46879dfe1dede83df3
|
[
"BSD-3-Clause"
] | 167
|
2020-07-26T08:33:54.000Z
|
2022-03-24T13:30:07.000Z
|
main.py
|
odemasi/simpletod
|
8d694bc1b09c12497488be46879dfe1dede83df3
|
[
"BSD-3-Clause"
] | 27
|
2020-07-27T07:07:05.000Z
|
2021-11-30T11:10:36.000Z
|
main.py
|
odemasi/simpletod
|
8d694bc1b09c12497488be46879dfe1dede83df3
|
[
"BSD-3-Clause"
] | 62
|
2020-08-02T18:25:55.000Z
|
2022-02-06T15:04:57.000Z
|
"""
Fine-tuning pretrained language model (GPT2) on Task-oriented Dialogue
"""
import argparse
import glob
import logging
import os
import pickle
import random
import re
# import shutil
# from typing import Dict, List, Tuple
import numpy as np
import torch
# from torch.nn.utils.rnn import pad_sequence
# from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
# from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
# AdamW,
GPT2Tokenizer,
PreTrainedModel,
PreTrainedTokenizer,
# get_linear_schedule_with_warmup,
)
from transformers import GPT2Tokenizer
# comment this if you want to load gpt2 class from transformers
from models import GPT2LMHeadModel
from models import GPT2Config, GPT2SmallConfig
# uncomment this if you want to load gpt2 class from transformers
# from transformers import GPT2Config, GPT2LMHeadModel
from data.dataset.language_model import *
from utils.model import *
from utils.language_model import get_optimizer_scheduler
from utils.gpt2_args_parser import ArgsParser
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
"gpt2-small": (GPT2SmallConfig, GPT2LMHeadModel, GPT2Tokenizer),
}
def get_model_tokenizer(args):
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
if args.config_name:
config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
config = config_class()
if args.tokenizer_name:
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
raise ValueError(
"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name".format(tokenizer_class.__name__)
)
if args.block_size <= 0:
args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
args.block_size = min(args.block_size, tokenizer.max_len)
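    # Example: GPT-2's tokenizer.max_len is 1024, so a block_size <= 0 becomes 1024 and a
    # requested block_size of 2048 is clamped down to 1024.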
if args.model_name_or_path:
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = model_class(config=config)
model.to(args.device)
if args.model_name_or_path == 'openai-gpt':
tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})
tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})
elif args.model_name_or_path == 'gpt2':
pass
return model, tokenizer, model_class, args
def get_training_info(dataloader, args):
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
            # set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(dataloader) // args.gradient_accumulation_steps)
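            # Worked example (hypothetical path): "output/checkpoint-5000" yields suffix "5000",
            # so global_step = 5000; with 100 optimizer steps per epoch this resumes at
            # epochs_trained = 50 and steps_trained_in_current_epoch = 0.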
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
return global_step, epochs_trained, steps_trained_in_current_epoch
def train_epoch(model, tokenizer, optimizer, scheduler, train_dataloader, tr_loss, logging_loss, global_step, steps_trained_in_current_epoch, tb_writer, args):
"""train one epoch"""
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs, labels = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = model(inputs, labels=labels)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Log metrics
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
if (args.local_rank == -1 and args.evaluate_during_training): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
# save checkpoint
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.evaluate_during_training:
save_checkpoint(model, optimizer, scheduler, tokenizer, args)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
return model, optimizer, scheduler, global_step, tr_loss, logging_loss
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter('./runs/{}'.format(args.output_dir.split('/')[-1]))
# Prepare dataloader
train_dataloader, args = get_dataloader(train_dataset, tokenizer, args)
# total iteration and batch size
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
total_batch_size = args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
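    # Example: with len(train_dataloader) == 1000, gradient_accumulation_steps == 4 and
    # num_train_epochs == 3, t_total = (1000 // 4) * 3 = 750 optimizer steps.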
# Prepare optimizer and schedule (linear warmup and decay)
optimizer, scheduler = get_optimizer_scheduler(args, model, t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = {}".format(len(train_dataset)))
logger.info(" Num Epochs = {}".format(args.num_train_epochs))
logger.info(" Instantaneous batch size per GPU = {}".format(args.per_gpu_train_batch_size))
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = {}".format(total_batch_size))
logger.info(" Gradient Accumulation steps = {}".format(args.gradient_accumulation_steps))
logger.info(" Total optimization steps = {}".format(t_total))
global_step, epochs_trained, steps_trained_in_current_epoch = get_training_info(train_dataloader, args)
tr_loss, logging_loss = 0.0, 0.0
model_to_resize = model.module if hasattr(model, "module") else model # Take care of distributed/parallel training
model_to_resize.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
for _ in train_iterator:
model, optimizer, scheduler, global_step, tr_loss, logging_loss = train_epoch(model, tokenizer, optimizer, scheduler, train_dataloader, tr_loss, logging_loss, global_step,
steps_trained_in_current_epoch, tb_writer, args)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
# Prepare dataloader
eval_dataloader, args = get_dataloader(eval_dataset, tokenizer, args, split='eval')
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = {}".format(len(eval_dataset)))
logger.info(" Batch size = {}".format(args.eval_batch_size))
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
inputs, labels = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
with torch.no_grad():
outputs = model(inputs, labels=labels)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
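    # Example: an average eval_loss of 2.0 corresponds to a perplexity of exp(2.0) ≈ 7.39.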
result = {"perplexity": perplexity}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def main():
args = ArgsParser().parse()
if args.eval_data_file is None and args.do_eval:
raise ValueError(
"--eval_data_file should be specified when do_eval is true"
)
if args.should_continue:
sorted_checkpoints = _sorted_checkpoints(args)
if len(sorted_checkpoints) == 0:
raise ValueError("--should_continue is true, but no checkpoint found in --output_dir")
else:
args.model_name_or_path = sorted_checkpoints[-1]
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # initialize distributed training
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # if not the first process, do not load pretrained model & vocab
model, tokenizer, model_class, args = get_model_tokenizer(args)
if args.local_rank == 0:
torch.distributed.barrier() # finish barrier, when first process has loaded pretrained model & vocab
logger.info("Training/evaluation parameters {}".format(args))
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # only first process will preprocess data/caching
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier() # end of barrier
global_step, train_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = {}, average loss = {}".format(global_step, train_loss))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("models.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: {}".format(checkpoints))
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| 38.257212
| 179
| 0.666855
|
bf06fb772978cbd8b941e3a1703a644646af4b97
| 17,036
|
py
|
Python
|
google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py
|
busunkim96/python-automl
|
7df905910b86721a6ee3a3b6c916a4f8e27d0aa7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py
|
busunkim96/python-automl
|
7df905910b86721a6ee3a3b6c916a4f8e27d0aa7
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py
|
busunkim96/python-automl
|
7df905910b86721a6ee3a3b6c916a4f8e27d0aa7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/automl_v1beta1/proto/annotation_payload.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.automl_v1beta1.proto import (
classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2,
)
from google.cloud.automl_v1beta1.proto import (
detection_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2,
)
from google.cloud.automl_v1beta1.proto import (
tables_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2,
)
from google.cloud.automl_v1beta1.proto import (
text_extraction_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2,
)
from google.cloud.automl_v1beta1.proto import (
text_sentiment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2,
)
from google.cloud.automl_v1beta1.proto import (
translation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2,
)
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/automl_v1beta1/proto/annotation_payload.proto",
package="google.cloud.automl.v1beta1",
syntax="proto3",
serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 \x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3',
dependencies=[
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2.DESCRIPTOR,
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2.DESCRIPTOR,
google_dot_protobuf_dot_any__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_ANNOTATIONPAYLOAD = _descriptor.Descriptor(
name="AnnotationPayload",
full_name="google.cloud.automl.v1beta1.AnnotationPayload",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="translation",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.translation",
index=0,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="classification",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.classification",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="image_object_detection",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.image_object_detection",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="video_classification",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.video_classification",
index=3,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="video_object_tracking",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.video_object_tracking",
index=4,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="text_extraction",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.text_extraction",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="text_sentiment",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.text_sentiment",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="tables",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.tables",
index=7,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="annotation_spec_id",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.annotation_spec_id",
index=8,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="display_name",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.display_name",
index=9,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="detail",
full_name="google.cloud.automl.v1beta1.AnnotationPayload.detail",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=470,
serialized_end=1212,
)
_ANNOTATIONPAYLOAD.fields_by_name[
"translation"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_translation__pb2._TRANSLATIONANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"classification"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._CLASSIFICATIONANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"image_object_detection"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._IMAGEOBJECTDETECTIONANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"video_classification"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2._VIDEOCLASSIFICATIONANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"video_object_tracking"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2._VIDEOOBJECTTRACKINGANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"text_extraction"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__extraction__pb2._TEXTEXTRACTIONANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"text_sentiment"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__sentiment__pb2._TEXTSENTIMENTANNOTATION
)
_ANNOTATIONPAYLOAD.fields_by_name[
"tables"
].message_type = (
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2._TABLESANNOTATION
)
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["translation"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"translation"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["classification"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"classification"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["image_object_detection"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"image_object_detection"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["video_classification"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"video_classification"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["video_object_tracking"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"video_object_tracking"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["text_extraction"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"text_extraction"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["text_sentiment"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"text_sentiment"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
_ANNOTATIONPAYLOAD.oneofs_by_name["detail"].fields.append(
_ANNOTATIONPAYLOAD.fields_by_name["tables"]
)
_ANNOTATIONPAYLOAD.fields_by_name[
"tables"
].containing_oneof = _ANNOTATIONPAYLOAD.oneofs_by_name["detail"]
DESCRIPTOR.message_types_by_name["AnnotationPayload"] = _ANNOTATIONPAYLOAD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AnnotationPayload = _reflection.GeneratedProtocolMessageType(
"AnnotationPayload",
(_message.Message,),
{
"DESCRIPTOR": _ANNOTATIONPAYLOAD,
"__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2",
"__doc__": """Contains annotation information that is relevant to AutoML.
Attributes:
detail:
Output only . Additional information about the annotation
specific to the AutoML domain.
translation:
Annotation details for translation.
classification:
Annotation details for content or image classification.
image_object_detection:
Annotation details for image object detection.
video_classification:
Annotation details for video classification. Returned for
Video Classification predictions.
video_object_tracking:
Annotation details for video object tracking.
text_extraction:
Annotation details for text extraction.
text_sentiment:
Annotation details for text sentiment.
tables:
Annotation details for Tables.
annotation_spec_id:
Output only . The resource ID of the annotation spec that this
annotation pertains to. The annotation spec comes from either
an ancestor dataset, or the dataset that was used to train the
model in use.
display_name:
Output only. The value of [display_name][google.cloud.automl.v
1beta1.AnnotationSpec.display_name] when the model was
trained. Because this field returns a value at model training
time, for different models trained using the same dataset, the
returned value could be different as model owner could update
the ``display_name`` between any two model training.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationPayload)
},
)
_sym_db.RegisterMessage(AnnotationPayload)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
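# Hedged usage sketch (not part of the generated module): `detail` is a proto3 oneof, so at
# most one annotation kind is populated on a payload, and WhichOneof() reports which one.
payload = AnnotationPayload()
payload.annotation_spec_id = "1234"
assert payload.WhichOneof("detail") is None  # no detail field has been populated yet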
| 40.755981
| 1,785
| 0.704508
|
fef89396995ca0389d748167ae8a58117ba5789f
| 58
|
py
|
Python
|
cogs/__init__.py
|
yaansz/RoleManager
|
fbde2ab4a8d0721dfe5a7f258981a298787c20f6
|
[
"MIT"
] | 1
|
2021-11-12T04:25:56.000Z
|
2021-11-12T04:25:56.000Z
|
cogs/__init__.py
|
yaansz/RoleManager
|
fbde2ab4a8d0721dfe5a7f258981a298787c20f6
|
[
"MIT"
] | 1
|
2021-10-21T04:28:53.000Z
|
2021-10-21T04:28:53.000Z
|
cogs/__init__.py
|
yaansz/RoleManager
|
fbde2ab4a8d0721dfe5a7f258981a298787c20f6
|
[
"MIT"
] | null | null | null |
# PATH PROBLEM
import pathlib
pathlib.Path().resolve()
| 14.5
| 25
| 0.724138
|
1a11713508974d9008ba030242c2f039c0fa6e35
| 6,867
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/health_check.py
|
pulumi-bot/pulumi-gcp
|
43ff11bf1c99b4e9e493f61d9755e359b686ae67
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/health_check.py
|
pulumi-bot/pulumi-gcp
|
43ff11bf1c99b4e9e493f61d9755e359b686ae67
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/compute/health_check.py
|
pulumi-bot/pulumi-gcp
|
43ff11bf1c99b4e9e493f61d9755e359b686ae67
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class HealthCheck(pulumi.CustomResource):
"""
Manages a health check within GCE. This is used to monitor instances
behind load balancers. Timeouts or HTTP errors cause the instance to be
removed from the pool. For more information, see [the official
documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks)
and
[API](https://cloud.google.com/compute/docs/reference/latest/healthChecks).
"""
def __init__(__self__, __name__, __opts__=None, check_interval_sec=None, description=None, healthy_threshold=None, http_health_check=None, https_health_check=None, name=None, project=None, ssl_health_check=None, tcp_health_check=None, timeout_sec=None, unhealthy_threshold=None):
"""Create a HealthCheck resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if check_interval_sec and not isinstance(check_interval_sec, int):
            raise TypeError('Expected property check_interval_sec to be an int')
__self__.check_interval_sec = check_interval_sec
"""
The number of seconds between each poll of
        the instance (default 5).
"""
__props__['checkIntervalSec'] = check_interval_sec
if description and not isinstance(description, basestring):
raise TypeError('Expected property description to be a basestring')
__self__.description = description
"""
Textual description field.
"""
__props__['description'] = description
if healthy_threshold and not isinstance(healthy_threshold, int):
            raise TypeError('Expected property healthy_threshold to be an int')
__self__.healthy_threshold = healthy_threshold
"""
Consecutive successes required (default 2).
"""
__props__['healthyThreshold'] = healthy_threshold
if http_health_check and not isinstance(http_health_check, dict):
raise TypeError('Expected property http_health_check to be a dict')
__self__.http_health_check = http_health_check
"""
An HTTP Health Check. Only one kind of Health Check can be added.
Structure is documented below.
"""
__props__['httpHealthCheck'] = http_health_check
if https_health_check and not isinstance(https_health_check, dict):
raise TypeError('Expected property https_health_check to be a dict')
__self__.https_health_check = https_health_check
"""
An HTTPS Health Check. Only one kind of Health Check can be added.
Structure is documented below.
"""
__props__['httpsHealthCheck'] = https_health_check
if name and not isinstance(name, basestring):
raise TypeError('Expected property name to be a basestring')
__self__.name = name
"""
A unique name for the resource, required by GCE.
Changing this forces a new resource to be created.
"""
__props__['name'] = name
if project and not isinstance(project, basestring):
raise TypeError('Expected property project to be a basestring')
__self__.project = project
"""
The project in which the resource belongs. If it
is not provided, the provider project is used.
"""
__props__['project'] = project
if ssl_health_check and not isinstance(ssl_health_check, dict):
raise TypeError('Expected property ssl_health_check to be a dict')
__self__.ssl_health_check = ssl_health_check
"""
An SSL Health Check. Only one kind of Health Check can be added.
Structure is documented below.
"""
__props__['sslHealthCheck'] = ssl_health_check
if tcp_health_check and not isinstance(tcp_health_check, dict):
raise TypeError('Expected property tcp_health_check to be a dict')
__self__.tcp_health_check = tcp_health_check
"""
A TCP Health Check. Only one kind of Health Check can be added.
Structure is documented below.
"""
__props__['tcpHealthCheck'] = tcp_health_check
if timeout_sec and not isinstance(timeout_sec, int):
            raise TypeError('Expected property timeout_sec to be an int')
__self__.timeout_sec = timeout_sec
"""
The number of seconds to wait before declaring
failure (default 5).
"""
__props__['timeoutSec'] = timeout_sec
if unhealthy_threshold and not isinstance(unhealthy_threshold, int):
            raise TypeError('Expected property unhealthy_threshold to be an int')
__self__.unhealthy_threshold = unhealthy_threshold
"""
Consecutive failures required (default 2).
"""
__props__['unhealthyThreshold'] = unhealthy_threshold
__self__.self_link = pulumi.runtime.UNKNOWN
"""
The URI of the created resource.
"""
super(HealthCheck, __self__).__init__(
'gcp:compute/healthCheck:HealthCheck',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'checkIntervalSec' in outs:
self.check_interval_sec = outs['checkIntervalSec']
if 'description' in outs:
self.description = outs['description']
if 'healthyThreshold' in outs:
self.healthy_threshold = outs['healthyThreshold']
if 'httpHealthCheck' in outs:
self.http_health_check = outs['httpHealthCheck']
if 'httpsHealthCheck' in outs:
self.https_health_check = outs['httpsHealthCheck']
if 'name' in outs:
self.name = outs['name']
if 'project' in outs:
self.project = outs['project']
if 'selfLink' in outs:
self.self_link = outs['selfLink']
if 'sslHealthCheck' in outs:
self.ssl_health_check = outs['sslHealthCheck']
if 'tcpHealthCheck' in outs:
self.tcp_health_check = outs['tcpHealthCheck']
if 'timeoutSec' in outs:
self.timeout_sec = outs['timeoutSec']
if 'unhealthyThreshold' in outs:
self.unhealthy_threshold = outs['unhealthyThreshold']
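# Hedged usage sketch (illustrative values; run inside a Pulumi program):
#
#     https_check = HealthCheck('example-hc',
#                               check_interval_sec=10,
#                               timeout_sec=5,
#                               https_health_check={'port': 443, 'request_path': '/healthz'})
#
# Exactly one *_health_check block should be supplied, since only one kind of health check
# can be added per resource.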
| 42.91875
| 283
| 0.657929
|
f0a206db7e6e2e0a621dad6d6d8d3f2de3bd04e3
| 7,823
|
py
|
Python
|
docs/conf.py
|
alexanha/psutil
|
8455119262647e898e0aee0dc52774b5696cf4a4
|
[
"BSD-3-Clause"
] | 7
|
2017-04-08T13:08:47.000Z
|
2021-01-28T22:46:51.000Z
|
docs/conf.py
|
alexanha/psutil
|
8455119262647e898e0aee0dc52774b5696cf4a4
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
alexanha/psutil
|
8455119262647e898e0aee0dc52774b5696cf4a4
|
[
"BSD-3-Clause"
] | 4
|
2019-04-21T14:56:19.000Z
|
2021-04-14T22:59:37.000Z
|
# -*- coding: utf-8 -*-
#
# psutil documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
PROJECT_NAME = "psutil"
AUTHOR = "Giampaolo Rodola'"
THIS_YEAR = str(datetime.datetime.now().year)
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version():
INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py'))
with open(INIT, 'r') as f:
for line in f:
if line.startswith('__version__'):
ret = eval(line.strip().split(' = ')[1])
assert ret.count('.') == 2, ret
for num in ret.split('.'):
assert num.isdigit(), ret
return ret
else:
raise ValueError("couldn't find version string")
VERSION = get_version()
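# Example: for a line "__version__ = '5.4.3'\n", the eval() above yields '5.4.3', which
# passes the two-dot / digits-only sanity checks.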
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_template']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = PROJECT_NAME
copyright = '2009-%s, %s' % (THIS_YEAR, AUTHOR)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
autodoc_docstring_signature = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme = 'pydoctheme'
html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "{project} {version} documentation".format(**locals())
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html',
'**': ['globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {
# 'index': 'indexcontent.html',
# }
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%s-doc' % PROJECT_NAME
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', '%s.tex' % PROJECT_NAME,
'%s documentation' % PROJECT_NAME, AUTHOR),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', PROJECT_NAME, '%s documentation' % PROJECT_NAME, [AUTHOR], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
| 31.292
| 79
| 0.698965
|
5be17d9d12a85cf9e8836ca6a51e5594c4474213
| 533
|
py
|
Python
|
quantifiedcode/plugins/example/backend/tasks/test.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 118
|
2017-01-03T18:18:29.000Z
|
2022-02-06T15:32:02.000Z
|
quantifiedcode/plugins/example/backend/tasks/test.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 14
|
2016-12-21T11:26:48.000Z
|
2022-03-02T10:32:24.000Z
|
quantifiedcode/plugins/example/backend/tasks/test.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 26
|
2017-08-01T10:00:16.000Z
|
2022-02-06T15:31:55.000Z
|
"""
Contains an example task for the example plugin backend.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import logging
import requests
import json
from quantifiedcode.settings import settings
from quantifiedcode.backend.worker import celery
logger = logging.getLogger(__name__)
@celery.task(time_limit=120, queue="email", ignore_result=False)
def test(webhook, template, template_context=None):
"""
Example task.
"""
pass
| 20.5
| 64
| 0.774859
|
961033a053b3636252c7937b8989284baad33175
| 524
|
py
|
Python
|
ideas/migrations/0007_auto_20190512_0019.py
|
neosergio/hackatrix-api
|
27f0180415efa97bd7345d100b314d8807486b67
|
[
"Apache-2.0"
] | 1
|
2021-02-12T10:25:28.000Z
|
2021-02-12T10:25:28.000Z
|
ideas/migrations/0007_auto_20190512_0019.py
|
neosergio/hackatrix-api
|
27f0180415efa97bd7345d100b314d8807486b67
|
[
"Apache-2.0"
] | 7
|
2020-02-21T00:53:38.000Z
|
2022-02-10T12:22:53.000Z
|
ideas/migrations/0007_auto_20190512_0019.py
|
neosergio/hackatrix-api
|
27f0180415efa97bd7345d100b314d8807486b67
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-05-12 00:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ideas', '0006_auto_20190511_2342'),
]
operations = [
migrations.AlterField(
model_name='idea',
name='author',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='author_idea', to='events.Registrant'),
),
]
| 26.2
| 159
| 0.65458
|
11a37c0b9685bec9751fa2cf9f293b18ca147395
| 1,674
|
py
|
Python
|
src/add_drug_info.py
|
wchwang/Method_Pancorona
|
9dbe0dfd984497406a129c8029ebf1c0c928c27f
|
[
"MIT"
] | null | null | null |
src/add_drug_info.py
|
wchwang/Method_Pancorona
|
9dbe0dfd984497406a129c8029ebf1c0c928c27f
|
[
"MIT"
] | null | null | null |
src/add_drug_info.py
|
wchwang/Method_Pancorona
|
9dbe0dfd984497406a129c8029ebf1c0c928c27f
|
[
"MIT"
] | null | null | null |
# Created by woochanghwang at 12/07/2021
# Created by woochanghwang at 14/03/2021
from src_drug.p02_add_drug_info_on_result import add_drugbank_info, add_drug_target, groupby_drug_targets
def main():
# ############################
# ## Add drug name in result
# ############################
disease = "SARS-CoV"
drug_proximity_f = f"../result/{disease}/Drug/{disease}_drug_network_proximity.csv"
result_file_prefix = f"../result/{disease}/Drug/{disease}_drug_network_proximity"
# result_f = "{}.tsv".format(result_file_prefix)
drugbank_data_file = "/Users/woochanghwang/PycharmProjects/MTIProject/General/data/Drugbank/v5.1.6/drugbank.tsv"
drugbank_result_f = "{}_drugbank.tsv".format(result_file_prefix)
add_drugbank_info(drug_proximity_f, drugbank_data_file, drugbank_result_f)
# #############################
# ## Add drug target in result
# ############################
drug_target_file = "/Users/woochanghwang/PycharmProjects/MTIProject/General/data/STITCH/2020.12/9606.protein_chemical.links.v5.0.drugbank.v5.1.6.target_symbol.s900.onlyTarget.tsv"
drug_target_result_f = "{}_target.tsv".format(result_file_prefix)
add_drug_target(drugbank_result_f,drug_target_file, drug_target_result_f , type="symbol")
# ########################
# ## groupby drug targets
# #######################
drug_targets_addr = "{}_drug_to_target.csv".format(result_file_prefix)
drug_groupby_target_addr = "{}_drug_groupby_target.tsv".format(result_file_prefix)
groupby_drug_targets(drug_target_result_f, drug_targets_addr, drug_groupby_target_addr)
if __name__ == '__main__':
main()
| 37.2
| 183
| 0.681601
|
48169ebe064f02ed217fe3720764e020132bd6ae
| 3,631
|
py
|
Python
|
examples/python/gen-py/v1/music/f_AlbumWinners_publisher.py
|
jeffreymelvin-wf/frugal
|
8208673be5888bfbb4fb395ad9acce2210496279
|
[
"Apache-2.0"
] | 1
|
2017-10-05T15:36:11.000Z
|
2017-10-05T15:36:11.000Z
|
examples/python/gen-py/v1/music/f_AlbumWinners_publisher.py
|
jeffreymelvin-wf/frugal
|
8208673be5888bfbb4fb395ad9acce2210496279
|
[
"Apache-2.0"
] | null | null | null |
examples/python/gen-py/v1/music/f_AlbumWinners_publisher.py
|
jeffreymelvin-wf/frugal
|
8208673be5888bfbb4fb395ad9acce2210496279
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Frugal Compiler (2.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import TMessageType, TType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
class AlbumWinnersPublisher(object):
"""
Scopes are a Frugal extension to the IDL for declaring PubSub
semantics. Subscribers to this scope will be notified if they win a contest.
Scopes must have a prefix.
"""
_DELIMITER = '.'
def __init__(self, provider, middleware=None):
"""
Create a new AlbumWinnersPublisher.
Args:
provider: FScopeProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
middleware += provider.get_middleware()
self._transport, self._protocol_factory = provider.new_publisher()
self._methods = {
'publish_ContestStart': Method(self._publish_ContestStart, middleware),
'publish_TimeLeft': Method(self._publish_TimeLeft, middleware),
'publish_Winner': Method(self._publish_Winner, middleware),
}
def open(self):
self._transport.open()
def close(self):
self._transport.close()
def publish_ContestStart(self, ctx, req):
"""
Args:
ctx: FContext
req: list
"""
self._methods['publish_ContestStart']([ctx, req])
def _publish_ContestStart(self, ctx, req):
op = 'ContestStart'
prefix = 'v1.music.'
topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
oprot = self._protocol_factory.get_protocol(buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin(op, TMessageType.CALL, 0)
oprot.writeListBegin(TType.STRUCT, len(req))
for elem3 in req:
elem3.write(oprot)
oprot.writeListEnd()
oprot.writeMessageEnd()
self._transport.publish(topic, buffer.getvalue())
def publish_TimeLeft(self, ctx, req):
"""
Args:
ctx: FContext
req: Minutes
"""
self._methods['publish_TimeLeft']([ctx, req])
def _publish_TimeLeft(self, ctx, req):
op = 'TimeLeft'
prefix = 'v1.music.'
topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
oprot = self._protocol_factory.get_protocol(buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin(op, TMessageType.CALL, 0)
oprot.writeDouble(req)
oprot.writeMessageEnd()
self._transport.publish(topic, buffer.getvalue())
def publish_Winner(self, ctx, req):
"""
Args:
ctx: FContext
req: Album
"""
self._methods['publish_Winner']([ctx, req])
def _publish_Winner(self, ctx, req):
op = 'Winner'
prefix = 'v1.music.'
topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
oprot = self._protocol_factory.get_protocol(buffer)
oprot.write_request_headers(ctx)
oprot.writeMessageBegin(op, TMessageType.CALL, 0)
req.write(oprot)
oprot.writeMessageEnd()
self._transport.publish(topic, buffer.getvalue())
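# Hedged usage sketch (provider construction omitted; types follow the docstrings above):
#
#     publisher = AlbumWinnersPublisher(provider)   # provider: FScopeProvider
#     publisher.open()
#     publisher.publish_TimeLeft(ctx, 5.0)          # ctx: FContext; TimeLeft carries a double
#     publisher.close()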
| 31.301724
| 83
| 0.63371
|
341f7e3fe5d81bbaa77051e70a49cc3be8cfaedc
| 32,261
|
py
|
Python
|
pandevice/objects.py
|
steve-krause/pandevice
|
25634d9e9a78507029922aa559d0b9268982080d
|
[
"0BSD"
] | null | null | null |
pandevice/objects.py
|
steve-krause/pandevice
|
25634d9e9a78507029922aa559d0b9268982080d
|
[
"0BSD"
] | null | null | null |
pandevice/objects.py
|
steve-krause/pandevice
|
25634d9e9a78507029922aa559d0b9268982080d
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Objects module contains objects that exist in the 'Objects' tab in the firewall GUI"""
import logging
import re
import xml.etree.ElementTree as ET
import pandevice
import pandevice.errors as err
from pandevice import getlogger
from pandevice.base import ENTRY, MEMBER, PanObject, Root
from pandevice.base import VarPath as Var
from pandevice.base import VersionedPanObject, VersionedParamPath
logger = getlogger(__name__)
class AddressObject(VersionedPanObject):
"""Address Object
Args:
name (str): Name of the object
value (str): IP address or other value of the object
type (str): Type of address:
* ip-netmask (default)
* ip-range
* fqdn
description (str): Description of this object
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/address")
# params
params = []
params.append(VersionedParamPath("value", path="{type}"))
params.append(
VersionedParamPath(
"type",
default="ip-netmask",
values=["ip-netmask", "ip-range", "fqdn"],
path="{type}",
)
)
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
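# Hedged usage sketch (values are made up): an ip-netmask object, the default type.
dmz_subnet = AddressObject("dmz-net", "10.1.1.0/24", description="DMZ subnet")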
class AddressGroup(VersionedPanObject):
"""Address Group
Args:
name (str): Name of the address group
static_value (list): Values for a static address group
dynamic_value (str): Registered-ip tags for a dynamic address group
description (str): Description of this object
tag (list): Administrative tags (not to be confused with registered-ip tags)
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/address-group")
# params
params = []
params.append(
VersionedParamPath("static_value", path="static", vartype="member")
)
params.append(VersionedParamPath("dynamic_value", path="dynamic/filter"))
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
class Tag(VersionedPanObject):
"""Administrative tag
Args:
name (str): Name of the tag
color (str): Color ID (eg. 'color1', 'color4', etc). You can
use :func:`~pandevice.objects.Tag.color_code` to generate the ID.
comments (str): Comments
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/tag")
# params
params = []
params.append(VersionedParamPath("color", path="color"))
params.append(VersionedParamPath("comments", path="comments"))
self._params = tuple(params)
@staticmethod
def color_code(color_name):
"""Returns the color code for a color
Args:
color_name (str): One of the following colors:
* red
* green
* blue
* yellow
* copper
* orange
* purple
* gray
* light green
* cyan
* light gray
* blue gray
* lime
* black
* gold
* brown
"""
colors = {
"red": 1,
"green": 2,
"blue": 3,
"yellow": 4,
"copper": 5,
"orange": 6,
"purple": 7,
"gray": 8,
"light green": 9,
"cyan": 10,
"light gray": 11,
"blue gray": 12,
"lime": 13,
"black": 14,
"gold": 15,
"brown": 16,
}
if color_name not in colors:
raise ValueError("Color '{0}' is not valid".format(color_name))
return "color" + str(colors[color_name])
class ServiceObject(VersionedPanObject):
"""Service Object
Args:
name (str): Name of the object
protocol (str): Protocol of the service, either tcp or udp
source_port (str): Source port of the protocol, if any
destination_port (str): Destination port of the service
description (str): Description of this object
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/service")
# params
params = []
params.append(
VersionedParamPath(
"protocol",
path="protocol/{protocol}",
values=["tcp", "udp"],
default="tcp",
)
)
params.append(
VersionedParamPath("source_port", path="protocol/{protocol}/source-port")
)
params.append(
VersionedParamPath("destination_port", path="protocol/{protocol}/port")
)
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
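# Hedged usage sketch (values are made up): a tcp/8080 service object with an admin tag.
alt_http = ServiceObject("tcp-8080", protocol="tcp", destination_port="8080", tag=["web"])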
class ServiceGroup(VersionedPanObject):
"""ServiceGroup Object
Args:
name (str): Name of the object
value (list): List of service values
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/service-group")
# params
params = []
params.append(VersionedParamPath("value", path="members", vartype="member"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
class ApplicationObject(VersionedPanObject):
"""Application Object
Args:
name (str): Name of the object
category (str): Application category
subcategory (str): Application subcategory
technology (str): Application technology
risk (int): Risk (1-5) of the application
default_type (str): Default identification type of the application
default_value (list): Values for the default type
parent_app (str): Parent application that this app falls under
timeout (int): Default timeout
tcp_timeout (int): TCP timeout
udp_timeout (int): UDP timeout
tcp_half_closed_timeout (int): TCP half closed timeout
tcp_time_wait_timeout (int): TCP time-wait timeout
evasive_behavior (bool): Application is actively evasive
consume_big_bandwidth (bool): Application uses large bandwidth
used_by_malware (bool): Application is used by malware
able_to_transfer_file (bool): Application can do file transfers
has_known_vulnerability (bool): Application has known vulnerabilities
tunnel_other_application (bool):
tunnel_applications (list): List of tunneled applications
prone_to_misuse (bool):
pervasive_use (bool):
file_type_ident (bool):
virus_ident (bool):
data_ident (bool):
description (str): Description of this object
tag (list): Administrative tags
Please refer to https://applipedia.paloaltonetworks.com/ for more info on these params
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/application")
# params
params = []
params.append(VersionedParamPath("category", path="category"))
params.append(VersionedParamPath("subcategory", path="subcategory"))
params.append(VersionedParamPath("technology", path="technology"))
params.append(VersionedParamPath("risk", path="risk", vartype="int"))
params.append(
VersionedParamPath(
"default_type",
path="default/{default_type}",
values=[
"port",
"ident-by-ip-protocol",
"ident-by-icmp-type",
"ident-by-icmp6-type",
],
)
)
params.append(
VersionedParamPath(
"default_port",
path="default/{default_type}",
vartype="member",
condition={"default_type": "port"},
)
)
params.append(
VersionedParamPath(
"default_ip_protocol",
path="default/{default_type}",
condition={"default_type": "ident-by-ip-protocol"},
)
)
params.append(
VersionedParamPath(
"default_icmp_type",
path="default/{default_type}/type",
vartype="int",
condition={
"default_type": ["ident-by-icmp-type", "ident-by-icmp6-type"]
},
)
)
params.append(
VersionedParamPath(
"default_icmp_code",
path="default/{default_type}/code",
vartype="int",
condition={
"default_type": ["ident-by-icmp-type", "ident-by-icmp6-type"]
},
)
)
params.append(VersionedParamPath("parent_app", path="parent-app"))
params.append(VersionedParamPath("timeout", path="timeout", vartype="int"))
params.append(
VersionedParamPath("tcp_timeout", path="tcp-timeout", vartype="int")
)
params.append(
VersionedParamPath("udp_timeout", path="udp-timeout", vartype="int")
)
params.append(
VersionedParamPath(
"tcp_half_closed_timeout", path="tcp-half-closed-timeout", vartype="int"
)
)
params.append(
VersionedParamPath(
"tcp_time_wait_timeout", path="tcp-time-wait-timeout", vartype="int"
)
)
params.append(
VersionedParamPath(
"evasive_behavior", path="evasive-behavior", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"consume_big_bandwidth", path="consume-big-bandwidth", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"used_by_malware", path="used-by-malware", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"able_to_transfer_file", path="able-to-transfer-file", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"has_known_vulnerability",
path="has-known-vulnerability",
vartype="yesno",
)
)
params.append(
VersionedParamPath(
"tunnel_other_application",
path="tunnel-other-application",
vartype="yesno",
)
)
params.append(
VersionedParamPath(
"tunnel_applications", path="tunnel-applications", vartype="member"
)
)
params.append(
VersionedParamPath(
"prone_to_misuse", path="prone-to-misuse", vartype="yesno"
)
)
params.append(
VersionedParamPath("pervasive_use", path="pervasive-use", vartype="yesno")
)
params.append(
VersionedParamPath(
"file_type_ident", path="file-type-ident", vartype="yesno"
)
)
params.append(
VersionedParamPath("virus_ident", path="virus-ident", vartype="yesno")
)
params.append(
VersionedParamPath("data_ident", path="data-ident", vartype="yesno")
)
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
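# A minimal sketch of a custom application definition, assuming a Firewall "fw".
# The category/subcategory/technology strings and port are placeholders and must
# be values PAN-OS itself accepts (see the applipedia reference above).
def _example_application_object_usage(fw):
    app = ApplicationObject(
        "internal-billing-app",
        category="business-systems",
        subcategory="erp-crm",
        technology="client-server",
        risk=2,
        default_type="port",
        default_port=["tcp/8443"],
        description="In-house billing application",
    )
    fw.add(app).create()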
class ApplicationGroup(VersionedPanObject):
"""ApplicationGroup Object
Args:
name (str): Name of the object
value (list): List of application values
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/application-group")
# params
params = []
params.append(VersionedParamPath("value", path="members", vartype="member"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
class ApplicationFilter(VersionedPanObject):
"""ApplicationFilter Object
Args:
name (str): Name of the object
category (list): Application category
subcategory (list): Application subcategory
technology (list): Application technology
risk (list): Application risk
evasive (bool):
excessive_bandwidth_use (bool):
prone_to_misuse (bool):
is_saas (bool):
transfers_files (bool):
tunnels_other_apps (bool):
used_by_malware (bool):
has_known_vulnerabilities (bool):
pervasive (bool):
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/application-filter")
# params
params = []
params.append(VersionedParamPath("category", path="category", vartype="member"))
params.append(
VersionedParamPath("subcategory", path="subcategory", vartype="member")
)
params.append(
VersionedParamPath("technology", path="technology", vartype="member")
)
params.append(VersionedParamPath("risk", path="risk", vartype="member"))
params.append(VersionedParamPath("evasive", path="evasive", vartype="yesno"))
params.append(
VersionedParamPath(
"excessive_bandwidth_use",
path="excessive-bandwidth-use",
vartype="yesno",
)
)
params.append(
VersionedParamPath(
"prone_to_misuse", path="prone-to-misuse", vartype="yesno"
)
)
params.append(VersionedParamPath("is_saas", path="is-saas", vartype="yesno"))
params.append(
VersionedParamPath(
"transfers_files", path="transfers-files", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"tunnels_other_apps", path="tunnels-other-apps", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"used_by_malware", path="used-by-malware", vartype="yesno"
)
)
params.append(
VersionedParamPath(
"has_known_vulnerabilities",
path="has-known-vulnerabilities",
vartype="yesno",
)
)
params.append(
VersionedParamPath("pervasive", path="pervasive", vartype="yesno")
)
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
class ApplicationContainer(VersionedPanObject):
"""ApplicationContainer object
This is a special class that is used in the predefined module.
It acts much like an ApplicationGroup object but exists only
in the predefined context. It is essentially the mechanism Palo Alto
Networks uses to group predefined applications together.
Args:
applications (list): List of member applications
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/application-container")
# params
params = []
params.append(
VersionedParamPath("applications", path="functions", vartype="member")
)
self._params = tuple(params)
class SecurityProfileGroup(VersionedPanObject):
"""Security Profile Group object
Args:
name (str): The group name
virus (str): Antivirus profile
spyware (str): Anti-spyware profile
vulnerability (str): Vulnerability protection profile
url_filtering (str): URL filtering profile
file_blocking (str): File blocking profile
data_filtering (str): Data filtering profile
wildfire_analysis (str): WildFire analysis profile
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/profile-group")
# params
params = []
params.append(VersionedParamPath("virus", path="virus", vartype="member"))
params.append(VersionedParamPath("spyware", path="spyware", vartype="member"))
params.append(
VersionedParamPath("vulnerability", path="vulnerability", vartype="member")
)
params.append(
VersionedParamPath("url_filtering", path="url-filtering", vartype="member")
)
params.append(
VersionedParamPath("file_blocking", path="file-blocking", vartype="member")
)
params.append(
VersionedParamPath(
"data_filtering", path="data-filtering", vartype="member"
)
)
params.append(
VersionedParamPath(
"wildfire_analysis", path="wildfire-analysis", vartype="member"
)
)
self._params = tuple(params)
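# A short sketch of a security profile group that bundles the built-in "default"
# profiles, assuming a Firewall "fw".  The profile names are placeholders and
# must match profiles that already exist on the device.
def _example_security_profile_group_usage(fw):
    grp = SecurityProfileGroup(
        "standard-protection",
        virus="default",
        spyware="default",
        vulnerability="default",
        url_filtering="default",
    )
    fw.add(grp).create()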
class CustomUrlCategory(VersionedPanObject):
"""Custom url category group
Args:
name (str): The name
url_value (list): Values to include in custom URL category object
description (str): Description of this object
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/profiles/custom-url-category")
# params
params = []
params.append(VersionedParamPath("url_value", path="list", vartype="member"))
params.append(VersionedParamPath("description", path="description"))
self._params = tuple(params)
class LogForwardingProfile(VersionedPanObject):
"""A log forwarding profile.
Note: This is valid for PAN-OS 8.0+
Args:
name (str): The name
description (str): The description
enhanced_logging (bool): (PAN-OS 8.1+) Enable enhanced application
logging
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
CHILDTYPES = ("objects.LogForwardingProfileMatchList",)
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/log-settings/profiles")
# params
params = []
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("enhanced_logging", exclude=True))
params[-1].add_profile(
"8.1.0", vartype="yesno", path="enhanced-application-logging"
)
self._params = tuple(params)
class LogForwardingProfileMatchList(VersionedPanObject):
"""A log forwarding profile match list entry.
Note: This is valid for PAN-OS 8.0+
Args:
name (str): The name
description (str): Description
log_type (str): Log type. Valid values are traffic, threat, wildfire,
url, data, gtp, tunnel, auth, or sctp (PAN-OS 8.1+).
filter (str): The filter.
send_to_panorama (bool): Whether to also send matching logs to Panorama
snmp_profiles (str/list): List of SnmpServerProfiles.
email_profiles (str/list): List of EmailServerProfiles.
syslog_profiles (str/list): List of SyslogServerProfiles.
http_profiles (str/list): List of HttpServerProfiles.
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
CHILDTYPES = ("objects.LogForwardingProfileMatchListAction",)
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/match-list")
# params
params = []
params.append(VersionedParamPath("description", path="action-desc"))
params.append(
VersionedParamPath(
"log_type",
path="log-type",
values=[
"traffic",
"threat",
"wildfire",
"url",
"data",
"gtp",
"tunnel",
"auth",
],
)
)
params[-1].add_profile(
"8.1.0",
path="log-type",
values=[
"traffic",
"threat",
"wildfire",
"url",
"data",
"gtp",
"tunnel",
"auth",
"sctp",
],
)
params.append(VersionedParamPath("filter", path="filter"))
params.append(
VersionedParamPath(
"send_to_panorama", vartype="yesno", path="send-to-panorama"
)
)
params.append(
VersionedParamPath("snmp_profiles", vartype="member", path="send-snmptrap")
)
params.append(
VersionedParamPath("email_profiles", vartype="member", path="send-email")
)
params.append(
VersionedParamPath("syslog_profiles", vartype="member", path="send-syslog")
)
params.append(
VersionedParamPath("http_profiles", vartype="member", path="send-http")
)
self._params = tuple(params)
class LogForwardingProfileMatchListAction(VersionedPanObject):
"""Action for a log forwarding profile match list entry.
Note: This is valid for PAN-OS 8.0+
Args:
name (str): The name
action_type (str): Action type. Valid values are tagging (default)
or (PAN-OS 8.1+) integration.
action (str): The action. Valid values are add-tag, remove-tag, or
(PAN-OS 8.1+) Azure-Security-Center-Integration.
target (str): The target. Valid values are source-address or
destination-address.
registration (str): Registration. Valid values are localhost,
panorama, or remote.
http_profile (str): The HTTP profile for registration of "remote".
tags (str/list): List of administrative tags.
timeout (int): (PAN-OS 9.0+) Timeout in minutes
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/actions")
# params
params = []
params.append(
VersionedParamPath(
"action_type",
default="tagging",
values=["tagging",],
path="type/{action_type}",
)
)
params[-1].add_profile(
"8.1.0", values=["tagging", "integration"], path="type/{action_type}"
)
params.append(
VersionedParamPath(
"action",
path="type/{action_type}/action",
values=["add-tag", "remove-tag"],
)
)
params[-1].add_profile(
"8.1.0",
path="type/{action_type}/action",
values=["Azure-Security-Center-Integration", "add-tag", "remove-tag"],
)
params.append(
VersionedParamPath(
"target",
path="type/{action_type}/target",
condition={"action_type": "tagging"},
values=["source-address", "destination-address"],
)
)
params.append(
VersionedParamPath(
"registration",
values=["localhost", "panorama", "remote"],
condition={"action_type": "tagging"},
path="type/{action_type}/registration/{registration}",
)
)
params.append(
VersionedParamPath(
"http_profile",
condition={"action_type": "tagging", "registration": "remote"},
path="type/{action_type}/registration/{registration}/http-profile",
)
)
params.append(
VersionedParamPath(
"tags",
condition={"action_type": "tagging"},
vartype="member",
path="type/{action_type}/tags",
)
)
params.append(VersionedParamPath("timeout", exclude=True))
params[-1].add_profile(
"9.0.0",
vartype="int",
path="type/{action_type}/timeout",
condition={"action_type": "tagging"},
)
self._params = tuple(params)
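# A minimal sketch of the three-level log forwarding hierarchy (profile ->
# match list -> action), assuming PAN-OS 8.0+ and a Firewall "fw".  The syslog
# server profile name and tag are placeholders.
def _example_log_forwarding_usage(fw):
    profile = LogForwardingProfile("lfp-default", description="Forward traffic logs")
    fw.add(profile)
    match = profile.add(
        LogForwardingProfileMatchList(
            "all-traffic",
            log_type="traffic",
            syslog_profiles=["corp-syslog"],
            send_to_panorama=True,
        )
    )
    match.add(
        LogForwardingProfileMatchListAction(
            "tag-source",
            action_type="tagging",
            action="add-tag",
            target="source-address",
            registration="localhost",
            tags=["seen-in-traffic"],
        )
    )
    profile.create()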
class DynamicUserGroup(VersionedPanObject):
"""Dynamic user group.
Note: PAN-OS 9.1+
Args:
name (str): Name of the dynamic user group
description (str): Description of this object
filter (str): Tag-based filter.
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/dynamic-user-group")
# params
params = []
params.append(VersionedParamPath("description", path="description"))
params.append(VersionedParamPath("filter", path="filter"))
params.append(VersionedParamPath("tag", path="tag", vartype="member"))
self._params = tuple(params)
class ScheduleObject(VersionedPanObject):
"""Schedule Object
"Date and Time Range" Example: 2019/11/01@00:15-2019/11/28@00:30
"Time Range" Example: 17:00-19:00
Args:
name (str): Name of the object
disable_override (bool): "True" to set disable-override
type (str): Type of Schedule: "recurring" or "non-recurring"
non_recurring_date_time (list/str): "Date and Time Range" string for a non-recurring schedule
recurrence (str): "daily" or "weekly" recurrence
daily_time (list/str): "Time Range" for a daily recurring schedule
weekly_sunday_time (list/str): "Time Range" for a weekly recurring schedule (Sunday)
weekly_monday_time (list/str): "Time Range" for a weekly recurring schedule (Monday)
weekly_tuesday_time (list/str): "Time Range" for a weekly recurring schedule (Tuesday)
weekly_wednesday_time (list/str): "Time Range" for a weekly recurring schedule (Wednesday)
weekly_thursday_time (list/str): "Time Range" for a weekly recurring schedule (Thursday)
weekly_friday_time (list/str): "Time Range" for a weekly recurring schedule (Friday)
weekly_saturday_time (list/str): "Time Range" for a weekly recurring schedule (Saturday)
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/schedule")
# params
params = []
params.append(
VersionedParamPath(
"disable_override", vartype="yesno", path="disable-override"
)
)
params.append(
VersionedParamPath(
"type",
path="schedule-type/{type}",
values=["recurring", "non-recurring"],
)
)
params.append(
VersionedParamPath(
"non_recurring_date_time",
path="schedule-type/{type}",
vartype="member",
condition={"type": "non-recurring"},
)
)
params.append(
VersionedParamPath(
"recurrence",
path="schedule-type/{type}/{recurrence}",
values=["weekly", "daily"],
condition={"type": "recurring"},
)
)
params.append(
VersionedParamPath(
"daily_time",
path="schedule-type/{type}/{recurrence}",
vartype="member",
condition={"type": "recurring", "recurrence": "daily"},
)
)
params.append(
VersionedParamPath(
"weekly_sunday_time",
path="schedule-type/{type}/{recurrence}/sunday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_monday_time",
path="schedule-type/{type}/{recurrence}/monday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_tuesday_time",
path="schedule-type/{type}/{recurrence}/tuesday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_wednesday_time",
path="schedule-type/{type}/{recurrence}/wednesday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_thursday_time",
path="schedule-type/{type}/{recurrence}/thursday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_friday_time",
path="schedule-type/{type}/{recurrence}/friday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
params.append(
VersionedParamPath(
"weekly_saturday_time",
path="schedule-type/{type}/{recurrence}/saturday",
vartype="member",
condition={"type": "recurring", "recurrence": "weekly"},
)
)
self._params = tuple(params)
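# A short sketch of both schedule flavours, assuming a Firewall "fw".  The time
# strings follow the "Date and Time Range" / "Time Range" formats shown in the
# class docstring above.
def _example_schedule_object_usage(fw):
    maintenance = ScheduleObject(
        "maintenance-window",
        type="non-recurring",
        non_recurring_date_time=["2019/11/01@00:15-2019/11/28@00:30"],
    )
    evenings = ScheduleObject(
        "weekday-evenings",
        type="recurring",
        recurrence="weekly",
        weekly_monday_time=["17:00-19:00"],
        weekly_friday_time=["17:00-19:00"],
    )
    fw.add(maintenance).create()
    fw.add(evenings).create()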
class Region(VersionedPanObject):
"""Region.
Args:
name (str): Name of the region
address (list): List of IP networks
latitude (float): Latitude of the region
longitude (float): Longitude of the region
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value="/region")
# params
params = []
params.append(
VersionedParamPath("address", path="address", vartype="member")
)
params.append(VersionedParamPath("latitude", path="geo-location/latitude", vartype="float"))
params.append(VersionedParamPath("longitude", path="geo-location/longitude", vartype="float"))
self._params = tuple(params)
| 30.842256
| 102
| 0.55953
|
235a662507bf499d614230feff5d2d8c53687d81
| 5,584
|
py
|
Python
|
test/functional/mining.py
|
lavajumper/viacoin
|
b2f0445a3ef596c9830475f20ecbc7c18371d50d
|
[
"MIT"
] | 30
|
2015-11-01T22:55:25.000Z
|
2021-11-12T12:08:43.000Z
|
test/functional/mining.py
|
OpenBlock-Org/OpenBlock
|
3e070699415be363577af3b6a21785e6a23f8a40
|
[
"MIT"
] | 2
|
2017-06-29T15:16:04.000Z
|
2019-01-09T00:07:33.000Z
|
test/functional/mining.py
|
OpenBlock-Org/OpenBlock
|
3e070699415be363577af3b6a21785e6a23f8a40
|
[
"MIT"
] | 16
|
2015-06-06T19:28:51.000Z
|
2021-03-22T06:35:59.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.08333333333333333'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"])) # + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
| 41.058824
| 141
| 0.680874
|
51a7fd4f7d20c079fcd104fdb265443ef64bc4aa
| 433
|
py
|
Python
|
Builder/meal.py
|
HOWZ1T/learning-design-patterns
|
73a844f9d8ea00bae711fb0d90b93ca652b2b039
|
[
"MIT"
] | 1
|
2018-09-24T12:05:06.000Z
|
2018-09-24T12:05:06.000Z
|
Builder/meal.py
|
HOWZ1T/learning-design-patterns
|
73a844f9d8ea00bae711fb0d90b93ca652b2b039
|
[
"MIT"
] | null | null | null |
Builder/meal.py
|
HOWZ1T/learning-design-patterns
|
73a844f9d8ea00bae711fb0d90b93ca652b2b039
|
[
"MIT"
] | null | null | null |
class Meal:
def __init__(self):
self._items = []
def add_item(self, item):
self._items.append(item)
def get_cost(self):
cost = float(0)
for item in self._items:
cost += item.price()
return cost
def show_items(self):
for item in self._items:
print("Item: {}, Packing: {}, Price: {}".format(item.name(), item.packing().pack(), item.price()))
| 22.789474
| 110
| 0.542725
|
14d2a169c0280a98f90192f34e81908cc47735e7
| 2,222
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_DocDBTLS.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/terraform/checks/resource/aws/test_DocDBTLS.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/terraform/checks/resource/aws/test_DocDBTLS.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import unittest
import hcl2
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.DocDBTLS import check
class TestDocDBTLS(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "aws_docdb_cluster_parameter_group" "test" {
family = "docdb3.6"
name = "test"
description = "docdb cluster parameter group"
parameter {
name = "tls"
value = "disabled"
}
parameter {
name = "other-param"
value = "enabled"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_docdb_cluster_parameter_group']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_no_parameters(self):
hcl_res = hcl2.loads("""
resource "aws_docdb_cluster_parameter_group" "test" {
family = "docdb3.6"
name = "test"
description = "docdb cluster parameter group"
}
""")
resource_conf = hcl_res['resource'][0]['aws_docdb_cluster_parameter_group']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_with_parameters(self):
hcl_res = hcl2.loads("""
resource "aws_docdb_cluster_parameter_group" "test" {
family = "docdb3.6"
name = "test"
description = "docdb cluster parameter group"
parameter {
name = "tls"
value = "enabled"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_docdb_cluster_parameter_group']['test']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 34.71875
| 91
| 0.541854
|
f421c9e0c5f92c5168662618ffcef1f4bc9acc10
| 1,102
|
py
|
Python
|
djangorestlogger/settings.py
|
pedrorodriguesgomes/django-rest-logger
|
01152aad9854b02e8b76404e2960632d72e4b0e3
|
[
"BSD-3-Clause"
] | null | null | null |
djangorestlogger/settings.py
|
pedrorodriguesgomes/django-rest-logger
|
01152aad9854b02e8b76404e2960632d72e4b0e3
|
[
"BSD-3-Clause"
] | null | null | null |
djangorestlogger/settings.py
|
pedrorodriguesgomes/django-rest-logger
|
01152aad9854b02e8b76404e2960632d72e4b0e3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
LOGGING = {
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['rest_logger_handler'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'rest_logger_handler': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['rest_logger_handler'],
'propagate': False,
},
'django_rest_logger': {
'level': 'DEBUG',
'handlers': ['rest_logger_handler'],
'propagate': False,
},
},
}
LOGGING_SETTINGS = getattr(settings, 'LOGGING', LOGGING)
DEFAULT_LOGGER = getattr(settings, 'DEFAULT_LOGGER', 'django_rest_logger')
LOGGER_EXCEPTION = DEFAULT_LOGGER
LOGGER_ERROR = DEFAULT_LOGGER
LOGGER_WARNING = DEFAULT_LOGGER
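# A short sketch of how application code would typically pick up the logger
# configured above, assuming Django settings are configured so this module
# imports cleanly; DEFAULT_LOGGER normally resolves to 'django_rest_logger'.
def _example_usage():
    import logging

    log = logging.getLogger(DEFAULT_LOGGER)
    log.warning("request rejected")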
| 26.238095
| 74
| 0.533575
|
65f441cdad0bb6b84b52c967c4739206b0747cb5
| 1,237
|
py
|
Python
|
coremltools/converters/mil/mil/passes/divide_to_multiply.py
|
seibert/coremltools
|
609188ebcfee2178293f0d4e93a5af2995c88645
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/mil/passes/divide_to_multiply.py
|
seibert/coremltools
|
609188ebcfee2178293f0d4e93a5af2995c88645
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/mil/passes/divide_to_multiply.py
|
seibert/coremltools
|
609188ebcfee2178293f0d4e93a5af2995c88645
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil import Builder as mb
def divide_to_multiply_block(block):
for op in list(block.operations):
for b in op.blocks:
divide_to_multiply_block(b)
if len(op.blocks) > 0:
# This op can't be divide.
continue
if op.op_type == "real_div" and op.y.val is not None:
with block:
x = mb.mul(
x=op.x, y=1.0 / op.y.val, name="_inversed_" + op.name, before_op=op
)
op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=op, old_var=op.outputs[0], new_var=x
)
block.remove_ops([op])
@register_pass(namespace="common")
def divide_to_multiply(prog):
"""
Convert divide into multiply if divisor is const.
"""
for f in prog.functions.values():
divide_to_multiply_block(f)
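# A small numeric check of the rewrite this pass performs: dividing by a
# constant y is replaced with multiplying by 1/y, which is equivalent up to
# floating-point rounding.  This snippet only illustrates the arithmetic; it
# does not exercise the MIL builder above.
if __name__ == "__main__":
    import numpy as np

    x = np.array([3.0, -7.5, 0.25], dtype=np.float32)
    y = 4.0
    assert np.allclose(x / y, x * (1.0 / y))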
| 31.717949
| 87
| 0.616815
|
f6a29858f1dbdc07464cbacbc33fb41ee31eb892
| 18,609
|
py
|
Python
|
qa/rpc-tests/bip68-sequence.py
|
JSKitty/QuantisNet-Core
|
75c66b11e29ea0597965471505e5da552d900d49
|
[
"MIT"
] | 21
|
2019-06-03T22:24:33.000Z
|
2021-04-10T14:14:26.000Z
|
qa/rpc-tests/bip68-sequence.py
|
JSKitty/QuantisNet-Core
|
75c66b11e29ea0597965471505e5da552d900d49
|
[
"MIT"
] | 3
|
2019-08-22T16:21:45.000Z
|
2020-02-26T15:19:21.000Z
|
qa/rpc-tests/bip68-sequence.py
|
JSKitty/QuantisNet-Core
|
75c66b11e29ea0597965471505e5da552d900d49
|
[
"MIT"
] | 4
|
2019-06-06T10:49:06.000Z
|
2019-06-29T07:46:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The QuantisNet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
self.is_network_split = False
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Generate some coins
self.nodes[0].generate(110)
print("Running test disable flag")
self.test_disable_flag()
print("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
print("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
print("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
print("Verifying nVersion=2 transactions aren't standard")
self.test_version2_relay(before_activation=True)
print("Activating BIP68 (and 112/113)")
self.activateCSV()
print("Verifying nVersion=2 transactions are now standard")
self.test_version2_relay(before_activation=False)
print("Passed\n")
# Test that BIP68 is not in effect if tx version is 1, or if
# the most significant bit of the sequence number (the disable flag) is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx2))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
try:
self.nodes[0].sendrawtransaction(rawtx)
except JSONRPCException as exp:
assert(not should_pass and using_sequence_locks)
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(should_pass or not using_sequence_locks)
# Recalculate utxos if we successfully sent the transaction
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
try:
node.sendrawtransaction(ToHex(tx))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
assert(orig_tx.hash in node.getrawmempool())
else:
# orig_tx must not be in mempool
assert(orig_tx.hash not in node.getrawmempool())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
cur_time = get_mocktime()
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
try:
self.nodes[0].sendrawtransaction(raw_tx5)
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(get_mocktime())
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx3))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1), get_mocktime() + 600)
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert(height < 432)
self.nodes[0].generate(432-height)
assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
sync_blocks(self.nodes)
# Use self.nodes[1] to test standardness relay policy
def test_version2_relay(self, before_activation):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
try:
tx_id = self.nodes[1].sendrawtransaction(tx_signed)
assert(before_activation == False)
except:
assert(before_activation)
if __name__ == '__main__':
BIP68Test().main()
| 43.076389
| 119
| 0.627223
|
fa4366ff0789a3d05c26479a746a18dfcf7e902b
| 46,021
|
py
|
Python
|
tensorflow/compiler/xla/python/xla_client.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 3
|
2018-09-25T00:35:34.000Z
|
2018-09-25T00:38:06.000Z
|
tensorflow/compiler/xla/python/xla_client.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 1
|
2019-08-22T20:29:33.000Z
|
2019-12-19T10:16:21.000Z
|
tensorflow/compiler/xla/python/xla_client.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 2
|
2020-01-31T04:19:45.000Z
|
2020-03-06T16:33:00.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An in-process, local XLA client in Python, supporting AOT compilation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum # pylint: disable=g-bad-import-order
import inspect
import itertools
import os
import numpy as np
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.compiler.xla.python import pywrap_xla as c_api
from tensorflow.compiler.xla.service import hlo_pb2
# Most functions are snake_case for consistency with other modules, whereas
# method names of ComputationBuilder and LocalComputation are CamelCase for
# consistency with XLA.
# pylint: disable=invalid-name
_OP_METADATA_FIELDS = [
'op_type',
'op_name',
'source_file',
'source_line',
]
OpMetadata = collections.namedtuple('OpMetadata', _OP_METADATA_FIELDS)
def OpMetadataToProto(pyobj):
proto = xla_data_pb2.OpMetadata()
for field in _OP_METADATA_FIELDS:
attr = getattr(pyobj, field)
if attr is not None:
setattr(proto, field, attr)
return proto
def CurrentSourceInfoMetadata(op_type=None, op_name=None, skip_frames=1):
"""Helper for use in source mapping that returns an OpMetadata object."""
full_filename, lineno = inspect.stack()[skip_frames][1:3]
filename = os.path.basename(full_filename)
return OpMetadata(
op_type=op_type,
op_name=op_name,
source_file=filename,
source_line=lineno)
class PaddingType(enum.Enum):
VALID = 1
SAME = 2
def _convert_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims,
window_strides):
"""Maps PaddingType (VALID or SAME) to pad values (list of pairs of ints)."""
if padding_type == PaddingType.VALID:
return [(0, 0)] * len(window_strides)
out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int)
pad_sizes = [max((out_size - 1) * stride + filter_size - in_size, 0)
for out_size, stride, filter_size, in_size
in zip(out_shape, window_strides, rhs_dims, lhs_dims)]
return [(pad_size // 2, pad_size - pad_size // 2)
for pad_size in pad_sizes]
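# A worked example of the SAME-padding arithmetic above, kept standalone so it
# does not depend on the compiled XLA bindings: for an input of length 10, a
# filter of length 3 and stride 2, the output length is ceil(10/2) = 5 and the
# total pad is max((5-1)*2 + 3 - 10, 0) = 1, split as (0, 1) low/high.
if __name__ == "__main__":
    lhs, rhs, stride = 10, 3, 2
    out = -(-lhs // stride)                      # ceiling division
    pad = max((out - 1) * stride + rhs - lhs, 0)
    assert (pad // 2, pad - pad // 2) == (0, 1)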
_UNARY_OPS = [
'Not',
'Abs',
'Exp',
'Expm1',
'Floor',
'Round',
'Ceil',
'Log',
'Log1p',
'Sign',
'Cos',
'Sin',
'Tanh',
'IsFinite',
'Sqrt',
'Rsqrt',
'Square',
'Reciprocal',
'Neg',
'Erf',
'Erfc',
'ErfInv',
'Lgamma',
'Digamma',
'Acos',
'Asin',
'Atan',
'Tan',
'Acosh',
'Asinh',
'Atanh',
'Cosh',
'Sinh',
'Real',
'Imag',
'Conj',
]
_BINARY_OPS = [
'Eq',
'Ne',
'Ge',
'Gt',
'Lt',
'Le',
'Add',
'Sub',
'Mul',
'Div',
'Rem',
'Max',
'Min',
'And',
'Or',
'Xor',
'Pow',
'ShiftLeft',
'ShiftRightArithmetic',
'ShiftRightLogical',
'Atan2',
'Complex',
]
XLA_ELEMENT_TYPE_TO_DTYPE = {
xla_data_pb2.PRED: np.dtype('bool'),
xla_data_pb2.S8: np.dtype('int8'),
xla_data_pb2.S16: np.dtype('int16'),
xla_data_pb2.S32: np.dtype('int32'),
xla_data_pb2.S64: np.dtype('int64'),
xla_data_pb2.U8: np.dtype('uint8'),
xla_data_pb2.U16: np.dtype('uint16'),
xla_data_pb2.U32: np.dtype('uint32'),
xla_data_pb2.U64: np.dtype('uint64'),
xla_data_pb2.F16: np.dtype('float16'),
xla_data_pb2.F32: np.dtype('float32'),
xla_data_pb2.F64: np.dtype('float64'),
xla_data_pb2.C64: np.dtype('complex64'),
xla_data_pb2.TUPLE: np.dtype(np.object),
}
# Note the conversion on the key. Numpy has a known issue wherein dtype hashing
# doesn't work as expected (https://github.com/numpy/numpy/issues/7242). Thus,
# when keying by dtype in this dict, we use the string form of dtypes.
DTYPE_TO_XLA_ELEMENT_TYPE = {str(dt): et
for et, dt in XLA_ELEMENT_TYPE_TO_DTYPE.items()}
def dtype_to_etype(dtype):
"""Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE."""
return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]
class LocalBuffer(object):
"""Represents a handle to data owned by XLA.
The referent is ready for use in executing a local, compiled
Computation. On XLA platforms involving a device (e.g. GPU), this
means the referent is in device memory.
"""
def __init__(self, c_local_shaped_buffer):
self.c_local_shaped_buffer = c_local_shaped_buffer
self._delete = c_api.DeleteLocalShapedBuffer
@staticmethod
def from_pyval(pyval, layout_fn=None):
pyval = require_numpy_array_layout(pyval)
if layout_fn:
shape = Shape.from_pyval(pyval)
shape = shape.map_leaves(layout_fn)
else:
shape = None
return LocalBuffer(c_api.LocalShapedBuffer.FromLiteral(pyval, shape))
def to_py(self):
return self.c_local_shaped_buffer.ToLiteral()
def delete(self):
if self.c_local_shaped_buffer is not None:
self._delete(self.c_local_shaped_buffer)
self.c_local_shaped_buffer = None
def destructure(self):
assert self.c_local_shaped_buffer is not None
result = c_api.DestructureLocalShapedBufferTuple(self.c_local_shaped_buffer)
self.c_local_shaped_buffer = None
size = result.size()
destructured = tuple(LocalBuffer(result.Release(i)) for i in range(size))
return destructured
def is_deleted(self):
return self.c_local_shaped_buffer is None
def __del__(self):
self.delete()
class Shape(object):
"""Represents an XLA shape.
A shape is either an array shape, having rank-many integer
dimensions and an element type (represented by a Numpy dtype), or it
is a tuple shape, having a shape for every tuple component:
type shape =
TupleShape of shape list
| ArrayShape of { dimensions: int list; element_type: dtype }
Callers are expected to instantiate this class only via the static
constructors: tuple_shape, array_shape, and from_pyval.
"""
@staticmethod
def tuple_shape(tuple_shapes):
"""Construct a tuple shape."""
if (not isinstance(tuple_shapes, (tuple, list)) or
not all(isinstance(t, Shape) for t in tuple_shapes)):
raise TypeError('tuple_shapes must be a tuple of Shapes')
return Shape(tuple_shapes, tuple)
@staticmethod
def array_shape(element_type, dimensions, minor_to_major=None):
"""Construct an array shape."""
if (not isinstance(dimensions, tuple) or
not all(isinstance(i, int) for i in dimensions)):
dimensions = tuple(int(i) for i in dimensions)
return Shape(dimensions, np.dtype(element_type),
minor_to_major=minor_to_major)
@staticmethod
def from_pyval(pyval):
def convert(pyval):
if isinstance(pyval, tuple):
return Shape.tuple_shape(tuple(convert(elt) for elt in pyval))
else:
pyval = require_numpy_array_layout(pyval)
return Shape.array_shape(pyval.dtype, np.shape(pyval))
return convert(pyval)
def __init__(self, dimensions, dtype, minor_to_major=None):
assert isinstance(dimensions, tuple)
self._dimensions = dimensions
self._dtype = dtype
self._is_tuple = dtype == tuple
self._minor_to_major = minor_to_major
self._check_minor_to_major()
def __eq__(self, other):
# pylint: disable=protected-access
return (self._dtype == other._dtype and
self._dimensions == other._dimensions and
self._minor_to_major == other._minor_to_major)
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('xla_client.Shape(_dtype={!r}, _dimensions={!r}, '
'_is_tuple={!r}, _minor_to_major={!r})').format(
self._dtype, self._dimensions, self._is_tuple,
self._minor_to_major)
def is_tuple(self):
return self._is_tuple
def is_array(self):
return not self._is_tuple
def tuple_shapes(self):
if not self.is_tuple():
raise ValueError('not a tuple shape')
return self._dimensions
def numpy_dtype(self):
"""Like element_type(), but returns dtype('O') in case of a tuple shape."""
if self.is_tuple():
return np.dtype(np.object)
else:
return self.element_type()
def xla_element_type(self):
return DTYPE_TO_XLA_ELEMENT_TYPE[str(self.numpy_dtype())]
def element_type(self):
if not self.is_array():
raise ValueError('not an array shape')
return self._dtype
def dimensions(self):
if not self.is_array():
raise ValueError('not an array shape')
return self._dimensions
def rank(self):
return len(self.dimensions())
def minor_to_major(self):
return self._minor_to_major
def map_leaves(self, f):
"""Map f over each leaf-level array subshape.
Args:
f: The function to apply. Whenever f returns None, the identity is
applied instead.
Returns:
A new Shape with the mapped leaves.
"""
if self.is_tuple():
children = tuple(child.map_leaves(f) for child in self.tuple_shapes())
return Shape.tuple_shape(children)
else:
mapped = f(self)
return self if mapped is None else mapped
def _check_minor_to_major(self):
mtm = self._minor_to_major
if self.is_tuple():
assert mtm is None, self
if mtm is not None:
assert self.rank() == len(mtm), self
assert sorted(mtm) == list(range(len(mtm))), self
def update_minor_to_major(self, minor_to_major):
if not self.is_array():
raise ValueError('not an array shape')
if not isinstance(minor_to_major, tuple):
raise TypeError('minor_to_major must be a tuple')
updated = Shape.array_shape(
self.element_type(), self.dimensions(), minor_to_major)
updated._check_minor_to_major() # pylint: disable=protected-access
return updated
def _wrap_shape(shape_info):
dtype, dims = shape_info
element_type = DTYPE_TO_XLA_ELEMENT_TYPE[str(dtype)]
if element_type == xla_data_pb2.TUPLE:
shapes = tuple(_wrap_shape(subshape_info) for subshape_info in dims)
return Shape.tuple_shape(shapes)
else:
return Shape.array_shape(dtype, dims)
def require_numpy_array_layout(value):
if isinstance(value, tuple):
return tuple(require_numpy_array_layout(x) for x in value)
else:
return np.require(value, requirements=['C', 'A'])
class CompileOptions(object):
"""Python object for XLA compile options.
These options can be passed to the 'compile' step when using a local XLA
client.
"""
def __init__(self):
self.generate_hlo_graph = None
self.dump_optimized_hlo_proto_to = None
self.dump_unoptimized_hlo_proto_to = None
self.dump_per_pass_hlo_proto_to = None
self.hlo_profile = False
def transfer_to_infeed(value, replica_number=None):
"""Transfers the given value into the XLA infeed queue.
XLA's infeed queue is a single queue that feeds the "XLA virtual machine" with
a totally ordered stream of values. This is dequeued from XLA computations via
the Infeed() operation.
Args:
value: the value that the caller would like to enqueue into the XLA infeed
queue
replica_number: the replica number to infeed the value to -- if not
provided, then the default replica (trivially replica 0) is used.
"""
if replica_number is None:
c_api.TransferToInfeedLocal(require_numpy_array_layout(value))
else:
c_api.TransferToInfeedLocalReplica(
require_numpy_array_layout(value), replica_number)
def transfer_from_outfeed(shape, replica_number=None):
"""Transfers a literal of the given shape from replica_number's outfeed.
Args:
shape: The shape of the value to transfer from outfeed.
replica_number: The replica number ordinal to transfer the outfeed value
from. (Each replica has a distinct outfeed queue.)
Returns:
The literal value that is produced from the outfeed queue.
"""
return c_api.TransferFromOutfeedLocalReplica(shape, replica_number or 0)
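# Illustrative host-side usage (assumption, not in the original file): a value
# destined for an Infeed() op in a computation can be enqueued with
#
#   transfer_to_infeed(np.float32(1.0))                    # default replica 0
#   transfer_to_infeed(np.float32(1.0), replica_number=1)  # explicit replica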
class LocalComputation(object):
"""Python wrapper for a local XLA Computation.
A LocalComputation can be executed if it is compiled. Otherwise, it
can still be used as a Computation where required by the
ComputationBuilder methods.
"""
def __init__(self, c_local_computation, is_compiled):
self.c_local_computation = c_local_computation
self.is_compiled = is_compiled
# Ensure a reference to C-based destructor for use in __del__.
if is_compiled:
assert isinstance(c_local_computation, c_api.CompiledLocalComputation)
self._delete = c_api.DeleteCompiledLocalComputation
else:
assert isinstance(c_local_computation, c_api.LocalComputation)
self._delete = c_api.DeleteLocalComputation
def GetProto(self):
"""Get the HloModuleProto proto object in this local computation.
Returns:
An HloModuleProto proto object that has the whole-graph information.
"""
serialized = self.c_local_computation.GetSerializedProto()
proto = hlo_pb2.HloModuleProto.FromString(serialized)
return proto
def Compile(self, argument_shapes=(), compile_options=None, layout_fn=None):
"""Compiles an un-compiled local computation.
Local computations are the result of a "LocalComputationBuild'ing" process
-- they start in uncompiled form, and via a call to Compile() turn into a
compiled local computation.
Raises:
ValueError: if this is already a compiled local computation.
Arguments:
argument_shapes: parameter shapes -- they are first laid out by layout_fn
if layout_fn is provided. Otherwise, the default layout for those shapes
will be used.
compile_options: options to use for compilation, includes an optional
laid out result shape for the computation.
layout_fn: lambda that is used to lay out the argument/result shapes.
Returns:
A newly *compiled* local computation instance.
"""
if self.is_compiled:
raise ValueError('Attempt to compile a compiled local XLA computation.')
result_shape = _wrap_shape(self.c_local_computation.GetReturnValueShape())
if layout_fn:
argument_shapes = [
shape.map_leaves(layout_fn) for shape in argument_shapes
]
result_shape = result_shape.map_leaves(layout_fn)
compile_options = compile_options or CompileOptions()
compile_options.result_shape = result_shape
return LocalComputation(
self.c_local_computation.Compile(argument_shapes, compile_options),
is_compiled=True)
def CompileWithExampleArguments(self,
arguments=(),
compile_options=None,
layout_fn=None):
return self.Compile(
argument_shapes=[Shape.from_pyval(arg) for arg in arguments],
compile_options=compile_options,
layout_fn=layout_fn)
def Execute(self, arguments=(), layout_fn=None):
"""Execute with Python values as arguments and return value."""
if not self.is_compiled:
raise ValueError('Cannot execute an uncompiled local XLA computation.')
argument_shapes = [Shape.from_pyval(arg) for arg in arguments]
if layout_fn:
argument_shapes = [
shape.map_leaves(layout_fn) for shape in argument_shapes
]
else:
argument_shapes = [None for shape in argument_shapes]
arguments = tuple(map(require_numpy_array_layout, arguments))
return self.c_local_computation.Execute(arguments, argument_shapes)
def ExecuteWithLocalBuffers(self, arguments=()):
"""Execute with LocalBuffer arguments and return value."""
if not self.is_compiled:
raise ValueError('Cannot execute an uncompiled local XLA computation.')
arguments = tuple(arguments)
if any(arg.is_deleted() for arg in arguments):
raise ValueError('Executing with deleted local buffer argument')
return LocalBuffer(
self.c_local_computation.ExecuteWithShapedBuffers(
[arg.c_local_shaped_buffer for arg in arguments]))
def __del__(self):
self._delete(self.c_local_computation)
class ComputationBuilder(object):
"""XLA computation builder.
Enqueues XLA ops in sequence and in order to build a
LocalComputation, which in turn can be compiled into a
CompiledLocalComputation, which in turn can be locally executed.
"""
# The methods of this class map 1-to-1 onto the XLA C++
# computation builder API. Therefore, there's no need to laboriously list
# arguments and return values for every method, especially where it's obvious.
#
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-doc-args
def __init__(self, name):
self._client = c_api.LocalComputationBuilder(name.encode('utf8'))
self._parameter_numbering = itertools.count()
def Build(self):
return LocalComputation(self._client.Build(), is_compiled=False)
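  # Illustrative sketch (not from the original source; it assumes a working
  # C-extension backend and that 'Add' is among the forwarded binary ops):
  #
  #   builder = ComputationBuilder('add_one')
  #   x = builder.ParameterFromNumpy(np.float32(0))
  #   builder.Add(x, builder.ConstantF32Scalar(1.0))
  #   compiled = builder.Build().CompileWithExampleArguments([np.float32(0)])
  #   result = compiled.Execute([np.float32(41)])   # expected: 42.0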
def SetOpMetadata(self, op_metadata):
"""Set metadata for operations that are about to be enqueued."""
self._client.SetOpMetadata(op_metadata)
def ClearOpMetadata(self):
"""Clear metadata for operations that are about to be enqueued."""
self._client.ClearOpMetadata()
def Infeed(self, shape):
"""Enqueues an infeed op onto the computation.
Infeed operations dequeue data of the given shape from the device's infeed
queue for subsequent use in the computation.
Returns:
A LocalOp.
"""
return self._client.Infeed(shape)
def Outfeed(self, operand):
"""Enqueues an outfeed op onto the computation.
Outfeed operations enqueue data, using the given operand, onto the XLA
outfeed queue for subsequent dequeue via the client API.
"""
self._client.Outfeed(operand, self.GetShape(operand), ''.encode('utf-8'))
def Constant(self, value):
"""Enqueues a constant op onto the computation.
Args:
value: value for the constant, as a np.array with an explicit dtype set
to one of the supported types.
Returns:
A LocalOp.
"""
value = require_numpy_array_layout(value)
return self._client.ConstantLiteral(value)
def ConstantF32Scalar(self, value):
"""Convenience method to enqueue a scalar F32 constant op.
Args:
value: a floating-point number.
Returns:
A LocalOp.
"""
return self.Constant(np.array(value, dtype=np.float32))
def ConstantF64Scalar(self, value):
"""Convenience method to enqueue a scalar F32 constant op.
Args:
value: a floating-point number.
Returns:
A LocalOp.
"""
return self.Constant(np.array(value, dtype=np.float64))
def ConstantS32Scalar(self, value):
"""Convenience method to enqueue a scalar S32 constant op.
Args:
      value: an integer.
Returns:
A LocalOp.
"""
return self.Constant(np.array(value, dtype=np.int32))
def ConstantS64Scalar(self, value):
"""Convenience method to enqueue a scalar S64 constant op.
Args:
      value: an integer.
Returns:
A LocalOp.
"""
return self.Constant(np.array(value, dtype=np.int64))
def ConstantPredScalar(self, value):
"""Convenience method to enqueue a scalar PRED constant op.
Args:
value: a boolean value.
Returns:
A LocalOp.
"""
return self.Constant(np.array(value, dtype=np.bool))
def ParameterWithShape(self, shape, name=None, parameter_num=None):
"""Enqueues a Parameter op onto the computation, given a shape.
Args:
shape: the parameter's shape as a Shape object.
name: optional string name for the parameter.
parameter_num: parameter number in the computation function. If None,
the next linear parameter number is used. The default value capability
can be used for auto-numbering. If you're using auto-numbering for some
parameters, use it for *all* parameters to avoid clashes.
Returns:
A LocalOp.
"""
if name is None:
name = ''
if parameter_num is None:
parameter_num = next(self._parameter_numbering)
return self._client.Parameter(parameter_num, shape, name.encode('utf8'))
def ParameterFromNumpy(self, value, name=None, parameter_num=None):
"""Enqueues a Parameter op onto the computation.
Args:
value: a Numpy array, or a nested tuple thereof, from which the
shape is inferred.
name: as in ParameterWithShape.
parameter_num: as in ParameterWithShape.
Returns:
A LocalOp.
"""
return self.ParameterWithShape(
Shape.from_pyval(value), name=name, parameter_num=parameter_num)
def Broadcast(self, operand, sizes):
"""Enqueues a broadcast operation onto the computation.
Args:
operand: the operand LocalOp to broadcast.
sizes: an iterable of broadcast sizes.
Returns:
A LocalOp representing the added broadcast op.
"""
return self._client.Broadcast(operand, sizes)
def Concatenate(self, operands, dimension):
"""Enqueues a concatenate operation onto the computation.
Args:
operands: the operands to concatenate.
dimension: the dimension in which to perform the concatenation.
Returns:
A LocalOp representing the added concatenate op.
"""
return self._client.ConcatInDim(operands, dimension)
def ConvertElementType(self, operand, new_element_type):
"""Enqueues an element type conversion operation onto the computation.
Args:
operand: the operand to convert.
new_element_type: the target primitive type.
Returns:
A LocalOp representing the added conversion op.
"""
return self._client.ConvertElementType(operand, new_element_type)
def BitcastConvertType(self, operand, new_element_type):
"""Enqueues a bitcast type conversion operation onto the computation.
Args:
operand: the operand to convert.
new_element_type: the target primitive type.
Returns:
A LocalOp representing the added conversion op.
"""
return self._client.BitcastConvertType(operand, new_element_type)
def GetShape(self, operand):
return _wrap_shape(self._client.GetShape(operand))
def GetReturnValueShape(self):
return _wrap_shape(self._client.GetReturnValueShape())
def GetComputationStats(self):
raise NotImplementedError()
def Pad(self, operand, padding_value, padding_config):
"""Enqueues a Pad operation onto the computation.
Args:
operand: LocalOp representing the array to pad.
padding_value: LocalOp representing the scalar pad value.
padding_config: either an xla_data_pb2.PaddingConfig or a list of integer
triples (edge_padding_low, edge_padding_high, interior_padding)
representing the configuration of the padding operation.
Returns:
A LocalOp representing the added Pad op.
"""
if not isinstance(padding_config, xla_data_pb2.PaddingConfig):
padding_config = GetPaddingConfigFromTriples(padding_config)
return self._client.Pad(operand, padding_value, padding_config)
def Reshape(self, operand, dimensions, new_sizes):
"""Enqueues a reshape op onto the computation.
Args:
operand: LocalOp representing the array to be reshaped.
dimensions: sequence of integers encoding the order in which dimensions
are collapsed or None, in which case dimensions are flattened in order.
new_sizes: sequence of integers encoding the new dimension sizes (shape).
Returns:
A LocalOp representing the added Reshape op.
"""
if dimensions is None:
ndim = len(self.GetShape(operand).dimensions())
dimensions = tuple(range(ndim))
return self._client.Reshape(operand, dimensions, new_sizes)
def CrossReplicaSum(self, operand):
"""CrossReplicaSum op.
Args:
operand: the operand to sum across replica instances.
Returns:
A LocalOp that has the sum of the value among all replicas.
"""
return self._client.CrossReplicaSum(operand)
def Collapse(self, operand, dimensions):
"""Collapse op."""
return self._client.Collapse(operand, dimensions)
def Trans(self, operand):
"""Specialized matrix transpose op."""
return self._client.Transpose(operand, [1, 0])
def Transpose(self, operand, permutation):
"""Transpose op."""
return self._client.Transpose(operand, permutation)
def Rev(self, operand, dimensions):
"""Rev op."""
return self._client.Rev(operand, dimensions)
def Clamp(self, min, operand, max): # pylint: disable=redefined-builtin
"""Clamp op."""
return self._client.Clamp(min, operand, max)
def SelectAndScatter(self, operand, select, window_dimensions, window_strides,
padding, source, init_value, scatter):
"""Select and scatter op, used by the gradient of ReduceWindow.
Args:
operand: LocalOp for array of dimension N and type T over
which the windows slide.
select: Computation of type (T, T) -> Pred to apply to the elements of
each window to indicate which element is selected.
window_dimensions: sequence of N integers for dimensions of the window.
window_strides: sequence of N integers for the strides of the window.
      padding: PaddingType representing either 'SAME' or 'VALID' padding.
source: LocalOp for array of type T with values to scatter.
init_value: LocalOp of scalar type T for initial out value.
scatter: Computation of type (T, T) -> T to apply to each scatter source
element with its destination element.
Returns:
A LocalOp representing the added SelectAndScatter op.
"""
pads = _convert_padding_type_to_pad_values(
padding, self.GetShape(operand).dimensions(),
window_dimensions, window_strides)
return self._client.SelectAndScatterWithGeneralPadding(
operand, select.c_local_computation, window_dimensions, window_strides,
pads, source, init_value, scatter.c_local_computation)
def Select(self, pred, on_true, on_false):
"""Element-wise selection op.
Constructs an output array from elements of two input arrays, based on the
values of a predicate array.
"""
return self._client.Select(pred, on_true, on_false)
def Slice(self, operand, start_indices, limit_indices, strides=None):
"""Enqueues a slice operation onto the computation.
Args:
operand: LocalOp for the N dimensional array to be sliced.
start_indices: iterable of N integers containing the starting indices of
the slice for each dimension.
limit_indices: iterable of N integers containing the ending indices
(exclusive) of the slice for each dimension.
strides: optional iterable of N integers containing the stride sizes for
each dimension.
Returns:
A LocalOp representing the added Slice op.
"""
if strides is None:
start_indices = list(start_indices)
strides = [1] * len(start_indices)
return self._client.Slice(operand, start_indices, limit_indices, strides)
def SliceInDim(self, operand, start_index, limit_index, stride, dimno):
"""Enqueues a slice-in-dimension operation onto the computation.
Args:
operand: LocalOp for the N dimensional array to be sliced.
start_index: an integer containing the start index of the slice.
limit_index: an integer containing the end index of the slice.
stride: an integer containing the stride size for the slice.
dimno: an integer indicating the dimension along which to slice.
Returns:
A LocalOp representing the added Slice op.
"""
return self._client.SliceInDim(operand, start_index, limit_index, stride,
dimno)
def DynamicSlice(self, operand, start_indices, slice_sizes):
"""Enqueues a slice op with dynamic start indices onto the computation.
Args:
operand: LocalOp for the N dimensional array to be sliced.
start_indices: LocalOp for the 1D array of N integers
containing the starting indices of the slice.
slice_sizes: iterable of N integers containing the slice sizes in each
dimension.
Returns:
A LocalOp representing the added DynamicSlice op.
"""
return self._client.DynamicSlice(operand, start_indices, slice_sizes)
def DynamicUpdateSlice(self, operand, update, start_indices):
"""Enqueues a dynamic update slice operation onto the computation.
Args:
operand: LocalOp for the N dimensional array to be updated.
update: N dimensional array comprising the slice update.
start_indices: Rank-1 array of N integers comprising the starting indices
of the slice along each dimension.
Returns:
A LocalOp representing the added DynamicUpdateSlice op.
"""
return self._client.DynamicUpdateSlice(operand, update, start_indices)
def Tuple(self, *ops):
"""Enqueues a tuple operation onto the computation.
Args:
ops: a sequence of tuple operands (each a LocalOp).
Returns:
A LocalOp representing the added Tuple op.
"""
return self._client.Tuple(ops)
def GetTupleElement(self, tup, index):
"""Enqueues a 'get tuple element' operation onto the computation.
Args:
tup: the tuple operand (a LocalOp).
index: numeric index to select from the tuple.
Returns:
A LocalOp representing the added GetTupleElement op.
"""
return self._client.GetTupleElement(tup, index)
def Call(self, computation_to_apply, operands):
"""Enqueues a call operation onto the computation.
Args:
computation_to_apply: a Computation object.
operands: an iterable of LocalOp. The number and types of
operands must match the arity of computation_to_apply.
Returns:
A LocalOp representing the added call op.
"""
return self._client.Call(computation_to_apply.c_local_computation, operands)
def Map(self, operands, computation_to_apply, dimensions):
"""Enqueues a map operation onto the computation.
Args:
operands: an iterable of LocalOp.
computation_to_apply: a Computation object.
      dimensions: dimensions over which to map the function.
Returns:
A LocalOp representing the added Map op.
"""
return self._client.Map(operands, computation_to_apply.c_local_computation,
dimensions)
def Reduce(self, operand, init_value, computation_to_apply, dimensions):
"""Enqueues a reduction operation onto the computation.
Args:
operand: reduction operand (LocalOp).
init_value: reduction initial value (LocalOp).
computation_to_apply: a Computation object - binary reduction function.
dimensions: sequence of dimensions (integers) to reduce on.
Returns:
A LocalOp representing the added Reduce op.
"""
return self._client.Reduce(operand, init_value,
computation_to_apply.c_local_computation,
dimensions)
def ReduceWindow(self, operand, init_value, computation_to_apply,
window_dimensions, window_strides, padding):
"""Enqueues a windowed reduction operation onto the computation.
Args:
operand: reduction operand (LocalOp).
init_value: reduction initial value (LocalOp).
computation_to_apply: a binary reduction function (Computation).
window_dimensions: dimensions of window (sequence of integers).
window_strides: strides for window (sequence of integers).
padding: PaddingType representing either 'SAME' or 'VALID' padding.
Returns:
A LocalOp representing the added ReduceWindow op.
"""
pads = _convert_padding_type_to_pad_values(
padding, self.GetShape(operand).dimensions(), window_dimensions,
window_strides)
return self._client.ReduceWindowWithGeneralPadding(
operand, init_value, computation_to_apply.c_local_computation,
window_dimensions, window_strides, pads)
def RngNormal(self, mu, sigma, dims):
"""Enqueues an RngNormal operation onto the computation.
Args:
mu: A LocalOp to an F32 scalar specifying the mean.
sigma: A LocalOp to an F32 scalar specifying the standard
deviation.
dims: A 1D array-like of nonnegative integers specifying the dimensions.
Returns: a LocalOp to the generated array of F32 values.
"""
shape = Shape.array_shape(self.GetShape(mu).element_type(), dims)
return self._client.RngNormal(mu, sigma, shape)
def RngUniform(self, a, b, dims):
"""Enqueues an RngUniform operation onto the computation.
Args:
a: a LocalOp to an F32, S32, or U32 scalar (consistent with
the type of b) specifying the low end of the interval [a, b) over which
values are generated.
b: a LocalOp to an F32, S32, or U32 scalar (consistent with
the type of a) specifying the high end of the interval [a, b) over which
values are generated.
dims: A 1D array-like of nonnegative integers specifying the dimensions.
Returns: a LocalOp to the generated array of values with the
same numeric type (F32, S32, or U32) as the arguments a and b.
"""
shape = Shape.array_shape(self.GetShape(a).element_type(), dims)
return self._client.RngUniform(a, b, shape)
def While(self, cond, body, init):
"""Enqueues a While operation onto the computation.
Args:
cond: a Computation for the loop condition, which has type T -> PRED
body: a Computation for the loop body, which has type T -> T
init: a LocalOp for the initial parameter, which has type T
Returns: a LocalOp representing the While operation.
"""
return self._client.While(cond.c_local_computation,
body.c_local_computation, init)
def Conditional(self, pred, true_operand, true_computation, false_operand,
false_computation):
"""Enqueues a Conditional operation onto the computation.
Args:
      pred: a LocalOp to test, which has scalar type PRED
true_operand: a LocalOp of type T_0
true_computation: a Computation to apply to true_operand, type T_0 -> S
      false_operand: a LocalOp of type T_1
false_computation: a Computation to apply to false_operand, type T_1 -> S
Returns: a LocalOp representing the Conditional operation.
"""
return self._client.Conditional(
pred, true_operand, true_computation.c_local_computation, false_operand,
false_computation.c_local_computation)
def IsConstant(self, operand):
"""Checks whether the given operand is a compile-time constant.
Args:
      operand: a LocalOp to test.
    Returns: bool indicating whether `operand` is a compile-time constant,
      meaning its value does not depend on any parameters, or on stateful
      operators such as `RngNormal` or `Infeed`.
"""
return self._client.IsConstant(operand)
def BuildConstantSubGraph(self, operand):
"""Builds a constant sub graph.
Args:
operand: a LocalOp to test.
Returns: a LocalComputation that is rooted on the given `operand` which is a
compile-time constant.
"""
return self._client.BuildConstantSubGraph(operand)
def Dot(self, lhs, rhs):
"""Enqueues a dot operation onto the computation.
Args:
lhs: LocalOp for the rank 1 or rank 2 left-hand-side array.
rhs: LocalOp for the rank 1 or rank 2 right-hand-side array.
Returns: a LocalOp representing the Dot operation.
"""
return self._client.Dot(lhs, rhs)
def DotGeneral(self, lhs, rhs, dimension_numbers):
"""Enqueues a general dot operation onto the computation.
Args:
lhs: LocalOp for the left-hand-side array.
rhs: LocalOp for the right-hand-side array.
dimension_numbers: either an xla_data_pb2.DotDimensionNumbers or a nested
tuple ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)) of lists of
integers representing the dimensions to treat as contracting dimensions
and batch dimensions on each input operand.
Returns: a LocalOp representing the DotGeneral operation.
"""
if not isinstance(dimension_numbers, xla_data_pb2.DotDimensionNumbers):
dimension_numbers = GetDotDimensionsFromLists(dimension_numbers)
return self._client.DotGeneral(lhs, rhs, dimension_numbers)
def Conv(self, lhs, rhs, window_strides, padding):
"""Enqueues a Conv operation onto the computation.
Args:
lhs: LocalOp for the rank N+2 array of inputs.
rhs: LocalOp for the rank N+2 array of kernel weights.
window_strides: length-N array-like of integer kernel strides.
padding: PaddingType representing either 'SAME' or 'VALID' padding.
Returns: a LocalOp representing the Conv operation.
"""
pads = _convert_padding_type_to_pad_values(
padding, self.GetShape(lhs).dimensions()[2:],
self.GetShape(rhs).dimensions()[2:], window_strides)
dimension_numbers = self._GetConvDimensionNumbers(len(window_strides))
return self._client.ConvGeneralDilated(lhs, rhs, window_strides, pads, (),
(), dimension_numbers)
def ConvWithGeneralPadding(self, lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation):
"""Enqueues a ConvWithGeneralPadding operation onto the computation.
Args:
lhs: LocalOp for the rank N+2 array of inputs.
rhs: LocalOp for the rank N+2 array of kernel weights.
window_strides: length-N array-like of kernel strides.
padding: length-N array-like of pairs of integers of (low, high) padding.
lhs_dilation: length-N array-like of dilation factors.
rhs_dilation: length-N array-like of dilation factors.
Returns:
      A LocalOp representing the added ConvWithGeneralPadding op.
"""
dimension_numbers = self._GetConvDimensionNumbers(len(window_strides))
return self._client.ConvGeneralDilated(lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation,
dimension_numbers)
def _GetConvDimensionNumbers(self, num_spatial_dims):
"""Create ConvolutionDimensionNumbers proto for convolutions."""
nd = num_spatial_dims
dimension_numbers = xla_data_pb2.ConvolutionDimensionNumbers()
dimension_numbers.input_batch_dimension = 0
dimension_numbers.input_feature_dimension = 1
dimension_numbers.output_batch_dimension = 0
dimension_numbers.output_feature_dimension = 1
dimension_numbers.kernel_output_feature_dimension = 0
dimension_numbers.kernel_input_feature_dimension = 1
dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd))
dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + nd))
dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd))
return dimension_numbers
def ConvGeneralDilated(self, lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers):
"""Enqueues a ConvGeneralDilated operation onto the computation.
Args:
lhs: LocalOp for the rank N+2 array of inputs.
rhs: LocalOp for the rank N+2 array of kernel weights.
window_strides: length-N array-like of integer kernel strides.
padding: length-N array-like of pairs of integers of (low, high) padding.
lhs_dilation: length-N array-like of integer dilation factors.
rhs_dilation: length-N array-like of integer dilation factors.
dimension_numbers: either an xla_data_pb2.ConvolutionDimensionNumbers or a
triple (lhs_spec, rhs_spec, out_spec) where each element is a string of
length N+2 identifying by position (1) batch dimensions in lhs, rhs, and
the output with the character 'N', (2) feature dimensions in lhs and the
output with the character 'C', (3) input and output feature dimensions
in rhs with the characters 'I' and 'O' respectively, and (4) spatial
dimension correspondences between lhs, rhs, and the output using any
distinct characters. For example, to indicate dimension numbers
consistent with the Conv operation with two spatial dimensions, one
could use ('NCHW', 'OIHW', 'NCHW'). As another example, to indicate
dimension numbers consistent with the TensorFlow Conv2D operation, one
could use ('NHWC', 'HWIO', 'NHWC'). When using the latter form of
convolution dimension specification, window strides are associated with
spatial dimension character labels according to the order in which the
labels appear in the rhs_spec string, so that window_strides[0] is
matched with the dimension corresponding to the first character
appearing in rhs_spec that is not 'I' or 'O'.
    Returns: a LocalOp representing the ConvGeneralDilated operation.
"""
if not isinstance(dimension_numbers,
xla_data_pb2.ConvolutionDimensionNumbers):
lhs_spec, rhs_spec, out_spec = dimension_numbers
dimension_numbers = xla_data_pb2.ConvolutionDimensionNumbers()
dimension_numbers.input_batch_dimension = lhs_spec.index('N')
dimension_numbers.input_feature_dimension = lhs_spec.index('C')
dimension_numbers.output_batch_dimension = out_spec.index('N')
dimension_numbers.output_feature_dimension = out_spec.index('C')
dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O')
dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I')
dimension_numbers.kernel_spatial_dimensions.extend(
i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'})
dimension_numbers.input_spatial_dimensions.extend(
sorted((i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}),
key=lambda i: rhs_spec.index(lhs_spec[i])))
dimension_numbers.output_spatial_dimensions.extend(
sorted((i for i, c in enumerate(out_spec) if c not in {'N', 'C'}),
key=lambda i: rhs_spec.index(out_spec[i])))
return self._client.ConvGeneralDilated(lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation,
dimension_numbers)
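  # Illustrative call (assumption, not from the original source): a
  # TensorFlow-style 2D convolution using the string form of
  # dimension_numbers documented above might look like
  #
  #   builder.ConvGeneralDilated(lhs, rhs,
  #                              window_strides=(1, 1),
  #                              padding=((0, 0), (0, 0)),
  #                              lhs_dilation=(1, 1),
  #                              rhs_dilation=(1, 1),
  #                              dimension_numbers=('NHWC', 'HWIO', 'NHWC'))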
def Sort(self, operand, dimension=-1):
"""Enqueues a sort operation onto the computation."""
return self._client.Sort(operand, dimension)
def SortKeyVal(self, keys, values, dimension=-1):
"""Enqueues a key-value sort operation onto the computation."""
return self._client.SortKeyVal(keys, values, dimension)
def _forward_methods_to_local_builder():
"""Forward remaining ComputationBuilder methods to the C API.
Set up methods, corresponding to unary and binary XLA operations,
whose calls are forwarded in a boilerplate manner to the underlying
LocalComputationBuilder C-extension API.
"""
def forward_to_local_builder_with_handles(target_method, is_binop=False):
"""Generate a forwarding method that wraps/unwraps data handles."""
def forward(self, *args, **kwargs):
arg_list = list(args)
if is_binop and len(arg_list) < 3:
arg_list.append(kwargs.get('broadcast_dimensions', ()))
return target_method(
self._client, # pylint: disable=protected-access
*arg_list)
return forward
for method_name in _UNARY_OPS:
forward = forward_to_local_builder_with_handles(
getattr(c_api.LocalComputationBuilder, method_name))
forward.__name__ = method_name
setattr(ComputationBuilder, method_name, forward)
for method_name in _BINARY_OPS:
forward = forward_to_local_builder_with_handles(
getattr(c_api.LocalComputationBuilder, method_name), is_binop=True)
forward.__name__ = method_name
setattr(ComputationBuilder, method_name, forward)
_forward_methods_to_local_builder()
def initialize_replica_count(replica_count):
"""Initializes the desired replica count to use on XLA service init.
Args:
replica_count: number of replicas that are desired for set up during XLA
initialization.
Raises:
A runtime exception if the XLA service has already been initialized.
"""
c_api.InitializeReplicaCount(replica_count)
def get_replica_count():
"""Returns the current replica count used for the XLA service.
Note: this will return a value whether the XLA service has been initialized
yet or not.
"""
return c_api.GetReplicaCount()
def GetPaddingConfigFromTriples(triples):
"""Create PaddingConfig proto from list of triples of integers."""
padding_config = xla_data_pb2.PaddingConfig()
for lo, hi, interior in triples:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
return padding_config
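# Illustrative usage (assumption, not in the original file):
#
#   config = GetPaddingConfigFromTriples([(1, 2, 0), (0, 0, 3)])
#
# yields a two-dimension PaddingConfig with edge padding (low=1, high=2) on
# the first dimension and interior padding of 3 on the second.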
def GetDotDimensionsFromLists(dimension_numbers):
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
dot_dims_proto = xla_data_pb2.DotDimensionNumbers()
dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract)
dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract)
dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch)
dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch)
return dot_dims_proto
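# Illustrative usage (assumption, not in the original file): a plain rank-2
# matrix multiply contracts lhs dimension 1 against rhs dimension 0 and has
# no batch dimensions, i.e.
#
#   dot_dims = GetDotDimensionsFromLists((([1], [0]), ([], [])))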
| 35.157372
| 80
| 0.704287
|
e1c95abf97cc042b1e311425999f5c7784b113db
| 1,156
|
py
|
Python
|
sagemaker/functions/auto-shutdown.py
|
hackoregon/hackoregon-aws-infrastructure
|
d1fbe5894e5655aefdbf2b346783aa3ca867ca51
|
[
"Apache-2.0"
] | 6
|
2017-03-08T02:22:47.000Z
|
2021-11-03T16:33:32.000Z
|
sagemaker/functions/auto-shutdown.py
|
hackoregon/hackoregon-aws-infrastructure
|
d1fbe5894e5655aefdbf2b346783aa3ca867ca51
|
[
"Apache-2.0"
] | 36
|
2017-03-25T06:13:29.000Z
|
2021-09-29T17:18:52.000Z
|
sagemaker/functions/auto-shutdown.py
|
hackoregon/hackoregon-aws-infrastructure
|
d1fbe5894e5655aefdbf2b346783aa3ca867ca51
|
[
"Apache-2.0"
] | 13
|
2017-03-07T05:15:15.000Z
|
2019-05-05T20:09:08.000Z
|
import boto3
def lambda_handler(event, context):
stopCount = 0
keepaliveCount = 0
totalCount = 0
client = boto3.client('sagemaker')
notebooks = client.list_notebook_instances(MaxResults=100)['NotebookInstances']
totalCount = len(notebooks)
print('Found %s Notebook Instances' % totalCount)
for notebook in notebooks:
tags = client.list_tags(ResourceArn=notebook['NotebookInstanceArn'])['Tags']
if notebook['NotebookInstanceStatus'] != 'Stopped':
keepalive = has_keepalive(tags)
if keepalive:
keepaliveCount += 1
print('Keeping notebook %s alive' % notebook['NotebookInstanceName'])
else:
                print('Stopping notebook %s' % notebook['NotebookInstanceName'])
stopCount += 1
client.stop_notebook_instance(NotebookInstanceName=notebook['NotebookInstanceName'])
return { 'stopped': stopCount, 'kept': keepaliveCount, 'total': totalCount }
def has_keepalive(tags):
for tag in tags:
if tag['Key'] == 'keepalive' and tag['Value'] == 'true':
return True
return False
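# Illustrative note (assumption, not part of the original function): the Tags
# list returned by the SageMaker list_tags call has the shape
#
#   [{'Key': 'keepalive', 'Value': 'true'}, {'Key': 'team', 'Value': 'data'}]
#
# so has_keepalive would return True for this example list.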
| 37.290323
| 100
| 0.639273
|
88945903e60ee41e999a4363840a21df9e1a5c43
| 15,165
|
py
|
Python
|
v6.0.6/wanopt/fortios_wanopt_webcache.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 14
|
2018-09-25T20:35:25.000Z
|
2021-07-14T04:30:54.000Z
|
v6.0.6/wanopt/fortios_wanopt_webcache.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
v6.0.6/wanopt/fortios_wanopt_webcache.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wanopt_webcache
short_description: Configure global Web cache settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wanopt feature and webcache category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
wanopt_webcache:
description:
- Configure global Web cache settings.
default: null
type: dict
suboptions:
always_revalidate:
description:
- Enable/disable revalidation of requested cached objects, which have content on the server, before serving it to the client.
type: str
choices:
- enable
- disable
cache_by_default:
description:
- Enable/disable caching content that lacks explicit caching policies from the server.
type: str
choices:
- enable
- disable
cache_cookie:
description:
                    - Enable/disable caching cookies. Since cookies contain information for or about individual users, they are not usually cached.
type: str
choices:
- enable
- disable
cache_expired:
description:
- Enable/disable caching type-1 objects that are already expired on arrival.
type: str
choices:
- enable
- disable
default_ttl:
description:
                    - Default object expiry time. This only applies to those objects that do not have an expiry time set by the web server.
type: int
external:
description:
- Enable/disable external Web caching.
type: str
choices:
- enable
- disable
fresh_factor:
description:
- Frequency that the server is checked to see if any objects have expired (1 - 100). The higher the fresh factor, the less often the
checks occur.
type: int
host_validate:
description:
- "Enable/disable validating "Host:" with original server IP."
type: str
choices:
- enable
- disable
ignore_conditional:
description:
- Enable/disable controlling the behavior of cache-control HTTP 1.1 header values.
type: str
choices:
- enable
- disable
ignore_ie_reload:
description:
- "Enable/disable ignoring the PNC-interpretation of Internet Explorer's Accept: / header."
type: str
choices:
- enable
- disable
ignore_ims:
description:
- Enable/disable ignoring the if-modified-since (IMS) header.
type: str
choices:
- enable
- disable
ignore_pnc:
description:
- Enable/disable ignoring the pragma no-cache (PNC) header.
type: str
choices:
- enable
- disable
max_object_size:
description:
                    - Maximum cacheable object size in kB (1 - 2147483 kB, i.e. 2 GB). All objects that exceed this are delivered to the client but not stored in
                      the web cache.
type: int
max_ttl:
description:
                    - Maximum time an object can stay in the web cache without checking to see if it has expired on the server.
type: int
min_ttl:
description:
                    - Minimum time an object can stay in the web cache without checking to see if it has expired on the server.
type: int
neg_resp_time:
description:
- Time in minutes to cache negative responses or errors (0 - 4294967295).
type: int
reval_pnc:
description:
- Enable/disable revalidation of pragma-no-cache (PNC) to address bandwidth concerns.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure global Web cache settings.
fortios_wanopt_webcache:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wanopt_webcache:
always_revalidate: "enable"
cache_by_default: "enable"
cache_cookie: "enable"
cache_expired: "enable"
default_ttl: "7"
external: "enable"
fresh_factor: "9"
host_validate: "enable"
ignore_conditional: "enable"
ignore_ie_reload: "enable"
ignore_ims: "enable"
ignore_pnc: "enable"
max_object_size: "15"
max_ttl: "16"
min_ttl: "17"
neg_resp_time: "18"
reval_pnc: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wanopt_webcache_data(json):
option_list = ['always_revalidate', 'cache_by_default', 'cache_cookie',
'cache_expired', 'default_ttl', 'external',
'fresh_factor', 'host_validate', 'ignore_conditional',
'ignore_ie_reload', 'ignore_ims', 'ignore_pnc',
'max_object_size', 'max_ttl', 'min_ttl',
'neg_resp_time', 'reval_pnc']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
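# Illustrative example (assumption, not in the original module):
#
#   underscore_to_hyphen({'cache_by_default': 'enable', 'max_object_size': 15})
#
# returns {'cache-by-default': 'enable', 'max-object-size': 15}, the key style
# FortiOS expects in its REST payloads.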
def wanopt_webcache(data, fos):
vdom = data['vdom']
wanopt_webcache_data = data['wanopt_webcache']
filtered_data = underscore_to_hyphen(filter_wanopt_webcache_data(wanopt_webcache_data))
return fos.set('wanopt',
'webcache',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wanopt(data, fos):
if data['wanopt_webcache']:
resp = wanopt_webcache(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"wanopt_webcache": {
"required": False, "type": "dict", "default": None,
"options": {
"always_revalidate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"cache_by_default": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"cache_cookie": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"cache_expired": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"default_ttl": {"required": False, "type": "int"},
"external": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fresh_factor": {"required": False, "type": "int"},
"host_validate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_conditional": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_ie_reload": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_ims": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ignore_pnc": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"max_object_size": {"required": False, "type": "int"},
"max_ttl": {"required": False, "type": "int"},
"min_ttl": {"required": False, "type": "int"},
"neg_resp_time": {"required": False, "type": "int"},
"reval_pnc": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wanopt(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wanopt(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 34.702517
| 155
| 0.559446
|
a3f4cf25e32a1c10bd57ef7381cd314d3be2df12
| 18,242
|
py
|
Python
|
phase-1/server/main.py
|
Cryliss/comp-440
|
b3dfbce4fadef8e936216f2efeb7f9ec500a6f17
|
[
"MIT"
] | null | null | null |
phase-1/server/main.py
|
Cryliss/comp-440
|
b3dfbce4fadef8e936216f2efeb7f9ec500a6f17
|
[
"MIT"
] | null | null | null |
phase-1/server/main.py
|
Cryliss/comp-440
|
b3dfbce4fadef8e936216f2efeb7f9ec500a6f17
|
[
"MIT"
] | null | null | null |
import pymysql
from app import app
from config import mysql
from validation import check_payload
from flask import jsonify
from flask import flash, request
# Let's create a route for our app that adds a user to our database
# We can get it by using a url like:
# http://127.0.0.1:5555/api/add
# However, this requires a payload - Go to https://reqbin.com/ to test it out
@app.route('/api/add', methods=['GET', 'POST'])
def add():
# Create a variable for the response message
# Unsure why, but if I remove this line, it breaks :D
response = ''
# Rejected is to check whether or not we rejected the payload, so that
# when we get to the 'finally' portion of our try, we don't attempt to
# close the cursor or conn as we never created them in the first place
rejected = True
try:
# Read the payload data from the request and save the values we need
# Usually a javascript object sent with the fetch call
_json = request.json
_username = _json['username']
_firstname = _json['firstName']
_lastname = _json['lastName']
_email = _json['email']
_passconfirmed = _json['passConfirmed']
_password = _json['password']
        # If we have all of these things, then let's go ahead and add a new user to the database
if _username and _firstname and _lastname and _email and _passconfirmed and _password and request.method == 'POST':
# Let's check our payload for improper values
if check_payload(_username) or check_payload(_firstname) or check_payload(_lastname) or check_payload(_email) or check_payload(_password):
# Check Payload returned true, so we have malicious values in our data
# Return status code 418: I'm a teapot.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/418
rejected = True
message = {
'status': 418,
                    'message': "I'm a teapot. Go away",
}
response = jsonify(message)
response.status_code = 418
return response
if _passconfirmed != True:
# The password was not confirmed prior to being sent to us?
# Return status code 400: Bad Request
rejected = True
message = {
'status': 400,
'message': 'Password was not confirmed',
}
response = jsonify(message)
response.status_code = 400
return response
rejected = False
# Create the SQL query
sqlQuery = 'CALL sp_register(%s, %s, %s, %s, %s, %s, @registered, @message)'
bindData = (_username, _password, _passconfirmed, _firstname, _lastname, _email)
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor()
# Execute the query and commit it the database
cursor.execute(sqlQuery, bindData)
conn.commit()
# Get the updated variables from the procedure and check them
cursor.execute('SELECT @registered, @message')
data = cursor.fetchall() # data = ((0, 'Username already exists!'),)
# First value is registered
if data[0][0] == False:
# We didn't actually register the user when we called sp_register
# So let's return the reason message to the client
message = {
'status': 409,
'message': data[0][1],
}
# Put that into a json object and set the status 200: OK
response = jsonify(message)
response.status_code = 409
return response
# Okay so we didn't have any issues, so let's let the client know
message = {
'status': 200,
'message': 'User added successfully!',
}
# Put that into a json object and set the status 200: OK
response = jsonify(message)
response.status_code = 200
# Return the status to the client
return response
else:
# Hm, we didn't get anything in our payload, return 404
return not_found()
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
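# Illustrative client call (assumption, not part of the original file; the
# field names mirror the payload keys read above and the host/port match the
# app.run call at the bottom of this file):
#
#   import requests  # hypothetical client-side dependency
#   requests.post('http://127.0.0.1:5555/api/add', json={
#       'username': 'jdoe', 'firstName': 'Jane', 'lastName': 'Doe',
#       'email': 'jdoe@example.com', 'password': 's3cret',
#       'passConfirmed': True,
#   })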
# Define a route to list all registered users.
@app.route('/api/users')
def users():
rejected = True
response = ''
try:
        rejected = False
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor(pymysql.cursors.DictCursor)
# Select all but sensitive data (password) from the database
cursor.execute("SELECT username, email, firstName, lastName FROM user")
# Get all rows retrieved, add them to the response and return
userRows = cursor.fetchall()
response = jsonify(userRows)
response.status_code = 200
return response
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Define a route to get data from our random table
@app.route('/api/advisor')
def random():
rejected = True
response = ''
try:
rejected = False
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor(pymysql.cursors.DictCursor)
# Select name and email from the advisor table
cursor.execute('SELECT * FROM advisor')
# Add that data to the response and return
randomRows = cursor.fetchall()
response = jsonify(randomRows)
response.status_code = 200
return response
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Get data about a specific user
@app.route('/api/user/<string:username>')
def user(username):
rejected = True
# First, let's make sure our payload doesn't contain anything malicious
if check_payload(username):
# Check Payload returned true, so we have malicious values in our data
# Return status code 418: I'm a teapot.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/418
rejected = True
message = {
'status': 418,
'message': "I'm a teapot. Go away",
}
response = jsonify(message)
response.status_code = 418
return response
try:
rejected = False
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor(pymysql.cursors.DictCursor)
# Get the requested data
cursor.execute("SELECT username, email, firstName, lastName FROM user WHERE username =%s", username)
# Fetch only one row from the return
userRow = cursor.fetchone()
# Add that row to our response and return
response = jsonify(userRow)
response.status_code = 200
return response
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Delete a user from the table
@app.route('/api/delete/<string:username>')
def delete(username):
rejected = True
response = ''
try:
# First, let's make sure our payload doesn't contain anything malicious
if check_payload(username):
# Check Payload returned true, so we have malicious values in our data
# Return status code 418: I'm a teapot.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/418
rejected = True
message = {
'status': 418,
'message': 'Go away',
}
response = jsonify(message)
response.status_code = 418
return response
rejected = False
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor()
# Create the SQL query
sqlQuery = 'DELETE FROM user WHERE username=%s'
bindData = (username,)
# Execute the query and commit the changes
cursor.execute(sqlQuery, bindData)
conn.commit()
# Send a message to the client letting them know all went well.
message = {
'status': 200,
'message': 'User ' + username + ' deleted successfully!',
}
response = jsonify(message)
response.status_code = 200
return response
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Initializes a new database - to be used when create databse button is clicked
@app.route('/api/initializedb')
def initializedb():
rejected = True
response = ''
try:
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor()
rejected = False
# We are going to re-initialize all the tables except for our users table
# by using the university-1.sql file, provided by the professor.
        # In order to use this file properly, we need to make sure we don't accidentally
# try to execute lines of code that are just comments, or multiples line long
# as this will cause an error. The following for loop handles the processing
# of the file.
# sql will hold the SQL statement for when we see 'CREATE', as that's usually
# for 'CREATE TABLE' which always has new lines in it, so we need to add
# the lines following this to sql, so we can get one string for the full
        # create statement.
sql = ''
# waiting is if we are waiting to see a ';' to indicate the statement end.
waiting = False
for line in open("/Users/sabra/go/src/comp-440/sql/university-1.sql"):
# Strip the line of the new line character, '\n'
line = line.strip()
# Is this just an empty line?
if line == '':
# Yep, move on.
continue
elif line[0] == '-' or line[0] == '/':
# We have a comment here, move on
continue
elif line[len(line)-1] == ';' and waiting:
# We've been waiting for the end of statement character, ';'
# and now we've found it
waiting = False # Set waiting to false
sql = sql + line # Add the last line to the statement
#print(sql) # Output the statement to the terminal
cursor.execute(sql) # Execute the statement
sql = '' # Reset our sql variable
continue # Move on with the for loop
elif len(line) > 6:
                # Line is long enough; does it begin a CREATE (TABLE) statement?
                if line.startswith('CREATE'):
# Yep, did the first 5 char spell create? Yep!
# We're making a new table then
waiting = True # Set waiting to true.
sql = sql + line # Add the line to the sql variable
continue # Move on with the for loop
elif waiting:
# The length is indeed longer, but we're not a create statement
# and we are waiting to be executed
sql = sql + line # Add the line to the sql variable
continue # Move on with the for loop
else:
# The length is indeed longer, but we're not waiting either
# Print and execute the command and continue on
#print(line)
cursor.execute(line)
continue
elif waiting:
# None of the above are true, but we're waiting
sql = sql + line # Add the line to the sql variable
continue # Move on with the for loop
# Nothing above was true, and we're not waiting for an ';'
# Print the command and execute it.
#print(line)
cursor.execute(line)
# Create our response to the client and return it
message = {
'status': 200,
'message': 'Database successfully initialized!',
}
response = jsonify(message)
response.status_code = 200
return response
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
            # If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Route for logging in?
@app.route('/api/login', methods=["GET", "POST"])
def login():
response = ''
rejected = True
try:
# Read the payload data from the request and save the values we need
# Usually a javascript object sent with the fetch call
_json = request.json
_username = _json['username']
_password = _json['password']
# If we have all of these things, then we wanna try and log the user in
if _username and _password and request.method == 'POST':
# First, let's make sure our payload doesn't contain anything malicious
if check_payload(_username) or check_payload(_password):
# Check Payload returned true, so we have some malicious data
# Return status code 418: I'm a teapot.
rejected = True
message = {
'status': 418,
'message': "I'm a teapot. Go away",
}
response = jsonify(message)
response.status_code = 418
return response
rejected = False
# Our payload was fine, let's create a new SQL query with it then
sqlQuery = 'CALL sp_login(%s, %s, @userConfirmed, @passConfirmed)'
bindData = (_username, _password)
# Make a new connection to the MySQL server
conn = mysql.connect()
cursor = conn.cursor()
# Execute the query
cursor.execute(sqlQuery, bindData)
cursor.execute('SELECT @userConfirmed, @passConfirmed')
data = cursor.fetchall()
# Check if the username was confirmed
if data[0][0] == False:
rejected = True
# Username was not confirmed! Don't let them log in
message = {
'status': 409,
'message': 'Username does not exist!',
}
response = jsonify(message)
response.status_code = 409
return response
# Check if our password was confirmed
if data[0][1] == False:
# Password was not confirmed! Don't let them log in
message = {
'status': 409,
'message': 'Invalid password given',
}
response = jsonify(message)
response.status_code = 409
return response
# Both values were good, let's let the client know
message = {
'status': 200,
'message': 'User successfully logged in',
}
response = jsonify(message)
response.status_code = 200
return response
else:
# Hm, we didn't get anything in our payload, return 404
return not_found()
except Exception as e:
# Was there some error in our code above?
# Print it out to the terminal so we can try and debug it
print(e)
finally:
if rejected == False:
# If we've made it here, then we successfully executed our try
# Now we can close our cursor and connection
cursor.close()
conn.close()
# Basic route for error handling
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Record not found: ' + request.url,
}
response = jsonify(message)
response.status_code = 404
return response
if __name__ == "__main__":
app.run(host='127.0.0.1', port='5555', debug=True)
| 38.566596
| 150
| 0.562822
|
d62de208266c4082920001222182af94b25660f8
| 2,617
|
py
|
Python
|
reddit_experiments/variant_sets/rollout_variant_set.py
|
isugimpy/experiments.py
|
ddfcd6fd010c8806a88cd3c51f55332d857622c5
|
[
"BSD-3-Clause"
] | 5
|
2021-04-04T05:24:47.000Z
|
2021-08-12T21:42:23.000Z
|
reddit_experiments/variant_sets/rollout_variant_set.py
|
Seanpm2001-reddit/experiments.py
|
c5f8373d051845ab550a9ae65041afcc4c9f996b
|
[
"BSD-3-Clause"
] | 1
|
2021-04-06T02:06:50.000Z
|
2021-05-17T15:34:59.000Z
|
reddit_experiments/variant_sets/rollout_variant_set.py
|
Seanpm2001-reddit/experiments.py
|
c5f8373d051845ab550a9ae65041afcc4c9f996b
|
[
"BSD-3-Clause"
] | 4
|
2021-04-08T10:02:42.000Z
|
2022-01-12T22:16:46.000Z
|
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from reddit_experiments.variant_sets.base import VariantSet
class RolloutVariantSet(VariantSet):
"""VariantSet designed for feature rollouts. Takes a single variant.
    Changing the size of the variant will minimize changes in the treatment of
    bucketed users. Those users going from no treatment to the provided
    treatment (or vice versa) are limited to the change in the provided
    treatment size.
For instance, going from 45% to 55% will result in only the new 10% of
users changing treatments. The initial 45% will not change. Conversely,
going from 55% to 45% will result in only 10% of users losing the
treatment.
:param variants: array of dicts, each containing the keys 'name'
and 'size'. Name is the variant name, and size is the fraction of
users to bucket into the corresponding variant. Sizes are expressed
as a floating point value between 0 and 1.
:param num_buckets: the number of potential buckets that can be
passed in for a variant call. Defaults to 1000, which means maximum
granularity of 0.1% for bucketing
"""
# pylint: disable=super-init-not-called
def __init__(self, variants: List[Dict[str, Any]], num_buckets: int = 1000):
# validate before assigning anything on this type, since we're expecting
# only a single variant
self._validate_variants(variants)
self.variant = variants[0]
self.num_buckets = num_buckets
def __contains__(self, item: str) -> bool:
return self.variant.get("name") == item
def _validate_variants(self, variants: List[Dict[str, Any]]) -> None:
if variants is None:
raise ValueError("No variants provided")
if len(variants) != 1:
raise ValueError("Rollout variant only supports one variant.")
size = variants[0].get("size")
if size is None or size < 0.0 or size > 1.0:
raise ValueError("Variant size must be between 0 and 1")
def choose_variant(self, bucket: int) -> Optional[str]:
"""Deterministically choose a percentage-based variant.
Every call with the same bucket and variants will result in the same
answer.
:param bucket: an integer bucket representation
:return: the variant name, or None if bucket doesn't fall into
any of the variants
"""
if bucket < int(self.variant["size"] * self.num_buckets):
return self.variant.get("name")
return None
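# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The variant name "enabled" and the sizes below are hypothetical. ---
if __name__ == "__main__":
    rollout = RolloutVariantSet(variants=[{"name": "enabled", "size": 0.45}])
    # With the default 1000 buckets, buckets 0..449 receive the treatment.
    assert rollout.choose_variant(100) == "enabled"
    assert rollout.choose_variant(700) is None
    # Growing the rollout to 55% only adds buckets 450..549; buckets already
    # treated keep their assignment, which is the churn-minimizing property
    # described in the class docstring.
    wider = RolloutVariantSet(variants=[{"name": "enabled", "size": 0.55}])
    assert wider.choose_variant(100) == "enabled"
    assert wider.choose_variant(500) == "enabled"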
| 39.059701
| 80
| 0.677111
|
5188b3d10195196296c2b1bbf3c0ef70269dce8a
| 9,598
|
py
|
Python
|
projects/wizard_of_wikipedia/interactive_end2end/interactive_end2end.py
|
cloudygoose/ParlAI
|
a73b683bf1c1a3b1fd2c4e2135a69b6048681f66
|
[
"MIT"
] | 9
|
2020-01-17T09:34:00.000Z
|
2021-11-19T07:46:52.000Z
|
projects/wizard_of_wikipedia/interactive_end2end/interactive_end2end.py
|
cloudygoose/ParlAI
|
a73b683bf1c1a3b1fd2c4e2135a69b6048681f66
|
[
"MIT"
] | 5
|
2019-12-29T07:52:39.000Z
|
2022-03-12T00:10:03.000Z
|
projects/wizard_of_wikipedia/interactive_end2end/interactive_end2end.py
|
cloudygoose/ParlAI
|
a73b683bf1c1a3b1fd2c4e2135a69b6048681f66
|
[
"MIT"
] | 2
|
2020-01-28T01:41:52.000Z
|
2020-12-27T07:22:07.000Z
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Hooks up the components for the wizard generator to do live retrieval.
"""
from parlai.core.agents import Agent, create_agent, create_agent_from_shared
from projects.wizard_of_wikipedia.generator.agents import EndToEndAgent
from parlai.tasks.wizard_of_wikipedia.agents import TOKEN_KNOWLEDGE
import json
import os
class InteractiveEnd2endAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.debug = opt['debug']
self.model_path = os.path.join(
opt['datapath'],
'models',
'wizard_of_wikipedia',
'full_dialogue_retrieval_model',
)
if not shared:
# Create responder
self._set_up_responder(opt)
# Create retriever
self._set_up_retriever(opt)
else:
self.opt = shared['opt']
self.retriever = shared['retriever']
self.responder = create_agent_from_shared(shared['responder_shared_opt'])
self.sent_tok = shared['sent_tok']
self.wiki_map = shared['wiki_map']
self.id = 'WizardGenerativeInteractiveAgent'
self.ret_history = {}
@staticmethod
def add_cmdline_args(argparser):
"""Add command-line arguments specifically for this agent."""
EndToEndAgent.add_cmdline_args(argparser)
parser = argparser.add_argument_group('InteractiveEnd2end Arguments')
parser.add_argument(
'--retriever-model-file',
type=str,
default='zoo:wikipedia_full/tfidf_retriever/model',
)
parser.add_argument(
'--responder-model-file',
type=str,
default='zoo:wizard_of_wikipedia/end2end_generator/model',
)
parser.add_argument(
'--num-retrieved',
type=int,
default=7,
            help='how many passages to retrieve for each category',
)
parser.add_argument('--debug', type='bool', default=False)
return parser
def _set_up_retriever(self, opt):
retriever_opt = {
'model_file': opt['retriever_model_file'],
'remove_title': False,
'datapath': opt['datapath'],
'override': {'remove_title': False},
}
self.retriever = create_agent(retriever_opt)
self._set_up_sent_tok()
wiki_map_path = os.path.join(self.model_path, 'chosen_topic_to_passage.json')
with open(wiki_map_path, 'r') as f:
self.wiki_map = json.load(f)
def _set_up_responder(self, opt):
responder_opts = opt.copy()
# override these opts to build the responder model
override_opts = {
'model_file': opt['responder_model_file'],
'model': 'projects.wizard_of_wikipedia.generator.agents:EndToEndAgent',
'datapath': opt['datapath'],
}
for k, v in override_opts.items():
responder_opts[k] = v
responder_opts['override'][k] = v
self.responder = create_agent(responder_opts)
def _set_up_sent_tok(self):
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (e.g. pip install nltk).')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format('english')
try:
self.sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
self.sent_tok = nltk.data.load(st_path)
def get_chosen_topic_passages(self, chosen_topic):
retrieved_txt_format = []
if chosen_topic in self.wiki_map:
retrieved_txt = self.wiki_map[chosen_topic]
retrieved_txts = retrieved_txt.split('\n')
if len(retrieved_txts) > 1:
combined = ' '.join(retrieved_txts[2:])
sentences = self.sent_tok.tokenize(combined)
total = 0
for sent in sentences:
if total >= 10:
break
if len(sent) > 0:
retrieved_txt_format.append(' '.join([chosen_topic, sent]))
total += 1
if len(retrieved_txt_format) > 0:
passages = '\n'.join(retrieved_txt_format)
else:
passages = ''
return passages
def get_passages(self, act):
"""Format passages retrieved by taking the first paragraph of the
top `num_retrieved` passages.
"""
retrieved_txt = act.get('text', '')
cands = act.get('text_candidates', [])
if len(cands) > 0:
retrieved_txts = cands[: self.opt['num_retrieved']]
else:
retrieved_txts = [retrieved_txt]
retrieved_txt_format = []
for ret_txt in retrieved_txts:
paragraphs = ret_txt.split('\n')
if len(paragraphs) > 2:
sentences = self.sent_tok.tokenize(paragraphs[2])
for sent in sentences:
delim = ' ' + TOKEN_KNOWLEDGE + ' '
retrieved_txt_format.append(delim.join([paragraphs[0], sent]))
if len(retrieved_txt_format) > 0:
passages = '\n'.join(retrieved_txt_format)
else:
passages = ''
return passages
def retriever_act(self, history):
"""Combines and formats texts retrieved by the TFIDF retriever for the
chosen topic, the last thing the wizard said, and the last thing the
apprentice said.
"""
# retrieve on chosen topic
chosen_topic_txts = None
if self.ret_history.get('chosen_topic'):
chosen_topic_txts = self.get_chosen_topic_passages(
self.ret_history['chosen_topic']
)
# retrieve on apprentice
apprentice_txts = None
if self.ret_history.get('apprentice'):
apprentice_act = {
'text': self.ret_history['apprentice'],
'episode_done': True,
}
self.retriever.observe(apprentice_act)
apprentice_txts = self.get_passages(self.retriever.act())
# retrieve on wizard
wizard_txts = None
if self.ret_history.get('wizard'):
wizard_act = {'text': self.ret_history['wizard'], 'episode_done': True}
self.retriever.observe(wizard_act)
wizard_txts = self.get_passages(self.retriever.act())
# combine everything
combined_txt = ''
if chosen_topic_txts:
combined_txt += chosen_topic_txts
if wizard_txts:
combined_txt += '\n' + wizard_txts
if apprentice_txts:
combined_txt += '\n' + apprentice_txts
return combined_txt
def observe(self, observation):
obs = observation.copy()
self.maintain_retrieved_texts(self.ret_history, obs)
if self.debug:
print('DEBUG: Retriever history:\n{}'.format(self.ret_history))
responder_knowledge = self.retriever_act(self.ret_history)
obs['knowledge'] = responder_knowledge
self.observation = obs
def maintain_retrieved_texts(self, history, observation):
"""Maintain texts retrieved by the retriever to mimic the set-up
from the data collection for the task.
"""
if 'chosen_topic' not in history:
history['episode_done'] = False
history['chosen_topic'] = ''
history['wizard'] = ''
history['apprentice'] = ''
if history['episode_done']:
history['chosen_topic'] = ''
history['wizard'] = ''
history['apprentice'] = ''
if 'next_wizard' in history:
del history['next_wizard']
history['episode_done'] = False
# save chosen topic
if 'chosen_topic' in observation:
history['chosen_topic'] = observation['chosen_topic']
if 'text' in observation:
history['apprentice'] = observation['text']
if 'next_wizard' in history:
history['wizard'] = history['next_wizard']
# save last thing wizard said (for next time)
if 'labels' in observation:
history['next_wizard'] = observation['labels'][0]
elif 'eval_labels' in observation:
history['next_wizard'] = observation['eval_labels'][0]
history['episode_done'] = observation['episode_done']
def act(self):
obs = self.observation
# choose a knowledge sentence
responder_obs = obs.copy()
if self.debug:
print('DEBUG: Responder is observing:\n{}'.format(responder_obs))
self.responder.observe(responder_obs)
responder_act = self.responder.act()
if self.debug:
print('DEBUG: Responder is acting:\n{}'.format(responder_act))
responder_act.force_set('id', 'WizardEnd2EndInteractiveAgent')
return responder_act
def share(self):
"""Share internal saved_model between parent and child instances."""
shared = super().share()
shared['opt'] = self.opt
shared['retriever'] = self.retriever
shared['responder_shared_opt'] = self.responder.share()
shared['sent_tok'] = self.sent_tok
shared['wiki_map'] = self.wiki_map
return shared
| 36.082707
| 85
| 0.592832
|
3810069af2172a326214f4c25c085c5731c62cbc
| 20,558
|
py
|
Python
|
official/recommendation/ncf_keras_main.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 5
|
2020-11-16T06:26:19.000Z
|
2022-03-27T02:01:40.000Z
|
official/recommendation/ncf_keras_main.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 5
|
2020-11-13T18:50:30.000Z
|
2022-02-10T01:42:36.000Z
|
official/recommendation/ncf_keras_main.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 3
|
2017-08-15T11:29:03.000Z
|
2020-12-07T18:06:12.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
# pylint: disable=g-bad-import-order
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import ncf_input_pipeline
from official.recommendation import neumf_model
from official.utils.logs import logger
from official.utils.logs import mlperf_helper
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
from official.utils.flags import core as flags_core
FLAGS = flags.FLAGS
def metric_fn(logits, dup_mask, params):
dup_mask = tf.cast(dup_mask, tf.float32)
logits = tf.slice(logits, [0, 1], [-1, -1])
in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg(
logits,
dup_mask,
params["match_mlperf"])
metric_weights = tf.cast(metric_weights, tf.float32)
return in_top_k, metric_weights
class MetricLayer(tf.keras.layers.Layer):
"""Custom layer of metrics for NCF model."""
def __init__(self, params):
super(MetricLayer, self).__init__()
self.params = params
def call(self, inputs, training=False):
logits, dup_mask = inputs
if training:
hr_sum = 0.0
hr_count = 0.0
else:
metric, metric_weights = metric_fn(logits, dup_mask, self.params)
hr_sum = tf.reduce_sum(metric * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
self.add_metric(hr_sum, name="hr_sum", aggregation="mean")
self.add_metric(hr_count, name="hr_count", aggregation="mean")
return logits
class LossLayer(tf.keras.layers.Layer):
"""Pass-through loss layer for NCF model."""
def __init__(self, loss_normalization_factor):
# The loss may overflow in float16, so we use float32 instead.
super(LossLayer, self).__init__(dtype="float32")
self.loss_normalization_factor = loss_normalization_factor
self.loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="sum")
def call(self, inputs):
logits, labels, valid_pt_mask_input = inputs
loss = self.loss(
y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input)
loss = loss * (1.0 / self.loss_normalization_factor)
self.add_loss(loss)
return logits
class IncrementEpochCallback(tf.keras.callbacks.Callback):
"""A callback to increase the requested epoch for the data producer.
  The reason we need this is that we can only buffer a limited amount of
  data, so we keep a moving window to represent the buffer. This callback
  moves one of the window's boundaries at the start of each epoch.
"""
def __init__(self, producer):
self._producer = producer
def on_epoch_begin(self, epoch, logs=None):
self._producer.increment_request_epoch()
class CustomEarlyStopping(tf.keras.callbacks.Callback):
"""Stop training has reached a desired hit rate."""
def __init__(self, monitor, desired_value):
super(CustomEarlyStopping, self).__init__()
self.monitor = monitor
self.desired = desired_value
self.stopped_epoch = 0
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current and current >= self.desired:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning("Early stopping conditioned on metric `%s` "
"which is not available. Available metrics are: %s",
self.monitor, ",".join(list(logs.keys())))
return monitor_value
def _get_keras_model(params):
"""Constructs and returns the model."""
batch_size = params["batch_size"]
user_input = tf.keras.layers.Input(
shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32)
item_input = tf.keras.layers.Input(
shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32)
valid_pt_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool)
dup_mask_input = tf.keras.layers.Input(
shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32)
label_input = tf.keras.layers.Input(
shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool)
base_model = neumf_model.construct_model(user_input, item_input, params)
logits = base_model.output
zeros = tf.keras.layers.Lambda(
lambda x: x * 0)(logits)
softmax_logits = tf.keras.layers.concatenate(
[zeros, logits],
axis=-1)
# Custom training loop calculates loss and metric as a part of
# training/evaluation step function.
if not params["keras_use_ctl"]:
softmax_logits = MetricLayer(params)([softmax_logits, dup_mask_input])
# TODO(b/134744680): Use model.add_loss() instead once the API is well
# supported.
softmax_logits = LossLayer(batch_size)(
[softmax_logits, label_input, valid_pt_mask_input])
keras_model = tf.keras.Model(
inputs={
movielens.USER_COLUMN: user_input,
movielens.ITEM_COLUMN: item_input,
rconst.VALID_POINT_MASK: valid_pt_mask_input,
rconst.DUPLICATE_MASK: dup_mask_input,
rconst.TRAIN_LABEL_KEY: label_input},
outputs=softmax_logits)
keras_model.summary()
return keras_model
def run_ncf(_):
"""Run NCF training and eval with Keras."""
keras_utils.set_session_config(enable_xla=FLAGS.enable_xla)
if FLAGS.seed is not None:
print("Setting tf seed")
tf.random.set_seed(FLAGS.seed)
model_helpers.apply_clean(FLAGS)
if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras":
policy = tf.keras.mixed_precision.experimental.Policy(
"mixed_float16",
loss_scale=flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic"))
tf.keras.mixed_precision.experimental.set_policy(policy)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
params = ncf_common.parse_flags(FLAGS)
params["distribute_strategy"] = strategy
if not keras_utils.is_v2_0() and strategy is not None:
logging.error("NCF Keras only works with distribution strategy in TF 2.0")
return
if (params["keras_use_ctl"] and (
not keras_utils.is_v2_0() or strategy is None)):
logging.error(
"Custom training loop only works with tensorflow 2.0 and dist strat.")
return
if params["use_tpu"] and not params["keras_use_ctl"]:
logging.error("Custom training loop must be used when using TPUStrategy.")
return
batch_size = params["batch_size"]
time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps)
callbacks = [time_callback]
producer, input_meta_data = None, None
generate_input_online = params["train_dataset_path"] is None
if generate_input_online:
# Start data producing thread.
num_users, num_items, _, _, producer = ncf_common.get_inputs(params)
producer.start()
per_epoch_callback = IncrementEpochCallback(producer)
callbacks.append(per_epoch_callback)
else:
assert params["eval_dataset_path"] and params["input_meta_data_path"]
with tf.io.gfile.GFile(params["input_meta_data_path"], "rb") as reader:
input_meta_data = json.loads(reader.read().decode("utf-8"))
num_users = input_meta_data["num_users"]
num_items = input_meta_data["num_items"]
params["num_users"], params["num_items"] = num_users, num_items
if FLAGS.early_stopping:
early_stopping_callback = CustomEarlyStopping(
"val_HR_METRIC", desired_value=FLAGS.hr_threshold)
callbacks.append(early_stopping_callback)
(train_input_dataset, eval_input_dataset,
num_train_steps, num_eval_steps) = \
(ncf_input_pipeline.create_ncf_input_data(
params, producer, input_meta_data, strategy))
steps_per_epoch = None if generate_input_online else num_train_steps
with distribution_utils.get_strategy_scope(strategy):
keras_model = _get_keras_model(params)
optimizer = tf.keras.optimizers.Adam(
learning_rate=params["learning_rate"],
beta_1=params["beta1"],
beta_2=params["beta2"],
epsilon=params["epsilon"])
if FLAGS.fp16_implementation == "graph_rewrite":
optimizer = \
tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer,
loss_scale=flags_core.get_loss_scale(FLAGS,
default_for_fp16="dynamic"))
elif FLAGS.dtype == "fp16" and params["keras_use_ctl"]:
      # When keras_use_ctl is False, Model.fit() applies loss scaling
      # automatically, so we don't need to create a LossScaleOptimizer here.
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
optimizer,
tf.keras.mixed_precision.experimental.global_policy().loss_scale)
if params["keras_use_ctl"]:
train_loss, eval_results = run_ncf_custom_training(
params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=generate_input_online)
else:
      # TODO(b/138957587): Remove when force_v2_in_keras_compile is no longer
# a valid arg for this model. Also remove as a valid flag.
if FLAGS.force_v2_in_keras_compile is not None:
keras_model.compile(
optimizer=optimizer,
run_eagerly=FLAGS.run_eagerly,
experimental_run_tf_function=FLAGS.force_v2_in_keras_compile)
else:
keras_model.compile(optimizer=optimizer, run_eagerly=FLAGS.run_eagerly)
if not FLAGS.ml_perf:
# Create Tensorboard summary and checkpoint callbacks.
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint_path = os.path.join(FLAGS.model_dir, "checkpoint")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True)
callbacks += [summary_callback, checkpoint_callback]
history = keras_model.fit(
train_input_dataset,
epochs=FLAGS.train_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
validation_data=eval_input_dataset,
validation_steps=num_eval_steps,
verbose=2)
logging.info("Training done. Start evaluating")
eval_loss_and_metrics = keras_model.evaluate(
eval_input_dataset, steps=num_eval_steps, verbose=2)
logging.info("Keras evaluation is done.")
# Keras evaluate() API returns scalar loss and metric values from
# evaluation as a list. Here, the returned list would contain
# [evaluation loss, hr sum, hr count].
eval_hit_rate = eval_loss_and_metrics[1] / eval_loss_and_metrics[2]
# Format evaluation result into [eval loss, eval hit accuracy].
eval_results = [eval_loss_and_metrics[0], eval_hit_rate]
if history and history.history:
train_history = history.history
train_loss = train_history["loss"][-1]
stats = build_stats(train_loss, eval_results, time_callback)
return stats
def run_ncf_custom_training(params,
strategy,
keras_model,
optimizer,
callbacks,
train_input_dataset,
eval_input_dataset,
num_train_steps,
num_eval_steps,
generate_input_online=True):
"""Runs custom training loop.
Args:
params: Dictionary containing training parameters.
strategy: Distribution strategy to be used for distributed training.
keras_model: Model used for training.
optimizer: Optimizer used for training.
callbacks: Callbacks to be invoked between batches/epochs.
train_input_dataset: tf.data.Dataset used for training.
eval_input_dataset: tf.data.Dataset used for evaluation.
num_train_steps: Total number of steps to run for training.
num_eval_steps: Total number of steps to run for evaluation.
generate_input_online: Whether input data was generated by data producer.
When data is generated by data producer, then train dataset must be
re-initialized after every epoch.
Returns:
A tuple of train loss and a list of training and evaluation results.
"""
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
reduction="sum", from_logits=True)
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
def train_step(train_iterator):
"""Called once per step to train the model."""
def step_fn(features):
"""Computes loss and applied gradient per replica."""
with tf.GradientTape() as tape:
softmax_logits = keras_model(features)
# The loss can overflow in float16, so we cast to float32.
softmax_logits = tf.cast(softmax_logits, "float32")
labels = features[rconst.TRAIN_LABEL_KEY]
loss = loss_object(
labels,
softmax_logits,
sample_weight=features[rconst.VALID_POINT_MASK])
loss *= (1.0 / params["batch_size"])
if FLAGS.dtype == "fp16":
loss = optimizer.get_scaled_loss(loss)
grads = tape.gradient(loss, keras_model.trainable_variables)
if FLAGS.dtype == "fp16":
grads = optimizer.get_unscaled_gradients(grads)
# Converting gradients to dense form helps in perf on GPU for NCF
grads = neumf_model.sparse_to_dense_grads(
list(zip(grads, keras_model.trainable_variables)))
optimizer.apply_gradients(grads)
return loss
per_replica_losses = strategy.experimental_run_v2(
step_fn, args=(next(train_iterator),))
mean_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
def eval_step(eval_iterator):
"""Called once per eval step to compute eval metrics."""
def step_fn(features):
"""Computes eval metrics per replica."""
softmax_logits = keras_model(features)
in_top_k, metric_weights = metric_fn(softmax_logits,
features[rconst.DUPLICATE_MASK],
params)
hr_sum = tf.reduce_sum(in_top_k * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
return hr_sum, hr_count
per_replica_hr_sum, per_replica_hr_count = (
strategy.experimental_run_v2(
step_fn, args=(next(eval_iterator),)))
hr_sum = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_sum, axis=None)
hr_count = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_hr_count, axis=None)
return hr_sum, hr_count
if not FLAGS.run_eagerly:
train_step = tf.function(train_step)
eval_step = tf.function(eval_step)
for callback in callbacks:
callback.on_train_begin()
# Not writing tensorboard summaries if running in MLPerf.
if FLAGS.ml_perf:
eval_summary_writer, train_summary_writer = None, None
else:
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
eval_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "eval"))
train_summary_writer = tf.summary.create_file_writer(
os.path.join(summary_dir, "train"))
train_loss = 0
for epoch in range(FLAGS.train_epochs):
for cb in callbacks:
cb.on_epoch_begin(epoch)
    # Because the NCF dataset is sampled with randomness, not repeating
    # data elements in each epoch has a significant impact on convergence.
    # For that reason, the offline-generated TF record files contain all
    # epochs' worth of data, so we do not need to re-initialize the dataset
    # when reading from TF record files.
if generate_input_online:
train_input_iterator = iter(
strategy.experimental_distribute_dataset(train_input_dataset))
train_loss = 0
for step in range(num_train_steps):
current_step = step + epoch * num_train_steps
for c in callbacks:
c.on_batch_begin(current_step)
train_loss += train_step(train_input_iterator)
# Write train loss once in every 1000 steps.
if train_summary_writer and step % 1000 == 0:
with train_summary_writer.as_default():
tf.summary.scalar("training_loss", train_loss/(step + 1),
step=current_step)
for c in callbacks:
c.on_batch_end(current_step)
train_loss /= num_train_steps
logging.info("Done training epoch %s, epoch loss=%s.", epoch + 1,
train_loss)
eval_input_iterator = iter(
strategy.experimental_distribute_dataset(eval_input_dataset))
hr_sum = 0
hr_count = 0
for _ in range(num_eval_steps):
step_hr_sum, step_hr_count = eval_step(eval_input_iterator)
hr_sum += step_hr_sum
hr_count += step_hr_count
logging.info("Done eval epoch %s, hit_rate=%s.", epoch + 1,
hr_sum / hr_count)
if eval_summary_writer:
with eval_summary_writer.as_default():
tf.summary.scalar("hit_rate", hr_sum / hr_count, step=current_step)
if (FLAGS.early_stopping and
float(hr_sum / hr_count) > params["hr_threshold"]):
break
for c in callbacks:
c.on_train_end()
# Saving the model at the end of training.
if not FLAGS.ml_perf:
checkpoint = tf.train.Checkpoint(model=keras_model, optimizer=optimizer)
checkpoint_path = os.path.join(FLAGS.model_dir, "ctl_checkpoint")
checkpoint.save(checkpoint_path)
logging.info("Saving model as TF checkpoint: %s", checkpoint_path)
return train_loss, [None, hr_sum / hr_count]
def build_stats(loss, eval_result, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
loss: The final loss at training time.
eval_result: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback likely used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if loss:
stats["loss"] = loss
if eval_result:
stats["eval_loss"] = eval_result[0]
stats["eval_hit_rate"] = eval_result[1]
if time_callback:
timestamp_log = time_callback.timestamp_log
stats["step_timestamp_log"] = timestamp_log
stats["train_finish_time"] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats["avg_exp_per_second"] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def main(_):
with logger.benchmark_context(FLAGS), \
mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
run_ncf(FLAGS)
if __name__ == "__main__":
ncf_common.define_ncf_flags()
app.run(main)
| 36.003503
| 80
| 0.700068
|
151120386d65da077907f1925569d2d8201c498a
| 1,987
|
py
|
Python
|
bench/lvars-gvars.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
bench/lvars-gvars.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
bench/lvars-gvars.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
# __COPYRIGHT__
#
# Functions and data for timing different idioms for fetching a keyword
# value from a pair of dictionaries for local and global values. This was
# used to select how to most efficiently expand single $KEYWORD strings
# in src/engine/SCons/Subst.py.
def Func1(var, gvars, lvars):
"""lvars try:-except:, gvars try:-except:"""
for i in IterationList:
try:
x = lvars[var]
except KeyError:
try:
x = gvars[var]
except KeyError:
x = ''
def Func2(var, gvars, lvars):
"""lvars has_key(), gvars try:-except:"""
for i in IterationList:
if var in lvars:
x = lvars[var]
else:
try:
x = gvars[var]
except KeyError:
x = ''
def Func3(var, gvars, lvars):
"""lvars has_key(), gvars has_key()"""
for i in IterationList:
if var in lvars:
x = lvars[var]
elif var in gvars:
x = gvars[var]
else:
x = ''
def Func4(var, gvars, lvars):
"""eval()"""
for i in IterationList:
try:
x = eval(var, gvars, lvars)
except NameError:
x = ''
def Func5(var, gvars, lvars):
"""Chained get with default values"""
for i in IterationList:
x = lvars.get(var,gvars.get(var,''))
# Data to pass to the functions on each run. Each entry is a
# three-element tuple:
#
# (
# "Label to print describing this data run",
# ('positional', 'arguments'),
# {'keyword' : 'arguments'},
# ),
Data = [
(
"Neither in gvars or lvars",
('x', {}, {}),
{},
),
(
"Missing from lvars, found in gvars",
('x', {'x':1}, {}),
{},
),
(
"Found in lvars",
('x', {'x':1}, {'x':2}),
{},
),
]
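# --- Illustrative timing-harness sketch (added for clarity; not part of the
# original benchmark, whose real driver is expected to live elsewhere in
# bench/). The IterationList value is a hypothetical stand-in for the data
# the driver would normally supply. ---
if __name__ == '__main__':
    import timeit
    IterationList = range(1000)  # module-level global read by Func1..Func5
    for func in (Func1, Func2, Func3, Func4, Func5):
        for label, args, kwargs in Data:
            elapsed = timeit.timeit(lambda: func(*args, **kwargs), number=100)
            print("%-45s %-40s %.6f" % (func.__doc__, label, elapsed))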
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 23.104651
| 73
| 0.51082
|
db3ca56c48935777c658158ebe31e8a1baf238dc
| 3,293
|
py
|
Python
|
work/spider.py
|
BentosLYU/Twitter_Sentiment_Analyser
|
3ad1da1c35b4c1681541b759080e5fa843863576
|
[
"Apache-2.0"
] | null | null | null |
work/spider.py
|
BentosLYU/Twitter_Sentiment_Analyser
|
3ad1da1c35b4c1681541b759080e5fa843863576
|
[
"Apache-2.0"
] | null | null | null |
work/spider.py
|
BentosLYU/Twitter_Sentiment_Analyser
|
3ad1da1c35b4c1681541b759080e5fa843863576
|
[
"Apache-2.0"
] | null | null | null |
# @Time : 2020/7/15
# @Author : 江海彬
# @File : spider.py
import re
import time
import twint
import pymysql
# Spider main body
def spider(keyword, tweets):
    # Launch configuration
    c = twint.Config()
    c.Search = keyword
    c.Limit = Limit
    c.Count = True
    # Filter out tweets containing links
    # c.Links = "exclude"
    # Only crawl popular tweets
    # c.Popular_tweets = True
    # Filter out retweets
    c.Filter_retweets = True
    # Keep everything in English, otherwise later tokenization would be a jumble
    c.Lang = "en"
    # Time range to crawl
    c.Since = Since
    c.Until = Until
    # c.Year = "2020"
    # Enable in-memory storage
    c.Store_object = True
    c.Store_object_tweets_list = tweets
    # Hide console output
    c.Hide_output = True
    # Proxy
    # c.Proxy_host = '127.0.0.1'
    # c.Proxy_port = 7890
    # c.Proxy_type = "socks5"
    # Run the search
    twint.run.Search(c)
# # Save to the database
# def insert_db(sql, value):
#     conn = pymysql.connect(**config)
#     try:
#         with conn.cursor() as cursor:
#             cursor.executemany(sql, value)
#         conn.commit()
#         print("Database insert succeeded")
#     except Exception as e:
#         print("Database insert failed {}".format(e))
#         conn.rollback()
#     finally:
#         conn.close()
# # Wrap the crawl and database-save operations together
# def crawl_in_db(keyword):
#     # Crawl results
#     tweets = []
#     # Start crawling
#     spider(keyword, tweets)
#     # Save to the database
#     sql = "INSERT INTO `twitter` (`date`, `tweet`) VALUES (%s, %s)"
#     values = []
#     for tweet in tweets:
#         values.append((tweet.datestamp, tweet.tweet))
#     # for i in range(0, len(values), 1000):
#     #     insert_db(sql, values[i:i+1000])
#     #     time.sleep(4)
#     insert_db(sql, values)
#     print(keyword + " crawl finished")
#     # Add a delay
#     time.sleep(2)
# Wrap the crawl and file-saving operations together
def crawl_in_file(keyword):
    # Crawl results
    tweets = []
    # Start crawling
    spider(keyword, tweets)
    # Output file path
    output = './data/{}.txt'.format(keyword)
    # Write to file
    with open(output, 'wb') as f:
        for tweet_item in tweets:
            # Clean the text to prepare it for tokenization
            # print(tweet_item.tweet)
            # Remove strings containing &; # @ $ https, and short (1-2 character) strings;
            # after removal the surrounding spaces merge, leaving extra spaces |\b\w{1,2}\b
            first_filter = re.sub(r'\&\w*;|#\w*|@\w*|\$\w*|https?:\/\/.*\/*\w*|pic.twitter.com\/*\w*',
                                  '', tweet_item.tweet)
            # Replace carriage returns and newlines with spaces
            # (replacing with nothing could glue two words together; replacing with a space may leave extra spaces)
            second_filter = re.sub(r'\n+|\r+|\t+', ' ', first_filter)
            # Remove redundant spaces
            third_filter = re.sub(r'\s\s+', ' ', second_filter)
            emoji_pattern = re.compile(u'[\U00010000-\U0010ffff]')
            tweet = emoji_pattern.sub('', third_filter) + '\n'
            print(tweet)
            f.write(tweet.encode('UTF-8'))
    print(keyword + " crawl finished")
    # Add a delay
time.sleep(2)
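# --- Illustrative sketch of the cleaning pipeline above (added for clarity;
# not part of the original script; the sample text in the comment is
# hypothetical) ---
def _demo_clean(raw_tweet):
    """Apply the same three regex passes plus emoji stripping to one string."""
    no_refs = re.sub(r'\&\w*;|#\w*|@\w*|\$\w*|https?:\/\/.*\/*\w*|pic.twitter.com\/*\w*', '', raw_tweet)
    one_line = re.sub(r'\n+|\r+|\t+', ' ', no_refs)
    squeezed = re.sub(r'\s\s+', ' ', one_line)
    return re.compile(u'[\U00010000-\U0010ffff]').sub('', squeezed)
# e.g. _demo_clean("Check this out @user #topic https://t.co/abc") -> roughly "Check this out"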
if __name__ == '__main__':
    # #################### Global variables #################### #
    # Database configuration
    # config = {
    #     'host': 'localhost',
    #     'user': 'root',
    #     'password': 'root',
    #     'port': 3306,
    #     'database': 'com_jiang',
    #     'charset': 'utf8mb4'
    # }
    # Time range to crawl
    Since = "2020-01-01 00:00:00"
    Until = "2020-12-30 23:59:59"
    # Cap the number of crawled tweets to keep memory pressure manageable
    Limit = 10000
    # ############################################### #
    # Keyword list
    keywords = ["Li Ziqi"]
    for keyword in keywords:
        crawl_in_file(keyword.lower())
| 20.453416
| 102
| 0.505011
|
e2779e82ef72d12f45a37ef71165f37ad276f640
| 8,063
|
py
|
Python
|
cogs/eventsub.py
|
perplexistential/twitch-creamery
|
5d8a636600b480644696d74375cc12d2b5c9ea6d
|
[
"MIT"
] | null | null | null |
cogs/eventsub.py
|
perplexistential/twitch-creamery
|
5d8a636600b480644696d74375cc12d2b5c9ea6d
|
[
"MIT"
] | null | null | null |
cogs/eventsub.py
|
perplexistential/twitch-creamery
|
5d8a636600b480644696d74375cc12d2b5c9ea6d
|
[
"MIT"
] | 1
|
2022-02-17T23:10:31.000Z
|
2022-02-17T23:10:31.000Z
|
# Copyright Alex Morais (thatsamorais@gmail.com) for perplexistential.
# Important notice about using this cog
# The EventSub ext is made to receive eventsub webhook notifications from
# twitch. For those not familiar with eventsub, it allows you to subscribe to
# certain events, and when these events happen, Twitch will send you an HTTP
# request containing information on the event. This ext abstracts away the
# complex portions of this, integrating seamlessly into the twitchio Client
# event dispatching system.
# Warning:
# This ext requires you to have a public facing ip, and to be able to receive
# inbound requests.
# Note:
# Twitch requires EventSub targets to have TLS/SSL enabled (https). TwitchIO
# does not support this, as such you should use a reverse proxy such as nginx
# to handle TLS/SSL.
"""Cog composes bot features."""
import os
from bots.bot import Bot
from twitchio.ext import commands, eventsub
class Cog(commands.Cog):
"""Cog."""
def __init__(self, bot: Bot, data={}):
"""init."""
self.bot = bot
self.data = data
self.eventsub_client = eventsub.EventSubClient(
self.bot,
os.environ.get("EVENTSUB_SECRET_WORD", "some_secret_string"),
os.environ.get("EVENTSUB_CALLBACK", "/callback"),
)
@commands.Cog.event("event_ready")
async def is_ready(self):
"""Run when ready."""
print("eventsub cog is ready!")
bot = self.bot
for channel in bot.channels:
for event in self.data.get("events", []):
# for user in self.data.get("user_updated_users", []):
# await self.eventsub_client.subscribe_user_updated(user)
if event == "channel_raid":
await self.eventsub_client.subscribe_channel_raid(channel)
elif event == "channel_ban":
await self.eventsub_client.subscribe_channel_bans(channel)
elif event == "channel_unban":
await self.eventsub_client.subscribe_channel_unbans(channel)
elif event == "channel_subscription":
await self.eventsub_client.subscribe_channel_subscriptions(channel)
elif event == "channel_cheers":
await self.eventsub_client.subscribe_channel_cheers(channel)
elif event == "channel_update":
await self.eventsub_client.subscribe_channel_update(channel)
elif event == "channel_follow":
await self.eventsub_client.subscribe_channel_follows(channel)
elif event == "channel_moderators_add":
await self.eventsub_client.subscribe_channel_moderators_add(channel)
elif event == "channel_moderators_remove":
await self.eventsub_client.subscribe_channel_moderators_remove(
channel
)
elif event == "channel_hypetrain_begin":
await self.eventsub_client.subscribe_channel_hypetrain_begin(
channel
)
elif event == "channel_hypetrain_progress":
await self.eventsub_client.subscribe_channel_hypetrain_progress(
channel
)
elif event == "channel_hypetrain_end":
await self.eventsub_client.subscribe_channel_hypetrain_end(channel)
elif event == "channel_stream_start":
await self.eventsub_client.subscribe_channel_stream_start(channel)
elif event == "channel_stream_end":
await self.eventsub_client.subscribe_channel_stream_end(channel)
elif event == "channel_points_reward_added":
await self.eventsub_client.subscribe_channel_points_reward_added(
channel
)
elif event == "channel_points_reward_updated":
await self.eventsub_client.subscribe_channel_points_reward_updated(
channel
)
elif event == "channel_points_reward_removed":
await self.eventsub_client.subscribe_channel_points_reward_removed(
channel
)
elif event == "channel_points_redeemed":
await self.eventsub_client.subscribe_channel_points_redeemed(
channel
)
elif event == "channel_points_redeem_updated":
await self.eventsub_client.subscribe_channel_points_redeem_updated(
channel
)
port = self.data.get("port", "15543")
print(f"eventsub listening on port {port}")
self.bot.loop.create_task(self.eventsub_client.listen(port=port))
@bot.event()
async def eventsub_notification_user_update(payload: eventsub.UserUpdateData):
pass
@bot.event()
async def eventsub_notification_raid(payload: eventsub.ChannelRaidData):
pass
@bot.event()
async def eventsub_notification_bans(payload: eventsub.ChannelBanData):
pass
@bot.event()
async def eventsub_notification_unbans(payload: eventsub.ChannelUnbanData):
pass
@bot.event()
async def eventsub_notification_subscription(
payload: eventsub.ChannelSubscribeData,
):
pass
@bot.event()
async def eventsub_notification_cheer(payload: eventsub.ChannelCheerData):
pass
@bot.event()
async def eventsub_notification_update(payload: eventsub.ChannelUpdateData):
pass
@bot.event()
async def eventsub_notification_follow(payload: eventsub.ChannelFollowData):
pass
@bot.event()
async def eventsub_notification_moderator_add(
payload: eventsub.ChannelModeratorAddRemoveData,
):
pass
@bot.event()
async def eventsub_notification_moderator_remove(
payload: eventsub.ChannelModeratorAddRemoveData,
):
pass
@bot.event()
async def eventsub_notification_hypetrain_begin(
payload: eventsub.HypeTrainBeginProgressData,
):
pass
@bot.event()
async def eventsub_notification_hypetrain_progress(
payload: eventsub.HypeTrainBeginProgressData,
):
pass
@bot.event()
async def eventsub_notification_hypetrain_end(
payload: eventsub.HypeTrainEndData,
):
pass
@bot.event()
async def eventsub_notification_stream_start(
payload: eventsub.StreamOnlineData,
):
pass
@bot.event()
async def eventsub_notification_stream_end(payload: eventsub.StreamOfflineData):
pass
@bot.event()
async def eventsub_notification_channel_reward_add(
payload: eventsub.CustomRewardAddUpdateRemoveData,
):
pass
@bot.event()
async def eventsub_notification_channel_reward_update(
payload: eventsub.CustomRewardAddUpdateRemoveData,
):
pass
@bot.event()
async def eventsub_notification_channel_reward_remove(
payload: eventsub.CustomRewardAddUpdateRemoveData,
):
pass
@bot.event()
async def eventsub_notification_channel_reward_redeem(
payload: eventsub.CustomRewardRedemptionAddUpdateData,
):
pass
@bot.event()
async def eventsub_notification_channel_reward_redeem_updated(
payload: eventsub.CustomRewardRedemptionAddUpdateData,
):
pass
def prepare(bot: commands.Bot, data={}):
"""Load our cog with this module."""
bot.add_cog(Cog(bot, data=data))
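# --- Illustrative cog data sketch (added for clarity; not part of the original
# module). The keys mirror the lookups in is_ready above; the values are
# hypothetical. ---
EXAMPLE_EVENTSUB_DATA = {
    "events": ["channel_follow", "channel_subscription", "channel_points_redeemed"],
    "port": "15543",
}
# e.g. prepare(bot, data=EXAMPLE_EVENTSUB_DATA) would subscribe every joined
# channel to those topics and start the EventSub listener on port 15543.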
| 38.951691
| 397
| 0.612055
|
71ffde69e5f0a7834a58181b55ba3dc7a6955363
| 939
|
py
|
Python
|
demo/demo/middleware.py
|
dakrauth/picker
|
a3faffb5eecb0586339158560a68350bec1757b3
|
[
"MIT"
] | 2
|
2016-07-15T20:42:31.000Z
|
2018-06-18T10:35:12.000Z
|
demo/demo/middleware.py
|
dakrauth/picker
|
a3faffb5eecb0586339158560a68350bec1757b3
|
[
"MIT"
] | 1
|
2019-12-18T23:54:57.000Z
|
2019-12-18T23:54:57.000Z
|
demo/demo/middleware.py
|
dakrauth/picker
|
a3faffb5eecb0586339158560a68350bec1757b3
|
[
"MIT"
] | 2
|
2016-07-15T20:46:19.000Z
|
2016-07-31T23:55:58.000Z
|
from django.conf import settings
from pprint import pformat
def demo_middleware(get_response):
results = []
DEMO = getattr(settings, 'DEMO', {})
def middleware(request):
fake_datetime = DEMO.get('fake_datetime')
if fake_datetime:
pass
if DEMO.get('dump_post_data'):
if request.method == 'POST' and request.path != '/accounts/login/':
data = request.POST.dict()
data.pop('csrfmiddlewaretoken', None)
result = {
'request.user': request.user.id if request.user else None,
'post': data,
'url': request.path
}
results.append(result)
print('{sep}\n{data}\n{sep}'.format(
sep='-' * 40,
data=pformat(results)
))
return get_response(request)
return middleware
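# --- Illustrative settings sketch (added for clarity; not part of the original
# middleware). The key names mirror the DEMO lookups above; the values are
# hypothetical. ---
EXAMPLE_DEMO_SETTING = {
    'dump_post_data': True,   # print cleaned POST payloads to the console
    'fake_datetime': None,    # read above but currently a no-op hook
}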
| 31.3
| 79
| 0.511182
|
40c5dce9000e7d8b49d7d7629b6b87144b561ff2
| 9,397
|
py
|
Python
|
tests/test_power_network.py
|
stantontcady/psyspy
|
72bc0aaacea4191899a971ef46314f09d97b269f
|
[
"MIT"
] | 1
|
2016-06-02T16:58:36.000Z
|
2016-06-02T16:58:36.000Z
|
tests/test_power_network.py
|
stantontcady/psyspy
|
72bc0aaacea4191899a971ef46314f09d97b269f
|
[
"MIT"
] | null | null | null |
tests/test_power_network.py
|
stantontcady/psyspy
|
72bc0aaacea4191899a971ef46314f09d97b269f
|
[
"MIT"
] | null | null | null |
import unittest
from numpy import array, asarray, matrix, genfromtxt
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.sparse import lil_matrix
from mugridmod import Bus, PowerLine, PowerNetwork, PQBus, PVBus
# from ..microgrid_model import NodeError, PowerLineError, PowerNetworkError
def create_wecc_9_bus_network(set_slack_bus=True):
b1 = PVBus(P=0.716, V=1.04, theta0=0)
b2 = PVBus(P=1.63, V=1.025)
b3 = PVBus(P=0.85, V=1.025)
b4 = Bus(shunt_y=(0, 0.5*0.176 + 0.5*0.158))
b5 = PQBus(P=1.25, Q=0.5, shunt_y=(0, 0.5*0.176 + 0.5*0.306))
b6 = PQBus(P=0.9, Q=0.3, shunt_y=(0, 0.5*0.158 + 0.5*0.358))
b7 = Bus(shunt_y=(0, 0.5*0.306 + 0.5*0.149))
b8 = PQBus(P=1, Q=0.35, shunt_y=(0, 0.5*0.149 + 0.5*0.209))
b9 = Bus(shunt_y=(0, 0.5*0.358 + 0.5*0.209))
n = PowerNetwork(buses=[b1, b2, b3, b4, b5, b6, b7, b8, b9])
line1 = n.connect_buses(b1, b4, z=(0, 0.0576))
line4 = n.connect_buses(b4, b5, z=(0.01, 0.085))
line6 = n.connect_buses(b5, b7, z=(0.032, 0.161))
line5 = n.connect_buses(b4, b6, z=(0.017, 0.092))
line7 = n.connect_buses(b6, b9, z=(0.039, 0.17))
line8 = n.connect_buses(b7, b8, z=(0.0085, 0.072))
line3 = n.connect_buses(b3, b9, z=(0, 0.0586))
line9 = n.connect_buses(b8, b9, z=(0.0119, 0.1008))
line2 = n.connect_buses(b2, b7, z=(0, 0.0625))
if set_slack_bus is True:
n.set_slack_bus(b1)
return n
def create_wecc_9_bus_network_with_pq_buses():
b1 = PVBus(P=0.716, V=1.04, theta0=0)
b2 = PVBus(P=1.63, V=1.025)
b3 = PVBus(P=0.85, V=1.025)
b4 = Bus(shunt_y=(0, 0.5*0.176 + 0.5*0.158))
b5 = PQBus(P=1.25, Q=0.5, shunt_y=(0, 0.5*0.176 + 0.5*0.306))
b6 = PQBus(P=0.9, Q=0.3, shunt_y=(0, 0.5*0.158 + 0.5*0.358))
b7 = Bus(shunt_y=(0, 0.5*0.306 + 0.5*0.149))
b8 = PQBus(P=1, Q=0.35, shunt_y=(0, 0.5*0.149 + 0.5*0.209))
b9 = Bus(shunt_y=(0, 0.5*0.358 + 0.5*0.209))
n = PowerNetwork(buses=[b1, b2, b3, b4, b5, b6, b7, b8, b9])
line1 = n.connect_buses(b1, b4, z=(0, 0.0576))
line4 = n.connect_buses(b4, b5, z=(0.01, 0.085))
line6 = n.connect_buses(b5, b7, z=(0.032, 0.161))
line5 = n.connect_buses(b4, b6, z=(0.017, 0.092))
line7 = n.connect_buses(b6, b9, z=(0.039, 0.17))
line8 = n.connect_buses(b7, b8, z=(0.0085, 0.072))
line3 = n.connect_buses(b3, b9, z=(0, 0.0586))
line9 = n.connect_buses(b8, b9, z=(0.0119, 0.1008))
line2 = n.connect_buses(b2, b7, z=(0, 0.0625))
n.set_slack_bus(b1)
return n
class TestPowerNetwork(unittest.TestCase):
def test_node_functions(self):
def series_of_tests(object_to_test):
def assert_voltage_equal(expected_voltage, actual_voltage=None):
if actual_voltage is None:
actual_voltage_magnitude, actual_voltage_angle = object_to_test.get_current_voltage_polar()
expected_voltage_magnitude, expected_voltage_angle = expected_voltage
self.assertEqual(expected_voltage_magnitude, actual_voltage_magnitude)
self.assertEqual(expected_voltage_angle, actual_voltage_angle)
# initial conditions should be V=1, theta=0
expected_test_voltage = (1, 0)
assert_voltage_equal(expected_test_voltage)
expected_test_voltage = (1.2, 0.1)
_, _ = object_to_test.update_voltage_polar((1.2, 0.1), replace=True)
assert_voltage_equal(expected_test_voltage)
expected_test_voltage = (1.1, 0.053)
_, _ = object_to_test.update_voltage_polar((1.1, 0.053))
assert_voltage_equal(expected_test_voltage)
# node = Node()
# also want to test that Bus inherits these methods properly
bus = Bus()
# series_of_tests(node)
series_of_tests(bus)
def test_bus_functions(self):
bus = Bus()
# initial conditions should be V=1, theta=0
actual_voltage_magnitude, actual_voltage_angle = bus.get_current_voltage_polar()
expected_voltage_magnitude = 1
expected_voltage_angle = 0
self.assertEqual(expected_voltage_magnitude, actual_voltage_magnitude)
self.assertEqual(expected_voltage_angle, actual_voltage_angle)
expected_voltage_magnitude = array([1, 1.15])
expected_voltage_angle = array([0, 0.02])
_, _ = bus.update_voltage_polar((1.15, 0.02), replace=False)
assert_array_equal(expected_voltage_magnitude, bus.V)
assert_array_equal(expected_voltage_angle, bus.theta)
expected_voltage_magnitude = array([1, 1.15, 1.12])
_ = bus.update_voltage_magnitude(1.12, replace=False)
assert_array_equal(expected_voltage_magnitude, bus.V)
expected_voltage_angle = array([0, 0.02, 0.034])
_ = bus.update_voltage_angle(0.034, replace=False)
assert_array_equal(expected_voltage_angle, bus.theta)
expected_voltage_magnitude = array([1, 1.15, 1.14])
_ = bus.update_voltage_magnitude(1.14, replace=True)
assert_array_equal(expected_voltage_magnitude, bus.V)
expected_voltage_angle = array([0, 0.02, 0.023])
_ = bus.update_voltage_angle(0.023, replace=True)
assert_array_equal(expected_voltage_angle, bus.theta)
bus.reset_voltage_to_unity_magnitude_zero_angle()
expected_voltage_magnitude = array([1])
expected_voltage_angle = array([0])
assert_array_equal(expected_voltage_magnitude, bus.V)
assert_array_equal(expected_voltage_angle, bus.theta)
def test_generate_admittance_matrix(self):
expected_G = genfromtxt('resources/wecc9_conductance_matrix.csv', delimiter=',')
expected_B = genfromtxt('resources/wecc9_susceptance_matrix.csv', delimiter=',')
expected_G_optimal = genfromtxt('resources/wecc9_conductance_matrix_optimal_ordering.csv', delimiter=',')
expected_B_optimal = genfromtxt('resources/wecc9_susceptance_matrix_optimal_ordering.csv', delimiter=',')
network = create_wecc_9_bus_network()
actual_G, actual_B = network.generate_admittance_matrix(optimal_ordering=False)
actual_G = asarray(actual_G.todense())
actual_B = asarray(actual_B.todense())
assert_array_almost_equal(actual_G, expected_G, 8)
assert_array_almost_equal(actual_B, expected_B, 8)
actual_G_optimal, actual_B_optimal = network.generate_admittance_matrix(optimal_ordering=True)
actual_G_optimal = asarray(actual_G_optimal.todense())
actual_B_optimal = asarray(actual_B_optimal.todense())
assert_array_almost_equal(actual_G_optimal, expected_G_optimal, 8)
assert_array_almost_equal(actual_B_optimal, expected_B_optimal, 8)
def test_generate_jacobian_matrix(self):
expected_J = genfromtxt('resources/wecc9_jacobian_matrix.csv', delimiter=',')
expected_J_optimal = genfromtxt('resources/wecc9_jacobian_matrix_optimal_ordering.csv', delimiter=',')
network = create_wecc_9_bus_network()
_, _ = network.save_admittance_matrix(optimal_ordering=False)
actual_J = network._generate_jacobian_matrix()
actual_J = asarray(actual_J.todense())
assert_array_almost_equal(actual_J, expected_J, 8)
_, _ = network.save_admittance_matrix()
actual_J_optimal = network._generate_jacobian_matrix()
actual_J_optimal = asarray(actual_J_optimal.todense())
assert_array_almost_equal(actual_J_optimal, expected_J_optimal, 8)
def test_solve_power_flow(self):
def do_test(network_to_test):
actual_final_states = network_to_test.solve_power_flow(optimal_ordering=False)
assert_array_almost_equal(actual_final_states, expected_final_states, 8)
network_to_test.reset_voltages_to_flat_profile()
actual_final_states_optimal_ordering = network_to_test.solve_power_flow(optimal_ordering=True)
assert_array_almost_equal(actual_final_states_optimal_ordering, expected_final_states_optimal_ordering, 8)
network0 = create_wecc_9_bus_network()
network1 = create_wecc_9_bus_network_with_pq_buses()
expected_final_states = genfromtxt('resources/wecc9_final_states.csv', delimiter=',')
expected_final_states_optimal_ordering = genfromtxt('resources/wecc9_final_states_optimal_ordering.csv',
delimiter=',')
do_test(network0)
do_test(network1)
# def test_exceptions(self):
# network = create_wecc_9_bus_network(set_slack_bus=False)
#
# self.assertRaises(PowerNetworkError, network.get_slack_bus_id)
# self.assertRaises(PowerNetworkError, network._compute_and_save_line_power_flows, None)
# self.assertRaises(PowerNetworkError, network.get_admittance_matrix_index_bus_id_mapping)
# self.assertRaises(PowerNetworkError, network.get_admittance_matrix)
#
# line = PowerLine()
# self.assertRaises(PowerLineError, line.get_incident_buses)
#
# self.assertRaises(NodeError, Bus, [])
if __name__ == '__main__':
unittest.main()
| 41.764444
| 118
| 0.664893
|
e20fd23471e66bbf4ddcc42a74c7d99dbbfb8b8a
| 280
|
py
|
Python
|
demo.py
|
kevinmcaleer/pca9685_for_pico
|
2b795ece5b89493151830bf1899e729c14cb19fb
|
[
"MIT"
] | 1
|
2021-03-07T12:41:08.000Z
|
2021-03-07T12:41:08.000Z
|
demo.py
|
kevinmcaleer/pca9685_for_pico
|
2b795ece5b89493151830bf1899e729c14cb19fb
|
[
"MIT"
] | null | null | null |
demo.py
|
kevinmcaleer/pca9685_for_pico
|
2b795ece5b89493151830bf1899e729c14cb19fb
|
[
"MIT"
] | 3
|
2021-03-07T12:41:10.000Z
|
2021-07-19T02:25:17.000Z
|
from pca9685 import PCA9685
# from picocat import Servos
from machine import I2C, Pin
from servo import Servos
sda = Pin(0)
scl = Pin(1)
id = 0
i2c = I2C(id=id, sda=sda, scl=scl)
pca = PCA9685(i2c=i2c)
# pca.i2c = i2c
servo = Servos(i2c=i2c)
servo.position(index=0, degrees=180)
| 20
| 36
| 0.717857
|
ce3e1f9bdf51f09da6e58dc65654786684ba8cb1
| 365
|
py
|
Python
|
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P3_Task7_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | null | null | null |
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P3_Task7_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | null | null | null |
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P3_Task7_w1.py
|
MingjunGeng/Code-Knowledge
|
5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa
|
[
"MIT"
] | 1
|
2022-03-18T04:52:10.000Z
|
2022-03-18T04:52:10.000Z
|
#!/usr/bin/python3
# --- 001 > U5W1P1_Task6_w1
def solution(s, letter):
    count = 0
    # print(s)
    for x in s:
        if x == letter:
            count += 1
    return count
if __name__ == "__main__":
print('----------start------------')
s = "bacab"
letter = "b"
print(solution( s, letter ))
print('------------end------------')
| 20.277778
| 40
| 0.446575
|
9cd62c9949efbeadf1fa10910ec256496287b0d7
| 4,781
|
py
|
Python
|
arbiter/utils.py
|
kveretennicov/Arbiter
|
51008393ae8797da85bcd67807259a157f941dfd
|
[
"MIT"
] | 11
|
2015-11-15T17:01:56.000Z
|
2021-12-28T16:25:03.000Z
|
arbiter/utils.py
|
kveretennicov/Arbiter
|
51008393ae8797da85bcd67807259a157f941dfd
|
[
"MIT"
] | 8
|
2015-04-01T18:56:30.000Z
|
2022-02-20T18:27:34.000Z
|
arbiter/utils.py
|
kveretennicov/Arbiter
|
51008393ae8797da85bcd67807259a157f941dfd
|
[
"MIT"
] | 4
|
2017-05-18T05:40:15.000Z
|
2019-04-11T05:10:19.000Z
|
from datetime import timedelta
from functools import wraps, partial
from numbers import Integral
from time import sleep
class RetryCondition(object):
"""
Defines a retry condition, which by default doesn't trigger a retry
on either a value or an exception.
NOTE: I don't particularly like this solution.
"""
def __init__(self, function, kind='exception'):
"""
Returns a retry condition which will run the supplied function
        in either on_value or on_exception, depending on whether kind is
        'exception' or 'value'.
Args:
function: The function to run.
kind: the type of condition exception or value based.
"""
self._function = function
self._kind = kind
if kind != 'exception' and kind != 'value':
raise ValueError(kind)
def on_value(self, value):
"""
Returns True or False as to whether or not the given value
should trigger a retry event (Defaults to False).
Args:
value: The value to check against.
"""
if self._kind == 'value':
return self._function(value)
return False
def on_exception(self, exc):
"""
Returns True or False as to whether or not the given exception
should trigger a retry event (Defaults to False).
Args:
            exc (Exception): The exception to check against.
"""
if self._kind == 'exception':
return self._function(exc)
return False
def retry_handler(retries=0, delay=timedelta(), conditions=[]):
"""
A simple wrapper function that creates a handler function by using
on the retry_loop function.
Args:
retries (Integral): The number of times to retry if a failure occurs.
delay (timedelta, optional, 0 seconds): A timedelta representing
the amount time to delay between retries.
conditions (list): A list of retry conditions.
Returns:
function: The retry_loop function partialed.
"""
delay_in_seconds = delay.total_seconds()
return partial(retry_loop, retries, delay_in_seconds, conditions)
def retry(retries=0, delay=timedelta(), conditions=[]):
"""
A decorator for making a function that retries on failure.
Args:
retries (Integral): The number of times to retry if a failure occurs.
delay (timedelta, optional, 0 seconds): A timedelta representing
the amount of time to delay between retries.
conditions (list): A list of retry conditions.
"""
delay_in_seconds = delay.total_seconds()
def decorator(function):
"""
The actual decorator for retrying.
"""
@wraps(function)
def wrapper(*args, **kwargs):
"""
The actual wrapper for retrying.
"""
func = partial(function, *args, **kwargs)
return retry_loop(retries, delay_in_seconds, conditions, func)
return wrapper
return decorator
def retry_loop(retries, delay_in_seconds, conditions, function):
"""
Actually performs the retry loop used by the retry decorator
and handler functions. Failures for retrying are defined by
the RetryConditions passed in. If the maximum number of
retries has been reached then it raises the most recent
error or a ValueError on the most recent result value.
Args:
retries (Integral): Maximum number of times to retry.
        delay_in_seconds (float): Number of seconds to wait
            between retries.
        conditions (list): A list of retry conditions that can
            trigger a retry on a return value or exception.
function (function): The function to wrap.
Returns:
value: The return value from function
"""
if not isinstance(retries, Integral):
raise TypeError(retries)
if delay_in_seconds < 0:
raise TypeError(delay_in_seconds)
attempts = 0
value = None
err = None
while attempts <= retries:
try:
value = function()
for condition in conditions:
if condition.on_value(value):
break
else:
return value
except Exception as exc:
err = exc
for condition in conditions:
if condition.on_exception(exc):
break
else:
raise
attempts += 1
sleep(delay_in_seconds)
else:
if err:
raise err
else:
raise ValueError(
"Max retries ({}) reached and return the value is still {}."
.format(attempts, value)
)
return value
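# A minimal usage sketch combining the retry decorator with a value-based
# RetryCondition. `flaky_read` and its failure pattern are hypothetical,
# chosen only to exercise the retry loop defined above.
if __name__ == '__main__':
    _calls = {'count': 0}

    @retry(retries=3, delay=timedelta(seconds=0),
           conditions=[RetryCondition(lambda value: value is None, kind='value')])
    def flaky_read():
        """Returns None twice before succeeding, triggering two retries."""
        _calls['count'] += 1
        return 'ok' if _calls['count'] > 2 else None

    print(flaky_read())  # prints 'ok' after two retried attempts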
| 30.069182
| 77
| 0.608868
|
d946ef5b90ce1d72772ef2490f107c7810d86268
| 1,201
|
py
|
Python
|
app/errors/handlers.py
|
boerniee/project-mate
|
072b0e871525d527d438f2ec0238fa94c4547f85
|
[
"MIT"
] | 2
|
2019-12-18T09:42:18.000Z
|
2019-12-20T13:16:52.000Z
|
app/errors/handlers.py
|
boerniee/project-mate
|
072b0e871525d527d438f2ec0238fa94c4547f85
|
[
"MIT"
] | 17
|
2019-12-18T12:45:30.000Z
|
2021-02-06T14:44:36.000Z
|
app/errors/handlers.py
|
boerniee/project-mate
|
072b0e871525d527d438f2ec0238fa94c4547f85
|
[
"MIT"
] | null | null | null |
from flask import render_template, request
from app import db
from app.errors import bp
from app.api.errors import error_response as api_error_response
def wants_json_response():
return request.accept_mimetypes['application/json'] >= \
request.accept_mimetypes['text/html']
@bp.app_errorhandler(403)
def forbidden(error):
if wants_json_response():
return api_error_response(403)
return render_template('error/403.html'), 403
@bp.app_errorhandler(404)
def not_found_error(error):
if wants_json_response():
return api_error_response(404)
return render_template('error/404.html'), 404
@bp.app_errorhandler(400)
def bad_request_error(error):
if wants_json_response():
return api_error_response(400)
return render_template('error/400.html'), 400
@bp.app_errorhandler(500)
def internal_error(error):
db.session.rollback()
if wants_json_response():
return api_error_response(500)
return render_template('error/500.html'), 500
@bp.app_errorhandler(413)
def payload_too_large_error(error):
db.session.rollback()
if wants_json_response():
return api_error_response(413)
return render_template('error/413.html'), 413
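# A minimal usage sketch, left as comments: it assumes the blueprint is
# registered on a Flask app created elsewhere (not shown in this module).
# A client that prefers JSON gets the api_error_response body, while a
# browser receives the rendered HTML template.
#
#   client = app.test_client()
#   rv = client.get('/does-not-exist', headers={'Accept': 'application/json'})
#   assert rv.status_code == 404 and rv.is_json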
| 29.292683
| 63
| 0.742714
|
9cd09adda6cf9395b5d2cdab67ffd7f12d893fb8
| 17,592
|
py
|
Python
|
erica/erica_legacy/pyeric/eric.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 3
|
2022-01-31T15:17:17.000Z
|
2022-03-01T16:15:47.000Z
|
erica/erica_legacy/pyeric/eric.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 59
|
2022-01-31T14:04:20.000Z
|
2022-03-31T20:08:47.000Z
|
erica/erica_legacy/pyeric/eric.py
|
digitalservice4germany/erica
|
7e07d88f3db78ab6e4f7cccad8dfef2a4b3a71b2
|
[
"MIT"
] | 1
|
2022-03-10T09:24:28.000Z
|
2022-03-10T09:24:28.000Z
|
import logging
import os
import tempfile
from contextlib import contextmanager
from ctypes import Structure, c_int, c_uint32, c_char_p, c_void_p, pointer, CDLL, RTLD_GLOBAL
from dataclasses import dataclass
from typing import ByteString
from erica.config import get_settings, Settings
from erica.erica_legacy.pyeric.eric_errors import check_result, check_handle, check_xml, EricWrongTaxNumberError
logger = logging.getLogger('eric')
@dataclass
class EricResponse:
result_code: int
eric_response: ByteString
server_response: ByteString
pdf: ByteString = None
# As explained in the original ERiC documentation
class EricDruckParameterT(Structure):
_fields_ = [("version", c_int),
("vorschau", c_int),
("ersteSeite", c_int),
("duplexDruck", c_int),
("pdfName", c_char_p),
("fussText", c_char_p)]
# As explained in the original ERiC documentation
class EricVerschluesselungsParameterT(Structure):
_fields_ = [("version", c_int),
("zertifikatHandle", c_int),
("pin", c_char_p),
("abrufCode", c_char_p)]
# TODO: Unify usage of EricWrapper; rethink having eric_wrapper as a parameter
@contextmanager
def get_eric_wrapper():
"""This context manager returns an initialised eric wrapper; it will ensure that the ERiC API is shutdown after
use. """
eric = EricWrapper()
with tempfile.TemporaryDirectory() as tmp_dir:
eric.initialise(log_path=tmp_dir)
try:
yield eric
finally:
eric.shutdown()
with open(os.path.join(tmp_dir, 'eric.log')) as eric_log:
logger.debug(eric_log.read())
def verify_using_stick():
"""Calls into eric to verify whether we are using a token of type "Stick"."""
with get_eric_wrapper() as eric_wrapper:
try:
cert_properties = eric_wrapper.get_cert_properties()
return "<TokenTyp>Stick</TokenTyp>" in cert_properties
except Exception as e:
logger.debug("Exception while trying to verify Stick", exc_info=e)
return False
class EricWrapper(object):
"""A Python wrapper for the native ERiC library. It uses `ctypes` for calling
the respective functions of the `.so` file.
"""
ERIC_VALIDIERE = 1 << 1
ERIC_SENDE = 1 << 2
ERIC_DRUCKE = 1 << 5
cert_path = get_settings().get_cert_path().encode()
cert_pin = get_settings().cert_pin
def __init__(self):
"""Creates a new instance of the pyeric wrapper.
"""
self.eric = CDLL(Settings.get_eric_dll_path(), RTLD_GLOBAL)
self.eric_instance = None
logger.debug(f"eric: {self.eric}")
def initialise(self, log_path=None):
"""Initialises ERiC and a successful return from this method shall indicate
that the .so file was found and loaded successfully. Where `initialise` is called,
`shutdown` shall be called when done.
"""
fun_init = self.eric.EricMtInstanzErzeugen
fun_init.argtypes = [c_char_p, c_char_p]
fun_init.restype = c_void_p
curr_dir = os.path.dirname(os.path.realpath(__file__))
plugin_path = c_char_p(os.path.join(curr_dir, "../lib/plugins2").encode())
log_path = c_char_p(log_path.encode() if log_path else None)
self.eric_instance = fun_init(plugin_path, log_path)
logger.debug(f"fun_init instance: {self.eric_instance}")
def shutdown(self):
"""Shuts down ERiC and releases resources. One must not use the object afterwards."""
fun_shutdown = self.eric.EricMtInstanzFreigeben
fun_shutdown.argtypes = [c_void_p]
fun_shutdown.restype = c_int
res = fun_shutdown(self.eric_instance)
check_result(res)
logger.debug(f"fun_shutdown res: {res}")
def validate(self, xml, data_type_version):
"""Validate the given XML using the built-in plausibility checks."""
return self.process(xml, data_type_version, EricWrapper.ERIC_VALIDIERE)
def validate_and_send(self, xml, data_type_version):
"""Validate and (more importantly) send the given XML using the built-in
plausibility checks. For this a test certificate and pin must be provided and the
        `data_type_version` shall match the XML data. The printed PDF is rendered into a
        temporary file and returned on the `pdf` attribute of the result."""
with tempfile.NamedTemporaryFile() as temporary_pdf_file:
print_params = self.alloc_eric_druck_parameter_t(temporary_pdf_file.name)
cert_handle = self.get_cert_handle()
try:
cert_params = self.alloc_eric_verschluesselungs_parameter_t(cert_handle)
flags = EricWrapper.ERIC_SENDE | EricWrapper.ERIC_DRUCKE
eric_result = self.process(
xml, data_type_version,
flags,
cert_params=pointer(cert_params),
print_params=pointer(print_params))
temporary_pdf_file.seek(0)
eric_result.pdf = temporary_pdf_file.read()
return eric_result
finally:
self.close_cert_handle(cert_handle)
@staticmethod
def alloc_eric_druck_parameter_t(print_path):
return EricDruckParameterT(
version=2,
vorschau=0,
ersteSeite=0,
duplexDruck=0,
pdfName=c_char_p(print_path.encode()) if print_path else None,
fussText=None,
)
@staticmethod
def alloc_eric_verschluesselungs_parameter_t(zertifikat_handle, abruf_code=None):
return EricVerschluesselungsParameterT(
version=2,
zertifikatHandle=zertifikat_handle,
pin=EricWrapper.cert_pin.encode(),
abrufCode=abruf_code.encode() if abruf_code else None,
)
def get_cert_handle(self):
fun_get_cert_handle = self.eric.EricMtGetHandleToCertificate
fun_get_cert_handle.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p]
fun_get_cert_handle.restype = c_int
cert_handle_out = c_int()
res = fun_get_cert_handle(self.eric_instance, pointer(cert_handle_out), None, EricWrapper.cert_path)
check_result(res)
logger.debug(f"fun_get_cert_handle res: {res}")
return cert_handle_out
def close_cert_handle(self, cert_handle):
fun_close_cert_handle = self.eric.EricMtCloseHandleToCertificate
fun_close_cert_handle.argtypes = [c_void_p, c_int]
fun_close_cert_handle.restype = c_int
res = fun_close_cert_handle(self.eric_instance, cert_handle)
check_result(res)
logger.debug(f"fun_close_cert_handle res: {res}")
def get_cert_properties(self):
fun_get_cert_properties = self.eric.EricMtHoleZertifikatEigenschaften
fun_get_cert_properties.argtypes = [c_void_p, c_int, c_char_p, c_void_p]
fun_get_cert_properties.restype = c_int
try:
cert_handle = self.get_cert_handle()
return self._call_and_return_buffer_contents_and_decode(fun_get_cert_properties, cert_handle,
EricWrapper.cert_pin.encode())
finally:
if cert_handle:
self.close_cert_handle(cert_handle)
def process(self,
xml, data_type_version, flags,
transfer_handle=None, cert_params=None, print_params=None) -> EricResponse:
logger.debug(xml)
xml = xml.encode('utf-8')
data_type_version = data_type_version.encode('utf-8')
try:
eric_response_buffer = self.create_buffer()
server_response_buffer = self.create_buffer()
fun_process = self.eric.EricMtBearbeiteVorgang
fun_process.argtypes = [c_void_p, c_char_p, c_char_p, c_uint32,
c_void_p, c_void_p, c_void_p,
c_void_p, c_void_p]
fun_process.restype = c_int
res = fun_process(self.eric_instance, xml, data_type_version, flags,
print_params, cert_params, transfer_handle,
eric_response_buffer, server_response_buffer)
logger.debug(f"fun_process res: {res}")
eric_response = self.read_buffer(eric_response_buffer)
check_xml(eric_response)
server_response = self.read_buffer(server_response_buffer)
check_xml(server_response)
logger.debug(f"eric_response: {eric_response.decode()}")
logger.debug(f"server_response: {server_response.decode()}")
if server_response and res in [610101210, 610101292]:
                # only ERIC_TRANSFER_ERR_XML_NHEADER and ERIC_TRANSFER_ERR_XML_THEADER carry the error in the server response
_, th_res_code, th_error_message, ndh_err_xml = self.get_error_message_from_xml_response(
server_response)
server_err_msg = {'TH_RES_CODE': th_res_code,
'TH_ERR_MSG': th_error_message,
'NDH_ERR_XML': ndh_err_xml}
else:
server_err_msg = None
check_result(res, eric_response, server_response, server_err_msg)
return EricResponse(res, eric_response, server_response)
finally:
self.close_buffer(eric_response_buffer)
self.close_buffer(server_response_buffer)
def create_buffer(self):
fun_create_buffer = self.eric.EricMtRueckgabepufferErzeugen
fun_create_buffer.argtypes = [c_void_p]
fun_create_buffer.restype = c_void_p
handle = fun_create_buffer(self.eric_instance)
check_handle(handle)
logger.debug(f"fun_create_buffer handle: {handle}")
return handle
def read_buffer(self, buffer):
fun_read_buffer = self.eric.EricMtRueckgabepufferInhalt
fun_read_buffer.argtypes = [c_void_p, c_void_p]
fun_read_buffer.restype = c_char_p
return fun_read_buffer(self.eric_instance, buffer)
def close_buffer(self, buffer):
fun_close_buffer = self.eric.EricMtRueckgabepufferFreigeben
fun_close_buffer.argtypes = [c_void_p, c_void_p]
fun_close_buffer.restype = int
res = fun_close_buffer(self.eric_instance, buffer)
check_result(res)
logger.debug(f"fun_close_buffer res: {res}")
def create_th(self,
xml, datenart='ESt', verfahren='ElsterErklaerung', vorgang='send-Auth',
testmerker='700000004', hersteller_id=get_settings().hersteller_id,
daten_lieferant='Softwaretester ERiC',
version_client='1'):
fun_create_th = self.eric.EricMtCreateTH
fun_create_th.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p, c_char_p,
c_char_p, c_char_p, c_char_p, c_char_p,
c_char_p, c_void_p]
fun_create_th.restype = int
return self._call_and_return_buffer_contents(
fun_create_th, xml.encode(), verfahren.encode(), datenart.encode(),
vorgang.encode(), testmerker.encode(), hersteller_id.encode(), daten_lieferant.encode(),
version_client.encode(), None)
def process_verfahren(self, xml_string, verfahren, abruf_code=None, transfer_handle=None) \
-> EricResponse:
""" Send the xml_string to Elster with given verfahren and certificate parameters. """
cert_handle = self.get_cert_handle()
cert_params = self.alloc_eric_verschluesselungs_parameter_t(cert_handle, abruf_code=abruf_code)
try:
return self.process(xml_string, verfahren, EricWrapper.ERIC_SENDE | EricWrapper.ERIC_VALIDIERE,
transfer_handle=transfer_handle, cert_params=pointer(cert_params))
finally:
self.close_cert_handle(cert_handle)
def check_tax_number(self, tax_number):
fun_check_tax_number = self.eric.EricMtPruefeSteuernummer
fun_check_tax_number.argtypes = [c_void_p, c_char_p]
fun_check_tax_number.restype = c_int
try:
res = fun_check_tax_number(self.eric_instance, tax_number.encode())
check_result(res)
return True
except EricWrongTaxNumberError:
return False
def decrypt_data(self, data):
fun_decrypt_data = self.eric.EricMtDekodiereDaten
fun_decrypt_data.argtypes = [c_void_p, c_int, c_char_p, c_char_p, c_void_p]
fun_decrypt_data.restype = int
try:
cert_handle = self.get_cert_handle()
return self._call_and_return_buffer_contents_and_decode(
fun_decrypt_data,
cert_handle,
EricWrapper.cert_pin.encode(),
data.encode())
finally:
if cert_handle:
self.close_cert_handle(cert_handle)
def get_tax_offices(self, state_id):
"""
Get all the tax offices for a specific state
:param state_id: A valid state id for which the tax office list is provided
"""
fun_get_tax_offices = self.eric.EricMtHoleFinanzaemter
fun_get_tax_offices.argtypes = [c_void_p, c_char_p, c_void_p]
fun_get_tax_offices.restype = int
return self._call_and_return_buffer_contents_and_decode(
fun_get_tax_offices,
state_id.encode())
def get_state_id_list(self):
"""
Get a list of all the state codes
"""
fun_get_tax_offices = self.eric.EricMtHoleFinanzamtLandNummern
fun_get_tax_offices.argtypes = [c_void_p, c_void_p]
fun_get_tax_offices.restype = int
return self._call_and_return_buffer_contents_and_decode(
fun_get_tax_offices)
def get_electronic_aktenzeichen(self, aktenzeichen, bundesland):
""" Make the elster format out of the given aktenzeichen """
fun_make_elster_ewaz = self.eric.EricMtMakeElsterEWAz
fun_make_elster_ewaz.argtypes = [c_void_p, c_char_p, c_char_p, c_void_p]
fun_make_elster_ewaz.restype = int
return self._call_and_return_buffer_contents_no_xml(
fun_make_elster_ewaz,
aktenzeichen.encode(),
bundesland.encode()).decode()
def _call_and_return_buffer_contents(self, function, *args):
"""
:param function: The ERIC function to be called. The argtypes and restype have to be set before.
"""
buf = self.create_buffer()
try:
res = function(self.eric_instance, *args, buf)
check_result(res)
logger.debug(f"function {function.__name__} from _call_and_return_buffer_contents res {res}")
returned_xml = self.read_buffer(buf)
check_xml(returned_xml)
return returned_xml
finally:
self.close_buffer(buf)
def _call_and_return_buffer_contents_no_xml(self, function, *args):
"""
:param function: The ERIC function to be called. The argtypes and restype have to be set before.
"""
buf = self.create_buffer()
try:
res = function(self.eric_instance, *args, buf)
check_result(res)
logger.debug(f"function {function.__name__} from _call_and_return_buffer_contents res {res}")
return self.read_buffer(buf)
finally:
self.close_buffer(buf)
def _call_and_return_buffer_contents_and_decode(self, function, *args):
"""
This calls the ERIC function, reads the buffer and decodes the returned_xml.
:param function: The ERIC function to be called. The argtypes and restype have to be set before.
"""
return self._call_and_return_buffer_contents(function, *args).decode()
def get_error_message_from_xml_response(self, xml_response):
"""Extract error message from server response"""
fun_get_error_message = self.eric.EricMtGetErrormessagesFromXMLAnswer
fun_get_error_message.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
        fun_get_error_message.restype = int
transferticket_buffer = self.create_buffer()
th_res_code_buffer = self.create_buffer()
th_error_message_buffer = self.create_buffer()
ndh_err_xml_buffer = self.create_buffer()
try:
res_code = fun_get_error_message(self.eric_instance,
xml_response,
transferticket_buffer,
th_res_code_buffer,
th_error_message_buffer,
ndh_err_xml_buffer)
check_result(res_code)
transferticket = self.read_buffer(transferticket_buffer).decode()
th_res_code = self.read_buffer(th_res_code_buffer).decode()
th_error_message = self.read_buffer(th_error_message_buffer).decode()
ndh_err_xml = self.read_buffer(ndh_err_xml_buffer).decode()
finally:
self.close_buffer(ndh_err_xml_buffer)
self.close_buffer(th_error_message_buffer)
self.close_buffer(th_res_code_buffer)
self.close_buffer(transferticket_buffer)
return transferticket, th_res_code, th_error_message, ndh_err_xml
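# A minimal usage sketch, left as comments because real calls need the
# configured certificate (cert_path / cert_pin settings) and the native ERiC
# library. The XML payload and the data type version "ESt_2021" are placeholders.
#
#   with get_eric_wrapper() as eric_wrapper:
#       response = eric_wrapper.validate("<Elster>...</Elster>", "ESt_2021")
#       print(response.result_code, response.eric_response.decode())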
| 40.164384
| 119
| 0.649329
|
8e28c9b4f4db057a25a870caf66d7696171be747
| 1,136
|
py
|
Python
|
tests/ui_tests/test_ui_pixel_mapping_widget.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | 3
|
2019-05-31T08:38:25.000Z
|
2022-01-06T09:23:21.000Z
|
tests/ui_tests/test_ui_pixel_mapping_widget.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | 709
|
2019-02-06T08:23:07.000Z
|
2022-03-29T23:03:37.000Z
|
tests/ui_tests/test_ui_pixel_mapping_widget.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | 2
|
2020-03-06T09:58:56.000Z
|
2020-08-04T18:32:57.000Z
|
import pytest
from PySide2.QtCore import Qt
from nexus_constructor.pixel_mapping_widget import PixelMappingWidget
CYLINDER_TEXT = "cylinder"
ID_NO = 3
@pytest.fixture(scope="function")
def pixel_mapping_widget(qtbot, template):
return PixelMappingWidget(template, ID_NO, CYLINDER_TEXT)
def test_GIVEN_id_number_and_text_WHEN_creating_pixel_mapping_widget_THEN_widget_is_created_with_expected_values(
pixel_mapping_widget,
):
assert pixel_mapping_widget.pixelIDLabel.text() == "Pixel ID for {} #{}:".format(
CYLINDER_TEXT, ID_NO
)
def test_GIVEN_id_WHEN_calling_set_id_THEN_id_is_set(pixel_mapping_widget):
id = 5
pixel_mapping_widget.id = id
assert pixel_mapping_widget.pixelIDLineEdit.text() == str(id)
def test_GIVEN_id_has_been_given_WHEN_calling_get_id_THEN_id_is_returned(
qtbot, pixel_mapping_widget
):
id = 5
qtbot.keyClick(pixel_mapping_widget.pixelIDLineEdit, Qt.Key_5)
assert pixel_mapping_widget.id == id
def test_GIVEN_id_has_not_been_given_WHEN_calling_get_id_THEN_none_is_returned(
pixel_mapping_widget,
):
assert pixel_mapping_widget.id is None
| 25.244444
| 113
| 0.801937
|
9e72e11bcc7875b9ea7feade034413568ae2f61d
| 10,312
|
py
|
Python
|
sdk/python/pulumi_azure_native/alertsmanagement/smart_detector_alert_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/alertsmanagement/smart_detector_alert_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/alertsmanagement/smart_detector_alert_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SmartDetectorAlertRule']
class SmartDetectorAlertRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action_groups: Optional[pulumi.Input[pulumi.InputType['ActionGroupsInformationArgs']]] = None,
alert_rule_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
detector: Optional[pulumi.Input[pulumi.InputType['DetectorArgs']]] = None,
frequency: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
severity: Optional[pulumi.Input[Union[str, 'Severity']]] = None,
state: Optional[pulumi.Input[Union[str, 'AlertRuleState']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
throttling: Optional[pulumi.Input[pulumi.InputType['ThrottlingInformationArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The alert rule information
API Version: 2019-06-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ActionGroupsInformationArgs']] action_groups: The alert rule actions.
:param pulumi.Input[str] alert_rule_name: The name of the alert rule.
:param pulumi.Input[str] description: The alert rule description.
:param pulumi.Input[pulumi.InputType['DetectorArgs']] detector: The alert rule's detector.
:param pulumi.Input[str] frequency: The alert rule frequency in ISO8601 format. The time granularity must be in minutes and minimum value is 5 minutes.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scope: The alert rule resources scope.
:param pulumi.Input[Union[str, 'Severity']] severity: The alert rule severity.
:param pulumi.Input[Union[str, 'AlertRuleState']] state: The alert rule state.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[pulumi.InputType['ThrottlingInformationArgs']] throttling: The alert rule throttling information.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if action_groups is None and not opts.urn:
raise TypeError("Missing required property 'action_groups'")
__props__['action_groups'] = action_groups
__props__['alert_rule_name'] = alert_rule_name
__props__['description'] = description
if detector is None and not opts.urn:
raise TypeError("Missing required property 'detector'")
__props__['detector'] = detector
if frequency is None and not opts.urn:
raise TypeError("Missing required property 'frequency'")
__props__['frequency'] = frequency
if location is None:
location = 'global'
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if scope is None and not opts.urn:
raise TypeError("Missing required property 'scope'")
__props__['scope'] = scope
if severity is None and not opts.urn:
raise TypeError("Missing required property 'severity'")
__props__['severity'] = severity
if state is None and not opts.urn:
raise TypeError("Missing required property 'state'")
__props__['state'] = state
__props__['tags'] = tags
__props__['throttling'] = throttling
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:alertsmanagement:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-native:alertsmanagement/latest:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-nextgen:alertsmanagement/latest:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-native:alertsmanagement/v20190301:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-nextgen:alertsmanagement/v20190301:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-native:alertsmanagement/v20190601:SmartDetectorAlertRule"), pulumi.Alias(type_="azure-nextgen:alertsmanagement/v20190601:SmartDetectorAlertRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SmartDetectorAlertRule, __self__).__init__(
'azure-native:alertsmanagement:SmartDetectorAlertRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SmartDetectorAlertRule':
"""
Get an existing SmartDetectorAlertRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["action_groups"] = None
__props__["description"] = None
__props__["detector"] = None
__props__["frequency"] = None
__props__["location"] = None
__props__["name"] = None
__props__["scope"] = None
__props__["severity"] = None
__props__["state"] = None
__props__["tags"] = None
__props__["throttling"] = None
__props__["type"] = None
return SmartDetectorAlertRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="actionGroups")
def action_groups(self) -> pulumi.Output['outputs.ActionGroupsInformationResponse']:
"""
The alert rule actions.
"""
return pulumi.get(self, "action_groups")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The alert rule description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def detector(self) -> pulumi.Output['outputs.DetectorResponse']:
"""
The alert rule's detector.
"""
return pulumi.get(self, "detector")
@property
@pulumi.getter
def frequency(self) -> pulumi.Output[str]:
"""
The alert rule frequency in ISO8601 format. The time granularity must be in minutes and minimum value is 5 minutes.
"""
return pulumi.get(self, "frequency")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scope(self) -> pulumi.Output[Sequence[str]]:
"""
The alert rule resources scope.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def severity(self) -> pulumi.Output[str]:
"""
The alert rule severity.
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The alert rule state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def throttling(self) -> pulumi.Output[Optional['outputs.ThrottlingInformationResponse']]:
"""
The alert rule throttling information.
"""
return pulumi.get(self, "throttling")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
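# A minimal usage sketch, left as comments; all identifiers below (action group
# id, detector id, resource group, scope) are placeholders, and the *Args
# classes come from the ._inputs module imported above.
#
#   rule = SmartDetectorAlertRule(
#       "exampleRule",
#       action_groups=ActionGroupsInformationArgs(group_ids=["<action-group-resource-id>"]),
#       detector=DetectorArgs(id="FailureAnomaliesDetector"),
#       frequency="PT5M",
#       resource_group_name="<resource-group>",
#       scope=["<application-insights-resource-id>"],
#       severity="Sev3",
#       state="Enabled")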
| 42.61157
| 643
| 0.637607
|
64f70397c851946172abc0a29b069629b5e2eb33
| 6,109
|
py
|
Python
|
lib/config/default.py
|
ahmedelmahy/HRNet-Bottom-Up-Pose-Estimation
|
cf5831249999f0b307d5aa948ebdcdef981ba68f
|
[
"MIT"
] | 129
|
2020-06-30T02:52:28.000Z
|
2022-03-06T04:37:00.000Z
|
lib/config/default.py
|
ahmedelmahy/HRNet-Bottom-Up-Pose-Estimation
|
cf5831249999f0b307d5aa948ebdcdef981ba68f
|
[
"MIT"
] | 14
|
2020-07-20T03:34:08.000Z
|
2022-01-09T14:18:27.000Z
|
lib/config/default.py
|
ahmedelmahy/HRNet-Bottom-Up-Pose-Estimation
|
cf5831249999f0b307d5aa948ebdcdef981ba68f
|
[
"MIT"
] | 20
|
2020-07-21T09:37:28.000Z
|
2021-07-06T17:12:57.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The code is based on HigherHRNet-Human-Pose-Estimation.
# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn).
# Modified by Zigang Geng (aa397601@mail.ustc.edu.cn).
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.OUTPUT_DIR = ''
_C.LOG_DIR = ''
_C.DATA_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.RANK = 0
_C.VERBOSE = True
_C.DIST_BACKEND = 'nccl'
_C.MULTIPROCESSING_DISTRIBUTED = True
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = 'pose_hrnet'
_C.MODEL.INIT_WEIGHTS = True
_C.MODEL.PRETRAINED = ''
_C.MODEL.NUM_JOINTS = 17
_C.MODEL.EXTRA = CN(new_allowed=True)
_C.LOSS = CN()
_C.LOSS.NUM_STAGES = 1
_C.LOSS.WITH_HEATMAPS_LOSS = (True,)
_C.LOSS.HEATMAPS_LOSS_FACTOR = (1.0,)
_C.LOSS.WITH_OFFSETS_LOSS = (True,)
_C.LOSS.OFFSETS_LOSS_FACTOR = (1.0,)
# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'coco_kpt'
_C.DATASET.DATASET_TEST = ''
_C.DATASET.NUM_JOINTS = 17
_C.DATASET.MAX_NUM_PEOPLE = 30
_C.DATASET.TRAIN = 'train2017'
_C.DATASET.TEST = 'val2017'
_C.DATASET.GET_RESCORE_DATA = False
_C.DATASET.DATA_FORMAT = 'jpg'
_C.DATASET.USE_MASK = False
_C.DATASET.USE_BBOX_CENTER = False
_C.DATASET.OFFSET_REG = False
_C.DATASET.OFFSET_RADIUS = 4
_C.DATASET.BG_WEIGHT = [1.0]
# training data augmentation
_C.DATASET.MAX_ROTATION = 30
_C.DATASET.MIN_SCALE = 0.75
_C.DATASET.MAX_SCALE = 1.25
_C.DATASET.SCALE_TYPE = 'short'
_C.DATASET.MAX_TRANSLATE = 40
_C.DATASET.INPUT_SIZE = 512
_C.DATASET.OUTPUT_SIZE = [128, 256, 512]
_C.DATASET.FLIP = 0.5
# heatmap generator (default is OUTPUT_SIZE/64)
_C.DATASET.SIGMA = [2.0,]
_C.DATASET.CENTER_SIGMA = 4
_C.DATASET.BASE_SIZE = 256.0
_C.DATASET.BASE_SIGMA = 2.0
_C.DATASET.MIN_SIGMA = 1
_C.DATASET.WITH_CENTER = False
# train
_C.TRAIN = CN()
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = [90, 110]
_C.TRAIN.LR = 0.001
_C.TRAIN.OPTIMIZER = 'adam'
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.NESTEROV = False
_C.TRAIN.GAMMA1 = 0.99
_C.TRAIN.GAMMA2 = 0.0
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 140
_C.TRAIN.RESUME = False
_C.TRAIN.CHECKPOINT = ''
_C.TRAIN.IMAGES_PER_GPU = 32
_C.TRAIN.SHUFFLE = True
# testing
_C.TEST = CN()
# size of images for each device
_C.TEST.IMAGES_PER_GPU = 32
_C.TEST.FLIP_TEST = True
_C.TEST.SCALE_FACTOR = [1]
# group
_C.TEST.MODEL_FILE = ''
_C.TEST.IGNORE_CENTER = False
_C.TEST.NMS_KERNEL = 3
_C.TEST.NMS_PADDING = 1
_C.TEST.BBOX_GROUPING = False
_C.TEST.BBOX_FILE = ''
# for reg group
_C.TEST.REG_GROUP = True
_C.TEST.USE_HEATMAP = False
_C.TEST.REG_THRESHOLD = 0.98
_C.TEST.DIST_THRESHOLD = 10
_C.TEST.OVERLAP_THRESHOLD = 10
_C.TEST.USE_DECREASE_SCORE = True
_C.TEST.SCALE_DECREASE = 0.001
_C.TEST.KEYPOINT_THRESHOLD = 0.01
_C.TEST.ADJUST_THRESHOLD = 0.05
_C.TEST.MAX_ABSORB_DISTANCE = 75
_C.TEST.POOL_THRESHOLD1 = 300
_C.TEST.POOL_THRESHOLD2 = 200
_C.TEST.GUASSIAN_KERNEL = 6
_C.TEST.GUASSIAN_SIGMA = 2.0
_C.TEST.WITH_HEATMAPS = (True,)
_C.TEST.LOG_PROGRESS = True
_C.RESCORE = CN()
_C.RESCORE.USE = True
_C.RESCORE.END_EPOCH = 20
_C.RESCORE.LR = 0.001
_C.RESCORE.HIDDEN_LAYER = 256
_C.RESCORE.BATCHSIZE = 1024
_C.RESCORE.MODEL_ROOT = 'model/rescore/'
_C.RESCORE.MODEL_FILE = 'model/rescore/final_rescore_coco_kpt.pth'
_C.RESCORE.DATA_FILE = 'data/rescore_data/rescore_dataset_train_coco_kpt'
# debug
_C.DEBUG = CN()
_C.DEBUG.DEBUG = True
_C.DEBUG.SAVE_BATCH_IMAGES_GT = False
_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False
_C.DEBUG.SAVE_HEATMAPS_GT = True
_C.DEBUG.SAVE_HEATMAPS_PRED = True
_C.DEBUG.SAVE_TAGMAPS_PRED = True
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
if not os.path.exists(cfg.DATASET.ROOT):
cfg.DATASET.ROOT = os.path.join(
cfg.DATA_DIR, cfg.DATASET.ROOT
)
cfg.MODEL.PRETRAINED = os.path.join(
cfg.DATA_DIR, cfg.MODEL.PRETRAINED
)
if cfg.TEST.MODEL_FILE:
cfg.TEST.MODEL_FILE = os.path.join(
cfg.DATA_DIR, cfg.TEST.MODEL_FILE
)
if cfg.DATASET.WITH_CENTER:
cfg.DATASET.NUM_JOINTS += 1
cfg.MODEL.NUM_JOINTS = cfg.DATASET.NUM_JOINTS
if not isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)):
cfg.DATASET.OUTPUT_SIZE = [cfg.DATASET.OUTPUT_SIZE]
if not isinstance(cfg.LOSS.WITH_HEATMAPS_LOSS, (list, tuple)):
cfg.LOSS.WITH_HEATMAPS_LOSS = (cfg.LOSS.WITH_HEATMAPS_LOSS)
if not isinstance(cfg.LOSS.HEATMAPS_LOSS_FACTOR, (list, tuple)):
cfg.LOSS.HEATMAPS_LOSS_FACTOR = (cfg.LOSS.HEATMAPS_LOSS_FACTOR)
if not isinstance(cfg.LOSS.WITH_OFFSETS_LOSS, (list, tuple)):
cfg.LOSS.WITH_OFFSETS_LOSS = (cfg.LOSS.WITH_OFFSETS_LOSS)
if not isinstance(cfg.LOSS.OFFSETS_LOSS_FACTOR, (list, tuple)):
cfg.LOSS.OFFSETS_LOSS_FACTOR = (cfg.LOSS.OFFSETS_LOSS_FACTOR)
cfg.freeze()
def check_config(cfg):
assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.WITH_HEATMAPS_LOSS), \
'LOSS.NUM_SCALE should be the same as the length of LOSS.WITH_HEATMAPS_LOSS'
assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.HEATMAPS_LOSS_FACTOR), \
'LOSS.NUM_SCALE should be the same as the length of LOSS.HEATMAPS_LOSS_FACTOR'
assert cfg.LOSS.NUM_STAGES == len(cfg.TEST.WITH_HEATMAPS), \
'LOSS.NUM_SCALE should be the same as the length of TEST.WITH_HEATMAPS'
assert cfg.LOSS.NUM_STAGES == len(cfg.LOSS.OFFSETS_LOSS_FACTOR), \
'LOSS.NUM_SCALE should be the same as the length of LOSS.OFFSETS_LOSS_FACTOR'
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'w') as f:
print(_C, file=f)
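# A minimal usage sketch, left as comments; the config file path and the
# override pair are placeholders. Any object exposing `cfg` and `opts`
# attributes (e.g. parsed argparse args) works with update_config.
#
#   import argparse
#   args = argparse.Namespace(cfg='experiments/example.yaml',
#                             opts=['TEST.FLIP_TEST', 'False'])
#   update_config(_C, args)
#   check_config(_C)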
| 26.79386
| 86
| 0.708954
|
0ddf9e342b83ff3cc67968428438e9c79ac01635
| 223
|
py
|
Python
|
contrib/libs/zstd01/gen.py
|
SitdikovRustam/CatBoost
|
39fb9dfddb24e977ed87efc71063b03cd4bc8f16
|
[
"Apache-2.0"
] | 1
|
2017-08-27T20:55:56.000Z
|
2017-08-27T20:55:56.000Z
|
contrib/libs/zstd01/gen.py
|
dsferz/machinelearning_yandex
|
8fde8314c5c70299ece8b8f00075ddfcd5e07ddf
|
[
"Apache-2.0"
] | null | null | null |
contrib/libs/zstd01/gen.py
|
dsferz/machinelearning_yandex
|
8fde8314c5c70299ece8b8f00075ddfcd5e07ddf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys
print '#pragma once\n'
for i in sys.stdin:
i = i.strip()
if '.' not in i:
print '#define', i, 'Legacy_' + i
print '#define ZSTD_decompressBlock Legacy_ZSTD_decompressBlock'
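# Usage sketch (file names are placeholders): the script reads one symbol per
# line on stdin, skips lines containing a dot, and emits #define renames:
#   python2 gen.py < legacy_symbols.txt > legacy_renames.h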
| 15.928571
| 64
| 0.64574
|
73870984491e8ed833248d1a1d272ac965a4e894
| 3,592
|
py
|
Python
|
data/global-configuration/packs/azure/hostingdrivers/hostingdriver_azure.py
|
naparuba/opsbro
|
98618a002cd47250d21e7b877a24448fc95fec80
|
[
"MIT"
] | 32
|
2017-08-29T08:57:16.000Z
|
2021-04-21T08:53:04.000Z
|
data/global-configuration/packs/azure/hostingdrivers/hostingdriver_azure.py
|
naparuba/opsbro
|
98618a002cd47250d21e7b877a24448fc95fec80
|
[
"MIT"
] | 108
|
2017-09-04T19:59:28.000Z
|
2022-03-31T08:12:07.000Z
|
data/global-configuration/packs/azure/hostingdrivers/hostingdriver_azure.py
|
naparuba/kunai
|
98618a002cd47250d21e7b877a24448fc95fec80
|
[
"MIT"
] | 8
|
2015-02-03T12:30:53.000Z
|
2017-07-29T19:43:54.000Z
|
import os
from opsbro.httpclient import get_http_exceptions, httper
from opsbro.hostingdrivermanager import InterfaceHostingDriver, HOSTING_DRIVER_LAYER_CLOUD
from opsbro.jsonmgr import jsoner
class AzureHostingDriver(InterfaceHostingDriver):
name = 'azure'
layer = HOSTING_DRIVER_LAYER_CLOUD
def __init__(self):
super(AzureHostingDriver, self).__init__()
self.__meta_data = None
    # We are on an Azure host if /sys/class/dmi/id/board_vendor contains 'Microsoft Corporation'
def is_active(self):
if os.path.exists('/sys/class/dmi/id/board_vendor'):
with open('/sys/class/dmi/id/board_vendor') as f:
buf = f.read().strip()
if buf == 'Microsoft Corporation':
return True
return False
# {u'compute': {u'location': u'westeurope',
# u'name': u'test-ubuntu',
# u'offer': u'UbuntuServer',
# u'osType': u'Linux',
# u'placementGroupId': u'',
# u'platformFaultDomain': u'0',
# u'platformUpdateDomain': u'0',
# u'publisher': u'Canonical',
# u'resourceGroupName': u'groupetest',
# u'sku': u'17.10',
# u'subscriptionId': u'ef6838db-2a0d-4e54-b2c7-a000c30cdb82',
# u'tags': u'keu1:value1;key2:value33',
# u'version': u'17.10.201802220',
# u'vmId': u'3489ed45-f7b8-4fd4-9967-fbc2457e551f',
# u'vmSize': u'Standard_A1'},
# u'network': {u'interface': [{u'ipv4': {u'ipAddress': [{u'privateIpAddress': u'10.0.0.4',
# u'publicIpAddress': u'13.95.156.4'}],
# u'subnet': [{u'address': u'10.0.0.0',
# u'prefix': u'24'}]},
# u'ipv6': {u'ipAddress': []},
# u'macAddress': u'000D3A2D6367'}]}}
def get_meta_data(self):
if self.__meta_data is not None:
return self.__meta_data
uri = 'http://169.254.169.254/metadata/instance?api-version=2017-08-01'
try:
s = httper.get(uri, headers={'Metadata': 'True'})
except get_http_exceptions() as exp:
            self.logger.error('Cannot get public IP for your Azure instance from %s. Error: %s. Exiting.' % (uri, exp))
raise
raw_data = jsoner.loads(s)
# We want to merge the structure into a more flatten one between compute and network
self.__meta_data = raw_data['compute']
first_network_interface = raw_data['network']['interface'][0]
self.__meta_data.update(first_network_interface)
return self.__meta_data
def get_public_address(self):
try:
meta_data = self.get_meta_data()
except Exception as exp:
            self.logger.error('Cannot get public IP for your Azure instance. Error: %s' % exp)
raise
addr = meta_data['ipv4']['ipAddress'][0]['publicIpAddress']
return addr
# As a unique uuid we can give our vmId (that's a uuid)
def get_unique_uuid(self):
try:
meta_data = self.get_meta_data()
except Exception as exp:
self.logger.error('Cannot get a unique uuid for your Azure instance. Error: %s' % exp)
raise
addr = meta_data['vmId']
self.logger.info('Using Azure instance id as unique uuid for this node.')
return addr
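# A minimal usage sketch, left as comments because it is only meaningful on an
# Azure VM where the instance metadata endpoint is reachable; elsewhere
# is_active() returns False.
#
#   driver = AzureHostingDriver()
#   if driver.is_active():
#       print(driver.get_unique_uuid(), driver.get_public_address())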
| 40.818182
| 116
| 0.558463
|
4e5f354d3357221f8ede60f38f79a70cd86a153a
| 7,249
|
py
|
Python
|
pycardano/backend/blockfrost.py
|
henryyuanheng-wang/pycardano
|
d58c53791ffef542762e6d0220d4ccd1c0950e5e
|
[
"MIT"
] | 72
|
2022-01-09T03:54:06.000Z
|
2022-03-30T22:05:44.000Z
|
pycardano/backend/blockfrost.py
|
cffls/pycardano
|
62161ad072964406d9ce79b3605d55a276ff98b9
|
[
"MIT"
] | 13
|
2022-02-19T13:08:11.000Z
|
2022-03-30T16:57:33.000Z
|
pycardano/backend/blockfrost.py
|
henryyuanheng-wang/pycardano
|
d58c53791ffef542762e6d0220d4ccd1c0950e5e
|
[
"MIT"
] | 15
|
2022-02-07T23:54:51.000Z
|
2022-03-30T17:06:12.000Z
|
import os
import tempfile
import time
from typing import Dict, List, Union
from blockfrost import ApiUrls, BlockFrostApi
from pycardano.address import Address
from pycardano.backend.base import ChainContext, GenesisParameters, ProtocolParameters
from pycardano.exception import TransactionFailedException
from pycardano.hash import SCRIPT_HASH_SIZE, DatumHash, ScriptHash
from pycardano.network import Network
from pycardano.plutus import ExecutionUnits
from pycardano.transaction import (
Asset,
AssetName,
MultiAsset,
TransactionInput,
TransactionOutput,
UTxO,
Value,
)
__all__ = ["BlockFrostChainContext"]
class BlockFrostChainContext(ChainContext):
"""A `BlockFrost <https://blockfrost.io/>`_ API wrapper for the client code to interact with.
Args:
project_id (str): A BlockFrost project ID obtained from https://blockfrost.io.
network (Network): Network to use.
"""
def __init__(self, project_id: str, network: Network = Network.TESTNET):
self._network = network
self._project_id = project_id
self._base_url = (
ApiUrls.testnet.value
if self.network == Network.TESTNET
else ApiUrls.mainnet.value
)
self.api = BlockFrostApi(project_id=self._project_id, base_url=self._base_url)
self._epoch_info = self.api.epoch_latest()
self._epoch = None
self._genesis_param = None
self._protocol_param = None
def _check_epoch_and_update(self):
if int(time.time()) >= self._epoch_info.end_time:
self._epoch_info = self.api.epoch_latest()
return True
else:
return False
@property
def network(self) -> Network:
return self._network
@property
def epoch(self) -> int:
if not self._epoch or self._check_epoch_and_update():
self._epoch = self.api.epoch_latest().epoch
return self._epoch
@property
def last_block_slot(self) -> int:
block = self.api.block_latest()
return block.slot
@property
def genesis_param(self) -> GenesisParameters:
if not self._genesis_param or self._check_epoch_and_update():
params = vars(self.api.genesis())
self._genesis_param = GenesisParameters(**params)
return self._genesis_param
@property
def protocol_param(self) -> ProtocolParameters:
if not self._protocol_param or self._check_epoch_and_update():
params = self.api.epoch_latest_parameters()
self._protocol_param = ProtocolParameters(
min_fee_constant=int(params.min_fee_b),
min_fee_coefficient=int(params.min_fee_a),
max_block_size=int(params.max_block_size),
max_tx_size=int(params.max_tx_size),
max_block_header_size=int(params.max_block_header_size),
key_deposit=int(params.key_deposit),
pool_deposit=int(params.pool_deposit),
pool_influence=float(params.a0),
monetary_expansion=float(params.rho),
treasury_expansion=float(params.tau),
decentralization_param=float(params.decentralisation_param),
extra_entropy=params.extra_entropy,
protocol_major_version=int(params.protocol_major_ver),
protocol_minor_version=int(params.protocol_minor_ver),
min_utxo=int(params.min_utxo),
price_mem=float(params.price_mem),
price_step=float(params.price_step),
max_tx_ex_mem=int(params.max_tx_ex_mem),
max_tx_ex_steps=int(params.max_tx_ex_steps),
max_block_ex_mem=int(params.max_block_ex_mem),
max_block_ex_steps=int(params.max_block_ex_steps),
max_val_size=int(params.max_val_size),
collateral_percent=int(params.collateral_percent),
max_collateral_inputs=int(params.max_collateral_inputs),
coins_per_utxo_word=int(params.coins_per_utxo_word),
)
return self._protocol_param
def utxos(self, address: str) -> List[UTxO]:
results = self.api.address_utxos(address, gather_pages=True)
utxos = []
for result in results:
tx_in = TransactionInput.from_primitive(
[result.tx_hash, result.output_index]
)
amount = result.amount
lovelace_amount = None
multi_assets = MultiAsset()
for item in amount:
if item.unit == "lovelace":
lovelace_amount = int(item.quantity)
else:
# The utxo contains Multi-asset
data = bytes.fromhex(item.unit)
policy_id = ScriptHash(data[:SCRIPT_HASH_SIZE])
asset_name = AssetName(data[SCRIPT_HASH_SIZE:])
if policy_id not in multi_assets:
multi_assets[policy_id] = Asset()
multi_assets[policy_id][asset_name] = int(item.quantity)
datum_hash = (
DatumHash.from_primitive(result.data_hash) if result.data_hash else None
)
if not multi_assets:
tx_out = TransactionOutput(
Address.from_primitive(address),
amount=lovelace_amount,
datum_hash=datum_hash,
)
else:
tx_out = TransactionOutput(
Address.from_primitive(address),
amount=Value(lovelace_amount, multi_assets),
datum_hash=datum_hash,
)
utxos.append(UTxO(tx_in, tx_out))
return utxos
def submit_tx(self, cbor: Union[bytes, str]):
if isinstance(cbor, str):
cbor = bytes.fromhex(cbor)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(cbor)
self.api.transaction_submit(f.name)
os.remove(f.name)
def evaluate_tx(self, cbor: Union[bytes, str]) -> Dict[str, ExecutionUnits]:
"""Evaluate execution units of a transaction.
Args:
cbor (Union[bytes, str]): The serialized transaction to be evaluated.
Returns:
            Dict[str, ExecutionUnits]: A mapping from each redeemer tag to the execution units calculated for it
        Raises:
            :class:`TransactionFailedException`: When the transaction fails to evaluate.
"""
if isinstance(cbor, bytes):
cbor = cbor.hex()
with tempfile.NamedTemporaryFile(delete=False, mode="w") as f:
f.write(cbor)
result = self.api.transaction_evaluate(f.name).result
os.remove(f.name)
return_val = {}
if not hasattr(result, "EvaluationResult"):
raise TransactionFailedException(result)
else:
for k in vars(result.EvaluationResult):
return_val[k] = ExecutionUnits(
getattr(result.EvaluationResult, k).memory,
getattr(result.EvaluationResult, k).steps,
)
return return_val
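# A minimal usage sketch, left as comments because it needs network access and
# a real BlockFrost project id; the id and the testnet address are placeholders.
#
#   context = BlockFrostChainContext("<blockfrost-project-id>", network=Network.TESTNET)
#   for utxo in context.utxos("addr_test1..."):
#       print(utxo.input, utxo.output.amount)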
| 37.755208
| 115
| 0.616499
|
3fc8be2ce0a2e6444697cbdf47197bdb159bf49a
| 11,523
|
py
|
Python
|
tensorflow_ranking/python/head_test.py
|
FroilanYue/ranking
|
31fc134816cc4974a46a11e7bb2df0066d0a88f0
|
[
"Apache-2.0"
] | 1
|
2021-03-04T21:54:41.000Z
|
2021-03-04T21:54:41.000Z
|
tensorflow_ranking/python/head_test.py
|
FroilanYue/ranking
|
31fc134816cc4974a46a11e7bb2df0066d0a88f0
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_ranking/python/head_test.py
|
FroilanYue/ranking
|
31fc134816cc4974a46a11e7bb2df0066d0a88f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ranking head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_ranking.python import head as ranking_head
from tensorflow_ranking.python import metrics as metrics_lib
def _initialize_variables(test_case, scaffold):
"""Initializes variables for a session.
Args:
test_case: A TensorFlowTestCase object.
scaffold: A train.Scaffold object.
"""
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _make_loss_fn(weights_feature_name=None):
"""Make a fake loss function."""
def _loss_fn(labels, logits, features):
"""A fake loss function."""
logits = tf.convert_to_tensor(value=logits)
labels = tf.cast(labels, dtype=tf.float32)
weights = features[
weights_feature_name] if weights_feature_name is not None else 1.
loss = tf.reduce_sum(input_tensor=logits -
labels) * tf.reduce_sum(input_tensor=weights)
return loss
return _loss_fn
class RankingHeadTest(tf.test.TestCase):
def setUp(self):
tf.compat.v1.reset_default_graph()
self._default_features_dict = {}
self._default_signature = (tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
logits = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 0., 2.]]
weights = [1.] * 3
self._default_logits = logits
self._default_labels = labels
self._default_loss = 9.
self._default_weights = weights
self._default_weights_feature_name = 'weights'
self._default_weighted_loss = 27
def test_name(self):
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(), name='fake_head')
self.assertEqual('fake_head', head.name)
def test_predict(self):
with tf.Graph().as_default():
head = ranking_head.create_ranking_head(loss_fn=_make_loss_fn())
logits = [[1., 3.], [1., 2.]]
spec = head.create_estimator_spec(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.PREDICT,
logits=logits)
# Assert spec contains expected tensors.
self.assertIsNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNone(spec.train_op)
self.assertItemsEqual((self._default_signature, 'regression', 'predict'),
spec.export_outputs.keys())
# Assert predictions.
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(logits, predictions)
self.assertAllClose(
logits,
sess.run(spec.export_outputs[self._default_signature].value))
def test_eval(self):
with tf.Graph().as_default():
metric_fns = {
'metric/precision@1':
metrics_lib.make_ranking_metric_fn(
metrics_lib.RankingMetricKey.PRECISION, topn=1),
}
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(), eval_metric_fns=metric_fns)
# Create estimator spec.
spec = head.create_estimator_spec(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.EVAL,
logits=self._default_logits,
labels=self._default_labels)
expected_metrics = [
'labels_mean',
'logits_mean',
'metric/precision@1',
]
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
self.assertItemsEqual(expected_metrics, spec.eval_metric_ops.keys())
# Assert predictions, loss, and metrics.
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
update_ops = {
k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops
}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(self._default_loss, loss)
self.assertItemsEqual(expected_metrics, metrics.keys())
def test_train_create_loss(self):
with tf.Graph().as_default():
head = ranking_head.create_ranking_head(loss_fn=_make_loss_fn())
# Create loss.
training_loss = head.create_loss(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.TRAIN,
logits=self._default_logits,
labels=self._default_labels)[0]
with self.cached_session():
_initialize_variables(self, tf.compat.v1.train.Scaffold())
self.assertAllClose(self._default_loss, training_loss.eval())
def test_train(self):
with tf.Graph().as_default():
expected_train_result = b'my_train_op'
def _train_op_fn(loss):
with tf.control_dependencies((tf.compat.v1.assert_near(
tf.cast(self._default_loss, dtype=tf.float32),
tf.cast(loss, dtype=tf.float32),
name='assert_loss'),)):
return tf.constant(expected_train_result)
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(), train_op_fn=_train_op_fn)
# Create estimator spec.
spec = head.create_estimator_spec(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.TRAIN,
logits=self._default_logits,
labels=self._default_labels)
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
# Assert predictions, loss, and train_op.
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(self._default_loss, loss)
self.assertEqual(expected_train_result, train_result)
def test_train_with_optimizer(self):
with tf.Graph().as_default():
expected_train_result = b'my_train_op'
expected_loss = self._default_loss
class _Optimizer(object):
def minimize(self, loss, global_step):
del global_step
with tf.control_dependencies((tf.compat.v1.assert_equal(
tf.cast(expected_loss, dtype=tf.float32),
tf.cast(loss, dtype=tf.float32),
name='assert_loss'),)):
return tf.constant(expected_train_result)
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(), optimizer=_Optimizer())
# Create estimator spec.
spec = head.create_estimator_spec(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.TRAIN,
logits=self._default_logits,
labels=self._default_labels)
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
def test_train_with_regularization_losses(self):
with tf.Graph().as_default():
regularization_losses = [1.5, 0.5]
expected_regularization_loss = 2.
expected_train_result = b'my_train_op'
expected_loss = expected_regularization_loss + self._default_loss
def _train_op_fn(loss):
with tf.control_dependencies((tf.compat.v1.assert_equal(
tf.cast(expected_loss, dtype=tf.float32),
tf.cast(loss, dtype=tf.float32),
name='assert_loss'),)):
return tf.constant(expected_train_result)
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(), train_op_fn=_train_op_fn)
# Create estimator spec.
spec = head.create_estimator_spec(
features=self._default_features_dict,
mode=tf.estimator.ModeKeys.TRAIN,
logits=self._default_logits,
labels=self._default_labels,
regularization_losses=regularization_losses)
# Assert predictions, loss, and train_op.
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss)
self.assertEqual(expected_train_result, train_result)
def test_multi_dim_weighted_train(self):
with tf.Graph().as_default():
weights_feature_name = self._default_weights_feature_name
def _train_op_fn(loss):
return loss
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(weights_feature_name), train_op_fn=_train_op_fn)
# Create estimator spec.
spec = head.create_estimator_spec(
features={weights_feature_name: self._default_weights},
mode=tf.estimator.ModeKeys.TRAIN,
logits=self._default_logits,
labels=self._default_labels)
# Assert predictions, loss, and train_op.
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(self._default_weighted_loss, loss)
self.assertAllClose(self._default_weighted_loss, train_result)
def test_multi_dim_weighted_eval(self):
with tf.Graph().as_default():
weights_feature_name = self._default_weights_feature_name
metric_fns = {
'metric/precision@1':
metrics_lib.make_ranking_metric_fn(
metrics_lib.RankingMetricKey.PRECISION, topn=1),
}
head = ranking_head.create_ranking_head(
loss_fn=_make_loss_fn(weights_feature_name),
eval_metric_fns=metric_fns)
weights = self._default_weights
# Create estimator spec.
spec = head.create_estimator_spec(
features={weights_feature_name: weights},
mode=tf.estimator.ModeKeys.EVAL,
logits=self._default_logits,
labels=self._default_labels)
expected_metrics = [
'labels_mean',
'logits_mean',
'metric/precision@1',
]
with self.cached_session() as sess:
_initialize_variables(self, spec.scaffold)
update_ops = {
k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops
}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(self._default_weighted_loss, loss)
self.assertItemsEqual(expected_metrics, metrics.keys())
if __name__ == '__main__':
tf.test.main()
| 36.009375
| 80
| 0.683589
|
dae636de0feefe8c3e644fab78d156240799a160
| 2,897
|
py
|
Python
|
Basics/E02_Elements/E24_Wiggles.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 5
|
2020-06-20T22:01:23.000Z
|
2021-08-06T04:39:50.000Z
|
Basics/E02_Elements/E24_Wiggles.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 5
|
2020-05-17T09:32:27.000Z
|
2021-03-15T19:45:52.000Z
|
Basics/E02_Elements/E24_Wiggles.py
|
freder/PageBotExamples
|
eb4ced53a673b9376e8357afa9ea0795b022b13c
|
[
"Ruby",
"MIT"
] | 2
|
2021-02-25T19:07:45.000Z
|
2022-01-09T21:14:06.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# www.pagebot.io
# Licensed under MIT conditions
#
# -----------------------------------------------------------------------------
#
# Draw Wiggles using Drawbot:
# TODO: convert to PageBot script.
#
# Original script by Roberto Arista, you can find the related tutorial here:
# https://medium.com/@roberto_arista/how-to-draw-a-wiggle-between-two-points-with-python-and-drawbot-788006c18fb0
### Modules
from math import radians, atan2, sqrt, sin, cos
from collections import namedtuple
### Constants
BLACK = (0, 0, 0)
Point = namedtuple('Point', ['x', 'y'])
### Function & procedures
def calcAngle(pt1, pt2):
return atan2((pt2.y - pt1.y), (pt2.x - pt1.x))
def calcDistance(pt1, pt2):
return sqrt((pt1.x - pt2.x)**2 + (pt1.y - pt2.y)**2)
def calcWiggle(pt1, pt2, waveLength, waveHeight, curveSquaring=.57):
assert 0 <= curveSquaring <= 1, 'curveSquaring should be a value between 0 and 1: {}'.format(curveSquaring)
assert waveLength > 0, 'waveLength smaller or equal to zero: {}'.format(waveLength)
diagonal = calcDistance(pt1, pt2)
angleRad = calcAngle(pt1, pt2)
howManyWaves = diagonal//int(waveLength)
waveInterval = diagonal/float(howManyWaves)
maxBcpLength = sqrt((waveInterval/4.)**2+(waveHeight/2.)**2)
bcpLength = maxBcpLength*curveSquaring
bcpInclination = calcAngle(Point(0,0), Point(waveInterval/4., waveHeight/2.))
wigglePoints = [pt1]
prevFlexPt = pt1
polarity = 1
for waveIndex in range(0, int(howManyWaves*2)):
bcpOutAngle = angleRad+bcpInclination*polarity
bcpOut = Point(prevFlexPt.x+cos(bcpOutAngle)*bcpLength, prevFlexPt.y+sin(bcpOutAngle)*bcpLength)
flexPt = Point(prevFlexPt.x+cos(angleRad)*waveInterval/2., prevFlexPt.y+sin(angleRad)*waveInterval/2.)
bcpInAngle = angleRad+(radians(180)-bcpInclination)*polarity
bcpIn = Point(flexPt.x+cos(bcpInAngle)*bcpLength, flexPt.y+sin(bcpInAngle)*bcpLength)
wigglePoints.append((bcpOut, bcpIn, flexPt))
polarity *= -1
prevFlexPt = flexPt
return wigglePoints
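# Illustrative note (not from the original tutorial): calcWiggle() returns the
# start point followed by one (bcpOut, bcpIn, anchor) tuple per half wave, so a
# hypothetical call such as
#   calcWiggle(Point(0, 0), Point(100, 100), 16, 36, .7)
# yields roughly
#   [Point(x=0, y=0), (Point(...), Point(...), Point(...)), ...]
# which is the structure drawCurvesSequence() below consumes.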
def drawCurvesSequence(wigglePoints):
myBez = BezierPath()
myBez.moveTo(wigglePoints[0])
for eachBcpOut, eachBcpIn, eachAnchor in wigglePoints[1:]:
myBez.curveTo(eachBcpOut, eachBcpIn, eachAnchor)
print(eachBcpOut, eachBcpIn, eachAnchor)
myBez.endPath()
for contour in myBez:
for seg in contour:
print(seg)
drawPath(myBez)
### Variables
pt1 = Point(0, 0)
pt2 = Point(100, 100)
### Instructions
size(400, 400)
oval(pt1.x-1, pt1.y-1, 2, 2)
oval(pt2.x-1, pt2.y-1, 2, 2)
stroke(*BLACK)
strokeWidth(.5)
fill(None)
wigglePoints = calcWiggle(pt1, pt2, 16, 36, .7)
drawCurvesSequence(wigglePoints)
| 29.561224
| 113
| 0.644115
|
b600d30264396a3da104698efd2346a71b36b463
| 5,776
|
py
|
Python
|
test_QueryMyGene.py
|
kevinxin90/RTX_BioThings_Explorer
|
16de49de9e0db75c7616a85c2592166ea055faa7
|
[
"Apache-2.0"
] | 1
|
2018-05-24T13:16:57.000Z
|
2018-05-24T13:16:57.000Z
|
test_QueryMyGene.py
|
kevinxin90/RTX_BioThings_Explorer
|
16de49de9e0db75c7616a85c2592166ea055faa7
|
[
"Apache-2.0"
] | 1
|
2018-06-01T02:04:23.000Z
|
2018-06-01T20:21:32.000Z
|
test_QueryMyGene.py
|
kevinxin90/RTX_BioThings_Explorer
|
16de49de9e0db75c7616a85c2592166ea055faa7
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from QueryMyGene import QueryMyGene
class QueryMyGeneTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.mg = QueryMyGene()
def test_convert_uniprot_id_to_gene_symbol(self):
bte_result = self.mg.convert_uniprot_id_to_gene_symbol('Q8NBZ7')
rtx_result = {'UXS1'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_uniprot_id_to_gene_symbol('P12345')
rtx_result = set()
self.assertSetEqual(bte_result, rtx_result)
def test_get_gene_ontology_ids_bp_for_uniprot_id(self):
bte_result = self.mg.get_gene_ontology_ids_bp_for_uniprot_id('Q05925')
rtx_result = {'GO:0000122': 'negative regulation of transcription by RNA polymerase II',
'GO:0001501': 'skeletal system development',
'GO:0008344': 'adult locomotory behavior',
'GO:0009653': 'anatomical structure morphogenesis',
'GO:0009953': 'ventral pattern formation',
'GO:0009954': 'distal pattern formation',
'GO:0021549': 'cerebellum development',
'GO:0030901': 'midbrain development',
'GO:0030917': 'midbrain-hindbrain boundary development',
'GO:0035115': 'embryonic forelimb morphogenesis',
'GO:0035176': 'social behavior',
'GO:0035264': 'multicellular organism growth',
'GO:0042756': 'drinking behavior',
'GO:0043473': 'pigmentation',
'GO:0043524': 'negative regulation of neuron apoptotic process',
'GO:0045944': 'positive regulation of transcription by RNA polymerase II',
'GO:0048666': 'neuron development',
'GO:0061743': 'motor learning',
'GO:0071542': 'dopaminergic neuron differentiation',
'GO:1990403': 'embryonic brain development'}
self.assertDictEqual(bte_result, rtx_result)
bte_result = self.mg.get_gene_ontology_ids_bp_for_uniprot_id('Q8NBZ7')
rtx_result = {'GO:0033320': 'UDP-D-xylose biosynthetic process',
'GO:0051262': 'protein tetramerization'}
self.assertDictEqual(bte_result, rtx_result)
def test_uniprot_id_is_human(self):
bte_result = self.mg.uniprot_id_is_human("P02794")
rtx_result = True
self.assertEqual(bte_result, rtx_result)
bte_result = self.mg.uniprot_id_is_human("P10592")
rtx_result = False
self.assertEqual(bte_result, rtx_result)
bte_result = self.mg.uniprot_id_is_human("P12345")
rtx_result = False
self.assertEqual(bte_result, rtx_result)
def test_convert_entrez_gene_ID_to_mirbase_ID(self):
bte_result = self.mg.convert_entrez_gene_ID_to_mirbase_ID(407053)
rtx_result = {'MI0000098'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_entrez_gene_ID_to_mirbase_ID(12345) # Not a human gene
rtx_result = set()
self.assertEqual(bte_result, rtx_result)
def test_get_gene_ontology_ids_bp_for_entrez_gene_id(self):
bte_result = self.mg.get_gene_ontology_ids_bp_for_entrez_gene_id(407053)
rtx_result = {'GO:0035278': 'miRNA mediated inhibition of translation',
'GO:1904706': 'negative regulation of vascular smooth muscle cell proliferation'}
self.assertDictEqual(bte_result, rtx_result)
# bte_result = self.mg.get_gene_ontology_ids_bp_for_entrez_gene_id(12345) # Not a human gene
# rtx_result = dict()
# self.assertDictEqual(bte_result, rtx_result) # TODO: BTE does not specify species to human
def test_convert_gene_symbol_to_uniprot_id(self):
bte_result = self.mg.convert_gene_symbol_to_uniprot_id('A2M')
rtx_result = {'P01023'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_gene_symbol_to_uniprot_id('A1BG')
rtx_result = {'P04217'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_gene_symbol_to_uniprot_id('HMOX1')
rtx_result = {'P09601'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_gene_symbol_to_uniprot_id('RAD54B')
rtx_result = {'O95073', 'Q9Y620'}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_gene_symbol_to_uniprot_id('NS2')
rtx_result = set()
self.assertSetEqual(bte_result, rtx_result)
def test_convert_gene_symbol_to_entrez_gene_ID(self):
bte_result = self.mg.convert_gene_symbol_to_entrez_gene_ID('MIR96')
rtx_result = {407053}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_gene_symbol_to_entrez_gene_ID('MIR48')
rtx_result = set()
self.assertSetEqual(bte_result, rtx_result)
def test_convert_uniprot_id_to_entrez_gene_ID(self):
bte_result = self.mg.convert_uniprot_id_to_entrez_gene_ID("P09601")
rtx_result = {3162}
self.assertSetEqual(bte_result, rtx_result)
bte_result = self.mg.convert_uniprot_id_to_entrez_gene_ID("XYZZY")
rtx_result = set()
self.assertSetEqual(bte_result, rtx_result)
def test_get_cui(self):
bte_result = self.mg.get_cui("NCBIGene:407053")
rtx_result = ['C1537752']
self.assertListEqual(bte_result, rtx_result)
bte_result = self.mg.get_cui("UniProtKB:P09601")
rtx_result = ['C1415619']
self.assertListEqual(bte_result, rtx_result)
if __name__ == '__main__':
unittest.main()
| 44.091603
| 103
| 0.66205
|
7a1adfe207b19672e9119b8301d9840cbd40a165
| 453
|
py
|
Python
|
setup.py
|
robertfasano/labyak
|
8f1896aeebdc7d9f1a6adc58f8bb2009a1254ab7
|
[
"MIT"
] | 1
|
2019-09-04T19:40:30.000Z
|
2019-09-04T19:40:30.000Z
|
setup.py
|
robertfasano/labyak
|
8f1896aeebdc7d9f1a6adc58f8bb2009a1254ab7
|
[
"MIT"
] | null | null | null |
setup.py
|
robertfasano/labyak
|
8f1896aeebdc7d9f1a6adc58f8bb2009a1254ab7
|
[
"MIT"
] | 1
|
2019-09-04T19:40:34.000Z
|
2019-09-04T19:40:34.000Z
|
from distutils.core import setup
from setuptools import find_packages
setup(
name='labyak',
version='0.1',
description='High-level wrappers around the LJM Python library for data acquisition and waveform or pattern generation',
author='Robert Fasano',
author_email='robert.j.fasano@colorado.edu',
packages=find_packages(),
license='MIT',
long_description=open('README.md').read(),
install_requires=['labjack-ljm']
)
| 30.2
| 124
| 0.726269
|
f6d5e7fa233ed13aa6546afa054f0ca7ca9e2317
| 200
|
py
|
Python
|
data_collection/gazette/spiders/sc_sao_joao_do_sul.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | 3
|
2021-08-18T17:50:31.000Z
|
2021-11-12T23:36:33.000Z
|
data_collection/gazette/spiders/sc_sao_joao_do_sul.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | 4
|
2021-02-10T02:36:48.000Z
|
2022-03-02T14:55:34.000Z
|
data_collection/gazette/spiders/sc_sao_joao_do_sul.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | null | null | null |
from gazette.spiders.base import FecamGazetteSpider
class ScSaoJoaoDoSulSpider(FecamGazetteSpider):
name = "sc_sao_joao_do_sul"
FECAM_QUERY = "cod_entidade:249"
TERRITORY_ID = "4216404"
| 25
| 51
| 0.78
|
dc800754487b8da18e3afdb2cabad8f61c05f4c3
| 3,864
|
py
|
Python
|
python/tink/testing/bytes_io.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 12,366
|
2017-05-12T11:22:39.000Z
|
2022-03-31T13:40:46.000Z
|
python/tink/testing/bytes_io.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 505
|
2017-05-18T20:54:30.000Z
|
2022-03-30T19:51:56.000Z
|
python/tink/testing/bytes_io.py
|
Baha-sk/tink
|
285f7dd4f50d2870b3f8137291fda2def9212d63
|
[
"Apache-2.0"
] | 1,179
|
2017-05-12T11:25:34.000Z
|
2022-03-31T14:31:15.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a variant of BytesIO that lets you read the value after close().
This class can be used when an interface that writes to a stream and closes it
at the end needs to be transformed into a function that returns a value.
An example is the implementation of normal AEAD encryption interface using
the streaming AEAD encryption interface.
"""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
import errno
import io
from typing import Optional
class BytesIOWithValueAfterClose(io.BytesIO):
"""A BytesIO that lets you read the written value after close()."""
def __init__(self, initial_bytes=None):
    self._value_after_close = None
if initial_bytes:
super(BytesIOWithValueAfterClose, self).__init__(initial_bytes)
else:
super(BytesIOWithValueAfterClose, self).__init__()
def close(self) -> None:
if not self.closed:
self._value_after_close = self.getvalue()
super(BytesIOWithValueAfterClose, self).close()
def value_after_close(self) -> bytes:
if not self.closed:
raise ValueError('call to value_after_close before close()')
return self._value_after_close
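# A minimal usage sketch (illustrative only, not part of the library): an API
# that writes into a stream and then closes it can be wrapped into a function
# that simply returns bytes.
#
#   dst = BytesIOWithValueAfterClose()
#   dst.write(b'ciphertext')              # a hypothetical writer would do this
#   dst.close()
#   data = dst.value_after_close()        # the value is still readable here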
class SlowBytesIO(io.BytesIO):
"""A readable BytesIO that raised BlockingIOError on some calls to read."""
def __init__(self, data: bytes, seekable: bool = False):
super(SlowBytesIO, self).__init__(data)
self._seekable = seekable
self._state = -1
def read(self, size: int = -1) -> bytes:
if size > 0:
self._state += 1
if self._state > 10000000:
        raise AssertionError('too many reads. Is there an infinite loop?')
if self._state % 3 == 0: # block on every third call.
raise io.BlockingIOError(
errno.EAGAIN,
'write could not complete without blocking', 0)
# read at most 5 bytes.
return super(SlowBytesIO, self).read(min(size, 5))
return super(SlowBytesIO, self).read(size)
def seek(self, pos: int, whence: int = 0) -> int:
if self._seekable:
return super(SlowBytesIO, self).seek(pos, whence)
raise io.UnsupportedOperation('seek')
  def seekable(self) -> bool:
return self._seekable
class SlowReadableRawBytes(io.RawIOBase):
"""A readable io.RawIOBase stream that only sometimes returns data."""
def __init__(self, data: bytes, seekable: bool = False):
super(SlowReadableRawBytes, self).__init__()
self._bytes_io = io.BytesIO(data)
self._seekable = seekable
self._state = -1
def readinto(self, b: bytearray) -> Optional[int]:
try:
self._state += 1
if self._state > 10000000:
        raise AssertionError('too many reads. Is there an infinite loop?')
if self._state % 3 == 0: # return None on every third call.
return None
# read at most 5 bytes
q = self._bytes_io.read(5)
b[:len(q)] = q
return len(q)
except io.BlockingIOError:
raise ValueError('io.BytesIO should not raise BlockingIOError')
def readable(self):
return True
def seek(self, pos: int, whence: int = 0) -> int:
if self._seekable:
return self._bytes_io.seek(pos, whence)
raise io.UnsupportedOperation('seek')
  def seekable(self) -> bool:
return self._seekable
| 33.025641
| 78
| 0.703934
|
bfd644760e817942348cfa78a2384aee0f84c2a7
| 9,396
|
py
|
Python
|
PathSearch.py
|
m-bone/Bond_React_Python
|
e2f9fe5473e5b15d32484ccf2be6f6820a60b53f
|
[
"MIT"
] | 1
|
2021-02-19T06:17:40.000Z
|
2021-02-19T06:17:40.000Z
|
PathSearch.py
|
m-bone/Bond_React_Python
|
e2f9fe5473e5b15d32484ccf2be6f6820a60b53f
|
[
"MIT"
] | null | null | null |
PathSearch.py
|
m-bone/Bond_React_Python
|
e2f9fe5473e5b15d32484ccf2be6f6820a60b53f
|
[
"MIT"
] | null | null | null |
##############################################################################
# Developed by: Matthew Bone
# Last Updated: 30/07/2021
# Updated by: Matthew Bone
#
# Contact Details:
# Bristol Composites Institute (BCI)
# Department of Aerospace Engineering - University of Bristol
# Queen's Building - University Walk
# Bristol, BS8 1TR
# U.K.
# Email - matthew.bone@bristol.ac.uk
#
# File Description:
# This is the primary mapping code that utilises a custom path search to
# determine similar atoms in pre- and post-bond molecule files. This is only
# called from MapProcessor; it can no longer be used to create a map independently.
##############################################################################
import os
import logging
import sys
from LammpsSearchFuncs import element_atomID_dict
from AtomObjectBuilder import build_atom_objects, compare_symmetric_atoms
from QueueFuncs import Queue, queue_bond_atoms, run_queue
def map_delete_atoms(preDeleteAtoms, postDeleteAtoms, mappedIDList):
    # If delete atoms are provided, add them to the mappedIDList; there is no benefit to including them in the queue
if preDeleteAtoms is not None:
assert postDeleteAtoms is not None, 'Delete atoms found in pre-bond file but not in post-bond file.'
assert len(preDeleteAtoms) == len(postDeleteAtoms), 'Pre-bond and post-bond files have different numbers of delete atoms.'
for index, preAtom in enumerate(preDeleteAtoms):
mappedIDList.append([preAtom, postDeleteAtoms[index]])
logging.debug(f'Pre: {preAtom}, Post: {postDeleteAtoms[index]} found with user specified delete atom')
def get_missing_atom_objects(missingAtomList, atomObjectDict):
missingAtomObjects = []
for atom in missingAtomList:
atomObject = atomObjectDict[atom]
missingAtomObjects.append(atomObject)
return missingAtomObjects
def map_missing_atoms(missingPreAtomObjects, missingPostAtomObjects, mappedIDList, queue, allowInference):
missingCheckCounter = 1
while missingCheckCounter < 4 and len(missingPostAtomObjects) > 0:
mappedPreAtomIndex = []
for preIndex, preAtom in enumerate(missingPreAtomObjects):
missingPostAtomElements = [atom.element for atom in missingPostAtomObjects]
elementOccurence = missingPostAtomElements.count(preAtom.element)
if elementOccurence == 0:
print(f"Error: Couldn't find a match in post missing atom for {preAtom.atomID}. Please try again or map the atom manually.")
elif elementOccurence == 1:
postIndex = missingPostAtomElements.index(preAtom.element)
                logging.debug(f'Pre: {preAtom.atomID}, Post: {missingPostAtomObjects[postIndex].atomID} found with missing atoms single element occurrence')
match_missing(preAtom, postIndex, missingPostAtomObjects, mappedIDList, queue, preIndex, mappedPreAtomIndex)
elif elementOccurence > 1:
if preAtom.element == 'H':
postHydrogenIndexList = [index for index, element in enumerate(missingPostAtomElements) if element == 'H']
postIndex = postHydrogenIndexList.pop()
logging.debug(f'Pre: {preAtom.atomID}, Post: {missingPostAtomObjects[postIndex].atomID} found with missing atoms hydrogen symmetry inference')
match_missing(preAtom, postIndex, missingPostAtomObjects, mappedIDList, queue, preIndex, mappedPreAtomIndex)
else:
potentialPostAtomObjects = [atomObject for atomObject in missingPostAtomObjects if atomObject.element == preAtom.element]
postIndex = compare_symmetric_atoms(potentialPostAtomObjects, preAtom, 'index', allowInference=allowInference)
if postIndex is not None:
match_missing(preAtom, postIndex, missingPostAtomObjects, mappedIDList, queue, preIndex, mappedPreAtomIndex)
logging.debug(f'The above atomID pair was found with missing atoms symmetry comparison')
# Refresh missingPreAtomObjects so that it doesn't print needless error messages on subsequent loops
for index in sorted(mappedPreAtomIndex, reverse=True):
del missingPreAtomObjects[index]
missingCheckCounter += 1
def match_missing(preAtom, postAtomMissingIndex, missingPostAtomObjects, mapList, queue, preIndex, mappedPreAtomIndex):
# Get post atom
postAtom = missingPostAtomObjects[postAtomMissingIndex]
mapList.append([preAtom.atomID, postAtom.atomID])
if preAtom.element != 'H':
queue.add([[preAtom, postAtom]]) # This circumvents add_to_queue()
missingPostAtomObjects.pop(postAtomMissingIndex)
mappedPreAtomIndex.append(preIndex)
def update_missing_list(missingAtomList, mappedIDList, mapIndex):
mappedAtoms = [pair[mapIndex] for pair in mappedIDList]
# Update missingAtomList to remove atoms that have been matched
newMissingAtomList = [atom for atom in missingAtomList if atom not in mappedAtoms]
return newMissingAtomList
def map_from_path(directory, preFileName, postFileName, elementsByType, debug, preBondingAtoms, preDeleteAtoms, postBondingAtoms, postDeleteAtoms):
# Set log level
if debug:
logging.basicConfig(level='DEBUG')
else:
logging.basicConfig(level='INFO')
# Build atomID to element dict
os.chdir(directory)
preElementDict = element_atomID_dict(preFileName, elementsByType)
postElementDict = element_atomID_dict(postFileName, elementsByType)
elementDictList = [preElementDict, postElementDict]
# Generate atom class objects list
preAtomObjectDict = build_atom_objects(preFileName, preElementDict, preBondingAtoms)
postAtomObjectDict = build_atom_objects(postFileName, postElementDict, postBondingAtoms)
    # Assert that the pre- and post-bond files contain the same number of atoms - a valid map requires matching counts
assert len(preAtomObjectDict) == len(postAtomObjectDict), f'Different numbers of atoms in pre- and post-bond files. Pre: {len(preAtomObjectDict)}, Post: {len(postAtomObjectDict)}'
# Initialise lists
missingPreAtomList = []
missingPostAtomList = []
mappedIDList = []
# Initialise queue
queue = Queue()
# Populate queue with bonding atoms and update mappedIDList
queue_bond_atoms(preAtomObjectDict, preBondingAtoms, postAtomObjectDict, postBondingAtoms, mappedIDList, queue)
# Add delete atoms to the mappedIDList
map_delete_atoms(preDeleteAtoms, postDeleteAtoms, mappedIDList)
# Search through queue creating new maps based on all elements in a given path
run_queue(queue, mappedIDList, preAtomObjectDict, postAtomObjectDict, missingPreAtomList, missingPostAtomList, elementDictList)
# Update missingPreAtoms to check if the missing atom search loop is needed
missingPreAtomList = update_missing_list(missingPreAtomList, mappedIDList, 0)
# If missing pre atoms are present, map missing atoms and rerun the queue until success or timeout
timeoutCounter = 1
# Disable inference for the first search
inference = False
while len(missingPreAtomList) > 0 and timeoutCounter < 11:
# Update missing atom lists
missingPreAtomList = update_missing_list(missingPreAtomList, mappedIDList, 0)
missingPostAtomList = update_missing_list(missingPostAtomList, mappedIDList, 1)
# Get pre atom objects and record length of missing
missingPreAtomObjects = get_missing_atom_objects(missingPreAtomList, preAtomObjectDict)
        missingPreAtomCount = len(missingPreAtomList)
# Add any post atoms that aren't in the map or already in the missing atoms
mappedPostAtoms = [pair[1] for pair in mappedIDList]
totalPostAtomList = list(postAtomObjectDict.keys())
unfoundMissingPostAtoms = [atomID for atomID in totalPostAtomList if atomID not in mappedPostAtoms and atomID not in missingPostAtomList]
missingPostAtomList.extend(unfoundMissingPostAtoms)
# Get post atom objects
missingPostAtomObjects = get_missing_atom_objects(missingPostAtomList, postAtomObjectDict)
map_missing_atoms(missingPreAtomObjects, missingPostAtomObjects, mappedIDList, queue, inference)
# Refresh missingAtomLists
missingPreAtomList = update_missing_list(missingPreAtomList, mappedIDList, 0)
missingPostAtomList = update_missing_list(missingPostAtomList, mappedIDList, 1)
# Rerun the queue based on atom pairs added to queue from missingAtoms
run_queue(queue, mappedIDList, preAtomObjectDict, postAtomObjectDict, missingPreAtomList, missingPostAtomList, elementDictList)
logging.debug(f'missingPreAtoms after loop {timeoutCounter}: {missingPreAtomList}')
# Enable inference if no new missing atoms were solved this loop
if missingPreAtomCount == len(missingPreAtomList):
inference = True
else: # Disable inference if missing atoms were solved as other atoms may now be found without inference
inference = False
timeoutCounter += 1
if len(missingPreAtomList) > 0:
print('Error: Missing Atom Search timed out. Atoms will be missing from the map. Please raise an issue on GitHub if the problem persists.')
sys.exit()
return mappedIDList
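# Illustrative call sketch (file names, element list and atom IDs below are
# hypothetical; in normal use this function is driven by MapProcessor):
#   mappedIDList = map_from_path(
#       '.', 'pre_bond.data', 'post_bond.data',
#       elementsByType=['C', 'H', 'O', 'N'], debug=False,
#       preBondingAtoms=['1', '2'], preDeleteAtoms=None,
#       postBondingAtoms=['1', '2'], postDeleteAtoms=None)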
| 51.065217
| 183
| 0.723925
|
78841d93a897221c370ce49c73dc69c59af140bf
| 9,320
|
py
|
Python
|
tools/envoy_collect/envoy_collect.py
|
rishabhkumar296/envoy
|
1b040ff0e029059c7aaa6816fccb2419c02675b1
|
[
"Apache-2.0"
] | 27
|
2017-10-27T03:18:58.000Z
|
2019-02-07T21:22:20.000Z
|
tools/envoy_collect/envoy_collect.py
|
rishabhkumar296/envoy
|
1b040ff0e029059c7aaa6816fccb2419c02675b1
|
[
"Apache-2.0"
] | 14
|
2018-02-16T20:47:38.000Z
|
2019-01-19T23:03:01.000Z
|
tools/envoy_collect/envoy_collect.py
|
rishabhkumar296/envoy
|
1b040ff0e029059c7aaa6816fccb2419c02675b1
|
[
"Apache-2.0"
] | 7
|
2017-11-26T06:26:49.000Z
|
2019-03-26T03:09:00.000Z
|
#!/usr/bin/env python
"""Wrapper for Envoy command-line that collects stats/log/profile.
Example use:
./tools/envoy_collect.py --output-path=./envoy.tar -c
./configs/google_com_proxy.json --service-node foo
<Ctrl-C>
tar -tvf ./envoy.tar
-rw------- htuch/eng 0 2017-08-13 21:13 access_0.log
-rw------- htuch/eng 876 2017-08-13 21:13 clusters.txt
-rw------- htuch/eng 19 2017-08-13 21:13 listeners.txt
-rw------- htuch/eng 70 2017-08-13 21:13 server_info.txt
-rw------- htuch/eng 8443 2017-08-13 21:13 stats.txt
-rw------- htuch/eng 1551 2017-08-13 21:13 config.json
-rw------- htuch/eng 32681 2017-08-13 21:13 envoy.log
The Envoy process will execute as normal and will terminate when interrupted
with SIGINT (ctrl-c on stdin), collecting the various stats/log/profile in the
--output-path tarball.
TODO(htuch):
- Generate the full perf trace as well, since we may have a different version
of perf local vs. remote.
- Add a Bazel run wrapper.
- Support v2 proto config in ModifyEnvoyConfig().
- Flamegraph generation in post-processing.
- Support other modes of data collection (e.g. snapshotting on SIGUSR,
periodic).
- Validate in performance mode that we're using an opt binary.
- Consider handling other signals.
- Optional real time logging while Envoy process is running.
- bz2 compress tarball.
- Use freeze or something similar to build a static binary with embedded
Python, ending need to have Python on remote host (and care about version).
"""
from __future__ import print_function
import argparse
import ctypes
import ctypes.util
import datetime
import json
import os
import pipes
import shutil
import signal
import subprocess as sp
import sys
import tarfile
import tempfile
from six.moves import urllib
DEFAULT_ENVOY_PATH = os.getenv('ENVOY_PATH', 'bazel-bin/source/exe/envoy-static')
PERF_PATH = os.getenv('PERF_PATH', 'perf')
PR_SET_PDEATHSIG = 1  # See prctl(2).
DUMP_HANDLERS = ['clusters', 'listeners', 'server_info', 'stats']
def fetch_url(url):
return urllib.request.urlopen(url).read().decode('utf-8')
def modify_envoy_config(config_path, perf, output_directory):
"""Modify Envoy config to support gathering logs, etc.
Args:
config_path: the command-line specified Envoy config path.
perf: boolean indicating whether in performance mode.
output_directory: directory path for additional generated files.
Returns:
(modified Envoy config path, list of additional files to collect)
"""
# No modifications yet when in performance profiling mode.
if perf:
return config_path, []
# Load original Envoy config.
with open(config_path, 'r') as f:
envoy_config = json.loads(f.read())
# Add unconditional access logs for all listeners.
access_log_paths = []
for n, listener in enumerate(envoy_config['listeners']):
for network_filter in listener['filters']:
if network_filter['name'] == 'http_connection_manager':
config = network_filter['config']
access_log_path = os.path.join(output_directory, 'access_%d.log' % n)
access_log_config = {'path': access_log_path}
if 'access_log' in config:
config['access_log'].append(access_log_config)
else:
config['access_log'] = [access_log_config]
access_log_paths.append(access_log_path)
# Write out modified Envoy config.
modified_envoy_config_path = os.path.join(output_directory, 'config.json')
with open(modified_envoy_config_path, 'w') as f:
f.write(json.dumps(envoy_config, indent=2))
return modified_envoy_config_path, access_log_paths
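# For illustration (a hedged reading of the v1 JSON handling above): each
# http_connection_manager filter config gains an entry of roughly this shape,
# one generated log path per listener index n:
#   "access_log": [{"path": "<output_directory>/access_n.log"}]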
def run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path, dump_handlers_paths):
"""Run Envoy subprocess and trigger admin endpoint gathering on SIGINT.
Args:
envoy_shcmd_args: list of Envoy subprocess args.
envoy_log_path: path to write Envoy stderr log to.
admin_address_path: path to where admin address is written by Envoy.
dump_handlers_paths: map from admin endpoint handler to path to where the respective contents
are to be written.
Returns:
The Envoy subprocess exit code.
"""
envoy_shcmd = ' '.join(map(pipes.quote, envoy_shcmd_args))
print(envoy_shcmd)
# Some process setup stuff to ensure the child process gets cleaned up properly if the
# collector dies and doesn't get its signals implicitly.
def envoy_preexec_fn():
os.setpgrp()
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
# Launch Envoy, register for SIGINT, and wait for the child process to exit.
with open(envoy_log_path, 'w') as envoy_log:
envoy_proc = sp.Popen(
envoy_shcmd, stdin=sp.PIPE, stderr=envoy_log, preexec_fn=envoy_preexec_fn, shell=True)
def signal_handler(signum, frame):
# The read is deferred until the signal so that the Envoy process gets a
# chance to write the file out.
with open(admin_address_path, 'r') as f:
admin_address = 'http://%s' % f.read()
# Fetch from the admin endpoint.
for handler, path in dump_handlers_paths.items():
handler_url = '%s/%s' % (admin_address, handler)
print('Fetching %s' % handler_url)
with open(path, 'w') as f:
f.write(fetch_url(handler_url))
# Send SIGINT to Envoy process, it should exit and execution will
# continue from the envoy_proc.wait() below.
print('Sending Envoy process (PID=%d) SIGINT...' % envoy_proc.pid)
envoy_proc.send_signal(signal.SIGINT)
signal.signal(signal.SIGINT, signal_handler)
return envoy_proc.wait()
def envoy_collect(parse_result, unknown_args):
"""Run Envoy and collect its artifacts.
Args:
parse_result: Namespace object with envoy_collect.py's args.
unknown_args: list of remaining args to pass to Envoy binary.
"""
# Are we in performance mode? Otherwise, debug.
perf = parse_result.performance
return_code = 1 # Non-zero default return.
envoy_tmpdir = tempfile.mkdtemp(prefix='envoy-collect-tmp-')
# Try and do stuff with envoy_tmpdir, rm -rf regardless of success/failure.
try:
# Setup Envoy config and determine the paths of the files we're going to
# generate.
modified_envoy_config_path, access_log_paths = modify_envoy_config(
parse_result.config_path, perf, envoy_tmpdir)
dump_handlers_paths = {h: os.path.join(envoy_tmpdir, '%s.txt' % h) for h in DUMP_HANDLERS}
envoy_log_path = os.path.join(envoy_tmpdir, 'envoy.log')
# The manifest of files that will be placed in the output .tar.
manifest = access_log_paths + list(
dump_handlers_paths.values()) + [modified_envoy_config_path, envoy_log_path]
# This is where we will find out where the admin endpoint is listening.
admin_address_path = os.path.join(envoy_tmpdir, 'admin_address.txt')
# Only run under 'perf record' in performance mode.
if perf:
perf_data_path = os.path.join(envoy_tmpdir, 'perf.data')
manifest.append(perf_data_path)
perf_record_args = [
PERF_PATH,
'record',
'-o',
perf_data_path,
'-g',
'--',
]
else:
perf_record_args = []
# This is how we will invoke the wrapped envoy.
envoy_shcmd_args = perf_record_args + [
parse_result.envoy_binary,
'-c',
modified_envoy_config_path,
'-l',
'error' if perf else 'trace',
'--admin-address-path',
admin_address_path,
] + unknown_args[1:]
# Run the Envoy process (under 'perf record' if needed).
return_code = run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path,
dump_handlers_paths)
# Collect manifest files and tar them.
with tarfile.TarFile(parse_result.output_path, 'w') as output_tar:
for path in manifest:
if os.path.exists(path):
print('Adding %s to archive' % path)
output_tar.add(path, arcname=os.path.basename(path))
else:
print('%s not found' % path)
print('Wrote Envoy artifacts to %s' % parse_result.output_path)
finally:
shutil.rmtree(envoy_tmpdir)
return return_code
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Envoy wrapper to collect stats/log/profile.')
default_output_path = 'envoy-%s.tar' % datetime.datetime.now().isoformat('-')
parser.add_argument('--output-path', default=default_output_path, help='path to output .tar.')
# We either need to interpret or override these, so we declare them in
# envoy_collect.py and always parse and present them again when invoking
# Envoy.
parser.add_argument(
'--config-path', '-c', required=True, help='Path to Envoy configuration file.')
parser.add_argument(
'--log-level', '-l', help='Envoy log level. This will be overridden when invoking Envoy.')
# envoy_collect specific args.
parser.add_argument(
'--performance',
action='store_true',
help='Performance mode (collect perf trace, minimize log verbosity).')
parser.add_argument(
'--envoy-binary',
default=DEFAULT_ENVOY_PATH,
help='Path to Envoy binary (%s by default).' % DEFAULT_ENVOY_PATH)
sys.exit(envoy_collect(*parser.parse_known_args(sys.argv)))
| 38.196721
| 97
| 0.700429
|
71b1c387c4128e98a3b2f733b25fc46ab3595f9a
| 714
|
py
|
Python
|
system_step/__init__.py
|
molssi-seamm/system_step
|
6b9f4416eddcefa56a147ee102cab7c3edf261b1
|
[
"BSD-3-Clause"
] | null | null | null |
system_step/__init__.py
|
molssi-seamm/system_step
|
6b9f4416eddcefa56a147ee102cab7c3edf261b1
|
[
"BSD-3-Clause"
] | 2
|
2020-12-13T00:27:36.000Z
|
2020-12-15T17:06:58.000Z
|
system_step/__init__.py
|
molssi-seamm/system_step
|
6b9f4416eddcefa56a147ee102cab7c3edf261b1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
system_step
A step for working with the molecular/crystal system in SEAMM
"""
# Bring up the classes so that they appear to be directly in
# the system_step package.
from system_step.system import System # noqa: F401, E501
from system_step.system_parameters import SystemParameters # noqa: F401, E501
from system_step.system_step import SystemStep # noqa: F401, E501
from system_step.tk_system import TkSystem # noqa: F401, E501
# Handle versioneer
from ._version import get_versions
__author__ = """Paul Saxe"""
__email__ = 'psaxe@molssi.org'
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
del get_versions, versions
| 29.75
| 78
| 0.768908
|
ed73cb09e30cc969f6a2844bfeca08e065e5d4b5
| 7,571
|
py
|
Python
|
Operator_Components/Operator_SLR/start_slr_flow.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 4
|
2018-04-21T00:46:40.000Z
|
2019-12-03T13:52:03.000Z
|
Operator_Components/Operator_SLR/start_slr_flow.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 1
|
2019-01-09T10:45:23.000Z
|
2019-01-09T10:45:23.000Z
|
Operator_Components/Operator_SLR/start_slr_flow.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 4
|
2018-04-21T01:12:12.000Z
|
2020-09-24T06:19:29.000Z
|
# -*- coding: utf-8 -*-
__author__ = 'alpaloma'
import logging
import traceback
from json import loads
from flask import Blueprint, current_app, render_template_string, make_response, redirect
from flask_cors import CORS
from flask_restful import Resource, Api, request
from requests import get, post
from requests.exceptions import ConnectionError, Timeout
from base64 import urlsafe_b64encode
from DetailedHTTPException import DetailedHTTPException, error_handler
from helpers_op import Helpers, ServiceRegistryHandler, Sequences, get_am, format_request, api_logging
from uuid import uuid4 as guid
import time
'''
Operator_Components Mgmnt->Service_Components Mgmnt: Fetch code from service_mgmnt
Service_Components Mgmnt->Service_Components Mgmnt: Generate code
Service_Components Mgmnt->Service_Components Mgmnt: Store code in db
Service_Components Mgmnt-->Operator_Components Mgmnt: Returning code
Operator_Components Mgmnt->Operator_Components Mgmnt: Check code request is valid
Operator_Components Mgmnt->Operator_Components Mgmnt: Load code object for use
Operator_Components Mgmnt->Operator_Components Mgmnt: Add user_id to code dictionary {'code': 'code', 'user_id': 'user_id'}
Operator_Components Mgmnt->Service_Components Mgmnt: Redirect user to Service_Components Mgmnt login
'''
# Blueprint and Flask api stuff
api_SLR_Start = Blueprint("api_SLR_Start", __name__)
CORS(api_SLR_Start)
api = Api()
api.init_app(api_SLR_Start)
# Logger stuff
debug_log = logging.getLogger("debug")
sq = Sequences("OpMgmt")
class StartSlrFlow(Resource):
def __init__(self):
"""
"""
super(StartSlrFlow, self).__init__()
self.service_registry_handler = ServiceRegistryHandler(current_app.config["SERVICE_REGISTRY_SEARCH_DOMAIN"],
current_app.config["SERVICE_REGISTRY_SEARCH_ENDPOINT"])
self.request_timeout = current_app.config["TIMEOUT"]
self.uid = current_app.config["UID"]
self.return_url = current_app.config["RETURN_URL"]
self.helper = Helpers(current_app.config)
self.store_session = self.helper.store_session
@error_handler
@api_logging
def get(self, account_id, service_id):
"""
:param account_id: Account Manager user id
:param service_id: Service id as in Service Registry
"""
debug_log.info("#### Request to start SLR flow with parameters: account_id ({}), service_id ({})"
.format(account_id, service_id))
try:
AM = get_am(current_app, request.headers)
key_check = AM.verify_user_key(account_id)
debug_log.info("Verifying User Key resulted: {}".format(key_check))
# Check Active SLR for this account/service pair doesn't exist
AM.check_for_existing_slr(service_id, account_id)
# We need to store some session information for later parts of flow.
session_information = {}
sq.task("Fetch service address from Service Registry")
service_json = self.service_registry_handler.getService(service_id)
service_domain = service_json["serviceInstance"][0]["loginDomain"] # Domain to Login of Service
service_access_uri = service_json["serviceInstance"][0]["serviceAccessEndPoint"]["serviceAccessURI"]
service_login_uri = service_json["serviceInstance"][0]["loginUri"]
sq.task("Generate code for session")
code = str(guid())
debug_log.info("Session information contains: code {}, account id {} and service_id {}"
.format(code, account_id, service_id))
sq.task("Store session_information to database")
session_information[code] = {"account_id": account_id,
"service_id": service_id,
"user_key": request.headers["Api-Key-User"]}
self.store_session(session_information)
service_endpoint = "{}{}{}".format(service_domain, service_access_uri, service_login_uri)
service_query = "?code={}&operator_id={}&return_url={}&linkingFrom={}".format(
# TODO: Get return url from somewhere
code, self.uid, urlsafe_b64encode(self.return_url), "Operator")
debug_log.info("Redirect url with parameters:\n{}{}\nCode contains: {}".format(service_endpoint,
service_query,
code))
sq.send_to("UI(Operator)", "Redirect user to Service Mockup login")
response = make_response(redirect(service_endpoint+service_query))
return response
except DetailedHTTPException as e:
debug_log.exception(e)
if "code" in locals():
self.helper.delete_session(code)
raise DetailedHTTPException(exception=e,
title="SLR registration failed.",
status=500,
detail="Something failed during creation of SLR.",
trace=traceback.format_exc(limit=100).splitlines())
except Exception as e:
debug_log.exception(e)
if "code" in locals():
self.helper.delete_session(code)
raise DetailedHTTPException(status=500,
title="Something went really wrong during SLR registration.",
detail="Error: {}".format(repr(e)),
exception=e,
trace=traceback.format_exc(limit=100).splitlines())
class CancelSlrFlow(Resource):
def __init__(self):
"""
"""
super(CancelSlrFlow, self).__init__()
self.helper = Helpers(current_app.config)
@error_handler
@api_logging
def delete(self, account_id, code):
AM = get_am(current_app, request.headers)
key_check = AM.verify_user_key(account_id)
debug_log.info("Verifying User Key resulted: {}".format(key_check))
sq.task("Load json payload as object")
js = request.json
sq.task("Load account_id from database")
code = js["code"]
try:
stored_session_from_db = self.helper.restore_session(code)
except TypeError as e:
debug_log.info("Failed restoring session from DB with code '{}'".format(code))
debug_log.exception(e)
raise DetailedHTTPException(status=403,
detail={"msg": "Invalid or expired session"},
title="Invalid session")
debug_log.debug("The session data contains: {}".format(stored_session_from_db))
session_data = loads(stored_session_from_db)
debug_log.debug("{} {}".format(type(stored_session_from_db), stored_session_from_db))
account_id_from_session = session_data["account_id"]
if account_id_from_session == account_id:
self.helper.delete_session(code)
return {"msg": {"status": "deleted", "id": code}}, 200
api.add_resource(StartSlrFlow, '/account/<string:account_id>/service/<string:service_id>')
api.add_resource(CancelSlrFlow, '/account/<string:account_id>/session_code/<string:code>')
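# Illustrative request sketch (identifiers and key values are hypothetical):
#   GET /account/<account_id>/service/<service_id>
#   Api-Key-User: <key issued for that account>
# On success the response is a redirect to
#   <service_domain><service_access_uri><service_login_uri>?code=<session code>
#       &operator_id=<operator UID>&return_url=<base64 url>&linkingFrom=Operator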
| 44.798817
| 123
| 0.62779
|
74d446b18dc21d070cc5ac7ca44bb4c35279aa5f
| 19,601
|
py
|
Python
|
evaluate/eval.py
|
HoloClean/RecordFusion
|
4094cd9e6c779584b9dbed6f3ae68eb11b13e2b2
|
[
"Apache-2.0"
] | 2
|
2022-01-11T21:08:20.000Z
|
2022-01-22T03:13:00.000Z
|
evaluate/eval.py
|
HoloClean/RecordFusion
|
4094cd9e6c779584b9dbed6f3ae68eb11b13e2b2
|
[
"Apache-2.0"
] | null | null | null |
evaluate/eval.py
|
HoloClean/RecordFusion
|
4094cd9e6c779584b9dbed6f3ae68eb11b13e2b2
|
[
"Apache-2.0"
] | 1
|
2022-02-24T06:01:04.000Z
|
2022-02-24T06:01:04.000Z
|
import logging
import os
from string import Template
import time
import pandas as pd
from dataset import AuxTables
from dataset.table import Table, Source
errors_template = Template('SELECT count(*) ' \
'FROM "$init_table" as t1, "$grdt_table" as t2 ' \
'WHERE t1._tid_ = t2._tid_ ' \
' AND t2._attribute_ = \'$attr\' ' \
' AND t1."$attr" != t2._value_')
"""
The 'errors' aliased subquery returns the (_tid_, _attribute_, _value_)
from the ground truth table for all cells that have an error in the original
raw data.
The 'repairs' aliased table contains the cells and values we've inferred.
We then count the number of cells that we repaired to the correct ground
truth value.
"""
correct_repairs_template = Template('SELECT COUNT(*) FROM '
' (SELECT t2._tid_, t2._attribute_, t2._value_ '
' FROM "$init_table" as t1, "$grdt_table" as t2 '
' WHERE t1._tid_ = t2._tid_ '
' AND t2._attribute_ = \'$attr\' '
' AND t1."$attr" != t2._value_ ) as errors, $inf_dom as repairs '
'WHERE errors._tid_ = repairs._tid_ '
' AND errors._attribute_ = repairs.attribute '
' AND errors._value_ = repairs.rv_value')
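# For illustration only (table and attribute names here are hypothetical):
# substituting errors_template for attribute 'city' with raw table 'hospital'
# and ground-truth table 'hospital_clean' yields, up to whitespace,
#   SELECT count(*) FROM "hospital" as t1, "hospital_clean" as t2
#   WHERE t1._tid_ = t2._tid_ AND t2._attribute_ = 'city'
#     AND t1."city" != t2._value_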
class EvalEngine:
def __init__(self, env, dataset):
self.env = env
self.ds = dataset
def load_data(self, name, fpath, tid_col, attr_col, val_col, na_values=None):
tic = time.clock()
try:
raw_data = pd.read_csv(fpath, na_values=na_values, encoding='utf-8')
raw_data.fillna('_nan_', inplace=True)
raw_data.rename({tid_col: '_tid_',
attr_col: '_attribute_',
val_col: '_value_'},
axis='columns',
inplace=True)
raw_data = raw_data[['_tid_', '_attribute_', '_value_']]
# Normalize string to whitespaces.
raw_data['_value_'] = raw_data['_value_'].str.strip().str.lower()
self.clean_data = Table(name, Source.DF, df=raw_data)
self.clean_data.store_to_db(self.ds.engine.engine)
self.clean_data.create_db_index(self.ds.engine, ['_tid_'])
self.clean_data.create_db_index(self.ds.engine, ['_attribute_'])
status = 'DONE Loading {fname}'.format(fname=os.path.basename(fpath))
except Exception:
logging.error('load_data for table %s', name)
raise
toc = time.clock()
load_time = toc - tic
return status, load_time
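    # Expected ground-truth layout, shown for illustration (the header names
    # are hypothetical; the caller-supplied tid_col/attr_col/val_col columns
    # are renamed to _tid_/_attribute_/_value_ above):
    #   tid,attribute,correct_val
    #   1,City,san francisco
    #   1,State,ca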
def evaluate_repairs(self):
self.compute_total_repairs()
self.compute_total_repairs_grdt()
self.compute_total_errors()
self.compute_detected_errors()
self.compute_correct_repairs()
prec = self.compute_precision()
rec = self.compute_recall()
rep_recall = self.compute_repairing_recall()
f1 = self.compute_f1()
rep_f1 = self.compute_repairing_f1()
if self.env['verbose']:
self.log_weak_label_stats()
return prec, rec, rep_recall, f1, rep_f1
def eval_report(self):
tic = time.clock()
try:
prec, rec, rep_recall, f1, rep_f1 = self.evaluate_repairs()
report = "Precision = %.2f, Recall = %.2f, Repairing Recall = %.2f, F1 = %.2f, Repairing F1 = %.2f, Detected Errors = %d, Total Errors = %d, Correct Repairs = %d, Total Repairs = %d, Total Repairs on correct cells (Grdth present) = %d, Total Repairs on incorrect cells (Grdth present) = %d" % (
prec, rec, rep_recall, f1, rep_f1,
self.detected_errors, self.total_errors, self.correct_repairs,
self.total_repairs, self.total_repairs_grdt_correct, self.total_repairs_grdt_incorrect)
report_list = [prec, rec, rep_recall, f1, rep_f1, self.detected_errors, self.total_errors,
self.correct_repairs, self.total_repairs, self.total_repairs_grdt]
except Exception as e:
logging.error("ERROR generating evaluation report %s" % e)
raise
toc = time.clock()
report_time = toc - tic
return report, report_time, report_list
def compute_total_repairs(self):
"""
compute_total_repairs memoizes the number of repairs:
the # of cells that were inferred and where the inferred value
is not equal to the initial value.
"""
query = "SELECT count(*) FROM " \
" (SELECT _vid_ " \
" FROM {} as t1, {} as t2 " \
" WHERE t1._tid_ = t2._tid_ " \
" AND t1.attribute = t2.attribute " \
" AND t1.init_value != t2.rv_value) AS t".format(AuxTables.cell_domain.name,
AuxTables.inf_values_dom.name)
res = self.ds.engine.execute_query(query)
self.total_repairs = float(res[0][0])
def compute_total_repairs_grdt(self):
"""
compute_total_repairs_grdt memoizes the number of repairs for cells
that are specified in the clean/ground truth data. Otherwise repairs
are defined the same as compute_total_repairs.
We also distinguish between repairs on correct cells and repairs on
incorrect cells (correct cells are cells where init == ground truth).
"""
query = """
SELECT
(t1.init_value = t3._value_) AS is_correct,
count(*)
FROM {} as t1, {} as t2, {} as t3
WHERE t1._tid_ = t2._tid_
AND t1.attribute = t2.attribute
AND t1.init_value != t2.rv_value
AND t1._tid_ = t3._tid_
AND t1.attribute = t3._attribute_
GROUP BY is_correct
""".format(AuxTables.cell_domain.name,
AuxTables.inf_values_dom.name,
self.clean_data.name)
res = self.ds.engine.execute_query(query)
# Memoize the number of repairs on correct cells and incorrect cells.
# Since we do a GROUP BY we need to check which row of the result
# corresponds to the correct/incorrect counts.
self.total_repairs_grdt_correct, self.total_repairs_grdt_incorrect = 0, 0
self.total_repairs_grdt = 0
if not res:
return
if res[0][0]:
correct_idx, incorrect_idx = 0, 1
else:
correct_idx, incorrect_idx = 1, 0
if correct_idx < len(res):
self.total_repairs_grdt_correct = float(res[correct_idx][1])
if incorrect_idx < len(res):
self.total_repairs_grdt_incorrect = float(res[incorrect_idx][1])
self.total_repairs_grdt = self.total_repairs_grdt_correct + self.total_repairs_grdt_incorrect
def compute_total_errors(self):
"""
compute_total_errors memoizes the number of cells that have a
wrong initial value: requires ground truth data.
"""
queries = []
total_errors = 0.0
for attr in self.ds.get_attributes():
query = errors_template.substitute(init_table=self.ds.raw_data.name,
grdt_table=self.clean_data.name,
attr=attr)
queries.append(query)
results = self.ds.engine.execute_queries(queries)
for res in results:
total_errors += float(res[0][0])
self.total_errors = total_errors
def compute_detected_errors(self):
"""
compute_detected_errors memoizes the number of error cells that
were detected in error detection: requires ground truth.
        This value is always less than or equal to the total number of errors (see
compute_total_errors).
"""
query = "SELECT count(*) FROM " \
" (SELECT _vid_ " \
" FROM %s as t1, %s as t2, %s as t3 " \
" WHERE t1._tid_ = t2._tid_ AND t1._cid_ = t3._cid_ " \
" AND t1.attribute = t2._attribute_ " \
" AND t1.init_value != t2._value_) AS t" \
% (AuxTables.cell_domain.name, self.clean_data.name, AuxTables.dk_cells.name)
res = self.ds.engine.execute_query(query)
self.detected_errors = float(res[0][0])
def compute_correct_repairs(self):
"""
compute_correct_repairs memoizes the number of error cells
that were correctly inferred.
        This value is always less than or equal to the total number of errors (see
compute_total_errors).
"""
queries = []
correct_repairs = 0.0
for attr in self.ds.get_attributes():
query = correct_repairs_template.substitute(init_table=self.ds.raw_data.name, grdt_table=self.clean_data.name,
attr=attr, inf_dom=AuxTables.inf_values_dom.name)
queries.append(query)
results = self.ds.engine.execute_queries(queries)
for res in results:
correct_repairs += float(res[0][0])
self.correct_repairs = correct_repairs
def compute_recall(self):
"""
Computes the recall (# of correct repairs / # of total errors).
"""
if self.total_errors == 0:
return 0
return self.correct_repairs / self.total_errors
def compute_repairing_recall(self):
"""
Computes the _repairing_ recall (# of correct repairs / # of total
_detected_ errors).
"""
if self.detected_errors == 0:
return 0
return self.correct_repairs / self.detected_errors
def compute_precision(self):
"""
Computes precision (# correct repairs / # of total repairs w/ ground truth)
"""
if self.total_repairs_grdt == 0:
return 0
return self.correct_repairs / self.total_repairs_grdt
def compute_f1(self):
prec = self.compute_precision()
rec = self.compute_recall()
if prec+rec == 0:
return 0
f1 = 2*(prec*rec)/(prec+rec)
return f1
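    # Worked example with illustrative numbers: prec = 0.8 and rec = 0.5 give
    # f1 = 2 * (0.8 * 0.5) / (0.8 + 0.5) ≈ 0.615 (the harmonic mean).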
def compute_repairing_f1(self):
prec = self.compute_precision()
rec = self.compute_repairing_recall()
if prec+rec == 0:
return 0
f1 = 2*(prec*rec)/(prec+rec)
return f1
def log_weak_label_stats(self):
query = """
select
(t3._tid_ is NULL) as clean,
(t1.fixed) as status,
(t4._tid_ is NOT NULL) as inferred,
(t1.init_value = t2._value_) as init_eq_grdth,
(t1.init_value = t4.rv_value) as init_eq_infer,
(t1.weak_label = t1.init_value) as wl_eq_init,
(t1.weak_label = t2._value_) as wl_eq_grdth,
(t1.weak_label = t4.rv_value) as wl_eq_infer,
(t2._value_ = t4.rv_value) as infer_eq_grdth,
count(*) as count
from
{cell_domain} as t1,
{clean_data} as t2
left join {dk_cells} as t3 on t2._tid_ = t3._tid_ and t2._attribute_ = t3.attribute
left join {inf_values_dom} as t4 on t2._tid_ = t4._tid_ and t2._attribute_ = t4.attribute where t1._tid_ = t2._tid_ and t1.attribute = t2._attribute_
group by
clean,
status,
inferred,
init_eq_grdth,
init_eq_infer,
wl_eq_init,
wl_eq_grdth,
wl_eq_infer,
infer_eq_grdth
""".format(cell_domain=AuxTables.cell_domain.name,
clean_data=self.clean_data.name,
dk_cells=AuxTables.dk_cells.name,
inf_values_dom=AuxTables.inf_values_dom.name)
res = self.ds.engine.execute_query(query)
df_stats = pd.DataFrame(res,
columns=["is_clean", "cell_status", "is_inferred",
"init = grdth", "init = inferred",
"w. label = init", "w. label = grdth", "w. label = inferred",
"infer = grdth", "count"])
df_stats = df_stats.sort_values(list(df_stats.columns)).reset_index(drop=True)
logging.debug("weak label statistics:")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', len(df_stats))
pd.set_option('display.max_colwidth', -1)
logging.debug("%s", df_stats)
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
pd.reset_option('display.max_colwidth')
#------fusion
# GM
def load_data_fusion(self, name, fpath, tid_col, attr_col, val_col, na_values=None):
tic = time.clock()
try:
#everything is a string
raw_data = pd.read_csv(fpath, dtype=object, na_values=na_values, encoding='utf-8')
raw_data.fillna('_nan_', inplace=True)
raw_data.rename({tid_col: '_tid_',
attr_col: '_attribute_',
val_col: '_value_'},
axis='columns',
inplace=True)
raw_data = raw_data[['_tid_', '_attribute_', '_value_']]
# Normalize string to whitespaces.
raw_data['_value_'] = raw_data['_value_'].str.strip().str.lower()
raw_data['_tid_'] = raw_data['_tid_'].str.strip().str.lower()
self.clean_data = Table(name, Source.DF, df=raw_data)
self.clean_data.store_to_db(self.ds.engine.engine)
self.clean_data.create_db_index(self.ds.engine, ['_tid_'])
self.clean_data.create_db_index(self.ds.engine, ['_attribute_'])
object_dict = {}
records = self.clean_data.df.to_records()
self.all_attrs = list(records.dtype.names)
for row in list(records):
object_key = str(row['_tid_'])
if object_key not in object_dict:
object_dict[object_key] = {}
object_dict[object_key][row['_attribute_']]= str(row['_value_'])
self.ds.correct_object_dict = object_dict
status = 'DONE Loading {fname}'.format(fname=os.path.basename(fpath))
except Exception:
logging.error('load_data for table %s', name)
raise
toc = time.clock()
load_time = toc - tic
return status, load_time
def eval_report_fusion(self):
tic = time.clock()
try:
self.ds.prec = self.evaluate_repairs_fusion()
report = "Precision of iteration is = %.4f" % (self.ds.prec)
except Exception as e:
report = "ERROR generating evaluation report: %s" % str(e)
toc = time.clock()
report_time = toc - tic
return report, report_time
def eval_report_fusion_recurr(self, validation):
tic = time.clock()
try:
            f = open('prec.txt', 'a+')
self.ds.prec = self.evaluate_repairs_fusion_recurr()
if validation:
report = "Precision of validation set = %.4f" % (self.ds.prec)
else:
report = "Precision = %.4f" % (self.ds.prec)
f.write('Precision' + str(self.ds.prec))
f.close()
except Exception as e:
report = "ERROR generating evaluation report: %s" % str(e)
toc = time.clock()
report_time = toc - tic
return report, report_time
# GM
def evaluate_repairs_fusion(self):
# we get the accuracy for each attribute
for attribute in self.ds.raw_data.df.columns:
if attribute!="_tid_" and attribute!=self.ds.src and attribute != self.ds.key:
query = "select count(*) from %s AS t1 where attribute= '%s'" % (
AuxTables.inf_values_dom.name, attribute)
total_repair = self.ds.engine.execute_query(query)
# count number of instances in which inferred values match the ground truth
query = "select count(*) from %s AS t1, %s as t2 where t1.object = cast(t2._tid_ as varchar(100)) and t1.attribute = t2._attribute_ and t1.rv_value = t2._value_ and t1.attribute = '%s'" % (
AuxTables.inf_values_dom.name, self.clean_data.name, attribute)
correct = self.ds.engine.execute_query(query)
# return precision based on count
prec = float(correct[0][0]) / float(total_repair[0][0])
print("The precision of the attribute %s is: %s" % (attribute, str(prec)))
#we get the accuracy for all the predictions
query = "select count(*) from %s AS t1" % (AuxTables.inf_values_dom.name)
total_repair = self.ds.engine.execute_query(query)
# count number of instances in which inferred values match the ground truth
query = "select count(*) from %s AS t1, %s as t2 where t1.object = cast(t2._tid_ as varchar(100)) and t1.attribute = t2._attribute_ and t1.rv_value = t2._value_" % (
AuxTables.inf_values_dom.name, self.clean_data.name)
correct = self.ds.engine.execute_query(query)
# return precision based on count
prec = float(correct[0][0]) / float(total_repair[0][0])
return prec
def evaluate_repairs_fusion_recurr(self):
#we get the accuracy for all the predictions
query = "select count(*) from %s AS t1" % (AuxTables.inf_values_dom.name)
total_repair = self.ds.engine.execute_query(query)
# count number of instances in which inferred values match the ground truth
query = "select count(*) from %s AS t1, %s as t2 where t1.object = cast(t2._tid_ as varchar(100)) and t1.attribute = t2._attribute_ and t1.rv_value = t2._value_" % (
AuxTables.inf_values_dom.name, self.clean_data.name)
correct = self.ds.engine.execute_query(query)
# return precision based on count
prec = float(correct[0][0]) / float(total_repair[0][0])
return prec
| 42.518438
| 306
| 0.578389
|
62fc653305aece7765c3e7e0fd4a2022e5677dbb
| 3,302
|
py
|
Python
|
generated-libraries/python/netapp/lun/iscsi_pr_nexus.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/lun/iscsi_pr_nexus.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/lun/iscsi_pr_nexus.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class IscsiPrNexus(NetAppObject):
"""
Information about iscsi nexus owning the persistent reservation
These three componients identify the relationship between the
iSCSI initiator and the target.
"""
_tpgtag = None
@property
def tpgtag(self):
"""
The target portal group tag of the persistent
reservation owner. For historical reasons, the
value is represented as a 4-byte hexadecimal
number in little-endian byte order.
"""
return self._tpgtag
@tpgtag.setter
def tpgtag(self, val):
if val != None:
self.validate('tpgtag', val)
self._tpgtag = val
_tpgroup_tag = None
@property
def tpgroup_tag(self):
"""
The target portal group tag of the
persistent reservation owner.
"""
return self._tpgroup_tag
@tpgroup_tag.setter
def tpgroup_tag(self, val):
if val != None:
self.validate('tpgroup_tag', val)
self._tpgroup_tag = val
_initiator = None
@property
def initiator(self):
"""
Name of initiator holding the reservation
i.e. iqn.1987-06.com.initvendor1:appsrv.sn.2346.
"""
return self._initiator
@initiator.setter
def initiator(self, val):
if val != None:
self.validate('initiator', val)
self._initiator = val
_isid = None
@property
def isid(self):
"""
The Initiator Session ID for the persistent reservation
owner. The ISID is a numeric Initiator Session ID
assigned by the initiator which acts as part of the
        initiator's identity.
"""
return self._isid
@isid.setter
def isid(self, val):
if val != None:
self.validate('isid', val)
self._isid = val
_third_party_initiator_name = None
@property
def third_party_initiator_name(self):
"""
The name of the third-party initiator the reservation
is being held for. For use in SCSI-2 only.
i.e. iqn.1987-06.com.initvendor1:appsrv.sn.2346.
"""
return self._third_party_initiator_name
@third_party_initiator_name.setter
def third_party_initiator_name(self, val):
if val != None:
self.validate('third_party_initiator_name', val)
self._third_party_initiator_name = val
@staticmethod
def get_api_name():
return "iscsi-pr-nexus"
@staticmethod
def get_desired_attrs():
return [
'tpgtag',
'tpgroup-tag',
'initiator',
'isid',
'third-party-initiator-name',
]
def describe_properties(self):
return {
'tpgtag': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'tpgroup_tag': { 'class': int, 'is_list': False, 'required': 'optional' },
'initiator': { 'class': basestring, 'is_list': False, 'required': 'required' },
'isid': { 'class': basestring, 'is_list': False, 'required': 'required' },
'third_party_initiator_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
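
A small usage sketch exercising only the static helpers defined above (constructing and populating an instance depends on NetAppObject, which is not shown here):

# Introspect the API name and the attributes requested from the server.
print(IscsiPrNexus.get_api_name())       # "iscsi-pr-nexus"
print(IscsiPrNexus.get_desired_attrs())  # ['tpgtag', 'tpgroup-tag', 'initiator', 'isid', 'third-party-initiator-name']
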
| 30.859813 | 108 | 0.592065 |
28740823180a64efeb4ec898279764d30025a14c | 3,830 | py | Python | orio-0.1.0/src/module/loop/submodule/boundreplace/boundreplace.py | nn4ip/pluto | 92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d | ["MIT"] | 183 | 2017-01-28T17:23:29.000Z | 2022-03-25T08:58:56.000Z | orio-0.1.0/src/module/loop/submodule/boundreplace/boundreplace.py | nn4ip/pluto | 92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d | ["MIT"] | 70 | 2017-03-29T09:51:04.000Z | 2021-12-28T07:00:44.000Z | orio-0.1.0/src/module/loop/submodule/boundreplace/boundreplace.py | nn4ip/pluto | 92ace2441b6b8d6b66d1bb7ef3e893df4ff23a4d | ["MIT"] | 57 | 2017-03-29T07:27:58.000Z | 2022-01-14T03:13:39.000Z |
#
# Bound replacement transformation
#
import sys
import module.loop.submodule.submodule, transformator
#---------------------------------------------------------------------
class BoundReplace(module.loop.submodule.submodule.SubModule):
'''The bound replacement transformation submodule'''
def __init__(self, perf_params = None, transf_args = None, stmt = None):
'''To instantiate a bound replacement transformation submodule'''
module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt)
#-----------------------------------------------------------------
def readTransfArgs(self, perf_params, transf_args):
'''Process the given transformation arguments'''
# all expected argument names
LPREFIX = 'lprefix'
UPREFIX = 'uprefix'
# all expected transformation arguments
lprefix = None
uprefix = None
# iterate over all transformation arguments
for aname, rhs, line_no in transf_args:
# evaluate the RHS expression
try:
rhs = eval(rhs, perf_params)
except Exception, e:
print 'error:%s: failed to evaluate the argument expression: %s' % (line_no, rhs)
print ' --> %s: %s' % (e.__class__.__name__, e)
sys.exit(1)
# prefix name for lower bound
if aname == LPREFIX:
lprefix = (rhs, line_no)
# prefix name for upper bound
elif aname == UPREFIX:
uprefix = (rhs, line_no)
# unknown argument name
else:
print 'error:%s: unrecognized transformation argument: "%s"' % (line_no, aname)
sys.exit(1)
# check semantics of the transformation arguments
lprefix, uprefix = self.checkTransfArgs(lprefix, uprefix)
# return information about the transformation arguments
return (lprefix, uprefix)
#-----------------------------------------------------------------
def checkTransfArgs(self, lprefix, uprefix):
'''Check the semantics of the given transformation arguments'''
# evaluate the prefix name for lower/upper bounds
for i, prefix in enumerate([lprefix, uprefix]):
if prefix != None:
rhs, line_no = prefix
if rhs != None and not isinstance(rhs, str):
print (('error:%s: the prefix name of the lower/upper bound must be ' +
'a string: %s') % (line_no, rhs))
sys.exit(1)
if i == 0:
lprefix = rhs
elif i == 1:
uprefix = rhs
# return information about the transformation arguments
return (lprefix, uprefix)
#-----------------------------------------------------------------
def replaceBounds(self, lprefix, uprefix, stmt):
'''To apply bound replacement transformation'''
# perform the bound replacement transformation
t = transformator.Transformator(lprefix, uprefix, stmt)
transformed_stmt = t.transform()
# return the transformed statement
return transformed_stmt
#-----------------------------------------------------------------
def transform(self):
'''To perform code transformations'''
# read all transformation arguments
lprefix, uprefix = self.readTransfArgs(self.perf_params, self.transf_args)
# perform the bound replacement transformation
transformed_stmt = self.replaceBounds(lprefix, uprefix, self.stmt)
# return the transformed statement
return transformed_stmt
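
A hedged driver sketch for the argument-parsing path above; the (name, rhs, line_no) triples and the perf_params dictionary are hypothetical, and the parent SubModule is assumed to simply store its constructor arguments:

# perf_params acts as the globals dict for eval(), so names in the rhs strings resolve here.
perf_params = {'LB_PREFIX': 'newlb'}
transf_args = [('lprefix', 'LB_PREFIX', 10), ('uprefix', "'newub'", 11)]
br = BoundReplace(perf_params, transf_args, stmt=None)
lprefix, uprefix = br.readTransfArgs(perf_params, transf_args)
# lprefix == 'newlb', uprefix == 'newub'
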
| 34.504505 | 97 | 0.534204 |
97b9be85f0d33cbb9c9220d9dff1243624e1e472 | 337 | py | Python | src/06_tool/test_collection.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | ["Apache-2.0"] | 1 | 2022-03-06T13:03:56.000Z | 2022-03-06T13:03:56.000Z | src/06_tool/test_collection.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | ["Apache-2.0"] | null | null | null | src/06_tool/test_collection.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | ["Apache-2.0"] | null | null | null |
'''
Sets
 Commonly used modules:
 builtins
'''
var = {}  # dict (note: empty braces create a dict, not a set)
var2 = {1, 2, 3, 4}  # set
print(var)
print(var2)
var3 = {3, 6, 5, 4}
print(var2 & var3)  # intersection
print(var2 | var3)  # union
print(var2 ^ var3)  # symmetric difference (union minus intersection)
print(var2 - var3)  # difference
print(var3 - var2)  # difference
# lists use append / extend / insert; sets use add
var2.add(4)
var2.add(40)
var2.remove(2)  # add by value, remove by value; there is no indexing
print(var2)
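
The operator forms above have named-method equivalents; a short reference sketch:

print(var2.intersection(var3))          # same as var2 & var3
print(var2.union(var3))                 # same as var2 | var3
print(var2.symmetric_difference(var3))  # same as var2 ^ var3
print(var2.difference(var3))            # same as var2 - var3
var2.discard(99)                        # like remove(), but no KeyError if the value is absent
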
| 10.212121 | 32 | 0.599407 |
c3c2684597fe91a23870134465c99b9b2c42a201 | 3,678 | py | Python | src/core/app/app/crud/crud_need.py | WeAreBeep/FrontlineUkraine | 9ace8222af347f8ebbcaf444f375b2736f49cd9f | ["MIT"] | null | null | null | src/core/app/app/crud/crud_need.py | WeAreBeep/FrontlineUkraine | 9ace8222af347f8ebbcaf444f375b2736f49cd9f | ["MIT"] | null | null | null | src/core/app/app/crud/crud_need.py | WeAreBeep/FrontlineUkraine | 9ace8222af347f8ebbcaf444f375b2736f49cd9f | ["MIT"] | null | null | null |
from datetime import datetime
from typing import List, Optional, Set, Tuple
from pydantic import BaseModel
from sqlalchemy import BigInteger, SmallInteger
from sqlalchemy.sql import func
from sqlalchemy.orm import Query, Session
from app.crud.base import CRUDBase
from app.models.need import Need
from app.models.need_ppe_type import NeedPpeType
from app.models.post_status import PostStatus
from app.schemas import NeedCreate, Coordinates
def to_model(create_model: NeedCreate) -> Need:
need_model = Need(
timestamp=datetime.utcnow(),
publishAnonymously=create_model.publish_anonymously,
orgTypeId=create_model.org_type,
orgTypeOther=create_model.org_type_other,
organisationName=create_model.organisation_name,
orgCityId=create_model.org_city_id,
orgRegCode=create_model.org_reg_code,
email=create_model.email,
phoneNumber=create_model.phone_number,
contactName=create_model.contact_name,
postcode=create_model.postcode,
tellUsMore=create_model.tell_us_more,
jobTitle=create_model.job_title,
department=create_model.department,
addressLineOne=create_model.address_line_one,
addressLineTwo=create_model.address_line_two,
)
for ppe_type in create_model.ppe_types:
need_ppe_type = NeedPpeType(
ppeTypeId=ppe_type.type,
ppeTypeOther=ppe_type.type_other,
dailyShortage=ppe_type.daily_shortage,
dailyShortageForWhom=ppe_type.daily_shortage_for_whom,
statusId=PostStatus.UnderReview,
)
need_model.ppeTypes.append(need_ppe_type)
return need_model
class CRUDNeed(CRUDBase[Need, NeedCreate, BaseModel]):
def get_queryable_by_post_status(self, db: Session, *, status: PostStatus) -> Query:
return db.query(Need).join(Need.ppeTypes).filter(Need.statusId == status)
def get_all_by_post_status(self, db: Session, *, status: PostStatus) -> List[Need]:
return self.get_queryable_by_post_status(db, status=status).all()
def get_all_published_by_ppe_status(
self, db: Session, *, statuses: Set
) -> List[Need]:
subquery: Query = db.query(NeedPpeType.needId).filter(
NeedPpeType.statusId.in_(statuses)
).having(func.count(NeedPpeType.needId) > 0).group_by(
NeedPpeType.needId, NeedPpeType.statusId
).subquery()
return (
self.get_queryable_by_post_status(db, status=PostStatus.Published)
.filter(Need.id.in_(subquery))
.all()
)
def create_from_request(self,
db: Session,
*,
request: NeedCreate,
converted_coordinates: Optional[Coordinates]) -> Need:
need_model = to_model(request)
if converted_coordinates is not None:
need_model.latitude = converted_coordinates.lat
need_model.longitude = converted_coordinates.lng
return self.create(db, need_model)
    def get_city_meta(self,
                      db: Session,
                      *,
                      post_status: PostStatus,
                      ppe_statuses: Set) -> List[Tuple[BigInteger, SmallInteger, int]]:
q = db.query(Need.orgCityId, NeedPpeType.ppeTypeId, func.coalesce(func.sum(NeedPpeType.dailyShortage), 0)) \
.join(Need.ppeTypes) \
.filter(Need.statusId == post_status) \
.filter(NeedPpeType.statusId.in_(ppe_statuses)) \
.group_by(Need.orgCityId, NeedPpeType.ppeTypeId)
return q.all()
need = CRUDNeed(Need)
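
A read-side usage sketch for the CRUD object above; the caller is assumed to supply an open SQLAlchemy Session (for example from the application's session factory, which is not shown here):

def print_published_needs(db: Session) -> None:
    # Session and PostStatus come from the imports at the top of this module.
    # List currently published needs together with their postcodes.
    for n in need.get_all_by_post_status(db, status=PostStatus.Published):
        print(n.id, n.postcode)
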
| 39.12766 | 116 | 0.664763 |
1e0d00cea7324e3e467f0e164551ee2f3fae18b1 | 6,121 | py | Python | google/ads/google_ads/v5/proto/enums/month_of_year_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | ["Apache-2.0"] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v5/proto/enums/month_of_year_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | ["Apache-2.0"] | null | null | null | google/ads/google_ads/v5/proto/enums/month_of_year_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/enums/month_of_year.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/enums/month_of_year.proto',
package='google.ads.googleads.v5.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v5.enumsB\020MonthOfYearProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v5/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V5.Enums\312\002\035Google\\Ads\\GoogleAds\\V5\\Enums\352\002!Google::Ads::GoogleAds::V5::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n7google/ads/googleads_v5/proto/enums/month_of_year.proto\x12\x1dgoogle.ads.googleads.v5.enums\x1a\x1cgoogle/api/annotations.proto\"\xd1\x01\n\x0fMonthOfYearEnum\"\xbd\x01\n\x0bMonthOfYear\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0b\n\x07JANUARY\x10\x02\x12\x0c\n\x08\x46\x45\x42RUARY\x10\x03\x12\t\n\x05MARCH\x10\x04\x12\t\n\x05\x41PRIL\x10\x05\x12\x07\n\x03MAY\x10\x06\x12\x08\n\x04JUNE\x10\x07\x12\x08\n\x04JULY\x10\x08\x12\n\n\x06\x41UGUST\x10\t\x12\r\n\tSEPTEMBER\x10\n\x12\x0b\n\x07OCTOBER\x10\x0b\x12\x0c\n\x08NOVEMBER\x10\x0c\x12\x0c\n\x08\x44\x45\x43\x45MBER\x10\rB\xe5\x01\n!com.google.ads.googleads.v5.enumsB\x10MonthOfYearProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v5/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V5.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V5\\Enums\xea\x02!Google::Ads::GoogleAds::V5::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MONTHOFYEARENUM_MONTHOFYEAR = _descriptor.EnumDescriptor(
name='MonthOfYear',
full_name='google.ads.googleads.v5.enums.MonthOfYearEnum.MonthOfYear',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='JANUARY', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FEBRUARY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MARCH', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='APRIL', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAY', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='JUNE', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='JULY', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='AUGUST', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SEPTEMBER', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='OCTOBER', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NOVEMBER', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DECEMBER', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=141,
serialized_end=330,
)
_sym_db.RegisterEnumDescriptor(_MONTHOFYEARENUM_MONTHOFYEAR)
_MONTHOFYEARENUM = _descriptor.Descriptor(
name='MonthOfYearEnum',
full_name='google.ads.googleads.v5.enums.MonthOfYearEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MONTHOFYEARENUM_MONTHOFYEAR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=330,
)
_MONTHOFYEARENUM_MONTHOFYEAR.containing_type = _MONTHOFYEARENUM
DESCRIPTOR.message_types_by_name['MonthOfYearEnum'] = _MONTHOFYEARENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MonthOfYearEnum = _reflection.GeneratedProtocolMessageType('MonthOfYearEnum', (_message.Message,), {
'DESCRIPTOR' : _MONTHOFYEARENUM,
'__module__' : 'google.ads.googleads_v5.proto.enums.month_of_year_pb2'
,
'__doc__': """Container for enumeration of months of the year, e.g., "January".""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.enums.MonthOfYearEnum)
})
_sym_db.RegisterMessage(MonthOfYearEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
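
A short consumer sketch for the generated enum; the import path follows the repository layout above and is an assumption, while Name/Value are the standard protobuf enum wrapper helpers:

from google.ads.google_ads.v5.proto.enums import month_of_year_pb2

enum = month_of_year_pb2.MonthOfYearEnum
print(enum.JANUARY)                        # 2
print(enum.MonthOfYear.Name(enum.AUGUST))  # 'AUGUST'
print(enum.MonthOfYear.Value('DECEMBER'))  # 13
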
| 39.237179 | 912 | 0.757883 |
bb1c395485306bb7b4870fc133567958b2ccdfe4 | 18,621 | py | Python | tests/test_importer.py | cclauss/personfinder | 62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2 | ["Apache-2.0"] | 561 | 2015-02-16T07:59:42.000Z | 2022-03-30T17:31:21.000Z | tests/test_importer.py | Anthonymcqueen21/personfinder | ee7791fbc434eb4ec5cfad449288a1e884db5b1e | ["Apache-2.0"] | 591 | 2015-01-30T05:09:30.000Z | 2022-02-26T09:31:25.000Z | tests/test_importer.py | Anthonymcqueen21/personfinder | ee7791fbc434eb4ec5cfad449288a1e884db5b1e | ["Apache-2.0"] | 258 | 2015-01-25T18:35:12.000Z | 2021-12-25T01:44:14.000Z |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for importer.py."""
import datetime
import unittest
from google.appengine.ext import db
from pytest import raises
import model
import importer
TEST_DATETIME = datetime.datetime(2010, 1, 1, 0, 0, 0)
def put_dummy_person_record(repo, person_record_id):
db.put(model.Person(
key_name='%s:%s' % (repo, person_record_id),
repo=repo,
author_name='_test_author_name',
full_name='_test_given_name _test_family_name',
given_name='_test_given_name',
family_name='_test_family_name',
source_date=TEST_DATETIME,
entry_date=TEST_DATETIME,
))
class ImporterTests(unittest.TestCase):
"""Test the import utilities."""
def tearDown(self):
db.delete(model.Person.all())
db.delete(model.Note.all())
def test_strip(self):
assert importer.strip('') == ''
assert importer.strip(None) == ''
assert importer.strip(0) == ''
assert importer.strip(' ') == ''
assert importer.strip(' \t') == ''
assert importer.strip('\t ') == ''
assert importer.strip(' \n ') == ''
assert importer.strip('abc') == 'abc'
assert importer.strip('a b\tc ') == 'a b\tc'
assert importer.strip(' a b\tc\t') == 'a b\tc'
def test_validate_datetime(self):
assert importer.validate_datetime('2010-01-01T00:00:00Z') == \
datetime.datetime(2010, 1, 1, 0, 0, 0)
assert importer.validate_datetime('2010-01-01T01:23:45Z') == \
datetime.datetime(2010, 1, 1, 1, 23, 45)
assert importer.validate_datetime('') == None
assert importer.validate_datetime(0) == None
raises(ValueError, importer.validate_datetime, ' ')
raises(ValueError, importer.validate_datetime, '2010-02-28')
raises(ValueError, importer.validate_datetime, '2010-02-28 01:23:45')
raises(ValueError, importer.validate_datetime, '2010-02-28 01:23:45Z')
raises(ValueError, importer.validate_datetime, '2010-02-28 1:23:45')
# Invalid format
raises(ValueError, importer.validate_datetime, '2010-02-28T1:23:45Z')
# Invalid date
raises(ValueError, importer.validate_datetime, '2010-02-29T01:23:45Z')
# Invalid time
raises(ValueError, importer.validate_datetime, '2010-01-01T25:00:00Z')
def test_validate_boolean(self):
assert importer.validate_boolean('true')
assert importer.validate_boolean('TRUE')
assert importer.validate_boolean('True')
assert importer.validate_boolean('trUe')
assert importer.validate_boolean('1')
assert not importer.validate_boolean('false')
assert not importer.validate_boolean('ture')
assert not importer.validate_boolean('')
assert not importer.validate_boolean(None)
assert not importer.validate_boolean(1)
def test_create_person(self):
# clone record
fields = {'given_name': ' Zhi\n',
'family_name': ' Qiao',
'person_record_id': ' test_domain/person_1 '}
person = importer.create_person('haiti', fields)
assert hasattr(person, 'entry_date')
assert hasattr(person, 'last_modified')
assert person.given_name == 'Zhi'
assert person.family_name == 'Qiao'
assert person.record_id == 'test_domain/person_1'
assert person.key().kind() == 'Person'
assert person.key().id() == None
assert person.key().name() == 'haiti:test_domain/person_1'
# original record with new record_id
fields = {'given_name': ' Zhi\n',
'family_name': ' Qiao'}
person = importer.create_person('haiti', fields)
assert person.record_id.startswith(
'haiti.%s/person.' % model.HOME_DOMAIN)
# original record with specified record_id
fields = {'given_name': ' Zhi\n',
'family_name': ' Qiao',
'person_record_id': model.HOME_DOMAIN + '/person.23 '}
person = importer.create_person('haiti', fields)
assert person.record_id == model.HOME_DOMAIN + '/person.23'
def test_create_note(self):
# clone record
fields = {'note_record_id': ' test_domain/note_1',
'person_record_id': ' test_domain/person_1 '}
# source_date should be required.
raises(AssertionError, importer.create_note, 'haiti', fields)
# With source_date, the conversion should succeed.
fields['source_date'] = '2010-01-02T12:34:56Z'
note = importer.create_note('haiti', fields)
assert note.record_id == 'test_domain/note_1'
assert note.person_record_id == 'test_domain/person_1'
assert note.status == ''
assert note.key().kind() == 'Note'
assert note.key().id() == None
assert note.key().name() == 'haiti:test_domain/note_1'
# original record
fields = {'person_record_id': ' test_domain/person_1 ',
'source_date': '2010-01-02T03:04:05Z'}
note = importer.create_note('haiti', fields)
assert note.record_id.startswith('haiti.%s/note.' % model.HOME_DOMAIN)
assert note.person_record_id == 'test_domain/person_1'
def test_import_person_records(self):
records = []
for i in range(20):
given_name = "given_name_%d" % i
family_name = "family_name_%d" % i
source_date = "2010-01-01T01:23:45Z"
record_id = "test_domain/%d" % i
# Records 0, 8, and 16 have bad domains.
if not i % 8:
record_id = "other_domain/%d" % i
# Records 0, 9, and 18 have invalid dates.
elif not i % 9:
source_date = "2010-01-01 01:23:45"
records.append({'given_name': given_name,
'family_name': family_name,
'person_record_id': record_id,
'source_date': source_date})
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_person, records, False,
True, None)
assert written == 15
assert len(skipped) == 5
assert skipped[0] == (
'Not in authorized domain: u\'other_domain/0\'', {
'given_name': 'given_name_0',
'family_name': 'family_name_0',
'source_date': '2010-01-01T01:23:45Z',
'person_record_id': 'other_domain/0'
})
assert skipped[3] == (
'Not in authorized domain: u\'other_domain/16\'', {
'given_name': 'given_name_16',
'family_name': 'family_name_16',
'source_date': '2010-01-01T01:23:45Z',
'person_record_id': 'other_domain/16'
})
assert skipped[2] == (
'ValueError: Bad datetime: \'2010-01-01 01:23:45\'', {
'given_name': 'given_name_9',
'family_name': 'family_name_9',
'source_date': '2010-01-01 01:23:45',
'person_record_id': 'test_domain/9'
})
assert skipped[4] == (
'ValueError: Bad datetime: \'2010-01-01 01:23:45\'', {
'given_name': 'given_name_18',
'family_name': 'family_name_18',
'source_date': '2010-01-01 01:23:45',
'person_record_id': 'test_domain/18'
})
assert total == 20
# Also confirm that 15 records were put into the datastore.
assert model.Person.all().count() == 15
def test_import_note_records(self):
# Prepare person records which the notes will be added to.
for domain in ['test_domain', 'other_domain']:
for i in range(20):
put_dummy_person_record('haiti', '%s/person_%d' % (domain, i))
records = []
for i in range(20):
source_date = '2010-01-01T01:23:45Z'
note_id = 'test_domain/record_%d' % i
person_id = 'test_domain/person_%d' % i
# Records 0, 8, and 16 have bad note record domains.
if not i % 8:
note_id = 'other_domain/record_%d' % i
# Records 0, 9, and 18 have bad person record domains.
# This should not matter for note records.
elif not i % 9:
person_id = 'other_domain/person_%d' % i
# Records 0, 5, 10, and 15 have invalid dates.
elif not i % 5:
source_date = '2010-01-01 01:23:45'
records.append({'person_record_id': person_id,
'note_record_id': note_id,
'source_date': source_date})
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, False,
True, None)
assert written == 14
assert len(skipped) == 6
assert skipped[0] == (
'Not in authorized domain: u\'other_domain/record_0\'', {
'person_record_id': 'test_domain/person_0',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'other_domain/record_0'
})
assert skipped[2] == (
'Not in authorized domain: u\'other_domain/record_8\'', {
'person_record_id': 'test_domain/person_8',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'other_domain/record_8'
})
assert skipped[1] == (
'ValueError: Bad datetime: \'2010-01-01 01:23:45\'', {
'person_record_id': 'test_domain/person_5',
'source_date': '2010-01-01 01:23:45',
'note_record_id': 'test_domain/record_5'
})
assert skipped[4] == (
'ValueError: Bad datetime: \'2010-01-01 01:23:45\'', {
'person_record_id': 'test_domain/person_15',
'source_date': '2010-01-01 01:23:45',
'note_record_id': 'test_domain/record_15'
})
assert total == 20
# Also confirm that 14 records were put into the datastore.
assert model.Note.all().count() == 14
# Confirm all records are NOT marked reviewed.
for note in model.Note.all():
assert note.reviewed == False
def test_authorize_write_believed_dead_note_records(self):
# Prepare person records which the notes will be added to.
for i in range(20):
put_dummy_person_record('haiti', 'test_domain/person_%d' % i)
# Prepare input data
records = []
for i in range(20):
source_date = '2010-01-01T01:23:45Z'
note_id = 'test_domain/record_%d' % i
person_id = 'test_domain/person_%d' % i
status = 'believed_missing'
# Records 0, 8, and 16 have status 'believed_dead'.
if not i % 8:
status = 'believed_dead'
records.append({'person_record_id': person_id,
'note_record_id': note_id,
'source_date': source_date,
'status': status})
# Disallow import notes with status 'believed_dead'.
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, False,
False, None)
assert written == 17
assert len(skipped) == 3
assert skipped[0] == (
'Not authorized to post notes with the status'
' \"believed_dead\"', {
'person_record_id': 'test_domain/person_0',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'test_domain/record_0',
'status': 'believed_dead'
})
assert skipped[1] == (
'Not authorized to post notes with the status'
' \"believed_dead\"', {
'person_record_id': 'test_domain/person_8',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'test_domain/record_8',
'status': 'believed_dead'
})
assert skipped[2] == (
'Not authorized to post notes with the status'
' \"believed_dead\"', {
'person_record_id': 'test_domain/person_16',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'test_domain/record_16',
'status': 'believed_dead'
})
assert total == 20
assert model.Note.all().count() == 17
# Allow import notes with status 'believed_dead'.
model.Note.all().get().delete()
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, False,
True, None)
assert written == 20
assert len(skipped) == 0
assert model.Note.all().count() == 20
for note in model.Note.all():
if note.person_record_id in ['test_domain/person_0',
'test_domain/person_8',
'test_domain/person_16']:
assert note.status == 'believed_dead'
else:
assert note.status == 'believed_missing'
def test_import_reviewed_note_records(self):
# Prepare person records which the notes will be added to.
for i in range(3):
put_dummy_person_record('haiti', 'test_domain/person_%d' % i)
records = []
for i in range(3):
source_date = '2010-01-01T01:23:45Z'
note_id = 'test_domain/record_%d' % i
person_id = 'test_domain/person_%d' % i
records.append({'person_record_id': person_id,
'note_record_id': note_id,
'source_date': source_date})
# Import reviewed notes.
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, True,
True, None)
assert written == 3
assert len(skipped) == 0
assert total == 3
# Confirm all records are marked reviewed.
for note in model.Note.all():
assert note.reviewed == True
def test_import_notes_disabled_note_records(self):
'''Check that notes will be rejected from API import when
notes_disabled is set to be True by the record author.'''
records = []
# First prepare and import two person records
for i in range(2):
given_name = "given_name_%d" % i
family_name = "family_name_%d" % i
source_date = "2010-01-01T01:23:45Z"
record_id = "test_domain/person_%d" % i
author_name = "test_author"
author_email = "test_email"
records.append({'given_name': given_name,
'family_name': family_name,
'person_record_id': record_id,
'source_date': source_date,
'author_name': author_name,
'author_email': author_email})
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_person, records, False,
True, None)
assert written == 2
assert len(skipped) == 0
assert total == 2
assert model.Person.all().count() == 2
# Disable comments for first person record
person = model.Person.get('haiti', 'test_domain/person_0')
assert person
person.notes_disabled = True
db.put([person])
for person in model.Person.all():
if person.person_record_id == 'test_domain/person_0':
assert person.notes_disabled == True
# Import notes
records = []
for i in range(2):
source_date = '2010-01-01T01:23:45Z'
note_id = 'test_domain/record_%d' % i
person_id = 'test_domain/person_%d' % i
records.append({'person_record_id': person_id,
'note_record_id': note_id,
'source_date': source_date})
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, False,
True, None)
        # Check that the note associated with the first person record is skipped.
assert written == 1
assert len(skipped) == 1
assert skipped[0] == (
'The author has disabled new commenting on this record',
{
'person_record_id': 'test_domain/person_0',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'test_domain/record_0',
})
assert total == 2
assert model.Note.all().count() == 1
def test_import_note_records_for_non_existent_person(self):
records = [{
'person_record_id': 'test_domain/non_existent_person',
'note_record_id': 'test_domain/record_0',
'source_date': '2010-01-01T01:23:45Z',
}]
written, skipped, total = importer.import_records(
'haiti', 'test_domain', importer.create_note, records, False,
True, None)
assert written == 0
assert len(skipped) == 1
assert skipped[0] == (
"There is no person record with the person_record_id "
"'test_domain/non_existent_person'", {
'person_record_id': 'test_domain/non_existent_person',
'source_date': '2010-01-01T01:23:45Z',
'note_record_id': 'test_domain/record_0'
})
assert total == 1
assert model.Note.all().count() == 0
if __name__ == "__main__":
unittest.main()
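
The return contract the assertions above rely on, restated as a hedged standalone check (records is any list shaped like the ones built in these tests):

def check_import_counts(repo, domain, records):
    # import_records reports (written, skipped, total); in the cases above the
    # skipped list accounts for every record that was not written.
    written, skipped, total = importer.import_records(
        repo, domain, importer.create_person, records, False, True, None)
    assert written + len(skipped) == total
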
| 40.218143 | 78 | 0.566833 |
108014fd8f7115ef7be5a3aefc13b01933641326 | 1,555 | py | Python | tests/pyLDAvis/test_gensim_models.py | settinghead/pyLDAvis | 30ba3c6974cf080a4db7e219d093c53a6c71e68a | ["BSD-3-Clause"] | 5 | 2021-05-04T02:27:14.000Z | 2022-03-10T14:41:32.000Z | tests/pyLDAvis/test_gensim_models.py | settinghead/pyLDAvis | 30ba3c6974cf080a4db7e219d093c53a6c71e68a | ["BSD-3-Clause"] | 1 | 2021-05-04T02:26:30.000Z | 2021-05-04T02:26:30.000Z | tests/pyLDAvis/test_gensim_models.py | settinghead/pyLDAvis | 30ba3c6974cf080a4db7e219d093c53a6c71e68a | ["BSD-3-Clause"] | 1 | 2022-02-25T15:06:52.000Z | 2022-02-25T15:06:52.000Z |
#!/usr/bin/env python
from gensim.models import LdaModel, HdpModel
from gensim.corpora.dictionary import Dictionary
import pyLDAvis.gensim
import os
def get_corpus_dictionary():
"""Crafts a toy corpus and the dictionary associated."""
# Toy corpus.
corpus = [
['carrot', 'salad', 'tomato'],
['carrot', 'salad', 'dish'],
['tomato', 'dish'],
['tomato', 'salad'],
['car', 'break', 'highway'],
['highway', 'accident', 'car'],
['moto', 'break'],
['accident', 'moto', 'car']
]
dictionary = Dictionary(corpus)
# Transforming corpus with dictionary.
corpus = [dictionary.doc2bow(doc) for doc in corpus]
# Building reverse index.
for (token, uid) in dictionary.token2id.items():
dictionary.id2token[uid] = token
return corpus, dictionary
def test_lda():
"""Trains a LDA model and tests the html outputs."""
corpus, dictionary = get_corpus_dictionary()
lda = LdaModel(corpus=corpus,
num_topics=2)
data = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
pyLDAvis.save_html(data, 'index_lda.html')
os.remove('index_lda.html')
def test_hdp():
"""Trains a HDP model and tests the html outputs."""
corpus, dictionary = get_corpus_dictionary()
hdp = HdpModel(corpus, dictionary.id2token)
data = pyLDAvis.gensim.prepare(hdp, corpus, dictionary)
pyLDAvis.save_html(data, 'index_hdp.html')
os.remove('index_hdp.html')
if __name__ == "__main__":
test_lda()
test_hdp()
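
A hedged variant of test_lda() that writes JSON instead of HTML; pyLDAvis.save_json is assumed to be available in this fork alongside save_html:

def test_lda_json():
    """Trains a LDA model and tests the json output."""
    corpus, dictionary = get_corpus_dictionary()
    lda = LdaModel(corpus=corpus, num_topics=2)
    data = pyLDAvis.gensim.prepare(lda, corpus, dictionary)
    pyLDAvis.save_json(data, 'index_lda.json')
    os.remove('index_lda.json')
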
| 25.080645 | 60 | 0.636013 |
873a972489eca74553ce27a78ecc383eea689ae9 | 1,917 | py | Python | tests/test_extension_telnet.py | Respawnz/scrapy | 22f7934fcc4912c1f9cdf87e426eb29d777db349 | ["BSD-3-Clause"] | 2 | 2020-02-08T15:49:40.000Z | 2021-08-21T18:57:38.000Z | tests/test_extension_telnet.py | youyangxyb/scrapy | caa1dea890e9cb2024cf9895efe54b3cf0ac1ae9 | ["BSD-3-Clause"] | 1 | 2021-07-24T14:26:22.000Z | 2021-07-24T14:26:22.000Z | tests/test_extension_telnet.py | youyangxyb/scrapy | caa1dea890e9cb2024cf9895efe54b3cf0ac1ae9 | ["BSD-3-Clause"] | 1 | 2020-01-28T07:57:35.000Z | 2020-01-28T07:57:35.000Z |
from twisted.trial import unittest
from twisted.conch.telnet import ITelnetProtocol
from twisted.cred import credentials
from twisted.internet import defer
from scrapy.extensions.telnet import TelnetConsole
from scrapy.utils.test import get_crawler
class TelnetExtensionTest(unittest.TestCase):
def _get_console_and_portal(self, settings=None):
crawler = get_crawler(settings_dict=settings)
console = TelnetConsole(crawler)
username = console.username
password = console.password
# This function has some side effects we don't need for this test
console._get_telnet_vars = lambda: {}
console.start_listening()
protocol = console.protocol()
portal = protocol.protocolArgs[0]
return console, portal
@defer.inlineCallbacks
def test_bad_credentials(self):
console, portal = self._get_console_and_portal()
creds = credentials.UsernamePassword(b'username', b'password')
d = portal.login(creds, None, ITelnetProtocol)
yield self.assertFailure(d, ValueError)
console.stop_listening()
@defer.inlineCallbacks
def test_good_credentials(self):
console, portal = self._get_console_and_portal()
creds = credentials.UsernamePassword(
console.username.encode('utf8'),
console.password.encode('utf8')
)
d = portal.login(creds, None, ITelnetProtocol)
yield d
console.stop_listening()
@defer.inlineCallbacks
def test_custom_credentials(self):
settings = {
'TELNETCONSOLE_USERNAME': 'user',
'TELNETCONSOLE_PASSWORD': 'pass',
}
console, portal = self._get_console_and_portal(settings=settings)
creds = credentials.UsernamePassword(b'user', b'pass')
d = portal.login(creds, None, ITelnetProtocol)
yield d
console.stop_listening()
| 34.232143 | 73 | 0.684403 |
7c0f0bf7bf28773464a56ff64a6b9dc115677444 | 4,479 | py | Python | autumn/demography/social_mixing.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | ["BSD-2-Clause-FreeBSD"] | null | null | null | autumn/demography/social_mixing.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | ["BSD-2-Clause-FreeBSD"] | null | null | null | autumn/demography/social_mixing.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | ["BSD-2-Clause-FreeBSD"] | null | null | null |
import os
import numpy as np
import pandas as pd
def load_specific_prem_sheet(mixing_location, country):
"""
Load a mixing matrix sheet, according to name of the sheet (i.e. country)
:param: mixing_location: str
One of the four mixing locations - ('all_locations', 'home', 'other_locations', 'school', 'work')
:param: country: str
Name of the country of interest
"""
if country == "victoria":
country = "australia"
# Files with name ending with _1 have a header, but not those ending with _2 - plus need to determine file to read
sheet_number, header_argument = ("1", 0) if country.title() < "Mozambique" else ("2", None)
file_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"social_mixing_data",
"MUestimates_" + mixing_location + "_" + sheet_number + ".xlsx",
)
return np.array(pd.read_excel(file_dir, sheet_name=country.title(), header=header_argument))
def load_all_prem_types(country):
"""
Collate the matrices of different location types for a given country
:param country: str
Name of the requested country
"""
matrices = {}
for sheet_type in ("all_locations", "home", "other_locations", "school", "work"):
matrices[sheet_type] = load_specific_prem_sheet(sheet_type, country)
return matrices
def load_age_calibration():
"""
    Convert the age-group-specific case counts to the COVID-19 age groups
    Age groups:  0–9, 10–19, 20–29, 30–39, 40–49, 50–59, 60–69, 70–79, 80+
    Case counts:   2,     2,    13,    11,    11,    14,     8,     6,   4
    Returns:
        a pandas Series indexed by the 5-year age breakpoints
"""
age_breakpoints = [int(i_break) for i_break in list(range(0, 80, 5))]
# split the case numbers into 5 year groups
case_numbers = [2, 2, 13, 11, 11, 14, 8, 6, 4]
case_numbers = [each / 2 for each in case_numbers for y in range(2)]
# create case numbers for 75+
y = case_numbers[:-3]
y.append(sum(case_numbers[-3:]))
return pd.Series(y, index=age_breakpoints)
def update_mixing_with_multipliers(mixing_matrix, multipliers):
"""
Updates the mixing matrix using some age-specific multipliers
:param mixing_matrix: the baseline mixing-matrix
:param multipliers: a matrix with the ages-specific multipliers
:return: the updated mixing-matrix
"""
assert mixing_matrix.shape == multipliers.shape
return np.multiply(mixing_matrix, multipliers)
def apply_age_specific_contact_multipliers(mixing_matrix, age_specific_multipliers):
"""
Update a mixing matrix using age-specific multipliers specified through a dictionary
:param mixing_matrix: the original mixing matrix
:param age_specific_multipliers: dict
keys are age indices between 0 and 15, values are multipliers
:return: the updated mixing matrix
"""
mixing_multipliers_matrix = np.ones((16, 16))
for age_index, multiplier in age_specific_multipliers.items():
        assert 0 <= age_index <= 15
mixing_multipliers_matrix[age_index, :] *= multiplier
mixing_multipliers_matrix[:, age_index] *= multiplier
return update_mixing_with_multipliers(mixing_matrix, mixing_multipliers_matrix)
def get_all_prem_countries():
"""
Return the list of countries for which Prem et al provide contact matrices
"""
sheet_names = []
for file_number in ("1", "2"):
filepath = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"social_mixing_data",
"MUestimates_all_locations_" + file_number + ".xlsx",
)
xl = pd.ExcelFile(filepath)
sheet_names += xl.sheet_names
return sheet_names
def get_total_contact_rates_by_age(mixing_matrix, direction='horizontal'):
"""
Sum the contact-rates by age group
:param mixing_matrix: the input mixing matrix
:param direction: either 'horizontal' (infectee's perspective) or 'vertical' (infector's perspective)
:return: dict
keys are the age categories and values are the aggregated contact rates
"""
assert direction in ['horizontal', 'vertical'], "direction should be in ['horizontal', 'vertical']"
aggregated_contact_rates = {}
for i in range(16):
if direction == 'horizontal':
aggregated_contact_rates[str(5 * i)] = mixing_matrix[i, :].sum()
else:
aggregated_contact_rates[str(5 * i)] = mixing_matrix[:, i].sum()
return aggregated_contact_rates
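
A minimal sketch, not part of the module, showing the multiplier helpers on a synthetic matrix; the 16 brackets follow the 5-year Prem convention used above:

if __name__ == "__main__":
    base_matrix = np.ones((16, 16))
    # halve contacts involving the two oldest brackets (70-74 and 75+)
    adjusted = apply_age_specific_contact_multipliers(base_matrix, {14: 0.5, 15: 0.5})
    print(get_total_contact_rates_by_age(adjusted, direction="horizontal"))
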
| 35.547619 | 118 | 0.673141 |
41462f237f3e79e68584a252cbec486f8cef8227 | 1,640 | py | Python | lib/surface/iap/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/iap/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | null | null | null | lib/surface/iap/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The super-group for the IAP CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA,
base.ReleaseTrack.ALPHA)
class Iap(base.Group):
"""Manage IAP policies.
Cloud Identity-Aware Proxy (Cloud IAP) controls access to your cloud
applications running on Google Cloud Platform. Cloud IAP works by
verifying user identity and context of the request to determine if a user
should be allowed to access the application.
More information on Cloud IAP can be found here:
https://cloud.google.com/iap and detailed documentation can be found here:
https://cloud.google.com/iap/docs/
"""
category = 'Identity and Security'
def Filter(self, context, args):
# TODO(b/190535352): Determine if command group works with project number
base.RequireProjectID(args)
del context, args
| 35.652174 | 79 | 0.742073 |