content stringlengths 5 1.05M |
|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Functions of generating summary protocol buffers. Adapted from
https://github.com/lanpa/tensorboard-pytorch/blob/master/tensorboardX/summary.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import io
import wave
import struct
import json
import re as _re
import numpy as np
try:
import mxnet
from distutils.version import LooseVersion
if LooseVersion(mxnet.__version__) < LooseVersion('1.2.0'):
logging.warning('The currently installed MXNet version %s is less than 1.2.0.'
' Some functionality of MXBoard may not work.', mxnet.__version__)
except ImportError:
raise ImportError('MXBoard requires MXNet with version >= 1.2.0.'
' Please follow the instruction here to install MXNet first.'
' http://mxnet.incubator.apache.org/install/index.html')
from mxnet.ndarray import NDArray
from mxnet.symbol import Symbol
from mxnet.gluon import HybridBlock
from .proto.summary_pb2 import Summary
from .proto.summary_pb2 import HistogramProto
from .proto.summary_pb2 import SummaryMetadata
from .proto.tensor_pb2 import TensorProto
from .proto.tensor_shape_pb2 import TensorShapeProto
from .proto.plugin_pr_curve_pb2 import PrCurvePluginData
from .proto.node_def_pb2 import NodeDef
from .proto.graph_pb2 import GraphDef
from .proto.attr_value_pb2 import AttrValue
from .proto.versions_pb2 import VersionDef
from .utils import _make_numpy_array, _prepare_image
try:
from PIL import Image
except ImportError:
Image = None
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
def _clean_tag(name):
"""Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Returns
-------
The cleaned tag name.
"""
# In the past, the first argument to summary ops was a tag, which allowed
# arbitrary characters. Now we are changing the first argument to be the node
# name. This has a number of advantages (users of summary ops now can
# take advantage of the tf name scope system) but risks breaking existing
# usage, because a much smaller set of characters are allowed in node names.
# This function replaces all illegal characters with _s, and logs a warning.
# It also strips leading slashes from the name.
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)
name = new_name
return name
def scalar_summary(tag, scalar):
    """Outputs a `Summary` protocol buffer containing a single scalar value.
    The generated Summary has a Tensor.proto containing the input Tensor.
    Adapted from the TensorFlow function `scalar()` at
    https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/summary/summary.py

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as the series name in TensorBoard.
    scalar : int, MXNet `NDArray`, or `numpy.ndarray`
        A scalar value or an ndarray of shape (1,).

    Returns
    -------
    A `Summary` protobuf of the `scalar` value.

    Raises
    ------
    ValueError: If the scalar has the wrong shape or type.
    """
    tag = _clean_tag(tag)
    scalar = _make_numpy_array(scalar)
    # Bug fix: the shape check previously used `assert`, which is stripped
    # under `python -O` and raises AssertionError, while the docstring
    # promises ValueError. Raise the documented exception explicitly.
    if scalar.squeeze().ndim != 0:
        raise ValueError('scalar should be 0D')
    scalar = float(scalar)
    return Summary(value=[Summary.Value(tag=tag, simple_value=scalar)])
def histogram_summary(tag, values, bins):
    """Outputs a `Summary` protocol buffer with a histogram.
    Adding a histogram summary makes it possible to visualize the data's distribution in
    TensorBoard. See detailed explanation of the TensorBoard histogram dashboard at
    https://www.tensorflow.org/get_started/tensorboard_histograms
    This op reports an `InvalidArgument` error if any value is not finite.
    Adapted from the TensorFlow function `histogram()` at
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/summary/summary.py

    Parameters
    ----------
    tag : str
        A name for the summary of the histogram. Will also serve as a series name in
        TensorBoard.
    values : MXNet `NDArray` or `numpy.ndarray`
        Values for building the histogram.
    bins : int or sequence of scalars or str
        Forwarded to `numpy.histogram` via `_make_histogram`; presumably accepts
        anything `numpy.histogram`'s `bins` argument accepts — TODO confirm.

    Returns
    -------
    A `Summary` protobuf of the histogram.
    """
    tag = _clean_tag(tag)
    values = _make_numpy_array(values)
    # cast to float so integer inputs histogram consistently
    hist = _make_histogram(values.astype(float), bins)
    return Summary(value=[Summary.Value(tag=tag, histo=hist)])
def _make_histogram(values, bins):
    """Converts values into a histogram proto using logic from
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc"""
    flat = values.reshape(-1)
    counts, edges = np.histogram(flat, bins=bins)
    # The proto stores only the upper edge of each bucket.
    upper_edges = edges[1:]
    return HistogramProto(min=flat.min(),
                          max=flat.max(),
                          num=len(flat),
                          sum=flat.sum(),
                          sum_squares=flat.dot(flat),
                          bucket_limit=upper_edges,
                          bucket=counts)
def image_summary(tag, image):
    """Outputs a `Summary` protocol buffer with image(s).

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as a series name in TensorBoard.
    image : MXNet `NDArray` or `numpy.ndarray`
        Image data that is one of the following layout: (H, W), (C, H, W), (N, C, H, W).
        The pixel values of the image are assumed to be normalized in the range [0, 1].
        The image will be rescaled to the range [0, 255] and cast to `np.uint8` before creating
        the image protobuf.

    Returns
    -------
    A `Summary` protobuf of the image.
    """
    tag = _clean_tag(tag)
    # _prepare_image presumably normalizes layout and rescales to uint8 as the
    # docstring describes — see utils._prepare_image for the actual contract.
    image = _prepare_image(image)
    image = _make_image(image)
    return Summary(value=[Summary.Value(tag=tag, image=image)])
def _make_image(tensor):
    """Converts an NDArray type image to Image protobuf.

    PNG-encodes the pixel data via PIL and wraps it in a `Summary.Image`.
    Raises ImportError if PIL is not installed.
    """
    assert isinstance(tensor, NDArray)
    if Image is None:
        raise ImportError('need to install PIL for visualizing images')
    # assumes (H, W, C) layout with uint8 values — presumably guaranteed by
    # _prepare_image upstream; TODO confirm
    height, width, channel = tensor.shape
    tensor = _make_numpy_array(tensor)
    image = Image.fromarray(tensor)
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()
    # colorspace is set to the channel count (1=grayscale, 3=RGB, 4=RGBA)
    return Summary.Image(height=height, width=width, colorspace=channel,
                         encoded_image_string=image_string)
def audio_summary(tag, audio, sample_rate=44100):
    """Outputs a `Summary` protocol buffer with audio data.

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as a series name in TensorBoard.
    audio : MXNet `NDArray` or `numpy.ndarray`
        Audio data that can be squeezed into 1D array. The values are in the range [-1, 1].
    sample_rate : int
        Sampling frequency. 44,100Hz is a common sampling frequency.

    Returns
    -------
    A `Summary` protobuf of the audio data.

    Raises
    ------
    ValueError: If the input is not squeezable to a 1D array.
    """
    audio = audio.squeeze()
    if audio.ndim != 1:
        raise ValueError('input audio must be squeezable to 1D, input audio squeezed '
                         'shape is {}'.format(audio.shape))
    audio = _make_numpy_array(audio)
    # scale [-1, 1] floats to signed 16-bit PCM sample values
    tensor_list = [int(32767.0 * x) for x in audio]
    fio = io.BytesIO()
    wave_writer = wave.open(fio, 'wb')
    wave_writer.setnchannels(1)
    wave_writer.setsampwidth(2)  # 2 bytes per sample = 16-bit PCM
    wave_writer.setframerate(sample_rate)
    # Pack all samples little-endian in one call; the previous per-sample
    # `bytes +=` loop was quadratic in the number of samples.
    wave_writer.writeframes(struct.pack('<%dh' % len(tensor_list), *tensor_list))
    wave_writer.close()
    audio_string = fio.getvalue()
    fio.close()
    audio = Summary.Audio(sample_rate=sample_rate,
                          num_channels=1,
                          length_frames=len(tensor_list),
                          encoded_audio_string=audio_string,
                          content_type='audio/wav')
    return Summary(value=[Summary.Value(tag=tag, audio=audio)])
def text_summary(tag, text):
    """Outputs a `Summary` protocol buffer with text data.

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as a series name in TensorBoard.
    text : str
        Text data.

    Returns
    -------
    A `Summary` protobuf of the text data.
    """
    plugin_data = [SummaryMetadata.PluginData(plugin_name='text')]
    smd = SummaryMetadata(plugin_data=plugin_data)
    # text is stored as a single-element DT_STRING tensor, UTF-8 encoded
    tensor = TensorProto(dtype='DT_STRING',
                         string_val=[text.encode(encoding='utf_8')],
                         tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]))
    # NOTE(review): the tag is passed as node_name rather than tag here —
    # presumably required by TensorBoard's text plugin; confirm before changing.
    return Summary(value=[Summary.Value(node_name=tag, metadata=smd, tensor=tensor)])
def pr_curve_summary(tag, labels, predictions, num_thresholds, weights=None):
    """Outputs a precision-recall curve `Summary` protocol buffer.

    Parameters
    ----------
    tag : str
        A tag attached to the summary. Used by TensorBoard for organization.
    labels : MXNet `NDArray` or `numpy.ndarray`.
        The ground truth values. A tensor of 0/1 values with arbitrary shape.
    predictions : MXNet `NDArray` or `numpy.ndarray`.
        A float32 tensor whose values are in the range `[0, 1]`. Dimensions must
        match those of `labels`.
    num_thresholds : int
        Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for.
        Should be `>= 2`. This value should be a constant integer value, not a tensor
        that stores an integer.
        The thresholds for computing the pr curves are calculated in the following way:
        `width = 1.0 / (num_thresholds - 1),
        thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`.
    weights : MXNet `NDArray` or `numpy.ndarray`.
        Optional float32 tensor. Individual counts are multiplied by this value.
        This tensor must be either the same shape as or broadcastable to the `labels` tensor.

    Returns
    -------
    A `Summary` protobuf of the pr_curve.
    """
    # num_thresholds > 127 results in failure of creating protobuf,
    # probably a bug of protobuf
    if num_thresholds > 127:
        logging.warning('num_thresholds>127 would result in failure of creating pr_curve protobuf,'
                        ' clipping it at 127')
        num_thresholds = 127
    labels = _make_numpy_array(labels)
    predictions = _make_numpy_array(predictions)
    if weights is not None:
        weights = _make_numpy_array(weights)
    # data is a (6, num_thresholds) array: tp, fp, tn, fn, precision, recall
    data = _compute_curve(labels, predictions, num_thresholds=num_thresholds, weights=weights)
    pr_curve_plugin_data = PrCurvePluginData(version=0,
                                             num_thresholds=num_thresholds).SerializeToString()
    plugin_data = [SummaryMetadata.PluginData(plugin_name='pr_curves',
                                              content=pr_curve_plugin_data)]
    smd = SummaryMetadata(plugin_data=plugin_data)
    tensor = TensorProto(dtype='DT_FLOAT',
                         float_val=data.reshape(-1).tolist(),
                         tensor_shape=TensorShapeProto(
                             dim=[TensorShapeProto.Dim(size=data.shape[0]),
                                  TensorShapeProto.Dim(size=data.shape[1])]))
    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
# A value that we use as the minimum value during division of counts to prevent
# division by 0. 1.0 does not work: Certain weights could cause counts below 1.
_MINIMUM_COUNT = 1e-7
def _compute_curve(labels, predictions, num_thresholds, weights=None):
"""This function is another implementation of functions in
https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py"""
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return np.stack((tp, fp, tn, fn, precision, recall))
def _scoped_name(scope_name, node_name):
return '/'.join([scope_name, node_name])
def _get_nodes_from_symbol(sym):
    """Given a symbol and shapes, return a list of `NodeDef`s for visualizing
    the graph in TensorBoard.

    Parameters
    ----------
    sym : mxnet.symbol.Symbol
        The symbol whose computation graph is converted.

    Returns
    -------
    list of NodeDef
        One protobuf node per MXNet graph node, with parameter (data) nodes
        scoped under the operator that consumes them.

    Raises
    ------
    TypeError
        If `sym` is not an `mxnet.symbol.Symbol`.
    """
    if not isinstance(sym, Symbol):
        raise TypeError('sym must be an `mxnet.symbol.Symbol`,'
                        ' received type {}'.format(str(type(sym))))
    conf = json.loads(sym.tojson())
    nodes = conf['nodes']
    # First pass: map each data (parameter) node id to the list of operator
    # node indices that consume it.
    data2op = {}  # key: data id, value: list of ops to whom data is an input
    for i, node in enumerate(nodes):
        if node['op'] != 'null':  # node is an operator
            input_list = node['inputs']
            for idx in input_list:
                if idx[0] == 0:  # do not include 'data' node in the op scope
                    continue
                if idx[0] in data2op:
                    # nodes[idx[0]] is a data as an input to op nodes[i]
                    data2op[idx[0]].append(i)
                else:
                    data2op[idx[0]] = [i]
    # In the following, we group data with operators they belong to
    # by attaching them with operator names as scope names.
    # The parameters with the operator name as the prefix will be
    # assigned with the scope name of that operator. For example,
    # a convolution op has name 'conv', while its weight and bias
    # have name 'conv_weight' and 'conv_bias'. In the end, the operator
    # has scope name 'conv' prepended to its name, i.e. 'conv/conv'.
    # The parameters are named 'conv/conv_weight' and 'conv/conv_bias'.
    node_defs = []
    for i, node in enumerate(nodes):
        node_name = node['name']
        op_name = node['op']
        kwargs = {'op': op_name, 'name': node_name}
        if op_name != 'null':  # node is an operator
            inputs = []
            input_list = node['inputs']
            for idx in input_list:
                input_node = nodes[idx[0]]
                input_node_name = input_node['name']
                if input_node['op'] != 'null':
                    # operator inputs are referenced by their scoped name
                    inputs.append(_scoped_name(input_node_name, input_node_name))
                elif idx[0] in data2op and len(data2op[idx[0]]) == 1 and data2op[idx[0]][0] == i:
                    # the data is only as an input to nodes[i], no else
                    inputs.append(_scoped_name(node_name, input_node_name))
                else:  # the data node has no scope name, e.g. 'data' as the input node
                    inputs.append(input_node_name)
            kwargs['input'] = inputs
            kwargs['name'] = _scoped_name(node_name, node_name)
        elif i in data2op and len(data2op[i]) == 1:
            # node is a data node belonging to one op, find out which operator this node belongs to
            op_node_name = nodes[data2op[i][0]]['name']
            kwargs['name'] = _scoped_name(op_node_name, node_name)
        if 'attrs' in node:
            # TensorBoard would escape quotation marks, replace it with space
            attr = json.dumps(node['attrs'], sort_keys=True).replace("\"", ' ')
            attr = {'param': AttrValue(s=attr.encode(encoding='utf-8'))}
            kwargs['attr'] = attr
        node_def = NodeDef(**kwargs)
        node_defs.append(node_def)
    return node_defs
def _sym2pb(sym):
    """Converts an MXNet symbol to its graph protobuf definition."""
    node_defs = _get_nodes_from_symbol(sym)
    return GraphDef(node=node_defs, versions=VersionDef(producer=100))
def _net2pb(net):
    """Convert a HybridBlock or Symbol into its graph protobuf definition."""
    if isinstance(net, Symbol):
        return _sym2pb(net)
    if not isinstance(net, HybridBlock):
        raise TypeError('only accepts mxnet.gluon.HybridBlock and mxnet.symbol.Symbol '
                        'as input network, received type {}'.format(str(type(net))))
    # TODO(junwu): may need a more approprite way to get symbol from a HybridBlock
    if not net._cached_graph:
        raise RuntimeError(
            "Please first call net.hybridize() and then run forward with "
            "this net at least once before calling add_graph().")
    return _sym2pb(net._cached_graph[1])
|
from fastapi import APIRouter,WebSocket,Request,HTTPException
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
# Template environment for HTML pages served by this router.
ftemps = Jinja2Templates(directory="app/templates")
fileres = APIRouter()


@fileres.get("/addone", response_class=HTMLResponse)
async def add_one(request: Request):
    """Render the 'add one' form page."""
    return ftemps.TemplateResponse("addone.html", context={"request": request})
|
from dataclasses import fields
from rest_framework import serializers
from core import models
class TagSerializer(serializers.ModelSerializer):
    """Serializer for the `Tag` model exposing `name` and a read-only `id`."""

    class Meta:
        model = models.Tag
        fields = ("name", "id")
        read_only_fields = ("id",)
class ActorSerializer(serializers.ModelSerializer):
    """Serializer for the `Actor` model exposing names and a read-only `id`."""

    class Meta:
        model = models.Actor
        fields = ("name", "last_name", "id")
        read_only_fields = ("id",)
from JumpScale import j
descr = """
Check on average cpu for last 15 min
"""

# Watchdog-check metadata consumed by the JumpScale framework.
# NOTE(review): descr says 15 min but maxperiod is 17 min — presumably
# deliberate slack for scheduling jitter; confirm.
organization = "jumpscale"
maxperiod = 17*60 # always in sec
enable = True
roles = []


def check(watchdogevent):
    """Watchdog hook; currently a stub that performs no check."""
    pass
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
from . import *
class wElement(wComponent):
    """
    Class which models an element. Every object that can be added to a page is an element.
    """
    def __init__(self):
        # Delegate to the base class; wElement adds no state of its own.
        wComponent.__init__(self)
|
""" Python 3.6+
Data import from Excel file and addition to CTVdb
Carmen Sheppard 2020-2022
"""
import argparse
import os
import sys
import pandas as pd
from Database_tools.db_functions import searchexact, session_maker
from Database_tools.sqlalchemydeclarative import Serotype, SerotypeVariants, Group, Variants, Genes, \
VariantGroup
from run_scripts.tools import check_db_path
def parse_args():
    """Parse command-line arguments.

    :return: argparse.Namespace with `infile` (required Excel path) and
        optional `serotype` flag value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--infile', '-i', help="Input excel file path", required=True)
    parser.add_argument('--serotype', '-s', help="Import only SEROTYPE sheet (ONLY for TYPES not in a group)"
                                                "- see documentation for help")
    try:
        args = parser.parse_args()
    except SystemExit:
        # Narrowed from a bare `except:` which also swallowed unrelated
        # exceptions such as KeyboardInterrupt. argparse signals a parse
        # failure (or -h) by raising SystemExit; print help and exit 1.
        parser.print_help()
        sys.exit(1)
    return args
def sort_sheets(args):
    """
    Parse the input excel file and process the sheets specified in args.

    :param args: parsed command line arguments; `args.infile` is the Excel
        file in standardised format and `args.serotype`, when set, restricts
        the import to the Serotype sheet only.
    :return: dict of dataframes keyed by table name
    """
    table_dict = {"1": "Serotype", "2": "SerotypeVariants", "3": "Group", "4": "Variants"}
    out = {}
    if args.serotype:
        tables = ["1"]
    else:
        tables = ["1", "2", "3", "4"]
    try:
        for table in tables:
            # get table name for data formatting
            table_name = table_dict[table]
            sys.stdout.write(f"Reading {table_name} data.\n")
            # read in correct sheet (compensate for 0 indexing) specify datatypes for those that may be ambiguous
            # Although position is used as an int in the script, it must be specified as a float due to presence
            # of NaN rows it is impossible to set datatype = int when NaN's are present (numpy compatibility)
            df = pd.read_excel(args.infile, sheet_name=int(table) - 1,
                               dtype={"predicted_pheno": str, "subtypes": bool,
                                      "alt_vars": str, "var1": str, "serotype_1": str,
                                      "position": float},engine='openpyxl')
            # drop any empty rows
            df = df.dropna(how="all")
            # flag error if empty sheet
            if df.empty:
                sys.stderr.write(f"No data in {table_name} sheet. Please check format of file\n")
                sys.exit(1)
            # remove leading/trailing whitespaces
            df = df.apply(lambda x: x.str.strip() if x.dtype == object else x)
            out[table_name] = df
        return out
    except IOError:
        sys.stderr.write('ERROR: error occurred with reading Excel file\n')
        sys.exit(1)
    except IndexError:
        sys.stderr.write('ERROR: error occurred with reading columns check format of file\n')
        sys.exit(1)
    except KeyError:
        sys.stderr.write('ERROR: error occurred with reading column names check names and'
                         'input args\n')
        sys.exit(1)
def add_group(df, dbpath):
    """
    Checks data integrity in Group table, searches database and adds if not present.

    :param df: input dataframe (must contain a "group_name" column)
    :param dbpath: path to database folder
    """
    try:
        # drop rows with any empty cell
        df = df.dropna()
        # iterate through dataframe checking for existing data and adding if new.
        for row, column in df.iterrows():
            # NOTE(review): a fresh session is opened per row — presumably
            # deliberate to keep transactions small; confirm.
            session = session_maker(dbpath)
            query_group = searchexact(df["group_name"][row], Group, Group.group_name, session)
            # if it's found skip if not add to DB
            if query_group == ['No results found']:
                new_group = Group(group_name=df["group_name"][row])
                session.add(new_group)
                # commit session changes to database
                session.commit()
                sys.stdout.write(f"Added {df['group_name'][row]} to genogroup table.\n")
            else:
                sys.stdout.write(f"{df['group_name'][row]} already exists in genogroup table.\n")
            session.close()
    # KeyError if any headers are not as expected
    except KeyError:
        sys.stderr.write('ERROR: error occurred while checking input check format of file.\n')
        sys.exit(1)
def add_serotype(df, dbpath):
    """
    Checks data integrity in Serotype table and prepares for addition to database.
    Serotype table has one record PER INITIAL HIT SEQUENCE. This function breaks down
    input data rows to separate records for each alternative hit per phenotype
    and inputs each to Serotype table.

    :param df: input dataframe
    :param dbpath: Path to database
    """
    try:
        # drop where predicted pheno or serotype_1 is empty
        df = df.dropna(subset=["predicted_pheno", "serotype_1"])
        # remove whitespace
        df["predicted_pheno"] = df["predicted_pheno"].str.strip()
        sys.stdout.write("Adding serotypes.... \n")
        # iterate through dataframe checking for existing data and adding if new.
        for row, column in df.iterrows():
            # query for each serotype hits - from all serotype_hit columns in input file
            # (columns from index 3 onward are assumed to be hit columns — TODO confirm)
            for col_idx in range(3, df.shape[1]):
                session = session_maker(dbpath)
                # ignore nan columns
                if df.isnull().iloc[row][col_idx]:
                    session.close()
                    break
                # Check for existence of predicted phenotype in column (MUST BE FILLED)
                elif not df.iloc[row]["predicted_pheno"]:
                    sys.stderr.write(f"Predicted phenotype column CANNOT be empty -"
                                     f" data for {df.iloc[row]['serotype_1']} NOT added")
                    break
                else:
                    # Check if entered sero is in group if it is check db for existance of group
                    if df.isnull().iloc[row]["stage2_group"]:
                        grp_id = None
                    else:
                        # search database for id of group
                        group_info = session.query(Group).filter(
                            Group.group_name == df.iloc[row]["stage2_group"]).all()
                        grp_id = group_info[0].id
                    # query for exact matches for row with each serotype_hit
                    query_sero = session.query(Serotype).filter(Serotype.serotype_hit == df.iloc[row][col_idx],
                                                                Serotype.predicted_pheno == df.iloc[row][
                                                                    "predicted_pheno"],
                                                                Serotype.group_id == grp_id,
                                                                Serotype.subtype == df.iloc[row]["subtypes"]).all()
                    # if it's not found add to DB
                    if not query_sero:
                        # initialise new serotype class object
                        new_sero = Serotype(
                            predicted_pheno=df.iloc[row]["predicted_pheno"],
                            serotype_hit=df.iloc[row][col_idx],
                            subtype=df.iloc[row]["subtypes"],
                            group_id=grp_id
                        )
                        # add new serotype to database
                        session.add(new_sero)
                        # commit session changes to database
                        session.commit()
                        sys.stdout.write(f"Added {df.iloc[row][col_idx]} information to serotype table.\n")
                    else:
                        sys.stdout.write(f"{df.iloc[row][col_idx]} information already exists in serotype table.\n")
                    session.close()
    # KeyError if any headers are not as expected
    except KeyError:
        sys.stderr.write('ERROR: error occurred while checking input, check format of file.\n')
        sys.exit(1)
def add_variant(df, dbpath):
    """
    Checks data integrity in Variant sheet and prepares for addition to database.
    Adds new genes, variants and variant-group links as needed.

    :param df: input dataframe
    :param dbpath: path to database
    """
    # NOTE(review): unlike the sibling functions, this one has no
    # try/except KeyError wrapper (a stale "# try:" marker was here) —
    # malformed headers will raise instead of exiting cleanly; confirm.
    # drop where anything except position is empty
    df = df.dropna(subset=["var_type", "gene", "variant", "group_id"])
    # remove extra white space
    df["gene"] = df["gene"].str.strip()
    df["var_type"] = df["var_type"].str.strip()
    df["variant"] = df["variant"].str.strip()
    sys.stdout.write("Adding variants...\n")
    # iterate through rows
    for row, column in df.iterrows():
        session = session_maker(dbpath)
        # check if positions present:
        if df.isnull().iloc[row]["position"]:
            gene_pos = None
        # if position present convert to integer
        else:
            # convert to integer (numpy compatibility requires storage as float when NaN in column)
            gene_pos = int(df.iloc[row]["position"])
        # query for exact matches in gene table for row
        query_gene = session.query(Genes).filter(Genes.gene_name == df.iloc[row]["gene"]).all()
        # if gene not found in genes table, add to DB
        if not query_gene:
            # Insert a gene in the gene table
            new_gene = Genes(gene_name=df.iloc[row]["gene"])
            session.add(new_gene)
            sys.stdout.write(f"Adding {df.iloc[row]['gene']} to genes table\n")
            # flush session to allow retrieval of gene.id before committing.
            session.flush()
            # get gene id from DB for use later
            gene_id = new_gene.id
        else:
            # get gene_id from DB for use later
            gene_id = query_gene[0].id
            sys.stdout.write(f"Gene {df.iloc[row]['gene']} already in genes table\n")
        # query for exact matches in variants table for row
        query_var = session.query(Variants).filter(
            Variants.var_type == df.iloc[row]["var_type"],
            Variants.gene == gene_id,
            Variants.position == gene_pos,
            Variants.variant == str(df.iloc[row]["variant"]),
        ).all()
        if not query_var:
            # Insert a variant in the variants table
            new_var = Variants(var_type=df.iloc[row]["var_type"],
                               position=gene_pos,
                               variant=str(df.iloc[row]["variant"]),
                               gene=gene_id
                               )
            session.add(new_var)
            # flush session to allow retrieval of var.id before committing.
            session.flush()
            # get variant id from session
            var_id = new_var.id
        else:
            # get variant id from database
            var_id = query_var[0].id
            sys.stdout.write(f"Variant: {df.iloc[row]['var_type']} already in variants table\n")
        # Find Group ID for group name
        grp_id = session.query(Group.id).filter(
            Group.group_name == df.iloc[row]["group_id"]).all()
        grp_id = grp_id[0][0]
        if not grp_id:
            sys.stderr.write(f"Error: Check group_id for {df.iloc[row]['group_id']}"
                             "-MUST match group in Serotype import sheet or in genogroup table")
            break
        # Check VariantGroup table for existence.
        query_vargrp = session.query(VariantGroup).filter(
            VariantGroup.grp_id == grp_id, VariantGroup.var_id == var_id
        ).all()
        # if doesn't exist already insert a variant group into variant groups table
        if not query_vargrp:
            new_variantgroup = VariantGroup(
                grp_id=grp_id,
                var_id=var_id
            )
            # add new variant. commit and close session
            session.add(new_variantgroup)
            session.commit()
            session.close()
            sys.stdout.write("Variant added to variant_group table.\n")
        else:
            # commit and close session
            session.commit()
            session.close()
def add_serotypevariants(df, dbpath):
    """
    Checks data integrity in SerotypeVariant sheet and adds to database.
    Links every matching serotype id to every matching variant id.

    :param df: input dataframe
    :param dbpath: path to database
    """
    try:
        # drop where anything except position is empty
        df = df.dropna(subset=["predicted_pheno", "var_type", "gene", "variant"])
        for row, column in df.iterrows():
            session = session_maker(dbpath)
            # get gene id
            query_gene = session.query(Genes).filter(
                Genes.gene_name == df.iloc[row]["gene"],
            ).all()
            # check if positions present:
            if df.isnull().iloc[row]["position"]:
                # get variant ids that match (no position constraint)
                query_var2 = session.query(Variants).filter(
                    Variants.var_type == df.iloc[row]["var_type"],
                    Variants.gene == query_gene[0].id,
                    Variants.variant == df.iloc[row]["variant"]
                ).all()
            else:
                # convert to integer (numpy compatibility requires storage as float when NaN in column)
                gene_pos = int(df.iloc[row]["position"])
                # get variant ids that match
                query_var2 = session.query(Variants).filter(
                    Variants.var_type == df.iloc[row]["var_type"],
                    Variants.gene == query_gene[0].id,
                    Variants.position == gene_pos,
                    Variants.variant == df.iloc[row]["variant"]
                ).all()
            # get serotype ids that match
            query_sero = session.query(Serotype).filter(
                Serotype.predicted_pheno == df.iloc[row]["predicted_pheno"]).all()
            # if sero not found in serotype, warn and skip this row
            if not query_sero:
                sys.stderr.write(f"Phenotypical serotype {df.iloc[row]['predicted_pheno']} not found"
                                 " in database, check match to serotype data!\n")
                continue
            elif not query_var2:
                sys.stderr.write(f"Variant {df.iloc[row]['variant']} not found"
                                 " in database, check match to variant data!\n")
                continue
            else:
                # Iterate through sero ids and var ids checking if in serovariants table.
                for sero in query_sero:
                    for var in query_var2:
                        # query for exact matches in serotype_variants table for row
                        query_serovar = session.query(SerotypeVariants).filter(
                            SerotypeVariants.sero_id == sero.id,
                            SerotypeVariants.variant_id == var.id,
                        ).all()
                        # if doesn't exist already insert a serotypevariant into Serotype variants table
                        if not query_serovar:
                            new_serovariant = SerotypeVariants(
                                sero_id=sero.id,
                                variant_id=var.id
                            )
                            # add new variant and commit
                            session.add(new_serovariant)
                            session.commit()
                            sys.stdout.write("Variant added to serotype_variants table.\n")
                        else:
                            # commit and close session
                            session.commit()
                            session.close()
    # KeyError if any headers are not as expected
    except KeyError:
        sys.stderr.write('ERROR: Checking input data integrity - see DOCS.\n')
        sys.exit(1)
if __name__ == "__main__":
    # collect arguments from commandline
    args = parse_args()
    # change directory to PneumoKITy home
    # find root path for database
    path_parent = os.path.dirname(os.getcwd())
    db_path = os.path.join(path_parent, "ctvdb")
    # check integrity of given path
    check_db_path(db_path)
    # read in dataframes
    dfs = sort_sheets(args)
    # run functions to add info
    # if all sheets added
    if not args.serotype:
        # Database attributes must be updated in the following order.
        # Groups must be added first due to order of dependencies on primary keys in database
        add_group(dfs['Group'], db_path)
        add_serotype(dfs['Serotype'], db_path)
        add_variant(dfs['Variants'], db_path)
        add_serotypevariants(dfs['SerotypeVariants'], db_path)
    # if only serotype sheet added.
    else:
        # Bug fix: the call was `add_serotype(dfs['Serotype'])`, missing the
        # required `dbpath` argument (TypeError at runtime); the unused
        # `sdf =` assignment is dropped (add_serotype returns None).
        add_serotype(dfs['Serotype'], db_path)
    sys.stdout.write("Database updated\n")
import torch
def get_torch_device():
    """Return the CUDA device when available, otherwise the CPU device.

    Prints the detected GPU name and count when CUDA is usable.
    """
    if not torch.cuda.is_available():
        return torch.device("cpu")
    device_name = torch.cuda.get_device_name()
    n_gpu = torch.cuda.device_count()
    print(f"Found device: {device_name}, n_gpu: {n_gpu}")
    return torch.device("cuda")


# Module-level default device, resolved once at import time.
device = get_torch_device()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines functions common to coalesced feature column files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import collections
import copy
import contextlib
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework.sparse_tensor import SparseTensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.util import nest
class CoalescedScopeBase(object):
    """Base class for scopes that coalesce several feature columns into
    combined columns (e.g. sharing one embedding variable).

    Subclasses must implement `allowed_column_types()` and `build()`.
    Columns are registered via `add_column()`; after `build()` populates
    `self._coalesced_map`, dense tensors can be fetched per original column.
    """

    def __init__(self, name=None):
        # original column name -> column object
        self._columns = dict()
        # original column name -> coalesced column (populated by build())
        self._coalesced_map = dict()
        # original column name -> computed dense tensor (memoized outputs)
        self._output_map = dict()
        self._built = False
        if name is None:
            name = 'CoalescedScopeBase'
        self._name = name
        # fully-qualified names already handed out by get_name()
        self._name_set = set()

    @property
    def name(self):
        return self._name

    def get_name(self):
        """Return a variant of `self._name` unique within the current
        variable scope, suffixing `_<index>` on collisions."""
        scope = variable_scope.get_variable_scope()
        index = 0
        while True:
            name = self._name if index == 0 else '{}_{}'.format(self._name, index)
            real_name = (scope.name + '/' + name) if scope.name else name
            if real_name not in self._name_set:
                self._name_set.add(real_name)
                # NOTE: returns the short name; only the scope-qualified
                # name is recorded for uniqueness tracking.
                return name
            index += 1

    def allowed_column_types(self):
        """Return the tuple of column types this scope may coalesce."""
        raise NotImplementedError("must be implemented in descendants")

    def add_column(self, column):
        """Register `column` for coalescing.

        Raises ValueError if the column's type is not allowed or a column
        with the same name was already added.
        """
        if not isinstance(column, self.allowed_column_types()):
            # Bug fix: this line previously called `self.allowd_column_types()`
            # (typo), so the error path raised AttributeError instead of the
            # intended ValueError.
            raise ValueError('{} is not allowd for coalescing, must be {}'.format(
                column, self.allowed_column_types()))
        if column.name in self._columns:
            raise ValueError('column {} already exists: {}'.format(column.name, column))
        self._columns[column.name] = column

    def build(self):
        """Create the coalesced columns from the registered ones."""
        raise NotImplementedError("must be implemented in descendants")

    def get_coalesced_column_by_column(self, column):
        """Return the coalesced column that contains `column`."""
        if not self._built:
            raise RuntimeError('CoalescedScope not built yet, can only use result '
                               'outside of scope definition')
        name = column.name
        if name not in self._columns or name not in self._coalesced_map:
            raise ValueError('column {} not coalesced in any scope'.format(name))
        return self._coalesced_map[name]

    def get_dense_tensor_by_column(
            self, column, inputs, weight_collections=None, trainable=None):
        """Return (and memoize) the dense tensor for `column` (v1 API)."""
        name = column.name
        if name in self._output_map:
            return self._output_map[name]
        coalesced_column = self.get_coalesced_column_by_column(column)
        embeddings = coalesced_column._get_dense_tensor(
            inputs, weight_collections, trainable)
        # Cache every sibling column's embedding produced by the same
        # coalesced lookup so later calls are free.
        for column, embedding in zip(coalesced_column.columns, embeddings):
            self._output_map[column.name] = embedding
        return self._output_map[name]

    def get_dense_tensor_by_column_v2(
            self, column, transformation_cache, state_manager):
        """Return (and memoize) the dense tensor for `column` (v2 API)."""
        name = column.name
        if name in self._output_map:
            return self._output_map[name]
        coalesced_column = self.get_coalesced_column_by_column(column)
        embeddings = coalesced_column.get_dense_tensor(
            transformation_cache, state_manager)
        for column, embedding in zip(coalesced_column.columns, embeddings):
            self._output_map[column.name] = embedding
        return self._output_map[name]

    def create_state_by_column(self, column):
        """Ensure embedding weights exist for `column`'s coalesced column."""
        coalesced_column = self.get_coalesced_column_by_column(column)
        coalesced_column.get_or_create_embedding_weights()

    def get_coalesced_name_by_column(self, column):
        """Return the name of the coalesced column containing `column`."""
        coalesced_column = self.get_coalesced_column_by_column(column)
        return coalesced_column.name
class EmbeddingAttributes(object):
    """Read-only bundle of the hyper-parameters describing one embedding."""

    def __init__(self,
                 dimension,
                 dtype,
                 initializer,
                 combiner,
                 trainable,
                 hash_combiner='',
                 bucket_size=None):
        self._dimension = dimension
        self._dtype = dtype
        self._initializer = initializer
        self._combiner = combiner
        self._trainable = trainable
        self._hash_combiner = hash_combiner
        self._bucket_size = bucket_size

    # Each stored value is exposed through a read-only property.
    dimension = property(lambda self: self._dimension)
    dtype = property(lambda self: self._dtype)
    initializer = property(lambda self: self._initializer)
    combiner = property(lambda self: self._combiner)
    trainable = property(lambda self: self._trainable)
    hash_combiner = property(lambda self: self._hash_combiner)
    bucket_size = property(lambda self: self._bucket_size)
class CoalescedSaveSliceInfo(object):
    """Slice bookkeeping for a variable coalesced from several columns."""

    def __init__(self,
                 full_name,
                 full_shape,
                 var_offset,
                 var_shape,
                 var_full_name,
                 save_slices,
                 tensor_slices):
        self._full_name = full_name
        self._full_shape = full_shape
        self._var_offset = var_offset
        self._var_shape = var_shape
        self._var_full_name = var_full_name
        self._save_slices = save_slices
        self._tensor_slices = tensor_slices

    # Read-only views of the stored fields.
    full_name = property(lambda self: self._full_name)
    full_shape = property(lambda self: self._full_shape)
    var_offset = property(lambda self: self._var_offset)
    var_shape = property(lambda self: self._var_shape)
    var_full_name = property(lambda self: self._var_full_name)
    save_slices = property(lambda self: self._save_slices)
    tensor_slices = property(lambda self: self._tensor_slices)

    def slot_save_slice_info(self, slot_name):
        """Return a deep copy of this info whose names carry a '/<slot_name>' suffix."""
        suffix = '/' + slot_name
        copied = CoalescedSaveSliceInfo(self._full_name + suffix,
                                        self.full_shape,
                                        self.var_offset,
                                        self.var_shape,
                                        self._var_full_name + suffix,
                                        copy.deepcopy(self._save_slices),
                                        copy.deepcopy(self._tensor_slices))
        for slice_info in copied.save_slices:
            slice_info.full_name += suffix
            slice_info.var_full_name += suffix
        return copied

    def to_proto(self):
        """Return an (empty) `SaveSliceInfoDef` protocol buffer for this info."""
        return variable_pb2.SaveSliceInfoDef()
# Module-level registry mapping feature column -> EmbeddingAttributes.
_embedding_signatures = collections.defaultdict(dict)


def get_embedding_signature():
    """Return the module-level column-signature registry."""
    return _embedding_signatures
def add_embedding_signature(column, dimension, combiner, initializer,
                            trainable, bucket_size, dtype=dtypes.float32,
                            hash_combiner=''):
    """Register the embedding hyper-parameters for *column*.

    Raises:
      ValueError: if *column* was already registered.
    """
    if column in _embedding_signatures:
        raise ValueError('EmbeddingColumn already exists: {}'.format(column))
    _embedding_signatures[column] = EmbeddingAttributes(
        dimension, dtype, initializer, combiner, trainable, hash_combiner,
        bucket_size)
def make_cluster_signature(column, hashtable_column=False):
    """Build a JSON signature of the structural embedding attributes.

    Columns may be coalesced together only when these signatures match.

    Args:
      column: a registered feature column (looked up in the signature
        registry) or, when *hashtable_column* is True, an object that itself
        carries the attributes.
      hashtable_column: whether *column* directly exposes the attributes.

    Raises:
      ValueError: if the column was never registered.
    """
    if hashtable_column:
        attr = column
    else:
        if column not in _embedding_signatures:
            # typo fix: message previously read 'signautre'
            raise ValueError('signature not found for column: {}'.format(column))
        attr = _embedding_signatures[column]
    signature = {
        'dimension': str(tensor_shape.TensorShape(attr.dimension)),
        'dtype': dtypes.as_dtype(attr.dtype).name,
        'initializer': type(attr.initializer).__name__,
        'initializer_config': attr.initializer.get_config(),
    }
    return json.dumps(signature, sort_keys=True)
def _make_runtime_signature(column, hashtable_column=False):
    """Build a JSON signature of the runtime embedding attributes
    (combiner, trainability, and lookup hooks for hashtable columns).

    Args:
      column: a registered feature column or, when *hashtable_column* is
        True, an object that itself carries the attributes.
      hashtable_column: whether *column* directly exposes the attributes.

    Raises:
      ValueError: if the column was never registered.
    """
    if hashtable_column:
        attr = column
    else:
        if column not in _embedding_signatures:
            # typo fix: message previously read 'signautre'
            raise ValueError('signature not found for column: {}'.format(column))
        attr = _embedding_signatures[column]
    signature = {
        'combiner': attr.combiner,
        'trainable': attr.trainable,
    }
    if hashtable_column:
        signature['filter_hook'] = [type(hook).__name__ for hook in attr.embedding_lookup_hooks]
        signature['filter_hook_config'] = [hook.get_config() for hook in attr.embedding_lookup_hooks]
    else:
        signature['hash_combiner'] = attr.hash_combiner
    return json.dumps(signature, sort_keys=True)
def get_signature_attributes(column):
    """Return the registered EmbeddingAttributes for *column*.

    Raises:
      ValueError: if the column was never registered.
    """
    if column not in _embedding_signatures:
        # typo fix: message previously read 'signautre'
        raise ValueError('signature not found for column: {}'.format(column))
    return _embedding_signatures[column]
def check_coalesced_columns_compatible(columns, hashtable_column=False):
    """Verify every column carries the same cluster signature as the first.

    Raises:
      ValueError: at the first column whose signature differs from column 0's.
    """
    base = None
    for index, column in enumerate(columns):
        signature = make_cluster_signature(column, hashtable_column)
        if base is None:
            base = signature
        elif signature != base:
            raise ValueError('signature of column 0 not match with column %d' % index)
def deduplicate_shared_embedding(columns):
    """Drop columns that share an embedding with an earlier column.

    Columns exposing ``embedding_name`` are keyed by it (shared embeddings);
    all others are keyed by their own ``name``.

    Returns:
      (unique_columns, indices_map): the first column seen for each key, and
      a map from key to the index of its representative in unique_columns.

    Note: the original implementation carried a no-op ``else`` branch
    (``indices_map[name] = indices_map[name]``); it has been removed.
    """
    unique_columns = []
    indices_map = dict()
    for column in columns:
        if hasattr(column, 'embedding_name'):
            key = column.embedding_name
        else:
            key = column.name
        if key not in indices_map:
            indices_map[key] = len(unique_columns)
            unique_columns.append(column)
    return unique_columns, indices_map
def build_slice_info(columns, partitioner):
    """Compute partition layouts for columns coalesced into one variable.

    Args:
      columns: feature columns previously registered via
        `add_embedding_signature` (bucket size/dimension are looked up there).
      partitioner: a variable partitioner; called once per column to decide
        how many shards that column's rows are split into.

    Returns:
      (save_slice_infos, tensor_slices, bucket_size_sum):
        save_slice_infos: per column, a list of `Variable.SaveSliceInfo`
          (one entry per shard).
        tensor_slices: per shard, the (row, column) slice of each column
          inside the concatenated shard tensor.
        bucket_size_sum: total number of rows across all columns.

    Raises:
      ValueError: if the partitioner splits different columns into a
        different number of shards.
    """
    global _embedding_signatures
    bucket_size_sum = 0
    # calculate slice length for each column
    parts_list = []
    start_index = 0
    save_slice_infos = []
    for c in columns:
        attr = _embedding_signatures[c]
        bucket_size = attr.bucket_size
        dimension = attr.dimension
        bucket_size_sum += bucket_size
        dtype = attr.dtype
        size = partitioner(shape=tensor_shape.as_shape(bucket_size), dtype=dtype)[0]
        step = bucket_size // size
        extra = bucket_size % size
        parts = [0] * size
        # Distribute the `extra` remainder rows round-robin, starting where
        # the previous column stopped, so shards stay balanced overall.
        for i in range(size):
            parts[(start_index + i) % size] = step + 1 if i < extra else step
        parts_list.append(parts)
        start_index = (extra + start_index) % size
        offset = 0
        full_shape = [bucket_size, dimension]
        slice_list = []
        # One SaveSliceInfo per shard, tracking the row offset of each part.
        for i in range(len(parts)):
            var_offset = [offset, 0]
            var_shape = [parts[i], dimension]
            slice_list.append(variables.Variable.SaveSliceInfo(
                '', full_shape, var_offset, var_shape, var_full_name=''))
            offset += parts[i]
        save_slice_infos.append(slice_list)
    # check all columns have same number of partitions
    size = None
    for i, infos in enumerate(save_slice_infos):
        if i == 0:
            size = len(infos)
        elif size != len(infos):
            raise ValueError(
                'Coalesced columns should be partitioned to same size,'
                'but column 0 and column {} not equal: {} vs {}'.format(
                    i, size, len(infos)))
    # calculate tensor slices
    tensor_slices = []
    for i in range(size):
        offset = 0
        tensor_slice_list = []
        for j in range(len(parts_list)):
            begin = offset
            offset += parts_list[j][i]
            tensor_slice_list.append((slice(begin, offset), slice(None)))
        tensor_slices.append(tensor_slice_list)
    return save_slice_infos, tensor_slices, bucket_size_sum
def _merge_sparse_tensor(tensors, tensor_rank=2):
    """Concatenate SparseTensors along axis 0 into one SparseTensor.

    Args:
      tensors: list of `SparseTensor`s, each of rank `tensor_rank`.
      tensor_rank: rank of the inputs; trailing dims are merged with max.

    Returns:
      (merged, row_counts): the stacked SparseTensor and the per-input
      leading-dimension sizes (needed later to split results back apart).

    Raises:
      ValueError: if any input is not a SparseTensor.
    """
    if not all(isinstance(t, SparseTensor) for t in tensors):
        raise ValueError("Expected inputs of SparseTensor")
    values = array_ops.concat([t.values for t in tensors], axis=0)
    row_counts = [t.dense_shape[0] for t in tensors]
    # Leading dim is the sum of rows; every other dim is the max over inputs.
    dense_shape = [math_ops.reduce_sum(row_counts)]
    for i in range(1, tensor_rank):
        column_counts = [t.dense_shape[i] for t in tensors]
        dense_shape.append(math_ops.reduce_max(column_counts))
    # Shift each input's row indices by the number of rows stacked before it.
    row_offset = array_ops.split(
        math_ops.cumsum(row_counts, exclusive=True), len(tensors))
    indices = []
    for i, t in enumerate(tensors):
        offset = array_ops.concat(
            [row_offset[i],
             math_ops.to_int64(array_ops.fill([tensor_rank - 1], 0))], axis=0)
        indices.append(t.indices + offset)
    result = SparseTensor(indices=array_ops.concat(indices, axis=0),
                          values=values,
                          dense_shape=dense_shape)
    return result, row_counts
def _safe_merge_sparse_tensor(tensor_pairs, format_rank=2):
    """Reshape (id, weight) SparseTensor pairs to `format_rank` and merge them.

    Each id tensor has its leading dimensions collapsed into one so that all
    inputs share rank `format_rank`, then they are concatenated via
    `_merge_sparse_tensor`.

    Args:
      tensor_pairs: list of (id_tensor, weight_tensor) pairs; weight_tensor
        may be None.
      format_rank: target rank for the merged tensors.

    Returns:
      (merged_ids, merged_weights, row_counts, origin_shape_list); the last
      two allow callers to split and reshape results back per input.
      merged_weights is None when no input carried weights.
    """
    if not tensor_pairs:
        return tensor_pairs
    origin_shape_list = []
    format_id_tensors = []
    weight_values = []
    for id_tensor, weight_tensor in tensor_pairs:
        original_shape = id_tensor.dense_shape
        original_rank_dim = id_tensor.dense_shape.get_shape()[0]
        # Use the static rank when known, else a dynamic size op.
        original_rank = (
            array_ops.size(original_shape)
            if original_rank_dim.value is None
            else original_rank_dim.value)
        diff_rank = original_rank - format_rank
        # Collapse the leading (diff_rank + 1) dims into a single dimension.
        id_tensor = sparse_ops.sparse_reshape(
            id_tensor,
            array_ops.concat([[
                math_ops.reduce_prod(
                    array_ops.slice(original_shape, [0], [diff_rank + 1]))],
                original_shape[-format_rank + 1:]], 0))
        format_id_tensors.append(id_tensor)
        if weight_tensor is not None:
            weight_values.append(weight_tensor.values)
        else:
            weight_values.append(None)
        origin_shape_list.append(math_ops.cast(original_shape, dtypes.int32))
    merged_ids, row_counts = _merge_sparse_tensor(format_id_tensors, tensor_rank=format_rank)
    if all([w is None for w in weight_values]):
        merged_weights = None
    else:
        # NOTE(review): this concat assumes either all or none of the inputs
        # carry weights; a mix would leave None in the list — confirm callers
        # (coalesce_sparse_data fills missing weights before calling here).
        weights_values = array_ops.concat(weight_values,
                                          axis=0)
        merged_weights = SparseTensor(indices=merged_ids.indices,
                                      values=weights_values,
                                      dense_shape=merged_ids.dense_shape)
    return merged_ids, merged_weights, row_counts, origin_shape_list
def coalesce_sparse_data(ids_list, weights_list, weight_type, format_rank=2):
    """Merge per-column sparse ids (and optional weights) into single tensors.

    If any column carries weights, the columns without weights are padded
    with all-ones weights of `weight_type` so every column is weighted
    consistently before merging.

    Returns:
      (merged_ids, merged_weights, size_list, origin_shape_list) as produced
      by `_safe_merge_sparse_tensor`.
    """
    if all([w is None for w in weights_list]):
        weights_list = [None] * len(ids_list)
    else:
        for i, weights in enumerate(weights_list):
            if weights_list[i] is None:
                # Fill the missing weights with ones matching that column's ids.
                values = array_ops.ones_like(ids_list[i].values, dtype=weight_type)
                weights_list[i] = SparseTensor(indices=ids_list[i].indices,
                                               values=values,
                                               dense_shape=ids_list[i].dense_shape)
    ids_pair = [(ids, weights) for ids, weights in zip(ids_list, weights_list)]
    merged_ids, merged_weights, size_list, origin_shape_list = _safe_merge_sparse_tensor(ids_pair, format_rank=format_rank)
    return (merged_ids, merged_weights, size_list, origin_shape_list)
def add_to_collections(var, weight_collections):
    """Adds a var to the list of weight_collections provided.

    Handles the case for partitioned and non-partitioned variables.

    Args:
      var: A variable or Partitioned Variable.
      weight_collections: List of collections to add variable to.
    """
    for weight_collection in weight_collections:
        # The layer self.add_variable call already adds it to GLOBAL_VARIABLES.
        if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:
            continue
        if isinstance(var, variables.PartitionedVariable):
            # Add every shard of a partitioned variable individually.
            for constituent_var in list(var):
                ops.add_to_collection(weight_collection, constituent_var)
        else:
            ops.add_to_collection(weight_collection, var)
def check_initializer_compatible(i1, i2):
    """Return True when i1 and i2 are the same initializer type with equal config.

    Raises:
      ValueError: if either argument is not an `init_ops.Initializer`.
    """
    for label, candidate in (('i1', i1), ('i2', i2)):
        if not isinstance(candidate, init_ops.Initializer):
            raise ValueError('%s must be an Initializer object' % label)
    return (i1.__class__ == i2.__class__
            and i1.get_config() == i2.get_config())
def check_share_compatible(c1, c2):
    """Return None when c1 and c2 may share a HashTable, else an error string."""
    template = 'Cannot share HashTable with incompatible {}: {} vs {}'
    # Field checks run in the original order so the first difference wins.
    for field in ('dimension', 'dtype'):
        left, right = getattr(c1, field), getattr(c2, field)
        if left != right:
            return template.format(field, left, right)
    if not check_initializer_compatible(c1.initializer, c2.initializer):
        return 'Cannot share HashTable with incompatible initializer'
    if c1.trainable != c2.trainable:
        return template.format('trainable', c1.trainable, c2.trainable)
    return None
@contextlib.contextmanager
def merged_embedding_lookup_hook(hooks):
    """Context manager that enters every hook in *hooks* (which may be None)."""
    hooks = tuple() if hooks is None else hooks
    if hasattr(contextlib, "nested"):
        # Python 2 path: contextlib.nested handles the whole sequence.
        with contextlib.nested(*hooks):
            yield
    else:
        # Python 3 path: push each hook onto an ExitStack.
        with contextlib.ExitStack() as stack:
            for hook in hooks:
                stack.enter_context(hook)
            yield
|
"""empty message
Revision ID: 4f6c3b920cba
Revises: 7632c2a5255f
Create Date: 2016-02-01 01:30:18.381681
"""
# revision identifiers, used by Alembic.
revision = '4f6c3b920cba'
down_revision = '7632c2a5255f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this migration: intentionally a no-op (empty revision)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this migration: intentionally a no-op (empty revision)."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
|
# -*- coding:utf-8 -*-
from lxml import etree
import urllib
import os
import requests
urllib2=urllib.request
def getHtml(url):
    """Fetch *url* over HTTP and return the response parsed as an lxml tree."""
    page_bytes = requests.get(url).content
    return etree.HTML(page_bytes)
def getContent(htm, xpathStr):
    """Run the XPath query *xpathStr* against the parsed tree *htm*."""
    return htm.xpath(xpathStr)
def getFlv(cons, title, folder):
    """Download the video at URL *cons* into youku/<folder>/<title>.flv.

    Args:
      cons: direct download URL of the stream.
      title: base name (without extension) for the saved file.
      folder: sub-directory under 'youku' to store the file in.
    """
    target_dir = os.path.join(os.path.dirname(__file__), 'youku', folder)
    # BUG FIX: os.mkdir failed when the parent 'youku' directory did not
    # exist yet; makedirs creates the whole chain and tolerates reruns.
    os.makedirs(target_dir, exist_ok=True)
    file_path = os.path.join(target_dir, '%s.flv' % title)
    r = requests.get(cons)
    with open(file_path, "wb") as code:
        code.write(r.content)
# = = = = = = #
# Example video URL; flvcd.com is used as a third-party parser to turn it
# into downloadable stream links.
videourl = 'http://vali-dns.cp31.ott.cibntv.net/677291A877B4371B0BE6B26B1/03000A0E0459E2A50507CC06C0F0FCD8579A8C-BBC1-7677-178F-9B250EB2777D.mp4?ccode=0502&duration=391&expire=18000&psid=a1c4a14d5cea8e5c246067056079be79&ups_client_netip=7a35d136&ups_ts=1546925450&ups_userid=&utid=0DGiFIKtYmYCAXo10TY%2BEq%2FS&vid=XMzA4NjI3ODUxMg&vkey=A473cfcc2618c0a28365fcb325b90e207&sp=180'
format = 'high'  # 'high' 'normal' 'super'  (NOTE(review): shadows builtin `format`)
url = 'http://www.flvcd.com/parse.php?kw=' + urllib.parse.quote(videourl) + '&format=' + format
print(url)
# Fetch the parser page with browser-like headers (Referer/User-Agent set).
req = urllib2.Request(url)
req.add_header('Referer', 'http://www.flvcd.com/')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0')
res = urllib2.urlopen(req)
html = res.read()
# print html
selector = etree.HTML(html)
# get flv title
xp_title = '//*[@id="subtitle"]'
htm0 = getHtml(videourl)
cons = getContent(htm0, xp_title)
title = cons[0].text
print(title)
# get flv href
xp = '//*[@class="mn STYLE4"]//@href'
content = selector.xpath(xp)
print(content)
# Download each matching link; files are numbered 0..n inside a directory
# named after the video title (getFlv's third arg is the folder name).
x = 0
for con in content:
    if 'http://k.youku.com' in con:
        print(con)
        getFlv(con, '%s' % x, title)
        # urllib.urlretrieve(con, getPath('%s' % x, title))# , callbackfunc)
        # NOTE(review): assumes the counter advances only for downloaded links.
        x += 1
'''
No 2. Buatlah fungsi tanpa pengembalian nilai, yaitu fungsi segitigabintang.
Misal, jika dipanggil dg segitigabintang(4), keluarannya :
*
**
***
****
'''
def segitigabintang(baris):
    """Print a star triangle: row i (1-based) contains i asterisks."""
    for row in range(1, baris + 1):
        print('*' * row)
|
import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import common
import inputparser
def write_snvs(variants, sampnames, garbage, snv_fn, normal_vaf=0.0):
    """Write LICHeE-format SNV rows to *snv_fn*, skipping garbage variants.

    A synthetic 'Normal' sample with VAF *normal_vaf* is prepended to every
    row. Returns a map of variant id -> 1-based row index.
    """
    sampnames = ['Normal'] + sampnames
    snv_indices = {}
    with open(snv_fn, 'w') as F:
        print('#chr', 'position', 'description', *sampnames, sep='\t', file=F)
        idx = 1
        for vid in common.sort_vids(variants.keys()):
            if vid in garbage:
                continue
            vafs = [normal_vaf] + (variants[vid]['var_reads'] / variants[vid]['total_reads']).tolist()
            print('1', idx, vid, *vafs, sep='\t', file=F)
            snv_indices[vid] = idx
            idx += 1
    return snv_indices
def extract_mat(variants, key):
    """Stack each variant's *key* entry into a NumPy matrix (one row per variant)."""
    return np.array([variant[key] for variant in variants])
def write_clusters(variants, clusters, snv_indices, cluster_fn, normal_vaf=0.0):
    """Write LICHeE cluster rows (mask, pooled VAFs, member SNV indices).

    Each cluster's reads are pooled across its member variants before the
    VAF is computed; a synthetic normal-sample VAF is prepended.
    """
    rows = []
    for cluster in clusters:
        members = [variants[vid] for vid in cluster]
        var_reads = np.sum(extract_mat(members, 'var_reads'), axis=0)
        total_reads = np.sum(extract_mat(members, 'total_reads'), axis=0)
        cluster_vaf = [normal_vaf] + (var_reads / total_reads).tolist()
        # Sample mask: '0' for the synthetic normal, '1' for every tumor sample.
        sampmask = '0' + '1' * (len(cluster_vaf) - 1)
        member_idxs = [str(snv_indices[vid]) for vid in common.sort_vids(cluster)]
        rows.append([sampmask] + cluster_vaf + [','.join(member_idxs)])
    with open(cluster_fn, 'w') as F:
        for row in rows:
            print(*row, sep='\t', file=F)
def main():
    """Convert Pairtree-style SSM/params inputs into LICHeE input files."""
    parser = argparse.ArgumentParser(
        description='LOL HI THERE',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--uniform-proposal', action='store_true')
    parser.add_argument('ssm_fn')
    parser.add_argument('params_fn')
    parser.add_argument('lichee_snv_fn')
    parser.add_argument('lichee_cluster_fn')
    args = parser.parse_args()

    variants = inputparser.load_ssms(args.ssm_fn)
    params = inputparser.load_params(args.params_fn)

    snv_indices = write_snvs(variants, params['samples'], set(params['garbage']),
                             args.lichee_snv_fn)
    write_clusters(variants, params['clusters'], snv_indices, args.lichee_cluster_fn)


if __name__ == '__main__':
    main()
|
import os
import teek
try:
# examples/soup.py does bs4.BeautifulSoup(html_string, 'lxml')
import bs4 # noqa
import lxml # noqa
soup_py_can_run = True
except ImportError:
soup_py_can_run = False
EXAMPLES_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'examples')
# magic ftw
# TODO: this doesn't work with pytest-xdist and pythons that don't have
# ordered dict, i have no idea why and i don't know how to fix it
def _create_test_function(filename):
    """Register a module-level pytest test that executes examples/<filename>.

    The generated test monkeypatches teek.run so the example's event loop
    does not block, then executes the example's source with exec().
    """
    if filename == 'soup.py' and not soup_py_can_run:
        return
    with open(os.path.join(EXAMPLES_DIR, filename), 'r') as file:
        code = file.read()

    def func(monkeypatch, handy_callback):
        @handy_callback
        def fake_run():
            pass

        with monkeypatch.context() as monkey:
            monkey.setattr(teek, 'run', fake_run)
            # __file__ is provided so examples that locate resources
            # relative to themselves keep working.
            exec(code, {'__file__': os.path.join(EXAMPLES_DIR, filename)})
        assert fake_run.ran_once()

        # make sure that nothing breaks if the real .run() is called
        teek.update()
        teek.after_idle(teek.quit)
        teek.run()

    # Expose the closure under a pytest-discoverable test_* name.
    func.__name__ = func.__qualname__ = 'test_' + filename.replace('.', '_')
    globals()[func.__name__] = func
# Generate one test per example script (private/underscore files skipped).
for filename in sorted(os.listdir(EXAMPLES_DIR)):
    if filename.endswith('.py') and not filename.startswith('_'):
        _create_test_function(filename)
|
# UI font definitions: a shared family name plus (family, size) tuples
# consumed by the various widgets.
font = "Josefin Sans Light"
TITLE_FONT = (font, 35)
HEADING_FONT = (font, 30)
SIDEBAR_STAT_FONT = (font, 30)
SIDEBAR_LABEL_FONT = (font, 20)
LOADING_FONT = (font, 17)
SEARCHBAR_FONT = (font, 18)
COUNTRIES_FONT = (font, 16)
COUNTRY_NOT_FOUND_FONT = (font, 20)
#LARGE_FONT = (font, 20)
#SUBTITLE_FONT = (font, 18)
#NORMAL_FONT = (font, 16)
#TITLE_FONT = (font, 25)
|
'''It is a modified version of the official implementation of
"Scale-steerable filters for the locally-scale invariant convolutional neural network"
Paper: https://arxiv.org/pdf/1906.03861.pdf
Code: https://github.com/rghosh92/SS-CNN
MIT License
Copyright (c) 2020 Ivan Sosnovik, Michał Szmaja
'''
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
from .impl.scale_steerable import *
class MNIST_SS(nn.Module):
    """Scale-steerable CNN for MNIST: three ScaleConv_steering layers with
    pooling/batch-norm, followed by 1x1-conv fully-connected layers."""

    def __init__(self, pool_size=4, ker_size_range=np.arange(7, 19, 2)):
        super().__init__()
        kernel_sizes = [11, 11, 11]
        # Same-size padding for odd kernels: (k - 1) / 2.
        pads = (np.array(kernel_sizes) - 1) / 2
        pads = pads.astype(int)
        lays = [30, 60, 90]  # channel widths of the three steerable layers
        self.conv1 = ScaleConv_steering(1, lays[0], [kernel_sizes[0], kernel_sizes[0]], 1,
                                        padding=pads[0], sigma_phi_range=[np.pi / 16],
                                        k_range=[0.5, 1, 2], ker_size_range=ker_size_range,
                                        phi_range=np.linspace(0, np.pi, 9),
                                        phase_range=[-np.pi / 4],
                                        mode=1)
        self.conv2 = ScaleConv_steering(lays[0], lays[1], [kernel_sizes[1], kernel_sizes[1]], 1, padding=pads[1],
                                        k_range=[0.5, 1, 2], sigma_phi_range=[np.pi / 16],
                                        ker_size_range=ker_size_range,
                                        phi_range=np.linspace(0, np.pi, 9),
                                        phase_range=[-np.pi / 4],
                                        mode=1, drop_rate=2)
        self.conv3 = ScaleConv_steering(lays[1], lays[2], [kernel_sizes[2], kernel_sizes[2]], 1, padding=pads[2],
                                        k_range=[0.5, 1, 2], sigma_phi_range=[np.pi / 16],
                                        phase_range=[-np.pi / 4],
                                        phi_range=np.linspace(0, np.pi, 9),
                                        ker_size_range=ker_size_range,
                                        mode=1, drop_rate=4)
        self.pool1 = nn.MaxPool2d(2)
        self.bn1 = nn.BatchNorm2d(lays[0])
        self.pool2 = nn.MaxPool2d(2)
        self.bn2 = nn.BatchNorm2d(lays[1])
        self.pool3 = nn.MaxPool2d(pool_size, padding=2)
        # NOTE(review): bn3 is created but never used in forward() — only
        # bn3_mag is applied; confirm whether bn3 is intentionally unused.
        self.bn3 = nn.BatchNorm2d(lays[2])
        self.bn3_mag = nn.BatchNorm2d(lays[2])
        # fc1 expects lays[2] * 4 inputs — presumably a 2x2 spatial map after
        # pool3; TODO confirm for other input resolutions.
        self.fc1 = nn.Conv2d(lays[2] * 4, 256, 1)
        self.fc1bn = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.7)
        self.fc2 = nn.Conv2d(256, 10, 1)  # FC2

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.conv3(x)
        xm = self.pool3(x)
        xm = self.bn3_mag(xm)
        # Flatten to (N, C*H*W, 1, 1) so 1x1 convs act as fully-connected layers.
        xm = xm.view([xm.shape[0], xm.shape[1] * xm.shape[2] * xm.shape[3], 1, 1])
        xm = self.fc1(xm)
        xm = self.relu(self.fc1bn(xm))
        xm = self.dropout(xm)
        xm = self.fc2(xm)
        # Squeeze the trailing 1x1 spatial dims -> (N, num_classes).
        xm = xm.view(xm.size()[0], xm.size()[1])
        return xm
def mnist_ss_28(**kwargs):
    """MNIST_SS sized for 28x28 inputs; extra kwargs are accepted and ignored."""
    return MNIST_SS(pool_size=4, ker_size_range=np.arange(5, 15, 2))
def mnist_ss_56(**kwargs):
    """2x upsample followed by MNIST_SS sized for 56x56 inputs; kwargs ignored."""
    return nn.Sequential(nn.Upsample(scale_factor=2), MNIST_SS(pool_size=8, ker_size_range=np.arange(7, 19, 2)))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-02 21:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.5: adds the EnrolledScansEntranceStep
    # model as a concrete subclass of entrance.AbstractEntranceStep.

    dependencies = [
        ('entrance', '0047_auto_20170515_1434'),
        ('enrolled_scans', '0006_auto_20170603_0205'),
    ]

    operations = [
        migrations.CreateModel(
            name='EnrolledScansEntranceStep',
            fields=[
                ('abstractentrancestep_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='entrance.AbstractEntranceStep')),
                ('text_before_start_date', models.TextField(blank=True, help_text='Текст, который показывается до даты начала заполнения шага. Поддерживается Markdown')),
                ('text_after_finish_date_if_passed', models.TextField(blank=True, help_text='Текст, который показывается после даты окончания заполнения, если шаг выполнен. Поддерживается Markdown')),
                ('text_after_finish_date_if_not_passed', models.TextField(blank=True, help_text='Текст, который показывается после даты окончания заполнения, если шаг не выполнен. Поддерживается Markdown')),
                ('text_waiting_for_other_step', models.TextField(blank=True, help_text='Текст, который показывается, когда не пройден один из предыдущих шагов. Поддерживается Markdown')),
                ('text_step_is_not_passed', models.TextField(blank=True, help_text='Текст, который показывается, когда шаг ещё не пройден. Поддерживается Markdown')),
                ('text_step_is_passed', models.TextField(blank=True, help_text='Текст, который показывается, когда шаг пройден пользователем. Поддерживается Markdown')),
            ],
            options={
                'abstract': False,
            },
            bases=('entrance.abstractentrancestep', models.Model),
        ),
    ]
|
# Demonstration of variable assignment and rebinding.
a = 1
b = 1
c = a  # c receives the value of a (1), not a live reference to a
a = 2  # rebinding a does not change c
b = b + 3  # b is now 4
c = 'строка'  # a name may be rebound to a value of a different type
print(a, b, c)  # -> 2 4 строка
|
import os
# Read the puzzle input (one binary number per line) from the input.txt
# file located next to this script.
input_path = os.path.join(os.path.dirname(__file__), "input.txt")
with open(input_path) as f:
    data = f.read()
lines = data.splitlines()
def solve(binary_nums: list[str]) -> int:
    """AoC 2021 day 3 part 2: oxygen-generator rating times CO2-scrubber rating.

    Both ratings are found by repeatedly filtering on the most common (O2)
    or least common (CO2) bit at each position, ties keeping '1' for O2 and
    '0' for CO2, until one candidate remains.
    """
    def _filter(candidates, most_common):
        pos = 0
        while len(candidates) > 1:
            ones = sum(int(num[pos]) for num in candidates)
            majority = ones >= len(candidates) / 2
            if most_common:
                keep = "1" if majority else "0"
            else:
                keep = "0" if majority else "1"
            candidates = [num for num in candidates if num[pos] == keep]
            pos += 1
        return int(candidates[0], 2)

    return _filter(binary_nums, True) * _filter(binary_nums, False)
# Sanity check against the worked example from the puzzle statement
# (oxygen rating 23 * CO2 rating 10 == 230).
assert (
    solve(
        [
            "00100",
            "11110",
            "10110",
            "10111",
            "10101",
            "01111",
            "00111",
            "11100",
            "10000",
            "11001",
            "00010",
            "01010",
        ]
    )
    == 230
)
print(solve(lines))  # 4790390
|
## ---------------------------------------------------------------------------
## DM-Sim: Density-Matrix Quantum Circuit Simulation Environment
## ---------------------------------------------------------------------------
## Ang Li, Senior Computer Scientist
## Pacific Northwest National Laboratory(PNNL), U.S.
## Homepage: http://www.angliphd.com
## GitHub repo: http://www.github.com/pnnl/DM-Sim
## PNNL-IPID: 31919-E, ECCN: EAR99, IR: PNNL-SA-143160
## BSD License.
### ---------------------------------------------------------------------------
## File: adder_n10_mpi.py
## A VQE driver example using the Python API (optimizes variational
## parameters against a compiled simulator binary; MPI variant commented out).
## Requires GPUDirect-RDMA support.
## Requires: GCC-9.1.0 (require latest GCC)
## LLVM-10.0.1 (required by QIR on Summit)
## CUDA-11.0 or newer (required by QIR on Summit)
## MPI4PY (https://github.com/mpi4py/mpi4py)
# ---------------------------------------------------------------------------
from scipy.optimize import minimize
import subprocess
import time
#import mpi4py
#from mpi4py import MPI
#comm = MPI.COMM_WORLD
#rank = comm.Get_rank()
#size = comm.Get_size()
# Accumulated wall-clock seconds spent inside the simulator binary.
total_time = 0


def run_program(var_params):
    """Execute the compiled VQE circuit with the given variational parameters.

    Invokes the simulator binary with the first three parameters on the
    command line, prints the command and its output, accumulates the
    subprocess wall time into the module-level `total_time`, and returns
    the energy reported on stdout as a float.
    (An MPI variant via `jsrun` existed previously; only the OMP binary
    is invoked here.)
    """
    global total_time
    cmd = ("./VariationalQuantumEigensolver_sin "
           + str(var_params[0]) + " " + str(var_params[1]) + " " + str(var_params[2]))
    print(cmd)
    start = time.time()
    feedback = subprocess.getoutput(cmd)
    total_time += time.time() - start
    print(feedback)
    return float(feedback)
def VQE(initial_var_params):
    """Minimize the circuit energy over the variational parameters.

    Uses the gradient-free COBYLA optimizer; returns scipy's OptimizeResult.
    """
    return minimize(run_program,
                    initial_var_params,
                    method="COBYLA",
                    tol=0.000001,
                    options={'disp': True, 'maxiter': 200, 'rhobeg': 0.05})
# Script entry point: run the optimization once, then report the optimum,
# its distance from the known FCI energy, and timing.
if __name__ == "__main__":
    # Initial variational parameters
    var_params = [0.001, -0.001, 0.001]
    # Run VQE and print the results of the optimization process
    # A large number of samples is selected for higher accuracy
    #opt_result = VQE(var_params, jw_hamiltonian, n_samples=10)
    vqe_start = time.time()
    opt_result = VQE(var_params)
    vqe_stop = time.time()
    print(opt_result)
    # Print difference with exact FCI value known for this bond length
    fci_value = -1.1372704220924401
    print("Difference with exact FCI value :: ", abs(opt_result.fun - fci_value))
    print("Simulation Time:" + str(total_time))
    print("Overal Time:" + str(vqe_stop-vqe_start))
|
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.domain.user import User
from .base import RepoBase
class UserRepo(RepoBase[User]):
    """Repository of User rows; adds username/email lookups to RepoBase."""
    model = User
    _session: AsyncSession  # async DB session supplied by RepoBase

    async def get_by_username(self, /, username: str) -> User | None:
        """Return the user with this username, or None if absent."""
        return (
            await self._session.execute(select(self.model).filter_by(username=username))
        ).scalar()

    async def get_by_email(self, /, email: str) -> User | None:
        """Return the user with this email, or None if absent."""
        return (
            await self._session.execute(select(self.model).filter_by(email=email))
        ).scalar()
|
######################################################################
# Author: Concepta Njolima TODO: Change this to your name
# Username: njolimac TODO: Change this to your username
#
# Assignment: A01
#
# Purpose: A program that returns your Chinese Zodiac animal given a
# birth year between 1997 and 2005. Also prints your friend's animal,
# and your compatibility with that friend's animal.
######################################################################
# Acknowledgements:
# Original Author: Dr. Scott Heggen
######################################################################
# Remember to read the detailed notes about each task in the A01 document.
######################################################################
# (Required) Task 1
# Ask the user for their birth year.
birth_year = int(input("Enter your Year of Birth"))

# Map the birth year to its Chinese Zodiac animal (years 1997-2005).
# Grammar fixed in the printed messages ("an Tiger" -> "a Tiger", etc.).
if birth_year == 1997:
    print(" You are an Ox")
elif birth_year == 1998:
    print(" You are a Tiger")
elif birth_year == 1999:
    print(" You are a Rabbit")
elif birth_year == 2000:
    print(" You are a Dragon")
elif birth_year == 2001:
    print(" You are a Snake")
elif birth_year == 2002:
    print(" You are a Horse")
elif birth_year == 2003:
    print(" You are a Goat")
elif birth_year == 2004:
    print(" You are a Monkey")
elif birth_year == 2005:
    print(" You are a Rooster")
else:
    print("Enter a year between 1997 and 2005 ")
######################################################################
# (Required) Task 2
# Ask the user for their friend's birth year.
friend_birth = int(input("Enter your friend's year of birth"))

# Same zodiac mapping for the friend; the messages previously dropped the
# verb ("Your friend an Ox") — fixed to read "Your friend is ...".
if friend_birth == 1997:
    print(" Your friend is an Ox")
elif friend_birth == 1998:
    print(" Your friend is a Tiger")
elif friend_birth == 1999:
    print(" Your friend is a Rabbit")
elif friend_birth == 2000:
    print(" Your friend is a Dragon")
elif friend_birth == 2001:
    print(" Your friend is a Snake")
elif friend_birth == 2002:
    print(" Your friend is a Horse")
elif friend_birth == 2003:
    print(" Your friend is a Goat")
elif friend_birth == 2004:
    print(" Your friend is a Monkey")
elif friend_birth == 2005:
    print(" Your friend is a Rooster")
else:
    print("Enter a year between 1997 and 2005 ")
######################################################################
# (Optional) Task 3
# Compatibility check, written for the first user's own row of the table.
# BUG FIX: the original condition was
#   birth_year == 1997 and friend_birth == 1997 or friend_birth == 2001 or friend_birth == 2005
# Since `and` binds tighter than `or`, it printed "Great friends" whenever
# the friend was born in 2001 or 2005, regardless of the user's own year.
if birth_year == 1997 and friend_birth in (1997, 2001, 2005):
    print("Great friends!!! hahahaaahaha")
elif birth_year == 2003:
    print("Goat!!! Got all reasons to keep away from me!")
# TODO print if you are a strong match, no match, or in between
|
import os
import json
import requests
import logging
import ssl
from pymongo import MongoClient
# Module logger wired to a stream handler with a verbose format; DEBUG level
# so consumers see everything unless they reconfigure logging themselves.
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s/%(module)s @ %(threadName)s: %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
class ConfigurationException(Exception):
    """Raised when required Foundation configuration (API root/key) is missing."""
    pass
def parse_bool(s):
    """Coerce *s* (None, bool, or string) to a bool.

    Strings parse case-insensitively; only 'true' yields True.
    """
    if isinstance(s, bool):
        return s
    if s is None:
        return False
    return s.lower() == 'true'
class Repository(object):
    """Client for the Foundation repository API: resolves a repository id to
    MongoDB connection details and builds pymongo clients from them."""

    def __init__(self, api_root, api_key):
        self.api_root = api_root
        self.api_key = api_key

    def get_connection(self, repository_id):
        """Fetch the connection descriptor (server, credentials, database)
        for *repository_id*; raises on HTTP errors."""
        headers = {'X-CogScale-Key': self.api_key}
        r = requests.get("%s/v1/repository/%s/connection" % (self.api_root, repository_id), headers=headers)
        r.raise_for_status()
        return r.json()

    def get_client(self, repository_id):
        """Return a MongoClient connected to the repository's server."""
        conn = self.get_connection(repository_id)
        return Repository._create_client(conn)

    def get_database(self, repository_id):
        """Return the repository's default pymongo Database object."""
        conn = self.get_connection(repository_id)
        db_name = conn["database"]
        return Repository._create_client(conn)[db_name]

    @staticmethod
    def _create_client(conn):
        """Build a MongoClient from a connection descriptor.

        When the server options request SSL, certificate verification is
        disabled (ssl.CERT_NONE) — presumably to support self-signed
        deployments; NOTE(review): confirm this is acceptable in production.
        """
        mongo_uri = "mongodb://{}:{}@{}:{}/{}".format(
            conn["username"]
            , conn["password"]
            , conn["server"]["host"]
            , conn["server"]["port"]
            , conn["database"]
        )
        opts = conn["server"].get("options", {})
        if parse_bool(opts.get("ssl", False)):
            return MongoClient(mongo_uri, ssl=True, ssl_cert_reqs=ssl.CERT_NONE)
        else:
            return MongoClient(mongo_uri)
class Observers(object):
    """Posts lifecycle notifications to the queues registered per event type."""

    def __init__(self, api_root, api_key, observers):
        self.api_root = api_root
        self.api_key = api_key
        # Mapping of event name -> iterable of queue names; may be empty.
        self.observers = observers or {}

    def on_error(self, error):
        """Notify every queue registered for 'error' events."""
        for queue in self.observers.get('error') or []:
            self._post_message(queue, error)

    def on_completion(self, msg):
        """Notify every queue registered for 'completion' events."""
        for queue in self.observers.get('completion') or []:
            self._post_message(queue, msg)

    def _post_message(self, queue, msg):
        """POST `msg` to the named queue; failures are logged, not raised."""
        payload = {
            "messages": [{
                "body": json.dumps(msg)
            }]
        }
        response = requests.post(
            "%s/v1/queues/%s/messages" % (self.api_root, queue),
            json=payload,
            headers={'X-CogScale-Key': self.api_key})
        if response.status_code != 201:
            logger.error('Observer failed to post message to queue %s; error was "%s"' % (queue, response.text))
class Foundation(object):
    """
    Create a Foundation connection.
    Configuration is loaded in the following order:
    1. Keyword args passed here
    2. Payload (PAYLOAD_FILE)
    3. Config JSON (CONFIG_FILE) set at job deploy time
    4. ENV variables set at job deploy time
    Keywords override payload, payload overrides config JSON, config JSON overrides environment.
    """

    def __init__(self, **kwargs):
        payload = Foundation._load_json_from_env('PAYLOAD_FILE')
        config = Foundation._load_json_from_env('CONFIG_FILE')

        def resolve(key, env_var):
            # Precedence: kwargs > payload > config JSON > environment.
            return kwargs.get(key, payload.get(key, config.get(key, os.getenv(env_var))))

        self.api_root = resolve('foundation_api_root', 'FOUNDATION_API_ROOT')
        if not self.api_root:
            raise ConfigurationException(
                'Foundation API root not configured: see http://docs.foundation.insights.ai for details')
        self.api_key = resolve('foundation_api_key', 'FOUNDATION_API_KEY')
        if not self.api_key:
            raise ConfigurationException(
                'Foundation API key not configured: see http://docs.foundation.insights.ai for details')

    def repository(self):
        """Return a Repository bound to this connection."""
        return Repository(self.api_root, self.api_key)

    def observers(self, observers):
        """Return an Observers dispatcher bound to this connection."""
        return Observers(self.api_root, self.api_key, observers)

    @staticmethod
    def _load_json_from_env(env_var):
        """Load JSON from the file named by `env_var`; {} when unset/missing."""
        file_path = os.getenv(env_var)
        if not (file_path and os.path.isfile(file_path)):
            return {}
        with open(file_path) as f:
            return json.load(f)
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import unittest
from pymatgen.io.abinit.launcher import ScriptEditor
class ScriptEditorTest(unittest.TestCase):
    def test_base(self):
        """Exercise the basic ScriptEditor API and print the resulting script."""
        editor = ScriptEditor()
        editor.shebang()
        editor.declare_var("FOO", "BAR")
        editor.add_emptyline()
        editor.add_comment("This is a comment")
        editor.declare_vars({"FOO1": "BAR1"})
        editor.load_modules(["module1", "module2"])
        print(editor.get_script_str())
if __name__ == '__main__':
    # Run this module's test cases when executed directly.
    unittest.main()
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 - 2021 Cheng Zhao <zhaocheng03@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from astropy.io import fits
import numpy as np
import os
# Build one text file per cap (north/south) listing the maskbits coadd file
# path for every brick in the DR8 legacy survey brick catalogue.
dr = 'dr8'
caps = ['north', 'south']
legacy_dir = '/global/project/projectdirs/cosmo/data/legacysurvey'
bricks_fmt = '{}/{}/survey-bricks-{}-{}.fits.gz'
maskbit_fmt = '{}/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'
output_fmt = 'legacysurvey_maskbits_{}_{}.txt'
# Dead code removed: the unused f-string templates `ilist`/`olist` duplicated
# bricks_fmt/output_fmt and were never referenced.
for cap in caps:
    ifile = '{}/{}'.format(legacy_dir, bricks_fmt.format(dr, cap, dr, cap))
    ofile = output_fmt.format(dr, cap)
    if not os.path.isfile(ifile):
        print(f'Error: cannot access brick list: {ifile}')
        exit(1)
    names = np.unique(fits.open(ifile)[1].data['brickname'])
    # Bricks are grouped on disk by the first three characters of their name.
    fnames = ['{}/{}\n'.format(legacy_dir, maskbit_fmt.format(dr, cap, n[:3], n, n)) for n in names]
    with open(ofile, 'w') as f:
        f.writelines(fnames)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .original_encoder import OriginalEncoder
from .paraphrase_encoder import ParaEncoder
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Encoder(nn.Module):
    """Paired sentence encoder.

    Encodes the original sentence first; its hidden state seeds the
    paraphrase encoder, whose final state is returned.
    """

    def __init__(self, params):
        super(Encoder, self).__init__()
        self.params = params
        self.original_encoder = OriginalEncoder(self.params)
        self.paraphrase_encoder = ParaEncoder(self.params)

    def forward(self, original_input, paraphrase_input):
        """
        :param original_input: [seq_len, embed_size] tensor for one sentence
            (a leading batch dim of 1 is also accepted) -- TODO confirm shape
            with the caller
        :param paraphrase_input: tensor of the same shape as original_input
        :return: final state of the paraphrase encoder
        """
        # Bug fix: seq_len, embed_size and original_encoder_hidden were used
        # without ever being defined (the defining lines had been commented
        # out), so every call raised NameError.
        *_, seq_len, embed_size = original_input.size()
        # NOTE(review): restored from the commented-out draft; assumes
        # OriginalEncoder exposes init_hidden() -- confirm.
        original_encoder_hidden = self.original_encoder.init_hidden()
        original_encoder_hidden = self.original_encoder(
            torch.reshape(original_input, (1, seq_len, embed_size)),
            original_encoder_hidden)
        paraphrase_final_state = self.paraphrase_encoder(
            torch.reshape(paraphrase_input, (1, seq_len, embed_size)),
            original_encoder_hidden)
        return paraphrase_final_state

    def init_hidden(self):
        """Zero-initialised hidden state: [2 * num_layers, 1, rnn_size]."""
        return torch.zeros(2 * self.params.encoder_num_layers,
                           1, self.params.encoder_rnn_size, device=device)
|
# -*- coding: utf-8 -*-
"""
--------------------------------------
@File : model_constant.py
@Author : maixiaochai
@Email : maixiaochai@outlook.com
@Created on : 2020/5/10 15:47
--------------------------------------
"""
# Permission levels
USER_COMMON = 0   # regular user
USER_BLACK = 1    # black-listed user -- presumably blocked; confirm with permission checks
USER_VIP = 2      # VIP user
ADMIN_NONE = 0    # not an administrator
ADMIN_COMMON = 2  # regular administrator
# Common response messages
MSG_403 = "Permission denied."
|
import web
from web import form
import socket
# Print the LAN URL the web.py server will listen on (web.py default port 8080).
print("http://" + socket.gethostbyname(socket.gethostname()) + ":8080")
# URL routing table: path -> handler class name.
urls = (
    '/index', 'Index',
    '/','Login',
    '/page_one','Page_one'
)
# Login form with hard-coded credentials (martin / 12341234).
loginform = form.Form(
    form.Textbox("USERNAME",
                 form.notnull,
                 form.Validator('wrong', lambda x: x == "martin")),
    form.Textbox("PASSWORD",
                 form.notnull,
                 form.Validator('wrong', lambda x: x == "12341234")),
    form.Checkbox('I am not a robot'))
# One single-button form per robot command, rendered by the templates.
left_form = form.Form(form.Button("left", value="left", id="left", style="height:30px;width:80px" ))
foward_form = form.Form(form.Button("foward", value="foward", style="height:30px;width:80px"))
backward_form = form.Form(form.Button("backward", value="backward", style="height:30px;width:80px"))
right_form = form.Form(form.Button("right", value="right", style="height:30px;width:80px"))
start_form = form.Form(form.Button("start", value="start", style="height:30px;width:80px"))
stop_form = form.Form(form.Button("stop", value="stop", style="height:30px;width:80px"))
render = web.template.render('template/')  # templates live in ./template/
class Index:
    """Main control page: renders the command-button forms and dispatches
    button presses posted back by the browser."""

    def GET(self):
        left = left_form
        right = right_form
        foward = foward_form
        backward = backward_form
        start = start_form
        stop = stop_form
        return render.index(form, left, right, foward, backward, start, stop)

    def POST(self):
        left = left_form
        right = right_form
        foward = foward_form
        backward = backward_form
        start = start_form
        stop = stop_form
        userData = web.data()
        print(userData)
        # NOTE(review): web.data() returns bytes on Python 3, so these str
        # comparisons may never match -- confirm against the web.py version
        # in use.
        if userData == "backward=backward":
            pass
        elif userData == "left=left":
            pass
        elif userData == "right=right":
            pass
        elif userData == "foward=foward":
            pass
        elif userData == "stop=stop":
            pass
        # Bug fix: a second, unreachable `elif userData == "left=left"`
        # branch was removed (duplicated the branch above).
        elif userData == "gotopagone":
            return web.seeother('/page_one')
        return render.index(form, left, right, foward, backward, start, stop)
class Login:
    """Login page served at '/'; credentials are checked by `loginform`."""

    def GET(self):
        # Fresh form instance per request.
        return render.login(loginform())

    def POST(self):
        submitted = loginform()
        if submitted.validates():
            return web.seeother('/page_one')
        # Re-render the login page with validation errors attached.
        return render.login(submitted)
class Page_one:
    """Secondary control page shown after a successful login."""

    def GET(self):
        left = left_form
        right = right_form
        foward = foward_form
        backward = backward_form
        start = start_form
        stop = stop_form
        # Bug fix: an unreachable `pass` after the return was removed.
        return render.page_one(form, left, right, foward, backward, start, stop)

    def POST(self):
        left = left_form
        right = right_form
        foward = foward_form
        backward = backward_form
        start = start_form
        stop = stop_form
        userData = web.data()
        print(userData)
        # Bug fix: an unreachable `pass` after the return was removed.
        return render.page_one(form, left, right, foward, backward, start, stop)
if __name__ == "__main__" :
    # Start the web.py application (listens on port 8080 by default).
    app = web.application(urls, globals())
    app.run()
|
from tkinter import *
import random
import time
class Coords:
    """Axis-aligned rectangle described by its corners (x1, y1)-(x2, y2)."""

    # Bug fix: the constructor was named `init`, so `Coords(...)` never set
    # the attributes; renamed to the special method `__init__`.
    def __init__(self, x1=0, y1=0, x2=0, y2=0):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
def within_x(co1, co2):
    """Return True when the x-extents of `co1` and `co2` overlap (open bounds)."""
    # Bug fix: the first test compared co1.x1 against co2.x1 twice
    # (`co1.x1 > co2.x1 and co1.x1 < co2.x1`), which can never be true;
    # the upper bound must be co2.x2.
    if co1.x1 > co2.x1 and co1.x1 < co2.x2:
        return True
    elif co1.x2 > co2.x1 and co1.x2 < co2.x2:
        return True
    elif co2.x1 > co1.x1 and co2.x1 < co1.x2:
        return True
    elif co2.x2 > co1.x1 and co2.x2 < co1.x2:
        return True
    else:
        return False
class Game:
    """Game window: builds the tkinter canvas, tiles the background image,
    and runs the animation loop."""

    # Bug fix: the constructor was named `init`, so `Game()` never created
    # the window and `mainloop` crashed on the missing attributes; renamed
    # to the special method `__init__`.
    def __init__(self):
        self.tk = Tk()
        self.tk.title("Mr. Stick Man Races for the Exit")
        self.tk.resizable(0, 0)
        self.tk.wm_attributes("-topmost", 1)
        self.canvas = Canvas(self.tk, width=500, height=500,
                             highlightthickness=0)
        self.canvas.pack()
        self.tk.update()
        self.canvas_height = 500
        self.canvas_width = 500
        self.bg = PhotoImage(file="background.gif")
        w = self.bg.width()
        h = self.bg.height()
        # Tile the background image across the 500x500 canvas.
        for x in range(0, 5):
            for y in range(0, 5):
                self.canvas.create_image(x * w, y * h,
                                         image=self.bg, anchor='nw')
        self.sprites = []
        self.running = True

    def mainloop(self):
        """Animation loop: move every sprite, refresh the window, ~100 fps."""
        while 1:
            if self.running == True:
                for sprite in self.sprites:
                    sprite.move()
            self.tk.update_idletasks()
            self.tk.update()
            time.sleep(0.01)
# Create the window and start the animation loop (blocks until closed/killed).
g = Game()
g.mainloop()
|
from unittest import TestCase
from typing import List
from mapperr import to_dict, to_obj
class A:
    """Flat test record: two primitive annotated fields (set by the tests)."""
    a1: str
    a2: int
class B:
    """Nested test record: primitives plus an embedded A."""
    b1: str
    b2: int
    b3: A
class C:
    """Deeply nested test record: lists of B, plain containers, and a float."""
    c1: str
    c2: int
    c3: List[B]
    c4: List[int]
    c5: dict
    c6: list
    c7: float
    def test(self):
        # NOTE(review): stray helper, not referenced by TestMapperr below.
        print("hi")
class TestMapperr(TestCase):
    """Round-trip tests for mapperr's to_dict / to_obj at three nesting depths."""

    def setUp(self) -> None:
        # Level 1: plain A objects and their expected dict form.
        self.a = A()
        self.a.a1 = "text"
        self.a.a2 = 1
        self.ad = {"a1": "text", "a2": 1}
        self.a2 = A()
        self.a2.a1 = "text2"
        self.a2.a2 = 1
        # Level 2: B embeds an A.
        self.b = B()
        self.b.b1 = "text"
        self.b.b2 = 1
        self.b.b3 = self.a
        self.bd = {'b1': 'text', 'b2': 1, 'b3': {'a1': 'text', 'a2': 1}}
        self.b2 = B()
        self.b2.b1 = "text2"
        self.b2.b2 = 1
        self.b2.b3 = self.a2
        # Level 3: C embeds lists of Bs plus plain containers.
        self.c = C()
        self.c.c1 = "text"
        self.c.c2 = 1
        self.c.c3 = [self.b, self.b2]
        self.c.c4 = [1, 2]
        self.c.c5 = {"msg": "hello"}
        self.c.c6 = ["x", "y", "z"]
        self.c.c7 = 3.14
        self.cd = {'c1': 'text', 'c2': 1,
                   'c3': [{'b1': 'text', 'b2': 1, 'b3': {'a1': 'text', 'a2': 1}},
                          {'b1': 'text2', 'b2': 1, 'b3': {'a1': 'text2', 'a2': 1}}],
                   'c4': [1, 2], 'c5': {'msg': 'hello'},
                   'c6': ['x', 'y', 'z'], 'c7': 3.14}

    def test_todict_level1(self):
        self.assertEqual(to_dict(self.a), self.ad)

    def test_todict_level2(self):
        self.assertEqual(to_dict(self.b), self.bd)

    def test_todict_level3(self):
        self.assertEqual(to_dict(self.c), self.cd)

    def test_toobj_level1(self):
        self.assertEqual(to_dict(to_obj(self.ad, A)), self.ad)

    def test_toobj_level2(self):
        self.assertEqual(to_dict(to_obj(self.bd, B)), self.bd)

    def test_toobj_level3(self):
        self.assertEqual(to_dict(to_obj(self.cd, C)), self.cd)
import sys, tty, termios
import RPi.GPIO as GPIO
from motor import Motor
import picamera
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from model import load_model
# NOTE(review): cleanup() before any setmode/setup normally emits a
# RuntimeWarning -- confirm this ordering is intended.
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)  # physical (board) pin numbering
motor = Motor(15,16,18,13,11,7)  # motor driver wired to these board pins -- TODO confirm mapping
load_model()  # load the trained driving model into memory
def getKey():
    """Read a single raw character from stdin, restoring the terminal after."""
    stdin_fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(stdin_fd)
    try:
        # Raw mode: deliver keystrokes immediately, without echo/line buffering.
        tty.setraw(stdin_fd)
        key = sys.stdin.read(1)
    finally:
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_attrs)
    return key
# Bug fix: the camera was re-instantiated on every loop iteration (the camera
# device can only be opened once, so the second iteration would fail), and the
# class name was wrong -- the picamera package exposes PiCamera, not Camera.
camera = picamera.PiCamera()
camera.resolution = (128, 128)
while True:
    camera.capture('/Desktop/Self-Driving-Car/CurrentData/img.jpeg', format='jpeg')
|
"""
All test are from the webs
https://realpython.com/primer-on-python-decorators/#decorators-with-arguments
"""
import functools
import math
from typing import Callable, Any
def dec(*args: Any, **kwargs: Any):
    """Toy decorator: discards whatever it wraps and returns a zero-arg
    function that prints "hello" instead."""
    def inner():
        print("hello")
    return inner
# NOTE(review): `dec` ignores the wrapped function, so hello_test is replaced
# by dec's zero-arg `inner` -- calling hello_test(a) raises TypeError.
@dec
def hello_test(a):
    print("main func")
class ClassDecorator:
    """Demonstrates a decorator factory implemented as an instance method."""

    def on(self, event_name: Any, *args: Any, **kwargs: Any) -> Callable:
        """Return a pass-through decorator that announces decoration."""
        def decorator(func):
            print("decorated")
            return func
        return decorator

    @staticmethod
    def test():
        """Placeholder static method (no behaviour)."""
        pass
# def on(event_name: Any, *args: Any, **kwargs: Any) -> Callable:
def on():
    """Decorator factory: the returned decorator logs the decoration and
    hands the function back unchanged."""
    def decorator(func, *args, **kwargs):
        print("decorated")
        return func
    return decorator
def repeat(_func=None, *, num_times=2):
    """Call the wrapped function `num_times` times, returning the last result.

    Works both bare (@repeat) and with arguments (@repeat(num_times=3)).
    """
    def decorator_repeat(func):
        @functools.wraps(func)
        def wrapper_repeat(*args, **kwargs):
            result = None
            for _ in range(num_times):
                result = func(*args, **kwargs)
            return result
        return wrapper_repeat
    # Bare use passes the function directly; parameterised use gets the decorator.
    return decorator_repeat if _func is None else decorator_repeat(_func)
def count_calls(func):
    """Decorator: count invocations of *func*.

    The running total is exposed as ``num_calls`` on the returned wrapper.
    """
    def wrapper_count_calls(*args, **kwargs):
        wrapper_count_calls.num_calls += 1
        print(f"Call {wrapper_count_calls.num_calls} of {func.__name__!r}")
        return func(*args, **kwargs)
    wrapper_count_calls.num_calls = 0
    # Copy func's metadata onto the wrapper after the counter is initialised.
    return functools.wraps(func)(wrapper_count_calls)
import functools
class CountCalls:
    """Class-based counting decorator: instances are callable and keep their
    invocation total in `num_calls`."""

    def __init__(self, func):
        # Copy the wrapped function's metadata onto this instance.
        functools.update_wrapper(self, func)
        self.func = func
        self.num_calls = 0

    def __call__(self, *args, **kwargs):
        self.num_calls += 1
        print(f"Call {self.num_calls} of {self.func.__name__!r}")
        return self.func(*args, **kwargs)
# @count_calls
# say_whee becomes a CountCalls instance; the invocation count is available
# as say_whee.num_calls.
@CountCalls
def say_whee():
    print("Whee!")
# @repeat
# def say_whee():
# print("Whee!")
# @repeat(num_times=3) runs the body three times per call; returns None.
@repeat(num_times=3)
def greet(name):
    print(f"Hello {name}")
def singleton(cls):
    """Turn `cls` into a singleton: the first call constructs the instance,
    every later call returns that same instance."""
    @functools.wraps(cls)
    def wrapper_singleton(*args, **kwargs):
        # Lazily build the one instance on first use.
        if not wrapper_singleton.instance:
            wrapper_singleton.instance = cls(*args, **kwargs)
        return wrapper_singleton.instance
    wrapper_singleton.instance = None
    return wrapper_singleton
# Demonstration of @singleton: every TheOne() call yields the same instance.
@singleton
class TheOne:
    pass
def cache(func):
    """Keep a cache of previous function calls"""
    @functools.wraps(func)
    def wrapper_cache(*args, **kwargs):
        # kwargs participate in the key via their (name, value) pairs.
        cache_key = args + tuple(kwargs.items())
        try:
            return wrapper_cache.cache[cache_key]
        except KeyError:
            result = func(*args, **kwargs)
            wrapper_cache.cache[cache_key] = result
            return result
    wrapper_cache.cache = dict()
    return wrapper_cache
# Memoised (via @cache) and call-counted recursive Fibonacci; the cache keeps
# the recursion effectively linear in num instead of exponential.
@cache
@count_calls
def fibonacci(num):
    if num < 2:
        return num
    return fibonacci(num - 1) + fibonacci(num - 2)
def set_unit(unit):
    """Attach `unit` to the decorated function as a `.unit` attribute,
    without wrapping the call."""
    def tag(fn):
        fn.unit = unit
        return fn
    return tag
# Cylinder volume; tagged with its unit ("cm^3") via set_unit.
@set_unit("cm^3")
def volume(radius, height):
    return math.pi * radius**2 * height
from flask import Flask, request, abort
import functools
app = Flask(__name__)
def validate_json(*expected_args):
    """Reject the request with HTTP 400 unless its JSON body contains every
    key in `expected_args`."""
    def decorator_validate_json(func):
        @functools.wraps(func)
        def wrapper_validate_json(*args, **kwargs):
            payload = request.get_json()
            missing = [key for key in expected_args if key not in payload]
            if missing:
                abort(400)
            return func(*args, **kwargs)
        return wrapper_validate_json
    return decorator_validate_json
@app.route("/grade", methods=["POST"])
@validate_json("student_id")
def update_grade():
    # The decorator has already aborted with 400 if "student_id" is missing.
    json_data = request.get_json()
    # Update database.
    return "success!"
if __name__ == "__main__":
    # Scratch area: earlier experiments kept commented out for reference.
    # dec = ClassDecorator()
    #
    # @on()
    # def log_losses(trainer):
    #     print(trainer)
    #     print("do nothing")
    #
    # log_losses("test")
    # say_whee()
    # greet("val")
    # first_one = TheOne()
    # another_one = TheOne()
    #
    # print(first_one is another_one)
    # print(id(first_one))
    # print(id(another_one))
    # Thanks to @cache the recursion is linear, so fibonacci(100) is instant.
    print(fibonacci(100))
from typing import Any, Union
from boa3.builtin import CreateNewEvent, NeoMetadata, metadata, public
from boa3.builtin.interop.contract import update_contract
from boa3.builtin.interop.runtime import check_witness
from boa3.builtin.interop.storage import get, put
from boa3.builtin.type import UInt160
# -------------------------------------------
# TOKEN SETTINGS
# -------------------------------------------
# Script hash of the contract owner
OWNER = UInt160()  # zero-filled placeholder; presumably replaced before deployment -- confirm
SUPPLY_KEY = 'totalSupply'
TOKEN_TOTAL_SUPPLY = 10_000_000 * 10**8  # 10m total supply * 10^8 (decimals)
# -------------------------------------------
# METADATA
# -------------------------------------------
@metadata
def manifest_metadata() -> NeoMetadata:
    """
    Defines this smart contract's metadata information.
    """
    # NOTE(review): fields set here are read at compile time to produce the
    # contract manifest -- confirm against the boa3 version in use.
    meta = NeoMetadata()
    meta.author = "Mirella Medeiros, Ricardo Prado and Lucas Uezu. COZ in partnership with Simpli"
    meta.description = "Update Contract Example. This contract represents the updated smart contract to be deployed " \
                       "on the blockchain, with the method now working properly"
    meta.email = "contact@coz.io"
    return meta
# -------------------------------------------
# Events
# -------------------------------------------
# Transfer event fired by `method` and `_deploy`; a None address is used for
# the sender when tokens are created (see the call sites below).
on_transfer = CreateNewEvent(
    [
        ('from_addr', Union[UInt160, None]),
        ('to_addr', Union[UInt160, None]),
        ('amount', int)
    ],
    'Transfer'
)
# -------------------------------------------
# Methods
# -------------------------------------------
@public
def update_sc(nef_file: bytes, manifest: bytes, data: Any = None):
    """
    Updates the smart contract. In this example there is a bugged method, so, the smart contract will be updated to fix
    the bug.
    """
    # Only the owner may replace the contract; silently a no-op otherwise.
    if check_witness(OWNER):
        update_contract(nef_file, manifest, data)
@public
def method(account: UInt160):
    """
    This method is now working properly.
    """
    # some omitted code
    # now there is a verification to this method
    if check_witness(OWNER):
        # Credit the account with 2 tokens (2 * 10^8 base units) and emit Transfer.
        put(account, get(account).to_int() + 2 * 10**8)
        on_transfer(None, account, 2 * 10**8)
    # more omitted code
@public
def _deploy(data: Any, update: bool):
    """
    Initializes the storage when the smart contract is deployed. When this smart contract is updated, it should do nothing.
    """
    if not update:
        # Record the total supply and credit the full supply to the owner.
        put(SUPPLY_KEY, TOKEN_TOTAL_SUPPLY)
        put(OWNER, TOKEN_TOTAL_SUPPLY)
        on_transfer(None, OWNER, TOKEN_TOTAL_SUPPLY)
@public
def balanceOf(account: UInt160) -> int:
    """
    Get the current balance of an address.
    """
    # A UInt160 script hash is exactly 20 bytes.
    assert len(account) == 20
    return get(account).to_int()
|
from logging import debug, info, warning, error
from datetime import datetime, timezone
import requests
import re
from .. import AbstractPollHandler
from data.models import Poll
class PollHandler(AbstractPollHandler):
    """Poll handler backed by youpoll.me.

    Handles three poll generations: v1 (numeric 1-10 rating), v2
    (Like/Dislike) and v3 (five-option scale).
    """

    OPTION_V2_PLUS = 'Like'
    OPTION_V2_MINUS = 'Dislike'
    OPTIONS_V3 = ['Excellent', 'Great', 'Good', 'Mediocre', 'Bad']

    _poll_post_url = 'https://youpoll.me'
    _poll_post_headers = {'User-Agent': None}
    # Form-data template for poll creation. Treated as read-only: it is
    # copied per request (it used to be mutated in place, leaking state).
    _poll_post_data = {'address': '',
                       'poll-1[question]': None,
                       'poll-1[option1]': OPTIONS_V3[0],
                       'poll-1[option2]': OPTIONS_V3[1],
                       'poll-1[option3]': OPTIONS_V3[2],
                       'poll-1[option4]': OPTIONS_V3[3],
                       'poll-1[option5]': OPTIONS_V3[4],
                       'poll-1[min]': '1',
                       'poll-1[max]': 10,
                       'poll-1[voting-system]': '0',
                       'poll-1[approval-validation-type]': '0',
                       'poll-1[approval-validation-value]': '1',
                       'poll-1[basic]': '',
                       'voting-limits-dropdown': '3',
                       'captcha-test-checkbox': 'on',
                       'reddit-link-karma': '0',
                       'reddit-comment-karma': '0',
                       'reddit-days-old': '8',
                       'responses-input': '',
                       }
    # Raw string fixes the invalid-escape warning on \d.
    _poll_id_re = re.compile(r'youpoll.me/(\d+)', re.I)
    _poll_link = 'https://youpoll.me/{id}/'
    _poll_results_link = 'https://youpoll.me/{id}/r'

    def __init__(self):
        super().__init__("youpoll")

    def create_poll(self, title, submit, **kwargs):
        """Create a poll titled `title`; return its id, or None on failure."""
        if not submit:
            return None
        # Bug fix: the shared class-level template was mutated in place,
        # which could leak the previous title between calls; copy it instead.
        data = dict(self._poll_post_data)
        data['poll-1[question]'] = title
        try:
            resp = requests.post(self._poll_post_url, data=data, **kwargs)
        except requests.RequestException:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            error("Could not create poll (exception in POST)")
            return None
        if resp.ok:
            match = self._poll_id_re.search(resp.url)
            return match.group(1)
        error("Could not create poll (resp !OK)")
        return None

    def get_link(self, poll):
        """Voting URL for `poll`."""
        return self._poll_link.format(id=poll.id)

    def get_results_link(self, poll):
        """Results-page URL for `poll`."""
        return self._poll_results_link.format(id=poll.id)

    def get_score(self, poll):
        """Scrape and return the poll's score, or None when unavailable.

        v1 polls yield the mean rating (1-10), v2 polls the fraction of
        likes (0-1), v3 polls a weighted total over the five options.
        """
        debug(f"Getting score for show {poll.show_id} / episode {poll.episode}")
        try:
            response = self.request(self.get_results_link(poll), html=True)
        except Exception:
            error(f"Couldn't get scores for poll {self.get_results_link(poll)} (query error)")
            return None
        try:
            if response.find('div', class_='basic-type-results') is None:
                # v1 votes, 1-10 range (unused vote count no longer fetched)
                value_text = response.find("span", class_="rating-mean-value").text
                try:
                    return float(value_text)
                except ValueError:
                    warning(f"Invalid value '{value_text}' (v1), no score returned")
                    return None
            else:  # options-based score
                divs = response.find_all('div', class_='basic-option-wrapper')
                # Bug fix: thousands separators are stripped for both v2 and
                # v3 (v2 previously crashed at >= 1,000 votes), and the
                # duplicated find_all calls were removed.
                num_votes_str = response.find("span", class_="admin-total-votes").text
                num_votes = int(num_votes_str.replace(',', ''))
                if num_votes == 0:
                    warning('No vote recorded, no score returned')
                    return None
                if len(divs) == 2:
                    # v2 votes, like/dislike, returned as fraction of likes
                    for div in divs:
                        if div.find('span', class_='basic-option-title').text == self.OPTION_V2_PLUS:
                            value_text = div.find('span', class_='basic-option-percent').text
                            score = float(value_text.strip('%')) / 100
                            # Bug fix: use logging instead of a stray print.
                            debug(f'Score: {score}')
                            return score
                    error('Could not find the score (v2), no score returned')
                    return None
                elif len(divs) == 5:
                    # v3 votes, 5 points scale, weighted 5..1
                    values = dict()
                    for div in divs:
                        label = div.find('span', class_='basic-option-title').text
                        if label not in self.OPTIONS_V3:
                            error(f'Found unexpected label {label}, aborted')
                            return None
                        value_text = div.find('span', class_='basic-option-percent').text
                        values[label] = float(value_text.strip('%')) / 100
                    results = [values[k] for k in self.OPTIONS_V3]
                    info(f'Results: {str(results)}')
                    total = sum(r * s for r, s in zip(results, range(5, 0, -1)))
                    return round(total, 2)
        except Exception:
            error(f"Couldn't get scores for poll {self.get_results_link(poll)} (parsing error)")
            return None

    @staticmethod
    def convert_score_str(score):
        """Format a score for display; fractional (v2) scores render as percents."""
        if score is None:
            return '----'
        elif score <= 1.0:  # New style votes
            return f'{round(100 * score)}%'
        else:
            return str(score)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Trainer script for src.pl_modules.supervised_learning. Example run command: bin/train.py save_to_folder configs/cnn.gin.
"""
import gin
import logging
import os
import json
from src.data import get_dataset
from src.callbacks import get_callback
from src.utils import summary, acc, gin_wrap, parse_gin_config
from src.modules import supervised_training
from src import models
from src.training_loop import training_loop
from pytorch_lightning.callbacks import ModelCheckpoint
logger = logging.getLogger(__name__)
@gin.configurable
def train(save_path, model, batch_size=128, seed=777, callbacks=(), resume=True, evaluate=True):
    """Train `model` on the configured dataset and optionally evaluate it.

    Args:
        save_path: directory for checkpoints and eval results.
        model: name of a constructor in src.models.
        batch_size, seed: data pipeline configuration.
        callbacks: iterable of callback names resolved via get_callback.
            Bug fix: the default was a mutable list (shared across calls);
            an empty tuple is behaviourally identical here.
        resume: allow continuing from an existing last.ckpt.
        evaluate: run trainer.test on the test split after training.
    """
    # Create dynamically dataset generators
    train, valid, test, meta_data = get_dataset(batch_size=batch_size, seed=seed)
    # Create dynamically model
    model = models.__dict__[model]()
    summary(model)
    # Create dynamically callbacks
    callbacks_constructed = []
    for name in callbacks:
        clbk = get_callback(name, verbose=0)
        if clbk is not None:
            callbacks_constructed.append(clbk)
    # Refuse to clobber an existing run unless resuming was requested.
    if not resume and os.path.exists(os.path.join(save_path, "last.ckpt")):
        raise IOError("Please clear folder before running or pass train.resume=True")
    # Create module and pass to training
    checkpoint_callback = ModelCheckpoint(
        filepath=os.path.join(save_path, "weights"),
        verbose=True,
        save_last=True,  # For resumability
        monitor='valid_acc',
        mode='max'
    )
    pl_module = supervised_training.SupervisedLearning(model, meta_data=meta_data)
    trainer = training_loop(train, valid, pl_module=pl_module, checkpoint_callback=checkpoint_callback,
                            callbacks=callbacks_constructed, save_path=save_path)
    # Evaluate
    if evaluate:
        results, = trainer.test(test_dataloaders=test)
        logger.info(results)
        with open(os.path.join(save_path, "eval_results.json"), "w") as f:
            json.dump(results, f)
if __name__ == "__main__":
    # gin_wrap parses the gin config / CLI arguments and invokes train().
    gin_wrap(train)
|
from .array_sum_to_one import array_sum_to_one
from .exists import exists
from .list_to_list_two_tuples import list_to_list_two_tuples
from .to_percent import to_percent
|
import hashlib
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
import logging
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import (
construct_single_input_tokens_and_segment_ids,
create_input_set_from_tokens_and_segments,
)
from jiant.utils.python.io import read_file, read_file_lines
logger = logging.getLogger(__name__)
@dataclass
class Example(BaseExample):
    """One BUCC sentence (either the English or the other-language side)."""

    guid: str        # "<phase>-<idx>" identifier (see _create_examples)
    is_english: bool
    text: str
    text_hash: str   # sha1 hex digest of the stripped text

    def tokenize(self, tokenizer):
        # Tokenisation is the only transformation; other fields pass through.
        return TokenizedExample(
            guid=self.guid,
            is_english=self.is_english,
            text_tokens=tokenizer.tokenize(self.text),
            text_hash=self.text_hash,
        )
@dataclass
class TokenizedExample(BaseTokenizedExample):
    """Tokenised form of Example; `featurize` turns it into model inputs."""

    guid: str
    is_english: bool
    text_tokens: List
    text_hash: str

    def featurize(self, tokenizer, feat_spec):
        # Lay out tokens and segment ids for a single-sentence input.
        unpadded_inputs = construct_single_input_tokens_and_segment_ids(
            input_tokens=self.text_tokens, tokenizer=tokenizer, feat_spec=feat_spec,
        )
        # Build the final id/mask/segment arrays from the unpadded layout.
        input_set = create_input_set_from_tokens_and_segments(
            unpadded_tokens=unpadded_inputs.unpadded_tokens,
            unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
            tokenizer=tokenizer,
            feat_spec=feat_spec,
        )
        return DataRow(
            guid=self.guid,
            input_ids=np.array(input_set.input_ids),
            input_mask=np.array(input_set.input_mask),
            segment_ids=np.array(input_set.segment_ids),
            is_english=self.is_english,
            tokens=unpadded_inputs.unpadded_tokens,
            text_hash=self.text_hash,
        )
@dataclass
class DataRow(BaseDataRow):
    """Featurised single example, ready to be collated into a Batch."""

    guid: str
    input_ids: np.ndarray
    input_mask: np.ndarray
    segment_ids: np.ndarray
    is_english: bool
    tokens: list
    text_hash: str
@dataclass
class Batch(BatchMixin):
    """Collated batch: tensors for model inputs, plain lists for metadata."""

    input_ids: torch.LongTensor
    input_mask: torch.LongTensor
    segment_ids: torch.LongTensor
    is_english: torch.BoolTensor
    tokens: list
    text_hash: list
    guid: list
class Bucc2018Task(Task):
    """BUCC 2018 bitext-mining task (embedding-based; no training split)."""

    Example = Example
    TokenizedExample = Example  # NOTE(review): likely meant TokenizedExample -- confirm
    DataRow = DataRow
    Batch = Batch
    TASK_TYPE = TaskTypes.EMBEDDING

    def __init__(self, name, path_dict, language):
        super().__init__(name=name, path_dict=path_dict)
        self.language = language

    def get_train_examples(self):
        # This task exposes only val/test data.
        raise RuntimeError("This task does not support train examples")

    def get_val_examples(self):
        return self._get_examples(phase="val")

    def get_test_examples(self):
        return self._get_examples(phase="test")

    def get_val_labels(self):
        # Gold labels: one entry per line of the labels file.
        return read_file(self.path_dict["val"]["labels"]).strip().splitlines()

    def _get_examples(self, phase):
        # English and other-language sides are loaded separately, then concatenated.
        eng_examples = self._create_examples(
            lines=read_file_lines(self.path_dict[phase]["eng"]), is_english=True, set_type=phase,
        )
        other_examples = self._create_examples(
            lines=read_file_lines(self.path_dict[phase]["other"]), is_english=False, set_type=phase,
        )
        return eng_examples + other_examples

    @classmethod
    def _create_examples(cls, lines, is_english, set_type):
        # Each input line is "<idx>\t<sentence>".
        examples = []
        for (i, line) in enumerate(lines):
            idx, text = line.split("\t")
            examples.append(
                Example(
                    guid="%s-%s" % (set_type, idx),
                    is_english=is_english,
                    text=text.strip(),
                    text_hash=hashlib.sha1(text.strip().encode("utf-8")).hexdigest(),
                )
            )
        return examples
# noinspection PyUnboundLocalVariable
def mine_bitext(
    x,
    y,
    src_inds,
    trg_inds,
    mode="mine",
    retrieval="max",
    margin="ratio",
    threshold=0,
    neighborhood=4,
    use_gpu=False,
    dist="cosine",
    use_shift_embeds=False,
):
    """Mine, score, or search bitext pairs between embedding sets x and y.

    mode: "mine" finds new pairs, "score" scores given (src, trg) index
    pairs, "search" returns the best target id for each source.
    retrieval: "fwd", "bwd", "intersect" or "max" (union with greedy de-dup).
    margin: "absolute", "distance" or "ratio" margin scoring.
    Requires faiss; relies on the module-level knn/score helpers.
    """
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    import faiss
    src_orig_inds = np.arange(len(x))
    trg_orig_inds = np.arange(len(y))
    out_ls = []
    # Deduplicate/reorder rows so that row i corresponds to id i.
    x = unique_embeddings(x, src_inds)
    y = unique_embeddings(y, trg_inds)
    if dist == "cosine":
        # In-place L2 normalization: inner product becomes cosine similarity.
        faiss.normalize_L2(x)
        faiss.normalize_L2(y)
    if use_shift_embeds:
        x2y, y2x = shift_embeddings(x, y)
    # calculate knn in both directions
    if retrieval != "bwd":
        if use_shift_embeds:
            # project x to y space, and search k-nn ys for each x
            x2y_sim, x2y_ind = knn(x2y, y, min(y.shape[0], neighborhood), use_gpu, dist)
            x2y_mean = x2y_sim.mean(axis=1)
        else:
            x2y_sim, x2y_ind = knn(x, y, min(y.shape[0], neighborhood), use_gpu, dist)
            x2y_mean = x2y_sim.mean(axis=1)
    if retrieval != "fwd":
        if use_shift_embeds:
            y2x_sim, y2x_ind = knn(y2x, x, min(x.shape[0], neighborhood), use_gpu, dist)
            y2x_mean = y2x_sim.mean(axis=1)
        else:
            y2x_sim, y2x_ind = knn(y, x, min(x.shape[0], neighborhood), use_gpu, dist)
            y2x_mean = y2x_sim.mean(axis=1)
    # margin function
    # NOTE: the `margin` parameter (a string) is rebound here to a callable.
    if margin == "absolute":
        # noinspection PyUnusedLocal
        def margin(a, b):
            return a
    elif margin == "distance":
        def margin(a, b):
            return a - b
    else:  # margin == 'ratio':
        def margin(a, b):
            return a / b
    if mode == "search":
        scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
        best = x2y_ind[np.arange(x.shape[0]), scores.argmax(axis=1)]
        for i in src_inds:
            out_ls.append(trg_orig_inds[best[i]])
    elif mode == "score":
        for i, j in zip(src_inds, trg_inds):
            s = score(x[i], y[j], x2y_mean[i], y2x_mean[j], margin)
            out_ls.append((s, src_orig_inds[i], trg_orig_inds[j]))
    elif mode == "mine":
        # Score candidate neighbours in both directions, then combine
        # according to `retrieval`.
        if use_shift_embeds:
            fwd_scores = score_candidates(x2y, y, x2y_ind, x2y_mean, y2x_mean, margin)
            bwd_scores = score_candidates(y2x, x, y2x_ind, y2x_mean, x2y_mean, margin)
        else:
            fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
            bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
        fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
        bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
        if retrieval == "fwd":
            for i, j in enumerate(fwd_best):
                out_ls.append((fwd_scores[i].max(), src_orig_inds[i], trg_orig_inds[j]))
        if retrieval == "bwd":
            for j, i in enumerate(bwd_best):
                out_ls.append((bwd_scores[j].max(), src_orig_inds[i], trg_orig_inds[j]))
        if retrieval == "intersect":
            # Keep only mutual best matches.
            for i, j in enumerate(fwd_best):
                if bwd_best[j] == i:
                    out_ls.append((fwd_scores[i].max(), src_orig_inds[i], trg_orig_inds[j]))
        if retrieval == "max":
            # Union of both directions, consumed greedily in decreasing score
            # order so each src/trg id is used at most once.
            indices = np.stack(
                (
                    np.concatenate((np.arange(x.shape[0]), bwd_best)),
                    np.concatenate((fwd_best, np.arange(y.shape[0]))),
                ),
                axis=1,
            )
            # noinspection PyArgumentList
            scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
            seen_src, seen_trg = set(), set()
            for i in np.argsort(-scores):
                src_ind, trg_ind = indices[i]
                if src_ind not in seen_src and trg_ind not in seen_trg:
                    seen_src.add(src_ind)
                    seen_trg.add(trg_ind)
                    if scores[i] > threshold:
                        out_ls.append((scores[i], src_orig_inds[src_ind], trg_orig_inds[trg_ind]))
    return out_ls
def bucc_eval(candidates2score, gold, threshold=None):
    """Precision/recall/F1 of extracted bitexts against the gold pairs.

    When `threshold` is None, the F1-optimal threshold is chosen first.
    """
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    if threshold is None:
        threshold = bucc_optimize(candidates2score, gold)
    else:
        logger.info(" - using threshold {}".format(threshold))
    gold_pairs = set(gold)
    extracted = bucc_extract(candidates2score, threshold)
    hits = len(gold_pairs.intersection(extracted))
    precision = recall = f1 = 0
    if hits > 0:
        precision = hits / len(extracted)
        recall = hits / len(gold_pairs)
        f1 = 2 * precision * recall / (precision + recall)
    return {
        "best-threshold": threshold,
        "precision": precision,
        "recall": recall,
        "F1": f1,
    }
def bucc_optimize(candidate2score, gold):
    """Find the score threshold that maximizes F1 against the gold pairs.

    Candidates are swept in decreasing score order; whenever F1 improves, the
    threshold is placed midway between the current and the next score.

    Args:
        candidate2score: dict mapping (src, trg) candidate pairs to scores.
        gold: collection of gold (src, trg) pairs.

    Returns:
        The best decision threshold (0 when no candidate is ever correct).
    """
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    items = sorted(candidate2score.items(), key=lambda x: -x[1])
    ngold = len(gold)
    nextract = ncorrect = 0
    threshold = 0
    best_f1 = 0
    for i in range(len(items)):
        nextract += 1
        if items[i][0] in gold:
            ncorrect += 1
        if ncorrect > 0:
            precision = ncorrect / nextract
            recall = ncorrect / ngold
            f1 = 2 * precision * recall / (precision + recall)
            if f1 > best_f1:
                best_f1 = f1
                # BUG FIX: the original indexed items[i + 1] unconditionally,
                # raising IndexError when the best F1 occurred at the last item.
                if i + 1 < len(items):
                    threshold = (items[i][1] + items[i + 1][1]) / 2
                else:
                    threshold = items[i][1]
    return threshold
def bucc_extract(cand2score, th):
    """Keep every candidate pair whose score reaches the threshold `th`."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    return [pair for pair, pair_score in cand2score.items() if pair_score >= th]
def unique_embeddings(emb, ind):
    """Reorder rows of `emb` so row i corresponds to unique index i of `ind`."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    position_of = {unique_idx: row for row, unique_idx in enumerate(ind)}
    order = [position_of[i] for i in range(len(position_of))]
    return emb[order]
def shift_embeddings(x, y):
    """Shift each embedding space toward the other by the centroid difference."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    delta = x.mean(axis=0) - y.mean(axis=0)
    return x - delta, y + delta
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, dist="cosine"):
    """Margin-score every (query, candidate) pair listed in candidate_inds."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    n_rows, n_cols = candidate_inds.shape
    scores = np.zeros((n_rows, n_cols))
    for row in range(n_rows):
        for col in range(n_cols):
            cand = candidate_inds[row, col]
            scores[row, col] = score(x[row], y[cand], fwd_mean[row], bwd_mean[cand], margin, dist)
    return scores
def score(x, y, fwd_mean, bwd_mean, margin, dist="cosine"):
    """Margin-scored similarity between one pair of vectors.

    For "cosine" the raw similarity is the dot product; otherwise squared
    euclidean distance is folded into a (0, 1] similarity first.
    """
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    if dist == "cosine":
        sim = x.dot(y)
    else:
        sim = 1 / (1 + ((x - y) ** 2).sum())
    return margin(sim, (fwd_mean + bwd_mean) / 2)
def knn(x, y, k, use_gpu, dist="cosine"):
    """Dispatch k-nearest-neighbour search to the GPU or CPU implementation."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    if use_gpu:
        return knn_gpu(x, y, k)
    return knn_cpu(x, y, k, dist)
def knn_gpu(x, y, k, mem=5 * 1024 * 1024 * 1024):
    """Batched k-NN search of queries x against database y on all GPUs (FAISS).

    `mem` (bytes) bounds the batch size so each index shard plus query batch
    fits in GPU memory. The database is searched in batches and the per-batch
    top-k results are merged into a global top-k per query.
    """
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    import faiss
    dim = x.shape[1]
    batch_size = mem // (dim * 4)  # 4 bytes per float32 element
    sim = np.zeros((x.shape[0], k), dtype=np.float32)
    ind = np.zeros((x.shape[0], k), dtype=np.int64)
    for xfrom in range(0, x.shape[0], batch_size):
        xto = min(xfrom + batch_size, x.shape[0])
        bsims, binds = [], []
        for yfrom in range(0, y.shape[0], batch_size):
            yto = min(yfrom + batch_size, y.shape[0])
            # Fresh inner-product index per database batch, sharded over GPUs.
            idx = faiss.IndexFlatIP(dim)
            idx = faiss.index_cpu_to_all_gpus(idx)
            idx.add(y[yfrom:yto])
            bsim, bind = idx.search(x[xfrom:xto], min(k, yto - yfrom))
            bsims.append(bsim)
            # Shift batch-local indices back to global database positions.
            binds.append(bind + yfrom)
            del idx
        bsims = np.concatenate(bsims, axis=1)
        binds = np.concatenate(binds, axis=1)
        # Merge candidates from all database batches: keep the global top-k.
        aux = np.argsort(-bsims, axis=1)
        for i in range(xfrom, xto):
            for j in range(k):
                sim[i, j] = bsims[i - xfrom, aux[i - xfrom, j]]
                ind[i, j] = binds[i - xfrom, aux[i - xfrom, j]]
    return sim, ind
def knn_cpu(x, y, k, dist="cosine"):
    """Exact k-NN search of queries x against database y with FAISS on CPU."""
    # Adapted From: https://github.com/google-research/xtreme/blob/
    # 522434d1aece34131d997a97ce7e9242a51a688a/third_party/utils_retrieve.py
    import faiss
    dim = x.shape[1]
    # Inner-product index for cosine similarity, plain L2 otherwise.
    index = faiss.IndexFlatIP(dim) if dist == "cosine" else faiss.IndexFlatL2(dim)
    index.add(y)
    sim, ind = index.search(x, k)
    if dist != "cosine":
        # Convert squared L2 distance into a (0, 1] similarity.
        sim = 1 / (1 + sim)
    return sim, ind
def get_unique_lines(text_hashes):
    """Deduplicate text_hashes while preserving first-seen order.

    Args:
        text_hashes (list): A list of (hashes of) strings

    Returns:
        unique_indices (List): for each element of text_hashes, the index of
                               its entry in unique_text_hashes
        unique_text_hashes (List): the unique elements in order of first
                                   appearance
    """
    unique_lookup = {}
    unique_indices = []
    for text_hash in text_hashes:
        # setdefault assigns the next fresh index on first sight.
        unique_indices.append(unique_lookup.setdefault(text_hash, len(unique_lookup)))
    # dicts preserve insertion order, so the keys are the unique hashes.
    return unique_indices, list(unique_lookup)
def create_ids_map(inds, guids):
    """Group guids by their (unique-line) index.

    Args:
        inds: iterable of indices, parallel to guids.
        guids: iterable of ids.

    Returns:
        dict mapping each index to the list of guids carrying it, in input order.
    """
    ids_map = {}
    for guid, idx in zip(guids, inds):
        # setdefault replaces the manual membership check (single dict lookup).
        ids_map.setdefault(idx, []).append(guid)
    return ids_map
|
from django.views.generic import View
from highcharts.views.common import HighChartsDualAxisView
class HighChartsBarView(HighChartsDualAxisView, View):
    # Dual-axis Highcharts view rendered as a horizontal bar chart.
    chart_type = 'bar'
class HighChartsStackedView(HighChartsBarView):
    """Bar chart with normal series stacking enabled."""
    @property
    def plot_options(self):
        # BUG FIX: super(HighChartsBarView, self) started the MRO walk *after*
        # HighChartsBarView, silently skipping any plot_options it (or an
        # intermediate subclass) might define. Start from this class instead.
        plot_options = super(HighChartsStackedView, self).plot_options
        if plot_options is None:
            plot_options = {}
        if 'series' not in plot_options:
            plot_options['series'] = {}
        plot_options['series']['stacking'] = 'normal'
        return plot_options
class HighChartsColumnView(HighChartsBarView):
    # Same as the bar view but rendered with vertical columns.
    chart_type = 'column'
|
#!/usr/bin/env python3
# coding=utf-8
'''
#-------------------------------------------------------------------------------
Project : Instagram Bot
Module : bot
Purpose : Follow users, like posts, upload photo, unfollow
Version : 0.1.2 beta
Status : Development
Modified : 2020 Jun 28
Created : 2019 Nov 03
#-------------------------------------------------------------------------------
'''
from pathlib import Path
from instapy_cli import client
from instapy import InstaPy
from instapy import smart_run
from colorama import Fore, Back, Style
import instapy
import os
import sys
import random
import time
import json
import threading
sys.path.insert(0, str(Path(Path(__file__).parents[0] / 'lib')))
import logz
# Absolute directory of this script; all data files live next to it.
SCRIPT_DIR = str(Path(__file__).parent.absolute())
# Runtime configuration: upload cadence and data-file locations.
CONFIG = { 'upload-interval': 6, # hours
    'accounts_file': SCRIPT_DIR + '/accounts.json',
    'uploaded_file': SCRIPT_DIR + '/uploaded.txt',
    'history_file': SCRIPT_DIR + '/history.txt',
    'quotes_file': SCRIPT_DIR + '/quotes.txt',
    # CREDENTIALS
    # NOTE(review): real-looking credentials are committed below (commented
    # out) — rotate them and keep secrets out of source control.
    #'username': 'morellanasel',
    #'password': 'JFN_8365ldbybdv',
    #'cookie_file': 'morellanasel_ig.json',
    }
def upload():
    """Upload one random photo/video for every configured IG account.

    For each account: pick a random .jpg/.mp4 from accounts/<user>/upload,
    build a caption from the matching metadata .json (if any), upload via
    instapy_cli, then move the media and its .json to uploaded/ and append
    the media name to the history file.

    Returns:
        False when an account has no media to upload, True otherwise.
    """
    global CONFIG, IG_ACCOUNTS, SCRIPT_DIR, QUOTES
    for account in IG_ACCOUNTS:
        print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → ACCOUNT → {Style.RESET_ALL}{account['username']}")
        # Collect uploadable media (photos/videos) for this account.
        media_dir = SCRIPT_DIR + '/' + 'accounts/' + account['username'] + '/upload'
        medias = []
        for entry in os.listdir(media_dir):
            if entry.endswith(".jpg") or entry.endswith(".mp4"):
                medias.append(os.path.join(media_dir, entry))
        # Pick one media file at random; bail out when the folder is empty.
        try:
            media_path = random.choice(medias)
        except Exception:
            print(f"{logz.timestamp()}{Fore.RED} ERROR → UPLOAD → No file found{Style.RESET_ALL}")
            return False
        print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → MEDIA → {Style.RESET_ALL}{media_path.split('/')[-1]}")
        # Locate the metadata .json belonging to the chosen media.
        # (BUG FIX: previously the loop variable `file` was reused as the open
        # file handle and a *different* name was used when moving the json.)
        json_found = False
        json_file_name = None
        media_id = media_path.split('/')[-1].replace('.jpg', '').replace('.mp4', '')
        for entry in os.listdir(media_dir):
            if entry.endswith(".json") and media_id in entry:
                json_found = True
                json_file_name = entry
                break
        # Build the caption from the metadata file, if one was found.
        if json_found:
            try:
                with open(media_dir + '/' + json_file_name, 'r') as fh:
                    json_data = json.loads(fh.read().replace('\n', ''))
                post_txt = json_data['edge_media_to_caption']['edges'][0]['node']['text']
                # Collapse doubled spaces and trim. (BUG FIX: the original
                # replace(' ', ' ') was a no-op; double-space collapse was
                # clearly intended.)
                post_txt = post_txt.replace('  ', ' ').strip()
                # Append the fixed hashtag block.
                post_txt = f'{post_txt}\n\n#yoga #fitness #meditation #yogainspiration #yogapractice #love #yogalife #yogaeverydamnday #yogi #yogateacher #namaste #yogalove #pilates #yogaeveryday #mindfulness #workout #gym #yogagirl #wellness #health #motivation #yogaeverywhere #yogachallenge #yogini #yogapose #healthylifestyle #nature #fitnessmotivation #asana #bhfyp'
                print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → TEXT → {Style.RESET_ALL}{post_txt[:20]}")
            except Exception as e:
                post_txt = None
                print(f"{logz.timestamp()}{Fore.YELLOW} WARNING → UPLOAD → JSON file not found\n{e}{Style.RESET_ALL}")
        else:
            post_txt = ''
        # Upload via instapy_cli; the cookie file is reused between sessions.
        print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → {Style.RESET_ALL}Uploading...")
        try:
            with client(username=account['username'],
                        password=account['password'],
                        cookie_file=SCRIPT_DIR + '/accounts/' + account['username'] + '/' + account['username'] + '_ig.json',
                        write_cookie_file=True) as cli:
                cookies = cli.get_cookie()
                # BUG FIX: an extra unconditional cli.upload(media_path)
                # followed this if/else, uploading every media twice.
                if post_txt != '':
                    cli.upload(media_path, post_txt)
                else:
                    cli.upload(media_path)
            uploaded = True
            print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → {Style.RESET_ALL}Uploaded")
        except Exception as e:
            uploaded = False
            print(f"{logz.timestamp()}{Fore.RED} ERROR → UPLOAD → {e}{Style.RESET_ALL}")
        # Move the media (and its metadata) into the uploaded/ folder.
        if uploaded:
            account_dir = media_dir.split('/upload')[0]
            media_name = media_path.split('/')[-1]
            if json_found:
                try:
                    # Move the json that was actually read above.
                    os.rename(media_dir + '/' + json_file_name,
                              account_dir + '/uploaded/' + json_file_name)
                    print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → {Style.RESET_ALL}JSON file moved to uploaded folder")
                except Exception as e:
                    print(f"{logz.timestamp()}{Fore.RED} ERROR → UPLOAD → Cannot move JSON file{Style.RESET_ALL}\n{e}")
            try:
                media_path_new = account_dir + '/uploaded/' + media_path.split('/')[-1]
                os.rename(media_path, media_path_new)
                print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → {Style.RESET_ALL}Media file moved to uploaded folder")
            except Exception as e:
                print(f"{logz.timestamp()}{Fore.RED} ERROR → UPLOAD → Cannot move Media file{Style.RESET_ALL}\n{e}")
            # Record the upload so it is never reposted.
            with open(CONFIG['history_file'], 'a+') as f:
                f.write(media_name + '\n')
    return True
def thread_upload():
    """Worker loop: run one upload pass, then sleep for the configured interval."""
    global CONFIG
    while True:
        upload()
        hours = CONFIG['upload-interval']
        print(f"{logz.timestamp()}{Fore.MAGENTA} UPLOAD → {Style.RESET_ALL}Next upload in {hours} hours\n---")
        time.sleep(hours * 60 * 60)
def main():
    """Load account credentials and quotes, then start the uploader thread.

    Raises whatever IOError/JSONDecodeError occurs while reading the data
    files (the original wrapped these reads in try/except blocks that only
    re-raised, plus dead `= None` assignments — both removed).
    """
    global IG_ACCOUNTS, CONFIG, QUOTES
    print(f"{logz.timestamp()}{Fore.MAGENTA} INIT → {Style.RESET_ALL}Starting threads")
    # Load IG accounts (fatal when missing or invalid JSON).
    with open(CONFIG['accounts_file'], 'r') as fh:
        IG_ACCOUNTS = json.load(fh)['accounts']
    # Load quotes, one per line, stripped of surrounding whitespace.
    with open(CONFIG['quotes_file']) as fh:
        QUOTES = [line.strip() for line in fh]
    random.shuffle(QUOTES)
    # Start the uploader worker thread.
    t1 = threading.Thread(name='T1-uploader', target=thread_upload)
    t1.start()
if __name__ == '__main__':
    main()
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
"""Tools for automatic generation of some nt2 header files
"""
__author__ = "Lapreste Jean-thierry (lapreste@univ-bpclermont.fr)"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyleft (c) 2010 Jean-thierry Lapreste"
__license__ = "Python"
__all__ = ["sub_list", "sub_if_match_list"]
import re
def sub_list(pattern, repl, stringlist, count=0):
    """Apply re.sub to all elements of a list.

    Args:
        pattern: regex pattern, as a string or precompiled pattern.
        repl: replacement string (or callable, as re.sub accepts).
        stringlist: iterable of strings to transform.
        count: maximum substitutions per string (0 = unlimited).

    Returns:
        New list with the substituted strings.
    """
    if isinstance(pattern, str):
        pattern = re.compile(pattern)
    # Keyword `count`: passing it positionally is deprecated since Python 3.13.
    return [re.sub(pattern, repl, string, count=count) for string in stringlist]
##def search_list(pattern, stringlist, flags=None):
## "apply re.search to all elements of a list"
## outlist=[]
## if type(pattern) is str :
## pattern = re.compile(pattern)
## for string in stringlist :
## outlist.append(re.sub(pattern, repl, string, flags))
## return outlist
def sub_if_match_list(start, pattern, repl, stringlist, count=0, flags=None):
    """Apply re.sub to all elements of a list that begin with `start`.

    Strings that do not match `start` are passed through unchanged.

    Args:
        start: regex (str or compiled) matched at the beginning of each string.
        pattern: regex (str or compiled) to substitute in matching strings.
        repl: replacement string/callable.
        stringlist: iterable of strings.
        count: maximum substitutions per string (0 = unlimited).
        flags: unused; kept for interface compatibility with the original.

    Returns:
        New list, same length and order as stringlist.
    """
    if isinstance(pattern, str):
        pattern = re.compile(pattern)
    if isinstance(start, str):
        start = re.compile(start)
    outlist = []
    for string in stringlist:
        if re.match(start, string):
            outlist.append(re.sub(pattern, repl, string, count=count))
        else:
            outlist.append(string)
    return outlist
if __name__ == "__main__":
    # Smoke test: on a sample CMake fragment, rewrite '*' to '#' only on the
    # lines that start with (indented) '##' comment markers.
    from display_utils import show
    inner_text = [
        "",
        "SET( SOURCES",
        "# List of files for toolbox %s"% "gmp",
        "  )",
        "",
        "##****************************************************************************",
        "# For each filename",
        "##****************************************************************************",
        "FOREACH( EXAMPLE ${SOURCES})",
        "  ##**************************************************************************",
        "  ## Build the executable filename from the example source filename",
        "  ##**************************************************************************",
        '  STRING(REGEX REPLACE ".cpp" ".%s.scalar.bench" EXECUTABLE "${EXAMPLE}")'%("gmp",),
        '  STRING(REGEX REPLACE ".cpp" "-%s.scalar.bench" TEST "${EXAMPLE}")'%("gmp",),
        "",
        "  ##**************************************************************************",
        "  ## Add as a target",
        "  ##**************************************************************************",
        "  ADD_EXECUTABLE(${EXECUTABLE} ${EXAMPLE})",
        "  TARGET_LINK_LIBRARIES(${EXECUTABLE} nt2)",
        "  ADD_TEST(${TEST} ${CMAKE_CURRENT_BINARY_DIR}/${EXECUTABLE})",
        "ENDFOREACH()",
        ]
    # Replace '*' with '#' on lines beginning with optional spaces then '##'.
    l=sub_if_match_list(" *##", "\*", "#", inner_text)
    show(l)
|
# Tiered discount: 5% below 1000, 10% below 5000, 15% otherwise.
amount = int(input("Enter Amount:"))
if amount < 1000:
    rate = 0.05
elif amount < 5000:
    rate = 0.10
else:
    rate = 0.15
discount = amount * rate
print("Discount", discount)
print("Net payable:", amount - discount)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 11 12:35:35 2020
@author: miyazakishinichi
設計
連続するビデオデータを入力とする
numpyバイナリへの変換, モデルによる予測, 結果の出力
ジャンプの時間帯の抽出とビデオ化
可能であれば, 判断に迷った挙句に0にしたデータ群も
出力するデータは, 周囲も含めて出力することで, その時間帯の印象を見分けられるようにする
→ハードネガティブマイニング??
"""
import pandas as pd
from tkinter import messagebox
from tkinter import filedialog
import tkinter
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import os, sys, cv2
from tqdm import tqdm
from tensorflow.keras.models import load_model
import time
import pathlib
from skimage import io
def csv_file_read(filepath):
    """Load a .csv file into a DataFrame, using the first column as the index.

    For any other extension, shows a Tk error dialog and returns its (None)
    result instead.
    """
    file_name = os.path.split(filepath)[1]
    ext = os.path.splitext(file_name)[1]
    if ext != '.csv':
        return messagebox.showinfo('error',
                                   'selected file is not csv file')
    return pd.read_csv(filepath, index_col=0)
def image_cropper(ROI_file_path, Images_file_dir):
    """Crop every image in Images_file_dir to each ROI listed in the CSV.

    Crops are written into sibling folders ../ROI1, ../ROI2, ... and the
    number of ROIs is returned. NOTE(review): chdir's into Images_file_dir
    as a side effect and never restores the cwd; ROI rows appear to be
    indexed from 1 (ImageJ-style export) — confirm against the CSV producer.
    """
    os.chdir(Images_file_dir)
    imlist = os.listdir("./")
    roi_data = csv_file_read(ROI_file_path)
    # Derive crop bounds from ImageJ-style BX/BY/Width/Height columns.
    roi_data['left'] = roi_data['BX']
    roi_data['right'] = roi_data['BX'] + roi_data['Width']
    roi_data['low'] = roi_data['BY']
    roi_data['high'] = roi_data['BY'] + roi_data['Height']
    roi = []
    for i in range(len(roi_data)):
        num = i+1
        roi.append(roi_data.loc[num]['left':'high'])
        os.makedirs("../ROI{}".format(num), exist_ok = True)
        left, right, low, high = int(roi[i]['left']),\
        int(roi[i]['right']),int(roi[i]['low']),int(roi[i]['high'])
        # Crop every frame to this ROI and save as zero-padded jpg.
        for j in tqdm(range(len(imlist))):
            tempimage = cv2.imread(imlist[j])
            subimage = tempimage[low:high,left:right]
            cv2.imwrite("../ROI{0}/{1}.jpg".format(num,str(j).zfill(7)), subimage)
    return len(roi_data)
def image_crop_and_prediction_wo_image_creation(ROI_file_path, Images_file_dir, image_size,
                                                model, fps):
    """Crop each ROI from all frames in-memory, classify with `model`, and
    save per-ROI predictions to CSV — without writing cropped images to disk.

    Returns (total_times, result): per-ROI total positive time in seconds and
    the per-frame predicted class arrays.
    NOTE(review): assumes grayscale frames and a 2-class softmax output;
    chdir side effects leave the process one directory above Images_file_dir.
    """
    Images_file_dir = pathlib.Path(Images_file_dir).resolve()
    os.chdir(Images_file_dir)
    imlist = os.listdir("./")
    roi_data = csv_file_read(ROI_file_path)
    # Derive crop bounds from ImageJ-style BX/BY/Width/Height columns.
    roi_data['left'] = roi_data['BX']
    roi_data['right'] = roi_data['BX'] + roi_data['Width']
    roi_data['low'] = roi_data['BY']
    roi_data['high'] = roi_data['BY'] + roi_data['Height']
    roi = []
    X=[]
    image_size = tuple(image_size)
    model = model
    total_times = []
    result = []
    for i in range(len(roi_data)):
        num = i+1
        roi.append(roi_data.loc[num]['left':'high'])
        os.chdir(Images_file_dir)
        left, right, low, high = int(roi[i]['left']),\
        int(roi[i]['right']),int(roi[i]['low']),int(roi[i]['high'])
        # Read each frame as grayscale, crop to the ROI, resize for the model.
        data = [cv2.resize(cv2.imread(imlist[j],0)[low:high,left:right],
                           image_size) for j in tqdm(range(len(imlist)))]
        X = np.asarray(data)
        X = X.astype('float32')
        X = X / 255.0
        # Add leading (time/step) and trailing (channel) singleton dims.
        X = np.expand_dims(X, 1)
        X = np.expand_dims(X, 4)
        # label = 1 when class-1 probability exceeds class-0 probability.
        predict_value = pd.DataFrame(model.predict(X), columns = [0,1])
        predict_value["label"] = predict_value[0] - predict_value[1]
        predict_value["label"] = predict_value["label"] < 0
        predict_value["label"] = predict_value["label"].astype(int)
        #predict_classes = model.predict_classes(X)
        predict_classes = predict_value["label"].values
        result.append(predict_classes)
        # Convert positive frame count into seconds of activity.
        total_time = predict_classes.sum()/fps
        total_times.append(total_time)
        os.chdir("../")
        np.savetxt("./ROI{}.csv".format(num), predict_classes, delimiter=",")
        np.savetxt("./ROI{}value.csv".format(num), predict_value, delimiter=",")
    return total_times, result
def image_crop_and_prediction(ROI_file_path, Images_file_dir, image_size,
                              model,fps):
    """Crop each ROI from all frames (also writing the crops under ../ROI<n>),
    classify with `model.predict_classes`, and save per-ROI predictions to CSV.

    Returns the per-ROI total positive time in seconds.
    NOTE(review): relies on the deprecated Keras `predict_classes` API and
    leaves the process cwd one directory above Images_file_dir.
    """
    Images_file_dir = pathlib.Path(Images_file_dir).resolve()
    os.chdir(Images_file_dir)
    imlist = os.listdir("./")
    roi_data = csv_file_read(ROI_file_path)
    # Derive crop bounds from ImageJ-style BX/BY/Width/Height columns.
    roi_data['left'] = roi_data['BX']
    roi_data['right'] = roi_data['BX'] + roi_data['Width']
    roi_data['low'] = roi_data['BY']
    roi_data['high'] = roi_data['BY'] + roi_data['Height']
    roi = []
    X=[]
    image_size = tuple(image_size)
    model = model
    total_times = []
    for i in range(len(roi_data)):
        num = i+1
        roi.append(roi_data.loc[num]['left':'high'])
        os.chdir(Images_file_dir)
        os.makedirs("../ROI{}".format(num), exist_ok = True)
        left, right, low, high = int(roi[i]['left']),\
        int(roi[i]['right']),int(roi[i]['low']),int(roi[i]['high'])
        data = []
        # Crop and resize each frame for the model input.
        for j in tqdm(range(len(imlist))):
            tempimage = cv2.imread(imlist[j])
            subimage = tempimage[low:high,left:right]
            data.append(cv2.resize(subimage, image_size))
        X = np.asarray(data)
        X = X.astype('float32')
        X = X / 255.0
        X = np.expand_dims(X, 1)
        predict_classes = model.predict_classes(X)
        # Convert positive frame count into seconds of activity.
        total_time = predict_classes.sum()/fps
        total_times.append(total_time)
        predict_value = model.predict(X)
        os.chdir("../")
        np.savetxt("./ROI{}.csv".format(num), predict_classes, delimiter=",")
        np.savetxt("./ROI{}value.csv".format(num), predict_value, delimiter=",")
    return total_times
def save_all_frames(video_path, dir_path, basename, step, ext='jpg', num=0):
    """Save every `step`-th frame of a video as an image under dir_path.

    Files are named <basename>_<zero-padded frame index>.<ext>.
    BUG FIX: the original advanced the loop counter by `step` but still read
    frames sequentially, so it saved the first frame_count/step *consecutive*
    frames under misleading names, and wrote even when cap.read() failed
    (frame is None). We now seek to each requested frame and check `ret`.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return
    os.makedirs(dir_path, exist_ok=True)
    base_path = os.path.join(dir_path, basename)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    digit = len(str(frame_count))
    print(frame_count)
    for i in tqdm(range(0, frame_count, int(step))):
        # Seek so that the saved image really is frame i.
        cap.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite('{}_{}.{}'.format(base_path, str(i).zfill(digit), ext), frame)
def prediction(data_dir, model, image_size, suffix):
    """Classify every .jpg in data_dir with `model` and save results to CSV.

    Writes <suffix>.csv (class labels) and <suffix>value.csv (raw outputs)
    one directory above data_dir; returns the number of positive frames.
    NOTE(review): uses the deprecated Keras `predict_classes` API and leaves
    the process cwd changed as a side effect.
    """
    X = []
    image_size = tuple(image_size)
    model = model
    os.chdir(data_dir)
    dir_list = os.listdir("./")
    #exclude non-image files
    image_name_list = [i for i in dir_list if os.path.splitext(i)[1] == '.jpg']
    # Load and resize every image for the model input.
    data = [cv2.resize(cv2.imread(image_name_list[j]), image_size) \
            for j in tqdm(range(len(image_name_list)))]
    X = np.asarray(data)
    X = X.astype('float32')
    X = X / 255.0
    X = np.expand_dims(X, 1)
    predict_classes = model.predict_classes(X)
    # Count of frames predicted positive (class 1).
    total_time = predict_classes.sum()
    predict_value = model.predict(X)
    os.chdir("../")
    np.savetxt("./{}.csv".format(suffix), predict_classes, delimiter=",")
    np.savetxt("./{}value.csv".format(suffix), predict_value, delimiter=",")
    return total_time
|
# File: elasticsearch_consts.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
# Keys read from action parameters / written to action results.
ELASTICSEARCH_JSON_DEVICE_URL = "url"
ELASTICSEARCH_JSON_QUERY = "query"
ELASTICSEARCH_JSON_INDEX = "index"
ELASTICSEARCH_JSON_TYPE = "type"
ELASTICSEARCH_JSON_ROUTING = "routing"
ELASTICSEARCH_JSON_TOTAL_HITS = "total_hits"
ELASTICSEARCH_JSON_TIMED_OUT = "timed_out"
# Status / progress / error messages surfaced to the user.
ELASTICSEARCH_ERR_CONNECTIVITY_TEST = "Test Connectivity Failed"
ELASTICSEARCH_SUCC_CONNECTIVITY_TEST = "Test Connectivity Passed"
ELASTICSEARCH_ERR_SERVER_CONNECTION = "Connection failed"
ELASTICSEARCH_ERR_FROM_SERVER = "API failed, Status code: {status}, Detail: {detail}"
ELASTICSEARCH_MSG_CLUSTER_HEALTH = "Querying cluster health to check connectivity"
ELASTICSEARCH_ERR_API_UNSUPPORTED_METHOD = "Unsupported method"
ELASTICSEARCH_USING_BASE_URL = "Using url: {base_url}"
ELASTICSEARCH_ERR_JSON_PARSE = "Unable to parse reply as a Json, raw string reply: '{raw_text}'"
|
from flask import Flask, render_template, request, redirect, session
from colour import Color
app = Flask(__name__)
app.secret_key = 'darksecret'
@app.route("/")
def checker_8_by_8():
    # Default board: 8x8 with the standard salmon/black colour pair.
    return render_template("checker.html", num_rows=8, num_cols=8, colors=["salmon", "black"])
@app.route("/<x>")
def checker_x_by_8(x):
    """Render an x-row by 8-column board; bad row counts fall back to 8x8."""
    # Narrowed from a bare except: only a non-numeric row count triggers the
    # fallback; template errors now surface instead of being swallowed.
    try:
        rows = int(x)
    except (ValueError, TypeError):
        return checker_8_by_8()
    return render_template("checker.html", num_rows=rows, num_cols=8, colors=["salmon", "black"])
@app.route("/<x>/<y>")
def checker_x_by_y(x,y):
    """Render an x-by-y board; bad dimensions fall back to x-by-8."""
    # Narrowed from a bare except: only non-numeric dimensions fall back.
    try:
        rows, cols = int(x), int(y)
    except (ValueError, TypeError):
        return checker_x_by_8(x)
    return render_template("checker.html", num_rows=rows, num_cols=cols, colors=["salmon", "black"])
def is_valid_color(str):
    """Return True when the argument parses as a colour via colour.Color.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    only for interface compatibility with existing keyword callers.
    """
    try:
        Color(str)
        return True
    except ValueError:
        # colour.Color raises ValueError for unrecognized colour names;
        # narrowed from a bare except so real bugs are no longer hidden.
        return False
@app.route("/<x>/<y>/<color1>")
def checker_with_light_color(x,y,color1):
    """Board with a caller-chosen light colour; 'black' or invalid → salmon."""
    if color1 == "black":
        color1="salmon"
    if not is_valid_color(color1):
        color1="salmon"
    # Narrowed from a bare except: only non-numeric dimensions fall back.
    try:
        rows, cols = int(x), int(y)
    except (ValueError, TypeError):
        return checker_x_by_8(x)
    return render_template("checker.html", num_rows=rows, num_cols=cols, colors=[color1,"black"])
@app.route("/<x>/<y>/<color1>/<color2>")
def checker_with_colors(x,y,color1,color2):
    """Board with caller-chosen colours; invalid or clashing pairs are fixed up."""
    if not is_valid_color(color1):
        color1="salmon"
    if not is_valid_color(color2):
        color2="black"
    if color1 == color2:
        color2 = "black"
    if color1 == "black":
        color1 = "salmon"
    # Narrowed from a bare except: only non-numeric dimensions fall back.
    try:
        rows, cols = int(x), int(y)
    except (ValueError, TypeError):
        return checker_x_by_8(x)
    return render_template("checker.html", num_rows=rows, num_cols=cols, colors=[color1,color2])
if __name__ == '__main__':
    # Run the Flask development server with the debug reloader enabled.
    app.run(debug=True)
|
#!/usr/bin/env python3
"""
Conway's game of life, bounded grid, numpy 2-dimensional array rbh 2020
"""
import numpy as np
from time import sleep
import sys
from paint import paint
PTS = '.*#'  # display glyphs, indexed by cell state below
DEAD, ALIVE, WALL = 0, 1, 2
DCH, ACH, GCH = PTS[DEAD], PTS[ALIVE], PTS[WALL]  # glyph for each cell state
def point(r, c, cols):
    """Map (row, col) on a grid with `cols` columns to a flat row-major index."""
    return r * cols + c
"""
board functions
* represent board as 2-dimensional array
"""
def live_row(r, B, c):
    """Return True when row r of board B has a live cell in columns [0, c)."""
    # any() replaces the manual scan-and-early-return loop.
    return any(B[r, j] == ALIVE for j in range(c))
def live_col(c, B, r):
    """Return True when column c of board B has a live cell in rows [0, r)."""
    # any() replaces the manual scan-and-early-return loop.
    return any(B[k, c] == ALIVE for k in range(r))
def get_board():
    """Read the board file named by sys.argv[1].

    Blanks are stripped from each line; all rows must have equal length.
    Returns (rows-as-strings, row count, column count).
    """
    fname = sys.argv[1]
    print(fname)
    rows_txt = []
    with open(fname) as f:
        for line in f:
            rows_txt.append(line.rstrip().replace(' ', ''))
    n_rows, n_cols = len(rows_txt), len(rows_txt[0])
    # Every row after the first must match the first row's width.
    for row in rows_txt[1:]:
        assert len(row) == n_cols
    return rows_txt, n_rows, n_cols
def convert_board(B, r, c):
    """Convert a list-of-strings board into an r x c int8 numpy array."""
    A = np.zeros((r, c), dtype=np.int8)
    for row_idx in range(r):
        row = B[row_idx]
        for col_idx in range(c):
            # Only the 'alive' glyph maps to a nonzero cell.
            if row[col_idx] == ACH:
                A[row_idx, col_idx] = ALIVE
    return A
def expand_grid(A, r, c, t):
    """Pad board A with t empty rows and columns on each side.

    Returns (padded array, new row count, new column count). Only ALIVE cells
    are copied, matching the original per-cell loop.
    """
    N = np.zeros((r + 2 * t, c + 2 * t), dtype=np.int8)
    # Vectorized copy of the live cells replaces the O(r*c) Python loop;
    # assigning through the boolean mask writes into the N view in place.
    inner = N[t:t + r, t:t + c]
    inner[A[:r, :c] == ALIVE] = ALIVE
    return N, r + 2 * t, c + 2 * t
def print_array(A, r, c):
    """Print the board as glyphs (alive/dead), preceded by a blank line."""
    print('')
    for row in range(r):
        line = ''.join(ACH if A[row, col] == ALIVE else DCH for col in range(c))
        print(line)
def show_array(A, r, c):
    """Print the raw numeric cell values row by row, then a blank line."""
    for row in range(r):
        print(''.join(str(A[row, col]) for col in range(c)))
    print('')
"""
Conway's next-state formula
"""
def next_state(A, r, c):
    """Compute one Conway generation of the r x c board A.

    Returns (new board, changed) where `changed` is True iff any cell flipped.
    """
    N = np.zeros((r, c), dtype=np.int8)
    changed = False
    # The eight neighbour offsets around a cell.
    offsets = ((-1, -1), (-1, 0), (-1, 1),
               (0, -1),           (0, 1),
               (1, -1),  (1, 0),  (1, 1))
    for j in range(r):
        for k in range(c):
            live = 0
            for dj, dk in offsets:
                jj, kk = j + dj, k + dk
                if 0 <= jj < r and 0 <= kk < c and A[jj, kk] == ALIVE:
                    live += 1
            if A[j, k] == ALIVE:
                # Survival with 2 or 3 neighbours, death otherwise.
                if 2 <= live <= 3:
                    N[j, k] = ALIVE
                else:
                    N[j, k] = DEAD
                    changed = True
            else:
                # Birth with exactly 3 neighbours.
                if live == 3:
                    N[j, k] = ALIVE
                    changed = True
                else:
                    N[j, k] = DEAD
    return N, changed
"""
input, output
"""
pause = 0.2  # seconds to sleep between generations (animation speed)
def interact(max_itn):
    # Run at most max_itn generations of the board from sys.argv[1],
    # printing each generation; stops early when the board stops changing.
    itn = 0
    B, r, c = get_board()
    print(B)
    X = convert_board(B, r, c)
    # Pad generously so moving patterns don't hit the boundary immediately.
    A, r, c = expand_grid(X, r, c, 50)
    print_array(A, r, c)
    while itn <= max_itn:
        sleep(pause)
        newA, delta = next_state(A, r, c)
        if not delta:
            # Still life reached: no cell changed, stop the animation.
            break
        itn += 1
        A = newA
        print_array(A, r, c)
    print('\niterations', itn)
def main():
    # Animate up to 1200 generations of the board named on the command line.
    interact(1200)
if __name__ == '__main__':
    main()
|
import numpy as np
from kNN import kNN
from CIFAR import CIFARLoader
from CIFAR import CIFARPlotter
# BUG FIX: '%matplotlib inline' is an IPython magic, which is a syntax error
# in a plain .py module; run this under Jupyter (where the magic belongs) or
# configure a matplotlib backend explicitly instead.
########################################################################
# 2017 - Manny Grewal
# This class is the entry point for the KNN classifier.
########################################################################
TRAINING_BATCH_FILENAME = 'data_batch_1'
TEST_BATCH_FILENAME = 'test_batch'
# Number of consecutive test samples to classify and visualize per run.
WINDOW_SIZE_OF_TEST_BATCH = 10
loader = CIFARLoader.CIFARLoader()
# CONSISTENCY FIX: use the named constants instead of repeating the literals.
trainingDataSet, classesTraining = loader.GetFlattenedMatrix(TRAINING_BATCH_FILENAME)
testDataSet, classesTest = loader.GetFlattenedMatrix(TEST_BATCH_FILENAME)
names = loader.LoadClassNames()
nn = kNN.kNN()  # create a Nearest Neighbor classifier class
nn.Train(trainingDataSet, classesTraining)  # memorize training images/labels
# Classify a random contiguous window of the test batch.
random_sample = np.random.randint(1, 9900)
Yte_predict, Ite_match = nn.Predict(testDataSet[random_sample:random_sample + WINDOW_SIZE_OF_TEST_BATCH], True)
# Accuracy = mean fraction of predicted labels matching the gold labels.
print('accuracy: %f' % (np.mean(Yte_predict == classesTest[random_sample:random_sample + WINDOW_SIZE_OF_TEST_BATCH])))
print('Input data')
CIFARPlotter.PlotImages(testDataSet[random_sample:random_sample + WINDOW_SIZE_OF_TEST_BATCH], classesTest[random_sample:random_sample + WINDOW_SIZE_OF_TEST_BATCH], True)
print('Predicted data')
CIFARPlotter.PlotImages(Ite_match[0:WINDOW_SIZE_OF_TEST_BATCH], Yte_predict[0:WINDOW_SIZE_OF_TEST_BATCH], True)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import sys
sys.path.append('..')
from op_test import OpTest
from test_pool2d_op import pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
class TestPool2D_Op_Mixin(object):
    """Shared pool2d OpTest setup for the MLU place.

    Subclasses override the init_* hooks to vary pooling type, padding,
    shape, dtype, etc.; setUp computes the expected output with the naive
    NumPy reference implementation.
    """
    def setUp(self):
        # Configure the op via the init_* hooks, then build inputs/attrs and
        # the reference output.
        self.place = paddle.device.MLUPlace(0)
        self.__class__.use_mlu = True
        self.op_type = "pool2d"
        self.init_data_type()
        self.init_test_case()
        self.padding_algorithm = "EXPLICIT"
        self.init_paddings()
        self.init_global_pool()
        self.init_pool_type()
        self.init_ceil_mode()
        self.init_exclusive()
        self.init_adaptive()
        self.init_data_format()
        self.init_shape()
        input = np.random.random(self.shape).astype(self.dtype)
        # Expected output from the NumPy reference implementation.
        output = pool2D_forward_naive(
            input, self.ksize, self.strides, self.paddings, self.global_pool,
            self.ceil_mode, self.exclusive, self.adaptive, self.data_format,
            self.pool_type, self.padding_algorithm).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
        self.attrs = {
            'strides': self.strides,
            'paddings': self.paddings,
            'ksize': self.ksize,
            'pooling_type': self.pool_type,
            'global_pooling': self.global_pool,
            'ceil_mode': self.ceil_mode,
            'data_format': self.data_format,
            'exclusive': self.exclusive,
            'adaptive': self.adaptive,
            "padding_algorithm": self.padding_algorithm,
        }
        self.outputs = {'Out': output}
    def test_check_output(self):
        # Forward result must match the NumPy reference on the MLU place.
        self.check_output_with_place(self.place)
    def test_check_grad(self):
        # Gradient check is skipped for fp16 and for max pooling.
        if self.dtype == np.float16:
            return
        if self.pool_type != "max":
            self.check_grad_with_place(
                self.place, set(['X']), 'Out', max_relative_error=0.07)
    def init_data_format(self):
        self.data_format = "NCHW"
    def init_shape(self):
        self.shape = [2, 3, 5, 5]
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
    def init_paddings(self):
        self.paddings = [0, 0]
        self.padding_algorithm = "EXPLICIT"
    def init_data_type(self):
        self.dtype = np.float32
    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive
    def init_global_pool(self):
        self.global_pool = True
    def init_ceil_mode(self):
        self.ceil_mode = False
    def init_exclusive(self):
        self.exclusive = True
    # Not support adaptive pooling currently
    def init_adaptive(self):
        self.adaptive = False
# Concrete base test: the mixin defaults (global avg pool) run as an OpTest.
class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest):
    pass
class TestCase1(TestPool2D_Op):
    """Non-global avg pool, no padding, 7x7 input."""
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
    def init_paddings(self):
        self.paddings = [0, 0]
    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive
    def init_global_pool(self):
        self.global_pool = False
    def init_shape(self):
        self.shape = [2, 3, 7, 7]
class TestCase2(TestPool2D_Op):
    """Non-global avg pool with symmetric 1x1 padding, 7x7 input."""
    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
    def init_paddings(self):
        self.paddings = [1, 1]
    def init_pool_type(self):
        self.pool_type = "avg"
        self.pool2D_forward_naive = avg_pool2D_forward_naive
    def init_global_pool(self):
        self.global_pool = False
    def init_shape(self):
        self.shape = [2, 3, 7, 7]
class TestCase3(TestPool2D_Op):
    """Max-pooling variant of the base case."""

    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive
class TestCase4(TestCase1):
    """Max-pooling variant of TestCase1 (no padding)."""

    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive
class TestCase5(TestCase2):
    """Max-pooling variant of TestCase2 (symmetric padding)."""

    def init_pool_type(self):
        self.pool_type = "max"
        self.pool2D_forward_naive = max_pool2D_forward_naive
def create_test_fp16_class(parent, check_grad=True):
    """Derive an fp16 variant of *parent* and register it in globals().

    The generated class checks the forward pass with a loose tolerance
    and only grad-checks non-max pooling when *check_grad* is enabled.
    """

    class TestFp16Case(parent):
        def init_data_type(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.MLUPlace(0)
            self.check_output_with_place(place, atol=1e-3)

        def test_check_grad(self):
            place = core.MLUPlace(0)
            if check_grad and self.pool_type != "max":
                self.check_grad_with_place(
                    place, set(['X']), 'Out', max_relative_error=0.07)

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
    TestFp16Case.__name__ = cls_name
    globals()[cls_name] = TestFp16Case
# Register fp16 variants of all basic cases (TestCase1 skips the grad check).
create_test_fp16_class(TestPool2D_Op)
create_test_fp16_class(TestCase1, check_grad=False)
create_test_fp16_class(TestCase2)
create_test_fp16_class(TestCase3)
create_test_fp16_class(TestCase4)
create_test_fp16_class(TestCase5)
#--------------------test pool2d use ceil mode--------------------
def create_test_use_ceil_class(parent):
    """Register a ceil_mode=True variant of *parent* in globals()."""

    class _CeilModeCase(parent):
        def init_ceil_mode(self):
            self.ceil_mode = True

    cls_name = "{0}_{1}".format(parent.__name__, "CeilModeCast")
    _CeilModeCase.__name__ = cls_name
    globals()[cls_name] = _CeilModeCase
# Ceil-mode variants of the two padded avg-pool cases.
create_test_use_ceil_class(TestCase1)
create_test_use_ceil_class(TestCase2)
class TestAvgInclude(TestCase2):
    """Avg pooling with exclusive=False (padding included in the average)."""

    def init_exclusive(self):
        self.exclusive = False
#-------test pool2d with asymmetric padding-----
class TestPool2D_AsyPadding(TestPool2D_Op):
    """Base case with 4-element (asymmetric) padding."""

    def init_shape(self):
        self.shape = [2, 3, 5, 5]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 2]
class TestCase1_AsyPadding(TestCase1):
    """TestCase1 with asymmetric padding."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 0]
class TestCase2_AsyPadding(TestCase2):
    """TestCase2 with asymmetric padding."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 2, 1, 2]
class TestCase3_AsyPadding(TestCase3):
    """Max-pool base case with asymmetric padding."""

    def init_shape(self):
        self.shape = [2, 3, 5, 5]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 2]
class TestCase4_AsyPadding(TestCase4):
    """TestCase4 with asymmetric padding."""

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 0, 1, 0]
class TestCase5_AsyPadding(TestCase5):
    """TestCase5 with asymmetric padding.

    Removed the redundant double parentheses around the base class
    (``class TestCase5_AsyPadding((TestCase5))`` in the original).
    """

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [2, 2, 1, 2]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]
# Ceil-mode variants of the asymmetric-padding avg-pool cases.
create_test_use_ceil_class(TestCase1_AsyPadding)
create_test_use_ceil_class(TestCase2_AsyPadding)
class TestAvgInclude_AsyPadding(TestCase2):
    """Inclusive avg pooling with asymmetric padding.

    NOTE(review): inherits TestCase2 rather than TestCase2_AsyPadding,
    but redefines the asymmetric paddings itself, so behavior matches.
    """

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_exclusive(self):
        self.exclusive = False

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]
        self.paddings = [1, 2, 1, 2]
#----------- test channel_last --------------
class TestPool2D_channel_last(TestPool2D_Op):
    """Base case exercised in the NHWC (channel-last) layout."""

    def init_shape(self):
        self.shape = [2, 5, 5, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase1_channel_last(TestCase1):
    """TestCase1 in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase2_channel_last(TestCase2):
    """TestCase2 in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase3_channel_last(TestCase3):
    """TestCase3 in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 5, 5, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase4_channel_last(TestCase4):
    """TestCase4 in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase5_channel_last(TestCase5):
    """TestCase5 in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
# Ceil-mode variants of the channel-last avg-pool cases.
create_test_use_ceil_class(TestCase1_channel_last)
create_test_use_ceil_class(TestCase2_channel_last)
class TestCase5_Max(TestCase2):
    """Max pooling with a very loose gradient tolerance on the MLU."""

    def init_pool_type(self):
        self.pool_type = "max"

    def test_check_grad(self):
        # fp16 gradients are not checked here.
        if self.dtype == np.float16:
            return
        place = core.MLUPlace(0)
        if self.pool_type == "max":
            self.check_grad_with_place(
                place, set(['X']), 'Out', max_relative_error=1.00)
class TestCase5_channel_last_Max(TestCase5_Max):
    """TestCase5_Max in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestAvgInclude_channel_last(TestCase2_channel_last):
    """Inclusive avg pooling in the NHWC layout."""

    def init_exclusive(self):
        self.exclusive = False
class TestPool2D_AsyPadding_channel_last(TestPool2D_AsyPadding):
    """Asymmetric padding base case in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 5, 5, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
    """TestCase1_AsyPadding in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
    """TestCase2_AsyPadding in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
    """TestCase3_AsyPadding in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 5, 5, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
    """TestCase4_AsyPadding in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
    """TestCase5_AsyPadding in the NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
# Ceil-mode variants of the asymmetric-padding channel-last cases.
create_test_use_ceil_class(TestCase1_AsyPadding_channel_last)
create_test_use_ceil_class(TestCase2_AsyPadding_channel_last)
class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):
    """Inclusive avg pooling, asymmetric padding, NHWC layout."""

    def init_shape(self):
        self.shape = [2, 7, 7, 3]

    def init_data_format(self):
        self.data_format = "NHWC"
# test paddings: SAME VALID
def create_test_padding_SAME_class(parent):
    """Register a SAME-padding-algorithm variant of *parent* in globals().

    Fixes the misspelled internal class name ``TestPaddingSMAECase`` ->
    ``TestPaddingSAMECase``; the registered name ("...PaddingSAMEOp")
    was already correct, so no caller-visible name changes.
    """

    class TestPaddingSAMECase(parent):
        def init_paddings(self):
            self.paddings = [0, 0]
            self.padding_algorithm = "SAME"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase
# SAME-padding variants of every NCHW and NHWC case.
create_test_padding_SAME_class(TestPool2D_Op)
create_test_padding_SAME_class(TestCase1)
create_test_padding_SAME_class(TestCase2)
create_test_padding_SAME_class(TestCase3)
create_test_padding_SAME_class(TestCase4)
create_test_padding_SAME_class(TestCase5)
create_test_padding_SAME_class(TestPool2D_channel_last)
create_test_padding_SAME_class(TestCase1_channel_last)
create_test_padding_SAME_class(TestCase2_channel_last)
create_test_padding_SAME_class(TestCase3_channel_last)
create_test_padding_SAME_class(TestCase4_channel_last)
create_test_padding_SAME_class(TestCase5_channel_last)
def create_test_padding_VALID_class(parent):
    """Register a VALID-padding-algorithm variant of *parent* in globals()."""

    class _PaddingVALIDCase(parent):
        def init_paddings(self):
            self.paddings = [1, 1]
            self.padding_algorithm = "VALID"

    cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
    _PaddingVALIDCase.__name__ = cls_name
    globals()[cls_name] = _PaddingVALIDCase
# VALID-padding variants of every NCHW and NHWC case.
create_test_padding_VALID_class(TestPool2D_Op)
create_test_padding_VALID_class(TestCase1)
create_test_padding_VALID_class(TestCase2)
create_test_padding_VALID_class(TestCase3)
create_test_padding_VALID_class(TestCase4)
create_test_padding_VALID_class(TestCase5)
create_test_padding_VALID_class(TestPool2D_channel_last)
create_test_padding_VALID_class(TestCase1_channel_last)
create_test_padding_VALID_class(TestCase2_channel_last)
create_test_padding_VALID_class(TestCase3_channel_last)
create_test_padding_VALID_class(TestCase4_channel_last)
create_test_padding_VALID_class(TestCase5_channel_last)
class TestCase1_strides(TestCase1):
    """TestCase1 with non-square strides on a non-square input."""

    def init_shape(self):
        self.shape = [2, 3, 4, 5]

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 2]
# The non-square-stride case also gets a SAME-padding variant.
create_test_padding_SAME_class(TestCase1_strides)
# ----- test API
class TestPool2DAPI(unittest.TestCase):
    """End-to-end checks of fluid.layers.pool2d against the NumPy reference.

    Builds pool2d ops with explicit, list-of-pairs, SAME and VALID padding
    in both NCHW and NHWC layouts, runs them on the MLU, and compares the
    fetched results with pool2D_forward_naive.
    """

    def test_api(self):
        x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32")
        x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32")
        input_NHWC = fluid.layers.data(
            name="input_NHWC",
            shape=[2, 5, 5, 3],
            append_batch_size=False,
            dtype="float32")
        input_NCHW = fluid.layers.data(
            name="input_NCHW",
            shape=[2, 3, 5, 5],
            append_batch_size=False,
            dtype="float32")
        # NOTE: "negetive" is a long-standing typo kept because it is a
        # runtime feed name; the -1 dims exercise unknown-shape inference.
        input_NHWC_negetive = fluid.layers.data(
            name="input_NHWC_negetive",
            shape=[2, -1, 5, 3],
            append_batch_size=False,
            dtype="float32")
        input_NCHW_negetive = fluid.layers.data(
            name="input_NCHW_negetive",
            shape=[2, 3, -1, -1],
            append_batch_size=False,
            dtype="float32")
        ksize = [3, 3]
        out_1 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="max",
            pool_padding=[1, 1],
            data_format="NHWC")
        out_2 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
            data_format="NHWC")
        out_3 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]],
            data_format="NCHW")
        out_4 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[1, 2, 1, 0],
            data_format="NCHW")
        # test VALID
        out_5 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=ksize,
            pool_type="avg",
            pool_padding="VALID",
            data_format="NCHW")
        out_6 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=ksize,
            pool_type="max",
            pool_padding="VALID",
            data_format="NHWC")
        # test SAME
        out_7 = fluid.layers.pool2d(
            input=input_NCHW,
            pool_size=[4, 4],
            pool_type="avg",
            pool_padding="SAME",
            data_format="NCHW")
        out_8 = fluid.layers.pool2d(
            input=input_NHWC,
            pool_size=[4, 4],
            pool_type="max",
            pool_padding="SAME",
            data_format="NHWC")
        # test negetive
        # out_9/out_10 are only shape-checked below, not executed (they are
        # deliberately absent from fetch_list).
        out_9 = fluid.layers.pool2d(
            input=input_NHWC_negetive,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[0, 0],
            data_format="NHWC")
        assert out_9.shape == (2, -1, 3, 3)
        out_10 = fluid.layers.pool2d(
            input=input_NCHW_negetive,
            pool_size=ksize,
            pool_type="avg",
            pool_padding=[0, 0],
            data_format="NCHW")
        assert out_10.shape == (2, 3, -1, -1)
        exe = fluid.Executor(place=fluid.MLUPlace(0))
        [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run(
            fluid.default_main_program(),
            feed={
                "input_NHWC": x_NHWC,
                "input_NCHW": x_NCHW,
                "input_NHWC_negetive": x_NHWC,
                "input_NCHW_negetive": x_NCHW
            },
            fetch_list=[
                out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8
            ])
        # Each fetched result is compared with the NumPy reference; the
        # NCHW avg cases use a loose rtol (MLU numerics, presumably).
        assert np.allclose(
            res_1,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="max",
                strides=[1, 1],
                paddings=[1, 1],
                data_format="NHWC"))
        assert np.allclose(
            res_2,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 1, 1, 1],
                data_format="NHWC"))
        assert np.allclose(
            res_3,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 1, 1, 1],
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)
        assert np.allclose(
            res_4,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[1, 2, 1, 0],
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)
        # VALID
        assert np.allclose(
            res_5,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=ksize,
                pool_type="avg",
                strides=[1, 1],
                paddings=[10, 20],  # any ele is ok
                padding_algorithm="VALID",
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)
        assert np.allclose(
            res_6,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=ksize,
                pool_type="max",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="VALID",
                data_format="NHWC"))
        # SAME
        assert np.allclose(
            res_7,
            pool2D_forward_naive(
                x=x_NCHW,
                ksize=[4, 4],
                pool_type="avg",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="SAME",
                data_format="NCHW"),
            rtol=0.07,
            atol=1e-05)
        assert np.allclose(
            res_8,
            pool2D_forward_naive(
                x=x_NHWC,
                ksize=[4, 4],
                pool_type="max",
                strides=[1, 1],
                paddings=[10, 20],
                padding_algorithm="SAME",
                data_format="NHWC"))
class TestPool2DAPI_Error(unittest.TestCase):
    """Negative tests: invalid pool2d arguments must raise ValueError."""

    def test_api(self):
        input_NHWC = fluid.layers.data(
            name="input_NHWC",
            shape=[2, 5, 5, 3],
            append_batch_size=False,
            dtype="float32")
        ksize = [3, 3]

        # data_format value error
        def run_2():
            out_2 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding=[1, 1],
                data_format="NHWCC")
        self.assertRaises(ValueError, run_2)

        # padding str value error
        def run_3():
            out_3 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding="VALIDSAME",
                data_format="NHWC")
        self.assertRaises(ValueError, run_3)

        # padding str valid and ceil_mode value error
        def run_4():
            out_4 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding="VALID",
                ceil_mode=True,
                data_format="NHWC")
        self.assertRaises(ValueError, run_4)

        # padding with 8 ele. value error
        def run_5():
            out_5 = fluid.layers.pool2d(
                input=input_NHWC,
                pool_size=ksize,
                pool_type="max",
                pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]],
                data_format="NHWC")
        self.assertRaises(ValueError, run_5)
class TestDygraphPool2DAPIError(unittest.TestCase):
    """Negative tests for the dygraph Pool2D layer's input validation."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of Pool2D must be Variable.
            data1 = np.random.random((3, 32, 32, 5)).astype('float32')
            pool2d = fluid.dygraph.Pool2D(
                pool_size=2,
                pool_type='max',
                pool_stride=1,
                global_pooling=False)
            self.assertRaises(TypeError, pool2d, data1)

            # the input dtype of mlu Pool2D must be float16 or float32
            data2 = fluid.layers.data(
                name='x1', shape=[3, 32, 32, 5], dtype="int32")
            self.assertRaises(TypeError, pool2d, data2)

    def test_data_format_error(self):
        with program_guard(Program(), Program()):
            # the data_format must be 'NCHW' or 'NHWC'
            data1 = np.random.random((3, 32, 32, 5)).astype('float32')
            self.assertRaises(
                ValueError,
                fluid.dygraph.Pool2D,
                pool_size=2,
                pool_type='max',
                pool_stride=1,
                global_pooling=False,
                data_format='NWHC')
class TestDygraphPool2DAPI(unittest.TestCase):
    """Dygraph Pool2D checks: NHWC layout and case-insensitive arguments."""

    def test_nhwc(self):
        with fluid.dygraph.guard():
            data = np.random.random((3, 32, 32, 5)).astype('float32')
            x = fluid.dygraph.to_variable(data)
            pool2d = fluid.dygraph.Pool2D(
                pool_size=2,
                pool_type='max',
                pool_stride=1,
                pool_padding=[0, 0],
                global_pooling=False,
                data_format='NHWC')
            out1 = pool2d(x)
            out2 = pool2D_forward_naive(
                data, [2, 2], [1, 1],
                paddings=[0, 0],
                pool_type='max',
                data_format='NHWC')
            self.assertTrue(np.allclose(out1.numpy(), out2))

    def test_lower_case(self):
        # 'nhwc' must be accepted and behave like 'NHWC'.
        with fluid.dygraph.guard():
            data = np.random.random((3, 32, 32, 5)).astype('float32')
            x = fluid.dygraph.to_variable(data)
            pool2d = fluid.dygraph.Pool2D(
                pool_size=2,
                pool_type='max',
                pool_stride=1,
                pool_padding=[0, 0],
                global_pooling=False,
                data_format='nhwc')
            out1 = pool2d(x)
            out2 = pool2D_forward_naive(
                data, [2, 2], [1, 1],
                paddings=[0, 0],
                pool_type='max',
                data_format='NHWC')
            self.assertTrue(np.allclose(out1.numpy(), out2))

    def test_upper_case(self):
        # 'MAX' must be accepted and behave like 'max'.
        with fluid.dygraph.guard():
            data = np.random.random((3, 32, 32, 5)).astype('float32')
            x = fluid.dygraph.to_variable(data)
            pool2d = fluid.dygraph.Pool2D(
                pool_size=2,
                pool_type='MAX',
                pool_stride=1,
                pool_padding=[0, 0],
                global_pooling=False,
                data_format='nhwc')
            out1 = pool2d(x)
            out2 = pool2D_forward_naive(
                data, [2, 2], [1, 1],
                paddings=[0, 0],
                pool_type='max',
                data_format='NHWC')
            self.assertTrue(np.allclose(out1.numpy(), out2))
if __name__ == '__main__':
    # MLU op tests run in static-graph mode.
    paddle.enable_static()
    unittest.main()
|
import ringogw
def ringo_reader(fd, sze, fname):
    # Generator yielding (key, value) pairs from a ringo rdomain log file.
    # Python 2 code: byte strings are plain str here.
    #
    # Record layout (all little-endian uint32):
    #   MAGIC_HEAD | head_crc | 28-byte head (time, entryid, flags,
    #   keycrc, keylen, valcrc, vallen) | key | value | MAGIC_TAIL
    # On any CRC/magic mismatch the reader resynchronizes by sliding one
    # byte at a time looking for the next MAGIC_HEAD.
    #
    # NOTE(review): zlib.crc32 returns a *signed* int on Python 2 while the
    # stored CRCs are unpacked unsigned ("<I"); records whose CRC has the
    # high bit set would presumably fail the compare -- verify upstream.
    import struct, zlib
    MAGIC_HEAD = (0x47da66b5,)
    MAGIC_TAIL = (0xacc50f5d,)
    def read_really(s):
        # Read exactly s bytes unless EOF intervenes.
        t = 0
        buf = ""
        while t < s:
            r = fd.read(s - t)
            if not r:
                return buf
            t += len(r)
            buf += r
        return buf
    def check_body(head_body):
        # Validate key/value payload; returns (True, parsed) or
        # (False, consumed-bytes) so resync can rescan them.
        time, entryid, flags, keycrc, keylen, valcrc, vallen =\
            struct.unpack("<IIIIIII", head_body)
        tot = keylen + vallen + 4
        body = read_really(tot)
        if len(body) < tot:
            return False, head_body + body
        key = body[:keylen]
        val = body[keylen:-4]
        if zlib.crc32(key) != keycrc or zlib.crc32(val) != valcrc or\
            struct.unpack("<I", body[-4:]) != MAGIC_TAIL:
            return False, head_body + body
        else:
            return True, (entryid, flags, key, val)
    def read_entry():
        # Scan forward for the next valid record; None at EOF.
        head = read_really(8)
        while len(head) >= 8:
            if struct.unpack("<I", head[:4]) == MAGIC_HEAD:
                if len(head) < 36:
                    head += read_really(36 - len(head))
                if len(head) < 36:
                    return None
                head_crc = struct.unpack("<I", head[4:8])[0]
                head_body = head[8:36]
                if zlib.crc32(head_body) == head_crc:
                    ok, cont = check_body(head_body)
                    if ok:
                        return cont
                    head = cont
            # Slide one byte and retry (resynchronization).
            head = head[1:]
            if len(head) < 8:
                head += fd.read(1)
        else:
            return None
    prev_id = None
    while True:
        entry = read_entry()
        if not entry:
            break
        entryid, flags, key, val = entry
        # flags bits 1/2 look like delete/internal markers -- TODO confirm.
        if flags & 1 or flags & 2:
            continue
        # Skip consecutive duplicates of the same entry id.
        if entryid == prev_id:
            continue
        prev_id = entryid
        yield key, val
def input_domain(ringo_host, name):
    # Ask the ringo gateway for the replicas of domain *name* and return
    # one disco:// data URL per replica ([] when the lookup fails).
    ringo = ringogw.Ringo(ringo_host)
    code, res = ringo.request("/mon/domains/domain?name=" + name)
    if code != 200:
        return []
    urls = []
    for entry in res:
        domainid, dname, nodeid, chunk, owner, nrepl = entry
        nodename, node = nodeid.split('@')
        # the first 6 chars of nodename are dropped (presumably a fixed
        # "ringo-" style prefix -- confirm against the gateway output)
        url = "disco://%s/_ringo/%s/rdomain-%s/data"\
                % (node, nodename[6:], domainid)
        urls.append(url)
    return urls
if __name__ == "__main__":
import sys
print "\n".join(input_domain(sys.argv[1], sys.argv[2]))
|
"""
Application File
"""
import flask
import flask.logging
from flask import Flask, Response, request
import glob
import hmac
import logging
import os
import re
import subprocess
from subprocess import PIPE
import sys
from webapp import default_config
from webapp import webhook_status_messages
from webapp.common import readfile
from webapp.github import GitHubAuth
from webapp.models import GlobalData
from webapp.automerge_check import reportable_errors, rejectable_errors
# Flask app wiring: default config, optional config.py, then an optional
# TOPOLOGY_CONFIG override file.
app = Flask(__name__)
app.config.from_object(default_config)
app.config.from_pyfile("config.py", silent=True)
if "TOPOLOGY_CONFIG" in os.environ:
    app.config.from_envvar("TOPOLOGY_CONFIG", silent=False)
if "LOGLEVEL" in app.config:
    app.logger.setLevel(app.config["LOGLEVEL"])
global_data = GlobalData(app.config)
src_dir = os.path.abspath(os.path.dirname(__file__))
# Derive "owner/repo" from the configured data-repo URL; hooks for any
# other repo/branch are rejected.
( _required_repo_owner, _required_repo_name
) = global_data.webhook_data_repo.split(':')[-1].split('/')[-2:]
_required_base_ref = global_data.webhook_data_branch
_required_base_label = "%s:%s" % (_required_repo_owner, _required_base_ref)
# HMAC key for validating GitHub payloads; presumably bytes (it is fed to
# hmac.new unchanged, unlike the API token below which is .decode()d).
webhook_secret = readfile(global_data.webhook_secret_key, app.logger)
if not webhook_secret:
    app.logger.warning("Note, no WEBHOOK_SECRET_KEY configured; "
                       "GitHub payloads will not be validated.")
gh_api_user = global_data.webhook_gh_api_user
# NOTE(review): this .decode() would raise if readfile returns None when
# the token file is missing -- confirm readfile's failure mode.
gh_api_token = readfile(global_data.webhook_gh_api_token, app.logger).decode()
if gh_api_user and gh_api_token:
    ghauth = GitHubAuth(gh_api_user, gh_api_token, app.logger)
    ghrepo = ghauth.target_repo(_required_repo_owner, _required_repo_name)
    publish_pr_review = ghrepo.publish_pr_review
    publish_issue_comment = ghrepo.publish_issue_comment
    hit_merge_button = ghrepo.hit_merge_button
else:
    # Without credentials, stub out all GitHub write operations.
    publish_pr_review = \
    publish_issue_comment = \
    hit_merge_button = lambda *a,**kw: (False, "No API token configured")
    app.logger.warning("Note, no WEBHOOK_GH_API_TOKEN configured; "
                       "GitHub comments won't be published, nor PRs merged.")
def validate_webhook_signature(data, x_hub_signature):
    """Return True/False for an HMAC-SHA1 signature match, or None when
    no secret key is configured (validation disabled)."""
    if not webhook_secret:
        return None  # was an implicit fall-through return in the original
    digest = hmac.new(webhook_secret, msg=data, digestmod='sha1').hexdigest()
    return hmac.compare_digest("sha1=" + digest, x_hub_signature)
_max_payload_size = 1024 * 1024  # should be well under this
def validate_request_signature(request):
    """Validate the X-Hub-Signature of an incoming webhook request.

    Returns True when the signature matches (or when no secret key is
    configured, in which case validation is skipped), False for oversized
    payloads or a signature mismatch.
    """
    # BUGFIX: request.content_length is None when the Content-Length
    # header is absent; the original `None > int` comparison raises
    # TypeError on Python 3.
    if request.content_length is not None and \
            request.content_length > _max_payload_size:
        app.logger.error("Refusing to read overly-large payload of size %s"
                         % request.content_length)
        return False
    payload_body = request.get_data()
    x_hub_signature = request.headers.get('X-Hub-Signature')
    ret = validate_webhook_signature(payload_body, x_hub_signature)
    if ret or ret is None:
        return True  # OK, signature match or secret key not configured
    else:
        app.logger.error("Payload signature did not match for secret key")
        return False
def set_webhook_pr_state(num, sha, state):
    """Persist automerge state for PR *num* at commit *sha*.

    State is written to <webhook_state_dir>/<num>/<sha>; a tuple/list is
    flattened to one line per element (embedded newlines squashed).
    """
    prdir = "%s/%s" % (global_data.webhook_state_dir, num)
    statefile = "%s/%s" % (prdir, sha)
    os.makedirs(prdir, mode=0o755, exist_ok=True)
    if isinstance(state, (tuple,list)):
        lines = [x.replace("\n"," ") for x in map(str,state)]
        state = "\n".join(lines)
    with open(statefile, "w") as f:
        print(state, file=f)
def get_webhook_pr_state(sha, num='*'):
    """Load saved automerge state for commit *sha*.

    Returns (state_lines, pr_number), or (None, None) when no state file
    exists.  With num='*' every PR directory is globbed and, if several
    PRs share the sha, the highest PR number wins.
    """
    prdir = "%s/%s" % (global_data.webhook_state_dir, num)
    statefile = "%s/%s" % (prdir, sha)
    # Only accept paths shaped like .../<digits>/<40-hex-sha>.
    def path_check(fn): return re.search(r'/\d+/[a-f\d]{40}$', fn)
    def pr_num(fn): return int(fn.rsplit('/', 2)[-2])
    if num == '*':
        filelist = glob.glob(statefile)
        filelist = list(filter(path_check, filelist))
        if len(filelist) == 0:
            return None, None
        # if there are multiple PRs with this sha, take the newest
        statefile = max(filelist, key=pr_num)
    if os.path.exists(statefile):
        with open(statefile) as f:
            return f.read().strip().split('\n'), pr_num(statefile)
    else:
        return None, None
@app.route("/check_suite", methods=["GET", "POST"])
def check_suite_hook():
if not validate_request_signature(request):
return Response("Bad X-Hub-Signature", status=400)
event = request.headers.get('X-GitHub-Event')
if event == "ping":
return Response('Pong')
elif event != "check_suite":
app.logger.debug("Ignoring non-check_suite hook of type '%s'" % event)
return Response("Wrong event type", status=400)
payload = request.get_json()
action = payload and payload.get('action')
if action not in ("completed",):
app.logger.info("Ignoring check_suite hook action '%s'" % action)
return Response("Not Interested")
try:
check_suite = payload['check_suite']
head_sha = check_suite['head_sha']
repo = payload['repository']
owner = repo['owner']['login'] # 'opensciencegrid'
reponame = repo['name'] # 'topology'
app_name = check_suite['app']['name'] # 'Travis CI'
conclusion = check_suite['conclusion'] # 'success' ...
except (TypeError, KeyError) as e:
emsg = "Malformed payload for check_suite hook: %s" % e
app.logger.error(emsg)
return Response(emsg, status=400)
app.logger.debug("Got check_suite hook '%s' for '%s'"
% (conclusion, head_sha))
if app_name != 'Travis CI':
app.logger.info("Ignoring non-travis check_suite hook for '%s'"
% app_name)
return Response("Not Interested; app_name was '%s'" % app_name)
if owner != _required_repo_owner or reponame != _required_repo_name:
app.logger.info("Ignoring check_suite hook repo '%s/%s'"
% (owner, reponame))
return Response("Not Interested; repo was '%s/%s'" % (owner, reponame))
pr_webhook_state, pull_num = get_webhook_pr_state(head_sha)
if pr_webhook_state is None or len(pr_webhook_state) != 4:
app.logger.info("Got travis '%s' check_suite hook for commit %s;\n"
"not merging as No PR automerge info available"
% (conclusion, head_sha))
return Response("No PR automerge info available for %s" % head_sha)
pr_dt_automerge_ret, base_sha, head_label, sender = pr_webhook_state
if re.search(r'^-?\d+$', pr_dt_automerge_ret):
pr_dt_automerge_ret = int(pr_dt_automerge_ret)
if pr_dt_automerge_ret == 0 and conclusion != 'success':
body = webhook_status_messages.ci_failure.format(**locals())
publish_pr_review(pull_num, body, 'COMMENT', head_sha)
if conclusion != 'success':
app.logger.info("Ignoring travis '%s' check_suite hook" % conclusion)
return Response("Not interested; CI conclusion was '%s'" % conclusion)
if pr_dt_automerge_ret == 0:
app.logger.info("Got travis success check_suite hook for commit %s;\n"
"eligible for DT automerge" % head_sha)
body = None
publish_pr_review(pull_num, body, 'APPROVE', head_sha)
title = "Auto-merge Downtime PR #{pull_num} from {head_label}" \
.format(**locals())
ok, fail_message = hit_merge_button(pull_num, head_sha, title)
if ok:
osg_bot_msg = webhook_status_messages.merge_success
else:
osg_bot_msg = webhook_status_messages.merge_failure
body = osg_bot_msg.format(**locals())
publish_issue_comment(pull_num, body)
else:
app.logger.info("Got travis success check_suite hook for commit %s;\n"
"not eligible for DT automerge" % head_sha)
return Response('Thank You')
@app.route("/pull_request", methods=["GET", "POST"])
def pull_request_hook():
if not validate_request_signature(request):
return Response("Bad X-Hub-Signature", status=400)
event = request.headers.get('X-GitHub-Event')
if event == "ping":
return Response('Pong')
elif event != "pull_request":
app.logger.debug("Ignoring non-pull_request hook of type '%s'" % event)
return Response("Wrong event type", status=400)
payload = request.get_json()
action = payload and payload.get('action')
if action not in ("opened",):
app.logger.info("Ignoring pull_request hook action '%s'" % action)
return Response("Not Interested")
# status=204 : No Content
try:
sender = payload['sender']['login']
head_sha = payload['pull_request']['head']['sha']
head_label = payload['pull_request']['head']['label'] # user:branch
head_ref = payload['pull_request']['head']['ref'] # branch
base_sha = payload['pull_request']['base']['sha']
base_label = payload['pull_request']['base']['label']
base_ref = payload['pull_request']['base']['ref']
pull_num = payload['pull_request']['number']
pull_url = payload['pull_request']['html_url']
title = payload['pull_request']['title']
mergeable = payload['pull_request']['mergeable']
if mergeable:
merge_sha = payload['pull_request']['merge_commit_sha']
except (TypeError, KeyError) as e:
emsg = "Malformed payload for pull_request hook: %s" % e
app.logger.error(emsg)
return Response(emsg, status=400)
app.logger.debug("Got pull_request hook for PR #{pull_num}"
" at {head_sha} on {head_label} onto {base_label}"
.format(**locals()))
pull_ref = "pull/{pull_num}/head".format(**locals())
if base_label != _required_base_label:
app.logger.info("Ignoring pull_request hook against '%s' "
"('%s' is required)" % (base_label, _required_base_label))
return Response("Not Interested")
global_data.update_webhook_repo()
script = src_dir + "/webapp/automerge_check.py"
headmerge_sha = "%s:%s" % (head_sha, merge_sha) if mergeable else head_sha
cmd = [script, base_sha, headmerge_sha, sender]
stdout, stderr, ret = runcmd(cmd, cwd=global_data.webhook_data_dir)
webhook_state = (ret, base_sha, head_label, sender)
set_webhook_pr_state(pull_num, head_sha, webhook_state)
# only comment on errors if DT files modified or contact unknown
if ret in reportable_errors:
osg_bot_msg = webhook_status_messages.automerge_status_messages[ret]
body = osg_bot_msg.format(**locals())
action = 'REQUEST_CHANGES' if ret in rejectable_errors else 'COMMENT'
publish_pr_review(pull_num, body, action, head_sha)
return Response('Thank You')
def runcmd(cmd, input=None, **kw):
    """Run *cmd*, optionally feeding *input* to its stdin.

    Returns (stdout, stderr, returncode); streams are decoded as utf-8.
    Extra keyword args are passed through to subprocess.Popen.
    """
    stdin = None if input is None else PIPE
    proc = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=stdin,
                            encoding='utf-8', **kw)
    out, err = proc.communicate(input)
    return out, err, proc.returncode
if __name__ == '__main__':
    # Local development: debug server with auto-reload.
    logging.basicConfig(level=logging.DEBUG)
    app.run(debug=True, use_reloader=True)
else:
    # Under a WSGI server: route root logging through Flask's handler.
    root = logging.getLogger()
    root.addHandler(flask.logging.default_handler)
|
from tests.test_helper import *
from braintree.configuration import Configuration
class TestTransactionGateway(unittest.TestCase):
    def setUp(self):
        """Build a gateway against the Development environment using the
        integration merchant credentials."""
        config = Configuration(
            environment=Environment.Development,
            merchant_id="integration_merchant_id",
            public_key="integration_public_key",
            private_key="integration_private_key"
        )
        self.gateway = BraintreeGateway(config)
    def test_credit_with_a_successful_result(self):
        """A credit transaction succeeds and exposes id/type/amount/card."""
        result = self.gateway.transaction.credit({
            "amount": Decimal(TransactionAmounts.Authorize),
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "05/2009"
            }
        })
        self.assertTrue(result.is_success)
        transaction = result.transaction
        # transaction ids are alphanumeric, at least 6 chars
        self.assertNotEqual(None, re.search(r"\A\w{6,}\Z", transaction.id))
        self.assertEqual(Transaction.Type.Credit, transaction.type)
        self.assertEqual(Decimal(TransactionAmounts.Authorize), transaction.amount)
        cc_details = transaction.credit_card_details
        self.assertEqual("411111", cc_details.bin)
        self.assertEqual("1111", cc_details.last_4)
        self.assertEqual("05/2009", cc_details.expiration_date)
    def test_shared_vault_transaction_with_nonce(self):
        """A partner merchant shares a payment-method nonce plus customer and
        address ids; an OAuth-granted recipient gateway can charge them."""
        config = Configuration(
            merchant_id="integration_merchant_public_id",
            public_key="oauth_app_partner_user_public_key",
            private_key="oauth_app_partner_user_private_key",
            environment=Environment.Development
        )
        gateway = BraintreeGateway(config)
        customer = gateway.customer.create({"first_name": "Bob"}).customer
        address = gateway.address.create({
            "customer_id": customer.id,
            "first_name": "Joe",
        }).address
        credit_card = gateway.credit_card.create(
            params={
                "customer_id": customer.id,
                "number": "4111111111111111",
                "expiration_date": "05/2009",
            }
        ).credit_card
        shared_nonce = gateway.payment_method_nonce.create(
            credit_card.token
        ).payment_method_nonce.nonce
        # OAuth app obtains a grant for shared-vault transactions …
        oauth_app_gateway = BraintreeGateway(
            client_id="client_id$development$integration_client_id",
            client_secret="client_secret$development$integration_client_secret",
            environment=Environment.Development
        )
        code = TestHelper.create_grant(oauth_app_gateway, {
            "merchant_public_id": "integration_merchant_id",
            "scope": "grant_payment_method,shared_vault_transactions"
        })
        access_token = oauth_app_gateway.oauth.create_token_from_code({
            "code": code
        }).credentials.access_token
        # … and the recipient charges using the shared ids.
        recipient_gateway = BraintreeGateway(access_token=access_token)
        result = recipient_gateway.transaction.sale({
            "shared_payment_method_nonce": shared_nonce,
            "shared_customer_id": customer.id,
            "shared_shipping_address_id": address.id,
            "shared_billing_address_id": address.id,
            "amount": "100"
        })
        self.assertTrue(result.is_success)
        self.assertEqual(result.transaction.shipping_details.first_name, address.first_name)
        self.assertEqual(result.transaction.billing_details.first_name, address.first_name)
        self.assertEqual(result.transaction.customer_details.first_name, customer.first_name)
    def test_shared_vault_transaction_with_token(self):
        """Same as the nonce variant above, but the recipient charges the
        shared payment-method *token* directly instead of a nonce."""
        config = Configuration(
            merchant_id="integration_merchant_public_id",
            public_key="oauth_app_partner_user_public_key",
            private_key="oauth_app_partner_user_private_key",
            environment=Environment.Development
        )
        gateway = BraintreeGateway(config)
        customer = gateway.customer.create({"first_name": "Bob"}).customer
        address = gateway.address.create({
            "customer_id": customer.id,
            "first_name": "Joe",
        }).address
        credit_card = gateway.credit_card.create(
            params={
                "customer_id": customer.id,
                "number": "4111111111111111",
                "expiration_date": "05/2009",
            }
        ).credit_card
        oauth_app_gateway = BraintreeGateway(
            client_id="client_id$development$integration_client_id",
            client_secret="client_secret$development$integration_client_secret",
            environment=Environment.Development
        )
        code = TestHelper.create_grant(oauth_app_gateway, {
            "merchant_public_id": "integration_merchant_id",
            "scope": "grant_payment_method,shared_vault_transactions"
        })
        access_token = oauth_app_gateway.oauth.create_token_from_code({
            "code": code
        }).credentials.access_token
        recipient_gateway = BraintreeGateway(
            access_token=access_token,
        )
        result = recipient_gateway.transaction.sale({
            "shared_payment_method_token": credit_card.token,
            "shared_customer_id": customer.id,
            "shared_shipping_address_id": address.id,
            "shared_billing_address_id": address.id,
            "amount": "100"
        })
        self.assertTrue(result.is_success)
        self.assertEqual(result.transaction.shipping_details.first_name, address.first_name)
        self.assertEqual(result.transaction.billing_details.first_name, address.first_name)
        self.assertEqual(result.transaction.customer_details.first_name, customer.first_name)
    def test_sale_with_gateway_rejected_with_incomplete_application(self):
        """A sale on a freshly created merchant whose application is still
        incomplete is gateway-rejected with reason ApplicationIncomplete."""
        gateway = BraintreeGateway(
            client_id="client_id$development$integration_client_id",
            client_secret="client_secret$development$integration_client_secret",
            environment=Environment.Development
        )
        # Create a brand-new merchant; its application is not yet complete.
        result = gateway.merchant.create({
            "email": "name@email.com",
            "country_code_alpha3": "USA",
            "payment_methods": ["credit_card", "paypal"]
        })
        gateway = BraintreeGateway(
            access_token=result.credentials.access_token,
            environment=Environment.Development
        )
        result = gateway.transaction.sale({
            "amount": "4000.00",
            "billing": {
                "street_address": "200 Fake Street"
            },
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "05/2009"
            }
        })
        self.assertFalse(result.is_success)
        transaction = result.transaction
        self.assertEqual(Transaction.GatewayRejectionReason.ApplicationIncomplete, transaction.gateway_rejection_reason)
    def test_sale_with_apple_pay_params(self):
        """A sale with raw apple_pay_card parameters authorizes successfully."""
        result = self.gateway.transaction.sale({
            "amount": Decimal(TransactionAmounts.Authorize),
            "apple_pay_card": {
                "cardholder_name": "Evelyn Boyd Granville",
                "cryptogram": "AAAAAAAA/COBt84dnIEcwAA3gAAGhgEDoLABAAhAgAABAAAALnNCLw==",
                "eci_indicator": "07",
                "expiration_month": "10",
                "expiration_year": "14",
                "number": "370295001292109"
            }
        })
        self.assertTrue(result.is_success)
        self.assertEqual(Transaction.Status.Authorized, result.transaction.status)
    def test_sale_with_google_pay_params(self):
        """A sale with raw android_pay_card (Google Pay) parameters authorizes
        and the card details are echoed back on the transaction."""
        result = self.gateway.transaction.sale({
            "amount": Decimal(TransactionAmounts.Authorize),
            "android_pay_card": {
                "cryptogram": "AAAAAAAA/COBt84dnIEcwAA3gAAGhgEDoLABAAhAgAABAAAALnNCLw==",
                "eci_indicator": "07",
                "expiration_month": "10",
                "expiration_year": "14",
                "google_transaction_id": "12345",
                "number": "4012888888881881",
                "source_card_last_four": "1881",
                "source_card_type": "Visa"
            }
        })
        self.assertTrue(result.is_success)
        self.assertEqual(Transaction.Status.Authorized, result.transaction.status)
        self.assertEqual("android_pay_card", result.transaction.payment_instrument_type)
        self.assertEqual("10", result.transaction.android_pay_card_details.expiration_month)
        self.assertEqual("14", result.transaction.android_pay_card_details.expiration_year)
        self.assertEqual("12345", result.transaction.android_pay_card_details.google_transaction_id)
        self.assertEqual("1881", result.transaction.android_pay_card_details.source_card_last_4)
        self.assertEqual("Visa", result.transaction.android_pay_card_details.source_card_type)
    def test_create_can_set_recurring_flag(self):
        """The deprecated 'recurring' flag can still be set on a sale and is
        reflected on the resulting transaction."""
        result = self.gateway.transaction.sale({
            "amount": "100",
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "05/2009"
            },
            "recurring": True
        })
        self.assertTrue(result.is_success)
        transaction = result.transaction
        self.assertEqual(True, transaction.recurring)
def test_create_recurring_flag_sends_deprecation_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = self.gateway.transaction.sale({
"amount": "100",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2009"
},
"recurring": True
})
self.assertTrue(result.is_success)
transaction = result.transaction
self.assertEqual(True, transaction.recurring)
assert len(w) > 0
assert issubclass(w[-1].category, DeprecationWarning)
assert "Use transaction_source parameter instead" in str(w[-1].message)
|
import re
import tempfile
import unittest
from dataclasses import asdict
from pathlib import Path
from datasets.utils.metadata import (
DatasetMetadata,
metadata_dict_from_readme,
tagset_validator,
validate_metadata_type,
yaml_block_from_readme,
)
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)])
README_YAML = """\
---
languages:
- zh
- en
task_ids:
- sentiment-classification
---
# Begin of markdown
Some cool dataset card
"""
README_EMPTY_YAML = """\
---
---
# Begin of markdown
Some cool dataset card
"""
README_NO_YAML = """\
# Begin of markdown
Some cool dataset card
"""
class TestMetadataUtils(unittest.TestCase):
    """Tests for the dataset-card (README.md) metadata utilities: metadata
    type validation, tag-set validation, YAML front-matter extraction, and
    ``DatasetMetadata`` parsing / per-config lookup."""
    def test_validate_metadata_type(self):
        """Malformed metadata dicts (non-string leaf values, empty lists,
        None values) must raise TypeError."""
        metadata_dict = {
            "tag": ["list", "of", "values"],
            "another tag": ["Another", {"list"}, ["of"], 0x646D46736457567A],
        }
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
        # An empty list of values is rejected as well.
        metadata_dict = {"tag1": []}
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
        # As is a None value.
        metadata_dict = {"tag1": None}
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
    def test_tagset_validator(self):
        """tagset_validator returns (items, None) when every item is
        registered and ([], error message) otherwise; items matched by the
        escape predicate bypass validation entirely."""
        name = "test_tag"
        url = "https://dummy.hf.co"
        # All items registered -> returned unchanged, no error.
        items = ["tag1", "tag2", "tag2", "tag3"]
        reference_values = ["tag1", "tag2", "tag3"]
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
        # Empty items are always valid.
        items = []
        reference_values = ["tag1", "tag2", "tag3"]
        items, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(items, [])
        self.assertIsNone(error)
        # Empty items against an empty reference list are also valid.
        items = []
        reference_values = []
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, [])
        self.assertIsNone(error)
        # One unknown tag -> everything is rejected and an error produced.
        items = ["tag1", "tag2", "tag2", "tag3", "unknown tag"]
        reference_values = ["tag1", "tag2", "tag3"]
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, [])
        self.assertEqual(error, f"{['unknown tag']} are not registered tags for '{name}', reference at {url}")
        # Items for which the predicate is True are excused from validation.
        def predicate_fn(string):
            return "ignore" in string
        # "process me" is not registered and not excused -> error.
        items = ["process me", "process me too", "ignore me"]
        reference_values = ["process me too"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, [])
        self.assertEqual(error, f"{['process me']} are not registered tags for '{name}', reference at {url}")
        # All non-excused items registered -> original list returned.
        items = ["process me", "process me too", "ignore me"]
        reference_values = ["process me too", "process me"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
        # Every item excused -> valid even with no matching references.
        items = ["ignore me too", "ignore me"]
        reference_values = ["process me too"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
    def test_yaml_block_from_readme(self):
        """yaml_block_from_readme extracts the front-matter between the
        '---' markers, returns an empty block for empty front matter, and
        None when there is no front matter."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = Path(tmp_dir) / "README.md"
            with open(path, "w+") as readme_file:
                readme_file.write(README_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertEqual(
                yaml_block,
                _dedent(
                    """\
                    languages:
                    - zh
                    - en
                    task_ids:
                    - sentiment-classification
                    """
                ),
            )
            with open(path, "w+") as readme_file:
                readme_file.write(README_EMPTY_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertEqual(
                yaml_block,
                _dedent(
                    """\
                    """
                ),
            )
            with open(path, "w+") as readme_file:
                readme_file.write(README_NO_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertIsNone(yaml_block)
    def test_metadata_dict_from_readme(self):
        """metadata_dict_from_readme parses the front matter into a dict,
        returning {} for empty front matter and None when absent."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = Path(tmp_dir) / "README.md"
            with open(path, "w+") as readme_file:
                readme_file.write(README_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertDictEqual(metadata_dict, {"languages": ["zh", "en"], "task_ids": ["sentiment-classification"]})
            with open(path, "w+") as readme_file:
                readme_file.write(README_EMPTY_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertDictEqual(metadata_dict, {})
            with open(path, "w+") as readme_file:
                readme_file.write(README_NO_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertIsNone(metadata_dict)
    def test_from_yaml_string(self):
        """DatasetMetadata.from_yaml_string accepts well-formed metadata
        (flat or per-config) and raises TypeError, possibly on the
        subsequent validate() call, for malformed metadata."""
        valid_yaml_string = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string)
        # Per-config metadata (languages keyed by config name) is valid.
        valid_yaml_string_with_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              fr:
              - fr
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_configs)
        # An unregistered value ("some guys in Panama") must fail.
        invalid_tag_yaml = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - some guys in Panama
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(invalid_tag_yaml)
            metadata.validate()
        # A missing required field (language_creators) must fail.
        missing_tag_yaml = _dedent(
            """\
            annotations_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(missing_tag_yaml)
            metadata.validate()
        # Duplicate top-level keys (task_ids twice) must fail.
        duplicate_yaml_keys = _dedent(
            """\
            annotations_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(duplicate_yaml_keys)
            metadata.validate()
        # Duplicate config names under a field must fail as well.
        valid_yaml_string_with_duplicate_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              en:
              - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(valid_yaml_string_with_duplicate_configs)
            metadata.validate()
        # paperswithcode_id may be a plain string...
        valid_yaml_string_with_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id: squad
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_paperswithcode_id)
        # ...or null...
        valid_yaml_string_with_null_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id: null
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_null_paperswithcode_id)
        # ...but not a list.
        valid_yaml_string_with_list_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id:
            - squad
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(valid_yaml_string_with_list_paperswithcode_id)
            metadata.validate()
    def test_get_metadata_by_config_name(self):
        """get_metadata_by_config_name resolves per-config fields to the
        requested config, falls back to the shared metadata for flat cards,
        and raises TypeError for a config name missing from a multi-config
        field."""
        valid_yaml_with_multiple_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              fr:
              - fr
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name:
              en: English Test Dataset
              fr: French Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id:
            - squad
            """
        )
        metadata = DatasetMetadata.from_yaml_string(valid_yaml_with_multiple_configs)
        # Each config gets its own languages and pretty_name.
        en_metadata = metadata.get_metadata_by_config_name("en")
        self.assertEqual(
            asdict(en_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["en"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "English Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        fr_metadata = metadata.get_metadata_by_config_name("fr")
        self.assertEqual(
            asdict(fr_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["fr"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "French Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        valid_yaml_with_single_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id:
            - squad
            """
        )
        metadata = DatasetMetadata.from_yaml_string(valid_yaml_with_single_configs)
        en_metadata = metadata.get_metadata_by_config_name("en")
        self.assertEqual(
            asdict(en_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["en"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        # On a flat (non-config) card, any config name returns the shared
        # metadata unchanged -- note languages stays ["en"] for "fr".
        fr_metadata = metadata.get_metadata_by_config_name("fr")
        self.assertEqual(
            asdict(fr_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["en"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        invalid_yaml_with_multiple_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              zh:
              - zh
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id:
            - squad
            """
        )
        metadata = DatasetMetadata.from_yaml_string(invalid_yaml_with_multiple_configs)
        en_metadata = metadata.get_metadata_by_config_name("en")
        self.assertEqual(
            asdict(en_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["en"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        zh_metadata = metadata.get_metadata_by_config_name("zh")
        self.assertEqual(
            asdict(zh_metadata),
            {
                "annotations_creators": ["found"],
                "language_creators": ["found"],
                "languages": ["zh"],
                "licenses": ["unknown"],
                "multilinguality": ["monolingual"],
                "pretty_name": "Test Dataset",
                "size_categories": ["10K<n<100K"],
                "source_datasets": ["extended|other-yahoo-webscope-l6"],
                "task_categories": ["question-answering"],
                "task_ids": ["open-domain-qa"],
                "paperswithcode_id": ["squad"],
            },
        )
        # "fr" is not a config of this card -> TypeError.
        with self.assertRaises(TypeError):
            fr_metadata = metadata.get_metadata_by_config_name("fr")
|
import numpy as np
from pandas import Series, Timedelta, DataFrame
def acf(x, lags=None, bin_method='gaussian', bin_width=None, max_gap=np.inf,
        min_obs=10, output="acf"):
    """Calculate the autocorrelation function for irregular timesteps.

    This is a thin wrapper around :func:`ccf` called with ``y=x``.

    Parameters
    ----------
    x: pandas.Series
        Pandas Series containing the values to calculate the
        autocorrelation for. The index has to be a Pandas.DatetimeIndex.
    lags: numpy.array, optional
        numpy array containing the lags in days for which the
        autocorrelation is calculated. Defaults to [0, 1, 2, 3, 4, 5, 6, 7,
        8, 9, 10, 12, 13, 14, 30, 61, 90, 120, 150, 180, 210, 240, 270,
        300, 330, 365].
    bin_method: str, optional
        method to determine the type of bin. Options are "gaussian"
        (default), "sinc" and "rectangle".
    bin_width: float, optional
        number of days used as the width for the bin to calculate the
        correlation. By default these values are chosen based on the
        bin_method.
    max_gap: float, optional
        Maximum timestep gap in the data. All timesteps above this gap value
        are not used for calculating the average timestep. This can be
        helpful when there is a large gap in the data that influences the
        average timestep.
    min_obs: int
        Minimum number of observations in a bin to determine the correlation.
    output: str
        If output is "full", also estimated uncertainties are returned.

    Returns
    -------
    ACF: pandas.Series
        The autocorrelation function.

    References
    ----------
    Rehfeld, K., Marwan, N., Heitzig, J., Kurths, J. (2011). Comparison
    of correlation analysis techniques for irregularly sampled time series.
    Nonlinear Processes in Geophysics. 18. 389-404. 10.5194 pg-18-389-2011.

    Notes
    -----
    Calculate the autocorrelation function for irregular timesteps based on
    the slotting technique. Different methods (kernels) to bin the data are
    available.

    Examples
    --------
    acf = ps.stats.acf(x, bin_method="gaussian")
    """
    c = ccf(x=x, y=x, lags=lags, bin_method=bin_method, bin_width=bin_width,
            max_gap=max_gap, min_obs=min_obs, output=output)
    c.name = "ACF"
    return c
def ccf(x, y, lags=None, bin_method='gaussian', bin_width=None,
        max_gap=np.inf, min_obs=10, output="ccf"):
    """Method to calculate the cross-correlation function for irregular
    timesteps based on the slotting technique. Different methods (kernels)
    to bin the data are available.

    Parameters
    ----------
    x, y: pandas.Series
        Pandas Series containing the values to calculate the
        cross-correlation for. The index has to be a Pandas.DatetimeIndex.
    lags: numpy.array, optional
        numpy array containing the lags in days for which the
        cross-correlation is calculated. Defaults to [0, 1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 12, 13, 14, 30, 61, 90, 120, 150, 180, 210, 240, 270,
        300, 330, 365].
    bin_method: str, optional
        method to determine the type of bin. Options are "gaussian"
        (default), "sinc" and "rectangle".
    bin_width: float, optional
        number of days used as the width for the bin to calculate the
        correlation. By default these values are chosen based on the
        bin_method.
    max_gap: float, optional
        Maximum timestep gap in the data. All timesteps above this gap value
        are not used for calculating the average timestep. This can be
        helpful when there is a large gap in the data that influences the
        average timestep.
    min_obs: int
        Minimum number of observations in a bin to determine the correlation.
    output: str
        If output is "full", also estimated uncertainties are returned.

    Returns
    -------
    CCF: pandas.Series or pandas.DataFrame
        The cross-correlation function; a DataFrame with an extra "stderr"
        column when output == "full".

    References
    ----------
    Rehfeld, K., Marwan, N., Heitzig, J., Kurths, J. (2011). Comparison
    of correlation analysis techniques for irregularly sampled time series.
    Nonlinear Processes in Geophysics. 18. 389-404. 10.5194 pg-18-389-2011.

    Examples
    --------
    ccf = ps.stats.ccf(x, y, bin_method="gaussian")
    """
    # Timesteps (days) between consecutive observations. The first diff is
    # NaT, so it is forced to zero.
    dt_x = x.index.to_series().diff().values / Timedelta(1, "D")
    dt_x[0] = 0.0
    dt_x_mu = dt_x[dt_x < max_gap].mean()  # Deal with big gaps if present
    t_x = np.cumsum(dt_x)
    dt_y = y.index.to_series().diff().values / Timedelta(1, "D")
    dt_y[0] = 0.0
    dt_y_mu = dt_y[dt_y < max_gap].mean()
    t_y = np.cumsum(dt_y)
    dt_mu = max(dt_x_mu, dt_y_mu)
    # Create matrix with pairwise time differences.
    t1, t2 = np.meshgrid(t_x, t_y)
    # Do not take absolute value (previous behavior) and set values to nan
    # where t < 0. This means only positive lags can be calculated!
    t = np.subtract(t1, t2)
    t[t < 0] = np.nan
    # Standardize the values and create the matrix of cross-products.
    x = (x.values - x.values.mean()) / x.values.std()
    y = (y.values - y.values.mean()) / y.values.std()
    xy = np.outer(y, x)
    if lags is None:  # Default lags in Days, log-scale between 0 and 365.
        lags = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 30, 61, 90, 120,
                150, 180, 210, 240, 270, 300, 330, 365]
    # Remove lags that cannot be determined because lag < dt_min, where
    # dt_min is the smallest timestep supported by at least min_obs pairs.
    u, i = np.unique(dt_x, return_counts=True)
    dt_x_min = u[Series(i, u).cumsum() >= min_obs][0]
    u, i = np.unique(dt_y, return_counts=True)
    dt_y_min = u[Series(i, u).cumsum() >= min_obs][0]
    dt_min = min(dt_x_min, dt_y_min)
    lags = np.array([float(lag) for lag in lags if lag >= dt_min or lag == 0])
    # Delete large intermediates to free memory.
    del (x, y, dt_x, dt_y, t1, t2, t_x, t_y)
    # Select appropriate bin_width, default depends on bin_method.
    if bin_width is None:
        options = {"rectangle": 0.5, "sinc": 1, "gaussian": 0.25}
        bin_width = np.ones_like(lags) * options[bin_method] * dt_mu
    elif isinstance(bin_width, (int, float)):
        # BUG FIX: the user-supplied scalar width was previously discarded
        # (np.ones_like(lags) was used as-is, i.e. a width of 1 day).
        bin_width = np.ones_like(lags) * bin_width
    else:
        # NOTE(review): any other bin_width is replaced by this hard-coded
        # 26-entry list matching the default lags; it is never checked
        # against len(lags) -- confirm this fallback is intended.
        bin_width = [0.5, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 5, 5, 5,
                     2, 2, 2, 2, 2, 2, 2, 2]
    # Select the binning method (kernel) for the cross-correlation. The
    # pre-allocated array `a` is reused as the kernels' output buffer.
    if bin_method == "rectangle":
        a = np.zeros_like(t, dtype=float)
        kernel_func = lambda d, h: np.less_equal(np.abs(d, out=a), h,
                                                 out=a).astype(int)
    elif bin_method == "gaussian":
        a = np.zeros_like(t, dtype=float)

        def kernel_func(d, h):
            den1 = -2 * h ** 2  # denominator 1
            den2 = np.sqrt(2 * np.pi * h)  # denominator 2
            return np.exp(np.square(d, out=a) / den1, out=a) / den2
    elif bin_method == "sinc":
        kernel_func = lambda d, h: np.sin(np.pi * h * d) / (np.pi * h * d)
    else:
        raise NotImplementedError("bin_method %s is not implemented." %
                                  bin_method)
    # Pre-allocate arrays to speed up all numpy methods.
    UDCF = np.zeros_like(lags, dtype=float)
    M = np.zeros_like(lags, dtype=float)
    d = np.zeros_like(t, dtype=float)
    for i, k in enumerate(lags):
        # Construct the kernel weights for this lag.
        np.subtract(t, k, out=d)
        h = bin_width[i]
        b = kernel_func(d, h)
        c = np.multiply(xy, b, out=d)  # Element-wise multiplication
        # Use nansum to skip the NaNs marking negative time differences.
        UDCF[i] = np.nansum(c)
        M[i] = np.nansum(b)
    DCF = UDCF / M
    CCF = Series(data=DCF, index=lags, name="CCF")
    if output == "full":
        CCFstd = np.sqrt((np.cumsum(UDCF) - M * DCF) ** 2) / (M - 1)
        CCF = DataFrame(data={"CCF": CCF.values, "stderr": CCFstd}, index=lags)
    CCF.index.name = "Lags (Days)"
    return CCF
|
# Probe whether pskolibri supports the current operating system: importing
# its platform constants raises NotImplementedError on unsupported OSes.
try:
    from kolibri.utils.pskolibri.common import LINUX # noqa: F401
    from kolibri.utils.pskolibri.common import WINDOWS # noqa: F401
    SUPPORTED_OS = True
except NotImplementedError:
    # This module can't work on this OS
    SUPPORTED_OS = False
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solar.orchestration.runner import app
from solar.system_log.operations import move_to_commited
from solar.system_log.operations import set_error
__all__ = ['error_logitem', 'commit_logitem']
@app.task(name='error_logitem')
def error_logitem(task_uuid):
    """Celery task: mark the system-log item for *task_uuid* as failed.

    The uuid may carry a colon-separated prefix; only the part after the
    last colon identifies the log item.
    """
    log_item_uid = task_uuid.rsplit(':', 1)[-1]
    return set_error(log_item_uid)
@app.task(name='commit_logitem')
def commit_logitem(task_uuid):
    """Celery task: move the system-log item for *task_uuid* to committed.

    The uuid may carry a colon-separated prefix; only the part after the
    last colon identifies the log item.
    """
    log_item_uid = task_uuid.rsplit(':', 1)[-1]
    return move_to_commited(log_item_uid)
|
# Authors: Edouard Oyallon, Joakim Anden, Mathieu Andreux
import torch
import torch.nn.functional as F
from collections import namedtuple
BACKEND_NAME = 'torch'
from ...backend.torch_backend import _is_complex, Modulus, concatenate, type_checks, cdgmm, real
from ...backend.base_backend import FFT
def subsample_fourier(x, k):
    """Subsample a Fourier-domain signal by periodization.

    Subsampling by ``k`` in the temporal domain amounts to periodization in
    the Fourier domain: the frequency axis is split into ``k`` segments
    which are then averaged.

    Parameters
    ----------
    x : tensor
        Input tensor with at least 3 dimensions; the next-to-last axis is
        the frequency axis (its length should be divisible by ``k``) and
        the last axis of size 2 holds the real and imaginary parts.
    k : int
        The subsampling factor.

    Returns
    -------
    res : tensor
        The input periodized along the frequency axis, of size
        ``x.shape[-2] // k`` along that axis.

    Raises
    ------
    TypeError
        If ``x`` is not complex according to ``_is_complex``.
    """
    if not _is_complex(x):
        raise TypeError('The input should be complex.')
    n_freqs = x.shape[-2]
    periodized = x.view(x.shape[:-2] + (k, n_freqs // k, 2))
    return periodized.mean(dim=-3)
def pad_1d(x, pad_left, pad_right, mode='constant', value=0.):
    """Pad a real 3D tensor along its last (temporal) axis.

    Parameters
    ----------
    x : tensor
        Three-dimensional input tensor; the third axis is padded.
    pad_left : int
        Number of samples to add at the beginning of the temporal axis.
    pad_right : int
        Number of samples to add at the end of the temporal axis.
    mode : string, optional
        Padding mode, forwarded to ``F.pad`` ('constant' by default,
        'reflect' also supported).
    value : float, optional
        Fill value when mode == 'constant'. Defaults to 0.

    Returns
    -------
    res : tensor
        The tensor padded along the third dimension.

    Raises
    ------
    ValueError
        If mode == 'reflect' and either pad size reaches the signal length
        (reflection would be ill-defined).
    """
    signal_len = x.shape[-1]
    if mode == 'reflect' and max(pad_left, pad_right) >= signal_len:
        raise ValueError('Indefinite padding size (larger than tensor).')
    # F.pad needs a 4D tensor to pad the last two axes, so a dummy axis is
    # inserted and removed again after padding.
    padded = F.pad(x.unsqueeze(2), (pad_left, pad_right, 0, 0),
                   mode=mode, value=value)
    return padded.squeeze(2)
def pad(x, pad_left=0, pad_right=0, to_complex=True):
    """Reflection-pad a real 1D tensor and optionally map it to complex.

    Parameters
    ----------
    x : tensor
        Three-dimensional input tensor; the third axis is padded.
    pad_left : int
        Number of samples added at the beginning of the temporal axis.
    pad_right : int
        Number of samples added at the end of the temporal axis.
    to_complex : boolean, optional
        When True (default), append a trailing axis of size 2 holding the
        padded signal as the real part and zeros as the imaginary part.

    Returns
    -------
    output : tensor
        The padded signal; four-dimensional with a final axis of size 2
        when ``to_complex`` is True.
    """
    padded = pad_1d(x, pad_left, pad_right, mode='reflect')
    if not to_complex:
        return padded
    imag = torch.zeros_like(padded)
    return torch.stack((padded, imag), dim=-1)
def unpad(x, i0, i1):
    """Remove padding: return the slice ``x[..., i0:i1]``.

    Parameters
    ----------
    x : tensor
        Input tensor with at least one axis.
    i0 : int
        Start index of the original signal before padding.
    i1 : int
        End index (exclusive) of the original signal before padding.

    Returns
    -------
    x_unpadded : tensor
        The view ``x[..., i0:i1]``.
    """
    window = slice(i0, i1)
    return x[..., window]
# NOTE(review): torch.fft/torch.ifft/torch.irfft are the legacy (pre-1.8)
# PyTorch FFT functions operating on (..., 2) real/imag tensors; this
# backend therefore requires an older torch release -- confirm the pin.
fft = FFT(lambda x: torch.fft(x, 1, normalized=False),
          lambda x: torch.ifft(x, 1, normalized=False),
          lambda x: torch.irfft(x, 1, normalized=False, onesided=False),
          type_checks)

# NOTE(review): the backend is exposed as the namedtuple *class* itself,
# with attributes overwritten below (no instance is ever created); cdgmm,
# pad and pad_1d are attached even though they are not declared fields.
backend = namedtuple('backend', ['name', 'modulus_complex', 'subsample_fourier', 'real', 'unpad', 'fft', 'concatenate'])
backend.name = 'torch'
backend.modulus_complex = Modulus()
backend.subsample_fourier = subsample_fourier
backend.real = real
backend.unpad = unpad
backend.cdgmm = cdgmm
backend.pad = pad
backend.pad_1d = pad_1d
backend.fft = fft
# Scattering outputs are concatenated along the next-to-last axis.
backend.concatenate = lambda x: concatenate(x, -2)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RouteHistory(Document):
    """Model for the 'Route History' doctype; all behavior is inherited
    from frappe's Document. Cleanup lives in flush_old_route_records()."""
def flush_old_route_records():
    """Delete all Route History records except the newest 500 per user.

    Only users with more than ``records_to_keep_limit`` records are
    processed. For each such user, the ``modified`` timestamp of the
    501st-newest record is looked up and every record at or before that
    timestamp is deleted (records sharing that exact timestamp are
    removed as well).
    """
    records_to_keep_limit = 500
    users = frappe.db.sql('''
        SELECT `user`
        FROM `tabRoute History`
        GROUP BY `user`
        HAVING count(`name`) > %(limit)s
    ''', {
        "limit": records_to_keep_limit
    })
    for user in users:
        user = user[0]
        # The record just past the keep-limit, ordered newest first.
        last_record_to_keep = frappe.db.get_all('Route History',
            filters={
                'user': user,
            },
            limit=1,
            limit_start=records_to_keep_limit,
            fields=['modified'],
            order_by='modified desc')
        # BUG FIX: the user condition previously used the %(modified)s
        # placeholder, comparing `user` against a timestamp and thereby
        # never matching (or matching the wrong rows).
        frappe.db.sql('''
            DELETE
            FROM `tabRoute History`
            WHERE `modified` <= %(modified)s and `user`=%(user)s
        ''', {
            "modified": last_record_to_keep[0].modified,
            "user": user
        })
import os
import subprocess
import platform
import time
import database as DB
import input_commands as IC
import pygame_camera_module as CM
class Main(object):
    """Periodically captures a camera image and uploads it to the database
    (Firebase via Pyrebase, per the DB module used below)."""
    def __init__(self):
        # Setup input events: register shutdown() as the handler invoked by
        # the input-command module (triggered by 'esc' per the run() loop).
        IC.InputCommands(self.shutdown)
        # Connect to firebase and the camera. Captures are always written
        # to capture.jpg next to this script.
        self.dirname, self.filename = os.path.split(os.path.abspath(__file__))
        self.image_path = "{0}/{1}".format(self.dirname, "capture.jpg")
        self.db = DB.PyrebaseDatabase()
        self.cam = CM.PygameCameraModule()
        self.cam.start()
    def start(self):
        # Thin alias for run(); kept for a stable public entry point.
        self.run()
    def run(self):
        self.run_loop = True
        #Loop until 'esc' pressed
        while self.run_loop:
            #capture image
            self.cam.capture_image(self.image_path)
            #send image, get url, and save to database
            self.db.send_image(self.image_path)
            # NOTE(review): the flag is only re-checked after this sleep,
            # so shutdown() can take up to 5 minutes to stop the loop.
            time.sleep(60*5)
    def shutdown(self):
        print('stopping application')
        self.db.stop()
        self.run_loop = False
        # NOTE(review): blocks 30s after stopping; the original comment
        # said "wait a bit to start" -- confirm the intent of this sleep.
        time.sleep(30)
# Run only when executed as a script, so importing this module (e.g. for
# testing) no longer constructs the camera and starts the capture loop as
# an import side effect.
if __name__ == "__main__":
    m = Main()
    m.start()
|
# import pytest
from mygamefile import Game
import resources
def test_init():
    """A new Game starts with empty current and next boards."""
    game = Game()
    assert game.board == resources.EMPTY_BOARD
    assert game.next_board == resources.EMPTY_BOARD
def test_print_empty_board():
    """board_to_string renders an empty board as the expected string."""
    game = Game()
    assert game.board_to_string() == resources.EMPTY_BOARD_STRING
def test_set_cell_alive():
    """set_cell_alive(2, 4) produces the expected single-cell board."""
    game = Game()
    game.set_cell_alive(2, 4)
    assert game.board == resources.BOARD_WITH_ROW_2_COLUMN_4_ALIVE
def test_print_board_with_row_2_column_4_alive():
    """board_to_string renders a board with one live cell correctly."""
    game = Game()
    game.set_cell_alive(2, 4)
    s = game.board_to_string()
    assert s == resources.BOARD_WITH_ROW_2_COLUMN_4_ALIVE_STRING
def test_set_cell_dead():
    """set_cell_dead undoes set_cell_alive, restoring the empty board."""
    game = Game()
    game.set_cell_alive(2, 4)
    game.set_cell_dead(2, 4)
    assert game.board == resources.EMPTY_BOARD
def test_swap_life_state():
    """swap_life_state toggles a cell: alive -> dead -> alive again."""
    game = Game()
    game.set_cell_alive(2, 4)
    game.swap_life_state(2, 4)
    assert game.board == resources.EMPTY_BOARD
    game.swap_life_state(2, 4)
    assert game.board == resources.BOARD_WITH_ROW_2_COLUMN_4_ALIVE
def test_count_neighbors():
    """count_neighbors excludes the cell itself but counts adjacent cells."""
    game = Game()
    game.set_cell_alive(2, 4)
    # The live cell is not its own neighbor; the cell above it sees it.
    assert game.count_neighbors(2, 4) == 0
    assert game.count_neighbors(1, 4) == 1
def test_next_generation_of_empty_board():
    """An empty board stays empty after one generation."""
    game = Game()
    game.advance_to_next_generation()
    assert game.board == resources.EMPTY_BOARD
def test_next_generation_of_board_with_row_2_column_4_alive():
    """A lone live cell dies of underpopulation after one generation."""
    game = Game()
    # BUG FIX: the board was previously assigned the *string rendering*
    # constant (…_STRING); use the board constant as in the other tests.
    game.board = resources.BOARD_WITH_ROW_2_COLUMN_4_ALIVE
    game.advance_to_next_generation()
    assert game.board == resources.EMPTY_BOARD
def test_next_generation_of_board_with_square():
    """A 2x2 square is a still life: it survives a generation unchanged."""
    game = Game()
    game.board = resources.BOARD_WITH_SQUARE_AT_ROW_2_COLUMN_4_ALIVE
    # BUG FIX: this test had no act/assert steps and could never fail.
    game.advance_to_next_generation()
    assert game.board == resources.BOARD_WITH_SQUARE_AT_ROW_2_COLUMN_4_ALIVE
def test_blinker_boards():
    """A blinker oscillates between its two phases on each generation."""
    g = Game()
    g.board = resources.BOARD_WITH_BLINKER_A
    g.advance_to_next_generation()
    assert g.board == resources.BOARD_WITH_BLINKER_B
    g.advance_to_next_generation()
    assert g.board == resources.BOARD_WITH_BLINKER_A
|
from __future__ import absolute_import, print_function
import os
import sys
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
# For demo purposes, we build our own tiny library.
try:
    print("building libmymath.a")
    # os.system returns the shell exit status; 0 means success.
    # BUG FIX: status checks used ``assert`` (stripped under ``python -O``)
    # and the handler was a bare ``except:`` which also swallowed
    # SystemExit/KeyboardInterrupt. Raise and catch explicitly instead.
    status = os.system("gcc -shared -fPIC -c mymath.c -o mymath.o")
    if status == 0:
        status = os.system("ar rcs libmymath.a mymath.o")
    if status != 0:
        raise RuntimeError("build command returned non-zero exit status")
except (RuntimeError, OSError):
    # Fall back to a pre-built library when the toolchain is unavailable.
    if not os.path.exists("libmymath.a"):
        print("Error building external library, please create libmymath.a manually.")
        sys.exit(1)
# Here is how to use the library built above.
ext_modules = cythonize([
    Extension("call_mymath",
              sources=["call_mymath.pyx"],
              include_dirs=[os.getcwd()],  # path to .h file(s)
              library_dirs=[os.getcwd()],  # path to .a or .so file(s)
              libraries=['mymath'])
])
setup(
    name='Demos',
    ext_modules=ext_modules,
)
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import (
Any, Dict, Optional,
)
from amundsen_rds.models import RDSModel
def serialize_record(record: Optional[RDSModel]) -> Dict[str, Any]:
    """Convert an RDS model row into a plain dict of its column values.

    Only instance attributes that correspond to actual table columns are
    kept (SQLAlchemy bookkeeping attributes are dropped). ``None`` maps
    to an empty dict.
    """
    if record is None:
        return {}
    column_names = record.__table__.columns.keys()
    return {
        name: value
        for name, value in vars(record).items()
        if name in column_names
    }
|
"""Miscellaneous utilities."""
import functools
import sys
class Named:
    """An object identified purely by its name.

    Instances print as their bare name, which makes them convenient as
    unique, self-describing sentinel values.
    """

    def __init__(self, name):
        """Store ``name`` as the object's display name.

        Arguments:
            name: The name of this object.
        """
        self.name = name

    def __repr__(self):
        """Use the bare name as the representation."""
        return self.name
# Module-level sentinel singletons; compared by identity by callers.
BOOTSTRAP = Named("BOOTSTRAP")
MISSING = Named("MISSING")
def keyword_decorator(deco):
    """Wrap a decorator so it optionally accepts keyword arguments.

    The wrapped decorator can then be used either bare (``@deco``) or
    parameterized (``@deco(option=value)``).
    """
    @functools.wraps(deco)
    def new_deco(fn=None, **kwargs):
        if fn is not None:
            # Bare usage: apply immediately with default options.
            return deco(fn, **kwargs)

        # Parameterized usage: return a decorator closing over kwargs.
        @functools.wraps(deco)
        def newer_deco(inner_fn):
            return deco(inner_fn, **kwargs)
        return newer_deco
    return new_deco
class _MetaMC(type):
    """Metaclass delegating ``issubclass`` checks to the class's ``chk``."""
    def __subclasscheck__(cls, sub):
        # ``chk`` is provided by classes built with this metaclass.
        return cls.chk(sub)
def meta(condition):
    """Return a class whose subclass relation is defined by ``condition``.

    For example, a dataclass is a subclass of
    ``meta(dataclasses.is_dataclass)``, and a class whose name starts with
    "X" is a subclass of ``meta(lambda cls: cls.__name__.startswith("X"))``.

    Arguments:
        condition: A function taking a class and returning True or False
            depending on whether the class matches the condition.
    """
    class M(metaclass=_MetaMC):
        @classmethod
        def chk(cls, candidate):
            # Delegate the issubclass decision to the user's predicate.
            return condition(candidate)
    return M
def _getcls(ref):
    """Resolve a dotted ``module.attr[...]`` reference to the named object."""
    root, *attrs = ref.split(".")
    return functools.reduce(getattr, attrs, __import__(root))
def deferred(ref):
    """Represent a class from an external module without importing it.

    For instance, ``deferred("numpy.ndarray")`` matches instances of
    numpy.ndarray without importing numpy. When tested against a class,
    if the first component of the class's ``__module__`` equals the
    referenced module, the real class is resolved and a normal
    issubclass check is performed.

    If the module is already loaded, ``deferred`` returns the class
    directly.

    Arguments:
        ref: A string starting with a module name representing the path
            to import a class.
    """
    module = ref.split(".", 1)[0]
    if module in sys.modules:
        # Module already imported: resolve eagerly.
        return _getcls(ref)

    @meta
    def check(cls):
        mod_attr = getattr(cls, "__module__", None)
        top_module = mod_attr.split(".", 1)[0] if mod_attr else None
        # Only resolve the reference when the candidate comes from the
        # referenced module — this is what keeps the import deferred.
        return top_module == module and issubclass(cls, _getcls(ref))

    return check
def exactly(base_cls):
    """Match the class but not its subclasses."""
    # Identity comparison: subclasses deliberately do not match.
    @meta
    def check(cls):
        return cls is base_cls
    return check
def strict_subclass(base_cls):
    """Match proper subclasses of ``base_cls``, excluding the base itself."""
    @meta
    def check(cls):
        # Non-classes can never match.
        if not isinstance(cls, type):
            return False
        return cls is not base_cls and issubclass(cls, base_cls)
    return check
def has_attribute(*attrs):
    """Match classes that define every attribute named in ``attrs``."""
    @meta
    def check(cls):
        for attr in attrs:
            if not hasattr(cls, attr):
                return False
        return True
    return check
# Explicit public API of this module.
__all__ = [
    "BOOTSTRAP",
    "MISSING",
    "Named",
    "deferred",
    "exactly",
    "has_attribute",
    "meta",
    "keyword_decorator",
    "strict_subclass",
]
|
from django.conf.urls import url, patterns, include
# ``patterns()`` has been deprecated since Django 1.8 and was removed in
# Django 1.10; a plain list of url() entries is the supported form.
# The old second positional argument to include() was ``namespace``;
# make that explicit.
urlpatterns = [
    url(r'^', include('site_search.urls', namespace='search')),
]
|
from tkinter import Frame, Label, Entry, Button, LabelFrame
from tkinter import N, W, LEFT, CENTER
from gui.selected_device_frame import SelectedDeviceFrame
from gui.creator import write_device_config, load_device_config
import adb
import re
import traceback
RUNNING = 'running'
DISCONNECTED = 'disconnected'
CONNECTED = 'connected'
class DeviceListFrame(Frame):
    """Notebook tab listing configured adb devices plus an add-device form."""
    def __init__(self, notebook, main_frame, cnf={}, **kwargs):
        # NOTE(review): ``kwargs`` is passed positionally as Tk's ``cnf``
        # dict; the ``cnf`` parameter itself is unused (pattern repeated
        # throughout this file).
        Frame.__init__(self, notebook, kwargs)
        # Callers must supply 'width' and 'height' in kwargs.
        self.windows_size = [kwargs['width'], kwargs['height']]
        self.devices_config = load_device_config()
        self.main_frame = main_frame
        # NOTE(review): verify AddDeviceFrame accepts ``main_frame`` as its
        # second positional argument.
        adf = AddDeviceFrame(self, main_frame)
        dlt = DeviceListTable(self, main_frame)
        # One table row per persisted device config entry.
        for config in self.devices_config:
            dlt.add_row(config.get('name', 'None'), config['ip'], config['port'])
        adf.set_on_add_click(dlt.add_row)
        adf.grid(row=0, column=0, pady=(10, 0), sticky=N + W)
        dlt.grid(row=1, column=0, pady=(10, 0), sticky=N + W)
class DeviceListTable(Frame):
    """Table of DeviceRow widgets with add/remove/display handling."""
    def __init__(self, parent, main_frame, cnf={}, **kwargs):
        Frame.__init__(self, parent, kwargs)
        self.main_frame = main_frame
        self.title = Label(self, text="Devices:")
        self.title.grid(row=0, column=0, sticky=W, padx=(5, 0))
        # One DeviceRow per configured device, in display order.
        self.device_rows = []
    def add_row(self, name, ip, port):
        """Create a DeviceRow, wire its buttons and re-render the table."""
        try:
            new_row = DeviceRow(self, self.main_frame, name, ip, port)
            new_row.set_on_display_click(self.on_display_click)
            new_row.set_on_del_click(self.on_delete_click)
            self.device_rows.append(new_row)
            self.render()
        except Exception as e:
            # Best-effort: log the traceback but keep the UI alive.
            traceback.print_exc()
            return
    def remove_row(self, row):
        """Tear down a row and its device frame, then drop it from the list."""
        try:
            idx = self.device_rows.index(row)
            if row.device_frame is not None:
                row.device_frame.stop()
                row.device_frame.destroy()
            self.device_rows.remove(row)
            row.destroy()
        except Exception as e:
            traceback.print_exc()
            return
    def on_display_click(self, row):
        """Show the clicked row's device frame in the second notebook tab."""
        # master.master is presumably the Notebook widget — TODO confirm.
        self.master.master.select(1)
        # Hide every other device frame before showing this row's frame.
        for device_row in self.device_rows:
            if device_row.device_frame is not None:
                device_row.device_frame.grid_forget()
        row.device_frame.grid()
    def on_delete_click(self, row):
        """Remove the row's device from the persisted config, then the UI."""
        ip, port = row.ip, row.port
        self.master.devices_config.remove(
            next(addr for addr in self.master.devices_config if addr['ip'] == ip and addr['port'] == port)
        )
        write_device_config(self.master.devices_config)
        self.remove_row(row)
    def render(self):
        """(Re)grid every row beneath the title in list order."""
        for i in range(len(self.device_rows)):
            self.device_rows[i].grid(row=i + 1, column=0, sticky=W, padx=(10, 0), pady=(10, 0))
class DeviceRow(Frame):
    """Single table row: name, ip:port, connection status, action buttons."""
    def __init__(self, device_list_table, main_frame, name, ip, port, cnf={}, **kwargs):
        Frame.__init__(self, device_list_table, kwargs)
        self.main_frame = main_frame
        self.name = name
        self.ip = ip
        self.port = port
        # May be None when the device is unreachable at construction time.
        self.device = adb.bridge.get_device(ip, port)
        # Lazily created on first 'Display' click.
        self.device_frame = None
        self.name_label = Label(
            self, text=self.name, bg='white', height=1, width=10)
        self.ip_port_label = Label(
            self, text='{}:{}'.format(ip, port), bg='white', height=1, width=19)
        self.status_label = Label(
            self, text=DISCONNECTED if self.device is None else CONNECTED, bg='white', width=11
        )
        self.display_btn = Button(self, text='Display')
        self.del_btn = Button(self, text='Delete')
        self.name_label.grid(row=0, column=0, sticky=W, padx=(10, 0))
        self.ip_port_label.grid(row=0, column=1, sticky=W, padx=(10, 0))
        self.status_label.grid(row=0, column=2, sticky=W, padx=(10, 0))
        self.display_btn.grid(row=0, column=3, sticky=W, padx=(10, 0))
        self.del_btn.grid(row=0, column=4, sticky=W, padx=(10, 0))
    def set_on_del_click(self, on_click=lambda self: self):
        """Wire the Delete button to call ``on_click(self)``."""
        self.del_btn.config(command=lambda: on_click(self))
    def set_on_display_click(self, on_click=lambda self: self):
        """Wire the Display button: reconnect, build the frame, notify."""
        def callback():
            device = adb.bridge.get_device(self.ip, self.port)
            if device is None:
                # Unreachable: keep the current status and do nothing.
                return
            if self.device_frame is None:
                self.status_label.config(text=DISCONNECTED if device is None else CONNECTED)
                # master.master is presumably DeviceListFrame — TODO confirm.
                width, height = self.master.master.windows_size
                self.device_frame = SelectedDeviceFrame(self.main_frame, device, width=width, height=height)
                self.device_frame.grid(row=0, column=0, sticky=N + W)
                self.device_frame.grid_forget()
            on_click(self)
        self.display_btn.config(command=callback)
class AddDeviceFrame(Frame):
    """Form row with name/ip/port entries and an 'Add' button.

    ``set_on_add_click`` wires the button to validate the input, invoke the
    caller's hook and persist the new device in the shared config list.
    """

    def __init__(self, parent, main_frame=None, cnf=None, **kwargs):
        """Build the entry widgets.

        BUG FIX: DeviceListFrame passes ``main_frame`` as the second
        positional argument; previously it landed in the unused ``cnf``
        slot and was silently dropped. ``cnf`` also no longer uses a
        mutable default.
        """
        Frame.__init__(self, parent, kwargs)
        self.main_frame = main_frame
        self.name_label = Label(self, text='name: ')
        self.name_entry = Entry(self)
        self.ip_label = Label(self, text='ip: ')
        self.ip_entry = Entry(self)
        self.port_label = Label(self, text='port: ')
        self.port_entry = Entry(self)
        self.add_btn = Button(self, text='Add', width=10)

        def ip_entry_validate_cmd(value, action_type):
            # action_type '1' == insertion; allow only digits and dots,
            # and reject a leading zero.
            if action_type == '1':
                if not value[-1].isdigit() and value[-1] != '.':
                    return False
                if value[0] == '0':
                    return False
            return True

        def port_entry_validate_cmd(value, action_type):
            # Ports are plain digits with no leading zero.
            if action_type == '1':
                if not value[-1].isdigit():
                    return False
                if value[0] == '0':
                    return False
            return True

        self.name_entry.config(width=10)
        self.ip_entry.config(width=15, validate='key', validatecommand=(
            self.register(ip_entry_validate_cmd), '%P', '%d'
        ))
        self.port_entry.config(width=8, validate='key', validatecommand=(
            self.register(port_entry_validate_cmd), '%P', '%d'))
        self.name_label.grid(row=0, column=0, sticky=W, padx=5)
        self.name_entry.grid(row=0, column=1, sticky=W, padx=5)
        self.ip_label.grid(row=0, column=2, sticky=W, padx=5)
        self.ip_entry.grid(row=0, column=3, sticky=W, padx=5)
        self.port_label.grid(row=0, column=4, sticky=W, padx=5)
        self.port_entry.grid(row=0, column=5, sticky=W, padx=5)
        self.add_btn.grid(row=0, column=6, sticky=W, padx=5)

    def set_on_add_click(self, on_click=lambda ip, port: None):
        """Attach the Add handler: validate, notify, persist the device."""
        def callback():
            name = self.name_entry.get()
            ip = self.ip_entry.get()
            port = self.port_entry.get()
            # Basic dotted-quad shape check (octet ranges are not enforced).
            if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip) is None:
                return
            # BUG FIX: int('') raised ValueError when the port field was
            # left empty; guard before converting.
            if not port.isdigit() or not (1 <= int(port) <= 65535):
                return
            on_click(name, ip, port)
            self.master.devices_config.append(
                {
                    'name': name,
                    'ip': ip,
                    'port': port
                }
            )
            write_device_config(self.master.devices_config)
        self.add_btn.config(command=callback)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiMarketingDataSmartactivityConfigResponse(AlipayResponse):
    """Response model for the Koubei smart-activity marketing config API.

    Each gateway field is exposed as a plain read/write property;
    ``parse_response_content`` copies every recognised key from the decoded
    payload onto the matching attribute.
    """

    # All payload keys this response understands.
    _FIELD_NAMES = (
        'activity_type', 'activity_valid_days', 'config_code', 'crowd_group',
        'ext_info', 'item_id', 'item_name', 'min_consume', 'min_cost',
        'pro_type', 'voucher_type', 'voucher_valid_days', 'worth_value',
    )

    def __init__(self):
        super(KoubeiMarketingDataSmartactivityConfigResponse, self).__init__()
        self._activity_type = None
        self._activity_valid_days = None
        self._config_code = None
        self._crowd_group = None
        self._ext_info = None
        self._item_id = None
        self._item_name = None
        self._min_consume = None
        self._min_cost = None
        self._pro_type = None
        self._voucher_type = None
        self._voucher_valid_days = None
        self._worth_value = None

    @property
    def activity_type(self):
        return self._activity_type

    @activity_type.setter
    def activity_type(self, value):
        self._activity_type = value

    @property
    def activity_valid_days(self):
        return self._activity_valid_days

    @activity_valid_days.setter
    def activity_valid_days(self, value):
        self._activity_valid_days = value

    @property
    def config_code(self):
        return self._config_code

    @config_code.setter
    def config_code(self, value):
        self._config_code = value

    @property
    def crowd_group(self):
        return self._crowd_group

    @crowd_group.setter
    def crowd_group(self, value):
        self._crowd_group = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value

    @property
    def min_consume(self):
        return self._min_consume

    @min_consume.setter
    def min_consume(self, value):
        self._min_consume = value

    @property
    def min_cost(self):
        return self._min_cost

    @min_cost.setter
    def min_cost(self, value):
        self._min_cost = value

    @property
    def pro_type(self):
        return self._pro_type

    @pro_type.setter
    def pro_type(self, value):
        self._pro_type = value

    @property
    def voucher_type(self):
        return self._voucher_type

    @voucher_type.setter
    def voucher_type(self, value):
        self._voucher_type = value

    @property
    def voucher_valid_days(self):
        return self._voucher_valid_days

    @voucher_valid_days.setter
    def voucher_valid_days(self, value):
        self._voucher_valid_days = value

    @property
    def worth_value(self):
        return self._worth_value

    @worth_value.setter
    def worth_value(self, value):
        self._worth_value = value

    def parse_response_content(self, response_content):
        """Populate the fields present in ``response_content``.

        Replaces the original 13 copy-pasted ``if key in response`` blocks
        with a single loop over the known field names; unknown keys are
        ignored, exactly as before.
        """
        response = super(KoubeiMarketingDataSmartactivityConfigResponse, self).parse_response_content(response_content)
        for field_name in self._FIELD_NAMES:
            if field_name in response:
                setattr(self, field_name, response[field_name])
|
import time
import random

# --- One-round "killer" mini game.
# Fixes over the original:
#  * ``yk == "1" or "2" or ...`` was always truthy (non-empty string
#    literals), so every input counted as an attack; use a membership test.
#  * ``yk == '1' or "2" and ...`` had the same or/and precedence bug.
#  * File handles were opened at import time and never closed; use ``with``.

score = 0
p1 = random.randint(1, 4)
p2 = random.randint(1, 4)
p3 = random.randint(1, 4)
p4 = random.randint(1, 5)        # unused in the game logic; kept for parity
p5 = random.randint(1, 4)
p11 = random.randint(1, 5) * p3  # unused in the game logic; kept for parity
rs = random.randint(9, 12)       # remaining lives
sc22 = random.randint(45, 55)    # points per successful attack

# The original also truncated name.txt without writing anything; preserve
# that side effect but close the handle immediately.
with open("name.txt", "w"):
    pass


def _record_score(final_score):
    # Append the final score to the running scores file and close it.
    with open("scores.txt", "a") as scores_file:
        scores_file.write(str(final_score) + " ")


while True:
    yk = input("You are the killer type 1,2,3,4 or 5 to kill someone.")
    if yk in ("1", "2", "3", "4", "5"):
        print("person attacked")
        score += sc22
    if yk in ("1", "2") and 48 >= sc22:  # 48 == 3*2*2*2*2 from the original
        print("YOu were REPORTED You Lose one life:(")
        rs -= 1
    if rs == 0:
        _record_score(score)
        exit("You Lost And were reported You're score was " + str(score))
    elif score >= p1 * p2 * p5 * 2:
        print("You win")
        _record_score(score)
        exit("won")
from django.test import TestCase
from portfolios.models import ProfessionalExperience
from portfolios.factories.professional_experience_factory import create_professional_experiences_with_factory
class ProfessionalExperienceTestCase(TestCase):
    """Model-level tests for the ProfessionalExperience model."""

    __MODEL = ProfessionalExperience

    @classmethod
    def setUpTestData(cls):
        # One factory-built ProfessionalExperience shared by every test.
        cls.professional_experience = create_professional_experiences_with_factory(num_of_data=1)[0]

    def test_professional_experience_created_sucessfully(self):
        """The factory-created row can be fetched back by primary key."""
        fetched = self.__MODEL.objects.get(id=self.professional_experience.id)
        self.assertEqual(fetched.company, self.professional_experience.company)

    def test_professional_experience_company_label(self):
        """The 'company' field keeps its default verbose name."""
        label = self.professional_experience._meta.get_field('company').verbose_name
        self.assertEqual(label, 'company')

    def test_professional_experience_user_label(self):
        """The 'user' field keeps its default verbose name."""
        label = self.professional_experience._meta.get_field('user').verbose_name
        self.assertEqual(label, 'user')

    def test_professional_experience_company_max_length(self):
        """The 'company' field is capped at 150 characters."""
        limit = self.professional_experience._meta.get_field('company').max_length
        self.assertEqual(limit, 150)

    def test_professional_experience_object_name_is_company(self):
        """str() of the model is the company name."""
        expected = f'{self.professional_experience.company}'
        self.assertEqual(expected, str(self.professional_experience))

    def test_professional_experience_get_absolute_url(self):
        """get_absolute_url points at the slug-based detail page."""
        # This will also fail if the urlconf is not defined.
        self.assertEqual(
            self.professional_experience.get_absolute_url(),
            f'/portfolios/professional-experience/{self.professional_experience.slug}/detail/'
        )
|
from typing import Tuple
from utils import (multiply,
pythagorean_triplets)
# Target sum for the special Pythagorean triplet (Project Euler #9).
numbers_sum = 1_000


def special_condition(numbers: Tuple[int, ...]) -> bool:
    """Return True when the numbers add up to ``numbers_sum``."""
    total = sum(numbers)
    return total == numbers_sum
# Exactly one Pythagorean triplet sums to 1000; the trailing-comma unpacking
# asserts uniqueness by raising ValueError if the filter yields zero or more
# than one triplet. ``pythagorean_triplets`` comes from the project's utils
# module — presumably yields triplets up to the given bound; TODO confirm.
special_pythagorean_triplet, = filter(special_condition,
                                      pythagorean_triplets(numbers_sum))
assert multiply(special_pythagorean_triplet) == 31_875_000
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from functools import partial
import numpy as np
import scipy
from addict import Dict
from ....algorithms.quantization import utils as eu
from ....engines.ac_engine import ACEngine
from ....graph.model_utils import get_nodes_by_type
from ....graph.node_utils import get_all_node_outputs
from ....graph.utils import find_operation_matches
from ....samplers.creator import create_sampler
# Metrics that cannot be compared per-sample by value (ranking /
# identification style); raw logits are compared for these instead.
# BUG FIX: 'coco_orig_keypoints_precision' was listed twice.
SPECIAL_METRICS = ['cmc', 'reid_map', 'pairwise_accuracy_subsets', 'pairwise_accuracy', 'normalized_embedding_accuracy',
                   'face_recognition_tafa_pair_metric', 'localization_recall',
                   'coco_orig_keypoints_precision', 'coco_orig_segm_precision']
# Engine-side configuration needed when registering a proxy metric.
METRICS_CONFIGS = {'sigmoid_recom_loss': {'metrics': 'log_loss',
                                          'postprocessing': 'sigmoid_normalize_recommendation'},
                   'coco_precision': {'metrics': 'coco_precision'},
                   'coco_segm_precision': {'metrics': 'coco_segm_precision'}}
# Maps a metric type to the proxy metrics used for per-sample comparison
# ('persample') and for ranking ('ranking'), respectively.
METRIC2PROXY_METRIC = {
    'hit_ratio':
        {
            'persample': 'sigmoid_recom_loss',
            'ranking': 'sigmoid_recom_loss'
        },
    'ndcg':
        {
            'persample': 'sigmoid_recom_loss',
            'ranking': 'sigmoid_recom_loss'
        },
    'coco_orig_precision':
        {
            'persample': 'coco_precision'
        },
    'coco_orig_keypoints_precision':
        {
            'persample': 'coco_precision'
        },
    'coco_orig_segm_precision':
        {
            'persample': 'coco_segm_precision'
        }
}
def create_metric_config(engine, algo_config: Dict, force_logit_comparison=False,
                         logit_distance_type='cosine') -> Dict:
    """Build per-metric optimization parameters.

    :param engine: engine exposing ``get_metrics_attributes()``
    :param algo_config: algorithm config; ``algo_config.metrics``, when set,
        restricts which metrics are optimized
    :param force_logit_comparison: treat every metric as 'special', i.e.
        compare raw logits instead of per-sample metric values
    :param logit_distance_type: distance used for logit comparison
    :return: Dict of {metric_name: params with .persample / .ranking entries}
    """
    def create_metric_params(metric_name):
        engine_metrics_attributes = engine.get_metrics_attributes()
        if metric_name not in engine_metrics_attributes:
            # BUG FIX: this RuntimeError was constructed but never raised,
            # so an unknown metric fell through to a KeyError below.
            raise RuntimeError('Couldn\'t create metric parameters. '
                               'Metric {} not registered in the engine.'.format(metric_name))
        params = Dict()
        params.name = metric_name
        params.type = engine_metrics_attributes[metric_name]['type']
        params.is_special = (params.type in SPECIAL_METRICS) or force_logit_comparison
        if engine_metrics_attributes[metric_name]['direction'] == 'higher-better':
            params.comparator = (lambda a: a)
        elif engine_metrics_attributes[metric_name]['direction'] == 'higher-worse':
            params.comparator = (lambda a: -a)
        else:
            raise ValueError('Unexpected {} metric direction value.'.format(metric_name))
        # Special metrics rank samples by logit distance; ordinary metrics by
        # the direction-normalized per-sample metric difference.
        params.sort_fn = partial(sort_by_logit_distance, distance=logit_distance_type) \
            if params.is_special else partial(sort_by_metric_difference, comp_fn=params.comparator)
        return params

    def metric_to_proxy_map(metrics):
        """Determines which metrics need proxy metrics and creates metrics to proxy metrics map.
        :param metrics: optimizable metrics names
        :returns a dictionary of metrics to proxy metrics mapping {metric_name: 'persample': proxy_name,
                 'ranking': proxy_name}
                 a list of proxy metrics names to register
        """
        def update_proxy_list(proxy_metric_name):
            """Updates a list of proxy metrics names to register.
            :return a proxy metric name in accordance with the engine naming
            """
            proxy_config = METRICS_CONFIGS.get(proxy_metric_name, {})
            metric_config = proxy_config.get('metrics')
            postprocessing_config = proxy_config.get('postprocessing')
            if metric_config or postprocessing_config:
                to_register.add(proxy_metric_name)
                return metric_name_from_config(metric_config)

        match_names_config = Dict({metric_name: {} for metric_name in metrics})
        to_register = set()
        for metric_name, metric_type in metrics:
            if metric_type in METRIC2PROXY_METRIC:
                persample_metric_name = METRIC2PROXY_METRIC[metric_type].get('persample')
                persample_proxy_metric_name = update_proxy_list(persample_metric_name)
                if persample_proxy_metric_name:
                    match_names_config[metric_name].persample = persample_proxy_metric_name
                ranking_metric_name = METRIC2PROXY_METRIC[metric_type].get('ranking')
                ranking_proxy_metric_name = update_proxy_list(ranking_metric_name)
                if ranking_proxy_metric_name:
                    match_names_config[metric_name].ranking = ranking_proxy_metric_name
        return match_names_config, list(to_register)

    metrics_attributes = engine.get_metrics_attributes()
    # configure which metrics to optimize
    if algo_config.metrics:
        metrics_names = []
        for metric in algo_config.metrics:
            metric_type = metric.type if metric.type else metric.name
            metrics_names.append((metric.name, metric_type))
    else:
        metrics_names = [(metric_name, metric_attr.get('type', metric_name)) for metric_name, metric_attr
                         in metrics_attributes.items()]
    # register proxy metrics
    metrics_to_proxy_map, metrics_to_register = metric_to_proxy_map(metrics_names)
    register_metrics(engine, metrics_to_register)
    metrics_config = Dict()
    # Fall back to the metric itself when no proxy is mapped.
    for metric, _ in metrics_names:
        persample_name = metrics_to_proxy_map[metric].get('persample', metric)
        ranking_name = metrics_to_proxy_map[metric].get('ranking', metric)
        metrics_config[metric].persample = create_metric_params(persample_name)
        metrics_config[metric].ranking = create_metric_params(ranking_name)
        metrics_config[metric].update(create_metric_params(metric))
    return metrics_config
def metric_name_from_config(metric_config):
    """Extract a metric name from a string or dict config; None otherwise."""
    if isinstance(metric_config, dict):
        # Prefer an explicit name, falling back to the metric type.
        return metric_config.get('name', metric_config['type'])
    return metric_config if isinstance(metric_config, str) else None
def register_metrics(engine, metrics_names: list):
    """Registers metrics and postprocessing in the engine.
    :param engine: an engine in which metrics will be registered
    :param metrics_names: a list of metrics names
    """
    already_registered = engine.get_metrics_attributes()
    for name in metrics_names:
        if name not in METRICS_CONFIGS:
            raise ValueError('Cannot register metric. Unsupported name {}.'.format(name))
        proxy_config = METRICS_CONFIGS.get(name, {})
        if 'metrics' in proxy_config:
            metric_config = proxy_config['metrics']
            # Skip metrics the engine already knows about.
            if metric_name_from_config(metric_config) not in already_registered:
                register_metric(engine, metric_config)
        if 'postprocessing' in proxy_config:
            register_postprocessing(engine, proxy_config['postprocessing'])
def sort_by_logit_distance(u, v, reverse=False, distance='cosine'):
    """Order sample indices by the distance between paired logit vectors.

    :param u, v: equal-length sequences of per-sample logit arrays
    :param reverse: return largest-distance-first when True
    :param distance: one of 'cosine', 'kd' (KL on softmaxed logits), 'mse'
    :return: np.ndarray of sample indices, closest pair first
    :raises RuntimeError: when the two lists differ in length
    """
    if len(u) != len(v):
        raise RuntimeError('Cannot compare samples. '
                           'Lists of per-sample metric results should be the same length.')

    def kd_distance(a, b):
        return scipy.stats.entropy(scipy.special.softmax(a),
                                   scipy.special.softmax(b))

    def mse_distance(a, b):
        return np.mean((a - b) ** 2)

    metric_fn = {
        'cosine': scipy.spatial.distance.cosine,
        'kd': kd_distance,
        'mse': mse_distance,
    }[distance]
    pairwise = np.array([metric_fn(a.flatten(), b.flatten())
                         for a, b in zip(u, v)])
    order = np.argsort(pairwise)
    return np.flip(order) if reverse else order
def sort_by_metric_difference(u, v, comp_fn=lambda a: a, reverse=False):
    """Order sample indices by the (normalized) per-sample metric difference.

    :param u, v: equal-length sequences of per-sample metric values
    :param comp_fn: direction normalizer applied to the difference
    :param reverse: return largest-difference-first when True
    :raises RuntimeError: when the two lists differ in length
    """
    if len(u) != len(v):
        raise RuntimeError('Cannot compare samples. '
                           'Lists of per-sample metric results should be the same length.')
    diff = comp_fn(np.asarray(u) - np.asarray(v))
    order = np.argsort(diff)
    return np.flip(order) if reverse else order
def register_metric(engine, metric_config):
    """Attach a new metric to the engine; only ACEngine supports this."""
    if not isinstance(engine, ACEngine):
        raise NotImplementedError('{} engine cannot register new metrics.'
                                  .format(type(engine).__name__))
    engine.add_metric(metric_config)
def register_postprocessing(engine, postprocessing_config):
    """Attach new postprocessing to the engine; only ACEngine supports this."""
    if not isinstance(engine, ACEngine):
        raise NotImplementedError('{} engine cannot register new postprocessing.'
                                  .format(type(engine).__name__))
    engine.add_postprocessing(postprocessing_config)
def is_preset_performance(config: Dict):
    """Tell whether the quantization config is effectively 'performance'.

    Explicit symmetric modes win; any asymmetric mode loses; otherwise the
    named preset decides.
    """
    weights_mode = config.weights.mode
    activations_mode = config.activations.mode
    if weights_mode == 'symmetric' and activations_mode == 'symmetric':
        return True
    if 'asymmetric' in (weights_mode, activations_mode):
        return False
    return config.preset == 'performance'
def get_mixed_preset_config(config: Dict):
    """Return a copy of ``config`` switched to the 'mixed' preset.

    Asymmetric activations, symmetric weights; modes are only overridden
    when they were explicitly set. The input config is left untouched.
    """
    mixed = deepcopy(config)
    mixed.update(preset='mixed')
    if mixed.activations.mode:
        mixed.activations.mode = 'asymmetric'
    if mixed.weights.mode:
        mixed.weights.mode = 'symmetric'
    return mixed
def get_num_of_quantized_ops(model, quantizable_operations):
    """Count distinct quantizable ops fed (transitively) by FakeQuantize nodes.

    Walks downstream from every FakeQuantize output; a matching op stops the
    walk along that path, anything else is expanded further.
    """
    quantized_ops = set()
    pending = []
    for fq_node in get_nodes_by_type(model, ['FakeQuantize']):
        pending.extend(get_all_node_outputs(fq_node))
    while pending:
        node = pending.pop()
        if find_operation_matches(quantizable_operations, node):
            quantized_ops.add(node)
        else:
            pending.extend(get_all_node_outputs(node))
    return len(quantized_ops)
def evaluate_model(
        model, engine,
        dataset_size,
        subset_indices=None,
        print_progress=True,
        metrics_config=None,
        per_sample_subset_indices=None,
        output_node_name=None,
        stats_layout=None,
):
    """Evaluates the model and processes metrics values
    :param model: model to evaluate
    :param engine: inference engine used to run the prediction
    :param dataset_size: number of samples in the evaluation dataset
    :param subset_indices: image indices to evaluate on. If None evaluate on whole dataset
    :param print_progress: Whether to print inference progress
    :param metrics_config: per-metric parameters; only metrics present here
                           are returned in the aggregate result
    :param per_sample_subset_indices: image indices for which to return per-sample metrics.
           If None for all predicted images
    :param output_node_name: output node to take raw logits from
    :param stats_layout: statistics-collection layout forwarded to the engine
    :returns a dictionary of predicted metrics {metric_name: value}
             a dictionary of per-sample metrics values {metric_name: [values]}
    """
    engine.set_model(model)
    # Switch the engine to the evaluation split for the duration of the run.
    eu.select_evaluation_dataset(engine)
    if not subset_indices:
        subset_indices = range(dataset_size)
    index_sampler = create_sampler(engine, samples=subset_indices)
    (metrics_per_sample, metrics), raw_output = engine.predict(stats_layout=stats_layout,
                                                               sampler=index_sampler,
                                                               metric_per_sample=True,
                                                               print_progress=print_progress)
    raw_output = process_raw_output(raw_output, output_node_name)
    metrics_per_sample = process_per_sample_metrics(metrics_per_sample,
                                                    metrics_config,
                                                    per_sample_subset_indices,
                                                    raw_output=raw_output)
    # Keep only the metrics the caller asked to optimize.
    metrics = dict((name, value) for name, value in metrics.items() if name in metrics_config)
    # Restore the engine's default dataset before returning.
    eu.reset_dataset_to_default(engine)
    return metrics, metrics_per_sample
def process_raw_output(output, output_node_name):
    """Pull the logits for ``output_node_name``; empty list when no output."""
    if output:
        return output[output_node_name]['output_logits']
    return []
def process_per_sample_metrics(metrics_per_sample, metrics_config,
                               indices=None, raw_output=None):
    """Creates a dictionary of per-sample metrics values {metric_name: [values]}
    :param metrics_per_sample: list of per-sample metrics
    :param metrics_config: per-metric parameters providing .persample entries
    :param indices: indices of samples to be considered. All if None
    :param raw_output: raw output from the model
    :return processed dictionary
    """
    metrics_to_keep = {config.persample.name: config.persample
                       for config in metrics_config.values()}
    if not metrics_to_keep:
        return {}
    processed_metrics_per_sample = dict((name, []) for name in metrics_to_keep)
    # 'Special' metrics are compared on raw logits rather than metric values.
    for metric_name, metric_params in metrics_to_keep.items():
        if metric_params.is_special:
            processed_metrics_per_sample[metric_name] = raw_output
    for value in metrics_per_sample:
        if value['metric_name'] in metrics_to_keep:
            if metrics_to_keep[value['metric_name']].is_special:
                continue
            # Collapse possibly-vector per-sample results to a NaN-ignoring
            # scalar mean; preserve None for missing results.
            if value['result'] is not None:
                result_value = np.nanmean(value['result'])
            else:
                result_value = None
            processed_metrics_per_sample[value['metric_name']].append(result_value)
    # check that all metrics have equal number of samples
    if not len({len(value) for value in processed_metrics_per_sample.values()}) == 1:
        raise RuntimeError('Inconsistent number of per-sample metric values')
    if indices:
        # Restrict every metric to the requested sample indices.
        for name, values in processed_metrics_per_sample.items():
            processed_metrics_per_sample[name] = [values[i] for i in indices]
    return processed_metrics_per_sample
|
"""989. Add to Array-Form of Integer
https://leetcode.com/problems/add-to-array-form-of-integer/
For a non-negative integer X, the array-form of X is an array of its digits
in left to right order. For example, if X = 1231, then the array form is [1,2,3,1].
Given the array-form A of a non-negative integer X, return the array-form of the integer X+K.
Example 1:
Input: A = [1,2,0,0], K = 34
Output: [1,2,3,4]
Explanation: 1200 + 34 = 1234
Example 2:
Input: A = [2,7,4], K = 181
Output: [4,5,5]
Explanation: 274 + 181 = 455
Example 3:
Input: A = [2,1,5], K = 806
Output: [1,0,2,1]
Explanation: 215 + 806 = 1021
Example 4:
Input: A = [9,9,9,9,9,9,9,9,9,9], K = 1
Output: [1,0,0,0,0,0,0,0,0,0,0]
Explanation: 9999999999 + 1 = 10000000000
Note:
1 <= A.length <= 10000
0 <= A[i] <= 9
0 <= K <= 10000
If A.length > 1, then A[0] != 0
"""
from typing import List
class Solution:
    """Two takes on LeetCode 989: add integer K to a digit-array number."""

    def add_to_array_form(self, a: List[int], k: int) -> List[int]:
        """Add in place into whichever digit list is longer."""
        def add_into(longer: List[int], shorter: List[int]) -> List[int]:
            n_long, n_short = len(longer), len(shorter)
            carry = 0
            pos = 1
            # Add overlapping digit pairs from the right.
            while pos <= n_short:
                carry, digit = divmod(longer[n_long - pos] + shorter[n_short - pos] + carry, 10)
                longer[n_long - pos] = digit
                pos += 1
            # Propagate any remaining carry through the longer list.
            while carry and pos <= n_long:
                carry, digit = divmod(longer[n_long - pos] + carry, 10)
                longer[n_long - pos] = digit
                pos += 1
            if carry:
                longer.insert(0, 1)
            return longer

        digits_k = [int(ch) for ch in str(k)]
        if len(a) < len(digits_k):
            return add_into(digits_k, a)
        return add_into(a, digits_k)

    def add_to_array_form_2(self, a: List[int], k: int) -> List[int]:
        """Zero-pad to equal length first, then add column by column."""
        def add_same_length(xs: List[int], ys: List[int]) -> List[int]:
            carry = 0
            for idx in reversed(range(len(xs))):
                carry, xs[idx] = divmod(xs[idx] + ys[idx] + carry, 10)
            if carry:
                xs.insert(0, 1)
            return xs

        digits_k = [int(ch) for ch in str(k)]
        pad = len(a) - len(digits_k)
        if pad > 0:
            digits_k = [0] * pad + digits_k
        else:
            a = [0] * (-pad) + a
        return add_same_length(a, digits_k)
|
#!/bin/python3
# https://www.hackerrank.com/challenges/minimum-swaps-2
import sys
def minimum_swaps(arr):
    """Return the minimum number of swaps needed to sort ``arr``.

    Assumes distinct values. Classic cycle-chasing: each element is swapped
    directly into its sorted position, so every swap fixes at least one
    slot. O(n log n) for the sort, O(n) for the swapping. ``arr`` is
    modified in place.
    """
    # value -> its index in sorted order ('dict' no longer shadows the builtin)
    target_index = {v: i for i, v in enumerate(sorted(arr))}
    swaps = 0
    i = 0
    while i < len(arr):
        j = target_index[arr[i]]
        if j != i:
            arr[i], arr[j] = arr[j], arr[i]
            swaps += 1
        else:
            i += 1
    return swaps


if __name__ == "__main__":
    input()  # first line is the element count; len(a) is used instead
    a = [int(s) for s in input().strip().split(' ')]
    print(minimum_swaps(a))
|
"""This is the top level module for my raspberry pi syncing program.
It's definitely more complicated than it needs to be. In short, all it does is
provide a way to run rsync jobs using config files. These would then ideally be
scheduled via cron.
I didn't just use pure bash because I want it to be easily modular and
configurable, which is tough in bash.
My design intention is not very clear here. I've probably straddled the worst
of both worlds. I've written this application so that it can be configured and
re-installed in other places. But it is also designed around my specific server
client layout and is probably useless to anyone but me. Perhaps the
configurability and re-usability will be useful for me as I change my setup
and get new computers. Regardless, writing the app in this way is a useful
exercise in interacting with the rest of the development world.
"""
import atexit
import argparse
import os
import sys
import subprocess
import piserver.desktop_notify
import piserver.config
import piserver.misc
import piserver.fileio
import piserver.constants
import piserver.jobrecords
def main():
    """Entry point: constructing a BackupJob parses args and runs the job."""
    BackupJob()
def listjobs():
    """Print the names of all available backup-job config files, one per line."""
    print('Available backup jobs:')
    for job in piserver.fileio.get_user_job_config_files_list():
        print('\t' + job)
class BackupJob(object):
    """Class representing a single backup job.

    Parses the command line, loads the job configuration, records the job,
    registers an atexit failure handler and immediately runs the rsync backup.
    """
    def __init__(self):
        parser = argparse.ArgumentParser(
            prog=piserver.constants.PROGRAM,
            description=piserver.constants.DESCRIPTION)
        # Only required argument is the name of the job file to read from.
        parser.add_argument('jobname', type=str, help='Job configuration file name (the local name)')
        parser.add_argument(
            '-v', '--version', action='version',
            version='%(prog)s ' + piserver.constants.__version__)
        parser.add_argument('--dryrun', dest='dryrun', action='store_const',
                            const=True, default=False,
                            help='Sets the dry run flag to true (no data is actually copied)')
        args = parser.parse_args(sys.argv[1:])
        # read application and job config files
        self.job_config = piserver.config.JobConfig(
            args.jobname, dryrun=args.dryrun)
        # compute the rsync source and destination paths
        self.src = self.job_config.gen_rsync_source()
        self.dst = self.job_config.gen_rsync_target()
        # setup job records
        self.jobid = piserver.jobrecords.create_new_record(self.job_config)
        self.completed = False
        # set up failure catch for unexpected interpreter exits
        atexit.register(self._failure_catch)
        self._run_backup()

    def _failure_catch(self):
        """Cleanup handler: record a failure if the job never completed."""
        if self.completed:
            return
        piserver.desktop_notify.notify_failure(self.jobid, self.job_config, '?')
        piserver.jobrecords.record_failure(self.jobid, self.job_config)
        # BUGFIX: record_entry takes the job id first (as in _run_backup);
        # the original call omitted it.
        piserver.jobrecords.record_entry(
            self.jobid, 'failure was caught by _failure_catch')

    def _run_backup(self):
        """Build the rsync command, run it, and record/notify the outcome."""
        rsync_cmd = ['rsync', '-a']
        if self.job_config.is_dry_run():
            rsync_cmd.append('-n')
        if self.job_config.rsync_delete:
            rsync_cmd.append('--delete')
        if self.job_config.rsync_verbose:
            rsync_cmd.append('-v')
        for ignore in self.job_config.ignore_files:
            rsync_cmd.append('--exclude=' + ignore)
        rsync_cmd.append(self.src)
        rsync_cmd.append(self.dst)
        # record/notify the start before the (possibly long) rsync call
        piserver.jobrecords.record_started(self.jobid, self.job_config)
        piserver.jobrecords.record_call_stack(self.jobid, rsync_cmd)
        piserver.desktop_notify.notify_start(self.jobid, self.job_config)
        code = subprocess.call(rsync_cmd)
        if code == 0:
            piserver.jobrecords.record_success(self.jobid, self.job_config)
            piserver.desktop_notify.notify_success(self.jobid, self.job_config)
        else:
            piserver.jobrecords.record_failure(self.jobid, self.job_config)
            piserver.desktop_notify.notify_failure(self.jobid, self.job_config, code)
            piserver.jobrecords.record_entry(
                self.jobid, 'subprocess failed with code %d' % code)
        # use a real boolean for consistency with the False initializer
        self.completed = True
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8; -*-
__all__ = ['Bunch']
from collections.abc import Mapping, MutableMapping, Container, Iterable, Sized
class Bunch:
    """A bunch of named values.

    Can be used instead of a `dict` when `b.someattr` is more readable than
    `d['somekey']`. In terms of `collections.abc`, a `MutableMapping`,
    `Container`, `Iterable`, `Sized`.
    Example::
        b = Bunch(cat="meow", dog="woof")
        assert b.cat == "meow"
        assert b.dog == "woof"
    """
    def __init__(self, **bindings):
        self._data = bindings
        # Record the API names (get, pop, ...) so attribute writes cannot
        # shadow them; the temporary [] keeps __setattr__ working while
        # dir(self) runs.
        self._reserved_names = []
        self._reserved_names = dir(self)

    def copy(self):
        """Return a shallow copy (the values are shared, not copied)."""
        return Bunch(**{k: v for k, v in self._data.items()})

    def __getattr__(self, name):
        # BUGFIX: missing attributes must raise AttributeError, not KeyError,
        # so hasattr()/getattr(obj, name, default) work as expected.
        try:
            return self._data[name]
        except KeyError:
            raise AttributeError(name) from None

    def __setattr__(self, name, value):
        if name in ("_data", "_reserved_names"):
            return super().__setattr__(name, value)
        if name in self._reserved_names:  # prevent shadowing get, pop, et al.
            raise AttributeError(f"Cannot write to reserved attribute '{name}'")
        self._data[name] = value

    def __delattr__(self, name):
        # BUGFIX: same KeyError -> AttributeError conversion as __getattr__.
        try:
            del self._data[name]
        except KeyError:
            raise AttributeError(name) from None

    # Container, Iterable, Sized
    def __contains__(self, name): return self._data.__contains__(name)
    def __iter__(self): return self._data.__iter__()
    def __len__(self): return len(self._data)
    # Mapping
    def __getitem__(self, name): return self._data.__getitem__(name)
    def __setitem__(self, name, value): return self._data.__setitem__(name, value)
    def __delitem__(self, name): return self._data.__delitem__(name)
    def keys(self): return self._data.keys()
    def items(self): return self._data.items()
    def values(self): return self._data.values()
    def get(self, name, default=None): return self[name] if name in self else default
    def __eq__(self, other): return other == self._data
    def __ne__(self, other): return other != self._data
    # MutableMapping
    def pop(self, name, *default): return self._data.pop(name, *default)
    def popitem(self): return self._data.popitem()
    def clear(self): return self._data.clear()
    def update(self, **bindings): self._data.update(**bindings)
    def setdefault(self, name, *default): return self._data.setdefault(name, *default)

    def __repr__(self):  # pragma: no cover
        bindings = [f"{name:s}={repr(value)}" for name, value in self._data.items()]
        args = ", ".join(bindings)
        return f"Bunch({args})"
# Register Bunch as a virtual subclass of the standard container ABCs so
# isinstance checks against them succeed.
for _abc in (Mapping, MutableMapping, Container, Iterable, Sized):
    _abc.register(Bunch)
del _abc
|
"""
util.py
Implements util functions
"""
__version__ = '1.0'
__author__ = 'Hugo Chauvary'
__email__ = 'chauvary.hugo@gmail.com'
import sys
import os
import functools
from random import choice
from inspect import getframeinfo, stack
import numpy as np
from module.logger import Logger
def log_item(func):
    """
    Log decorator: wraps *func* so that entry, exit value and any raised
    exception are logged with the real caller's function/file names.
    Parameters
    ----------
    func:
        - decorated function
    Returns
    -------
    wrapper:
        - function
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger = Logger().logger # get a logger
        # generate file/function name for calling functions
        # __func.name__ will give the name of the caller function
        # ie. wrapper and caller file name ie log_item.py
        # using extra param to get the actual function name
        # by leveraging inspect.getframeinfo
        pyfile = getframeinfo(stack()[1][0])
        extra_args = {
            'func_name_override': f'{func.__globals__["__name__"]}.{func.__name__}',
            'file_name_override': os.path.basename(pyfile.filename)
        }
        # function begin checkpoint
        logger.info(f"begin function", extra=extra_args)
        try: # function end checkpoint
            value = func(*args, **kwargs)
            # NOTE(review): a falsy return (0, [], '') is logged as plain
            # "success!" rather than its value — confirm that is intended.
            if value:
                logger.info(f"end function, returned {value!r}", extra=extra_args)
            else:
                logger.info(f"end function, success!", extra=extra_args)
            return value
        except:
            # bare except is deliberate here: log every exception (including
            # BaseException subclasses) and re-raise unchanged
            logger.error(f"exception: {str(sys.exc_info()[1])}", extra=extra_args)
            raise
    return wrapper
def classify_labels(labels: np.ndarray) -> list:
    """
    Classify MNIST labels by value.
    Parameters
    ----------
    labels:
        - MNIST testing labels (digit values 0-9)
    Returns
    -------
    sorted_labels:
        - list of 10 lists; bucket d holds the positions of label d
    """
    # One bucket per digit; record each label's position in its bucket.
    buckets = [[] for _ in range(10)]
    for position, digit in enumerate(labels):
        buckets[digit].append(position)
    return buckets
def get_spacing(
    sequence: list,
    image_width: int,
    min_spacing: int,
    digit_width: int = 28) -> int:
    """
    Generates a spacing interval between two consecutive digits.
    Follows a uniform distribution over [min_spacing, max feasible spacing].
    Parameters
    ----------
    sequence:
        - A list-like containing the numerical values of the digits from which
          the sequence will be generated (for example [3, 5, 0])
    image_width:
        - Digits sequence width (pixels)
    min_spacing:
        - Minimum spacing between digits (pixels)
    digit_width:
        - Width of one digit, default 28 (pixels)
    Returns
    -------
    spacing:
        - Chosen spacing between consecutive digits (pixels)
    Raises
    ------
    ValueError:
        - When the digits cannot fit with more than min_spacing between them.
    """
    # BUGFIX: a single digit needs no spacing; the original fell through to
    # the range check with theo_spacing == 0 and always raised ValueError.
    if len(sequence) == 1:
        return 0
    # Largest per-gap spacing that still fits, derived from:
    # image_width = len(sequence) * digit_width + theo_spacing * (len(sequence) - 1)
    theo_spacing = (image_width - len(sequence) * digit_width) / (len(sequence) - 1)
    if not theo_spacing > min_spacing:
        raise ValueError(f'uniform spacing {int(theo_spacing)} '
                         + f'must be greater than {min_spacing}')
    # Pick uniformly from the inclusive range [min_spacing, int(theo_spacing)].
    # BUGFIX: the inclusive upper bound also avoids an empty range (and an
    # IndexError from choice) when int(theo_spacing) == min_spacing.
    return choice(range(min_spacing, int(theo_spacing) + 1))
def validate_int(arg: int):
    """
    Abstract
    ----------
    Checks that the parsed argument is strictly positive.
    Parameters
    ----------
    arg:
        - parsearg argument that needs validation
    Returns
    -------
    arg:
        - validated parsearg argument
    """
    if arg <= 0:
        raise ValueError("Must be strictly positive")
    return arg
import sys
import base64
from .exceptions import WinRMTransportError, UnauthorizedError
# Kerberos auth is optional; HttpKerberos raises if used without the package.
HAVE_KERBEROS = False
try:
    import kerberos
    HAVE_KERBEROS = True
except ImportError:
    pass
# Use version_info rather than string-slicing sys.version: comparing the
# first character of the version string is fragile, version_info is the
# documented way to detect the major version.
is_py2 = sys.version_info[0] == 2
if is_py2:
    from urllib2 import Request, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, HTTPSHandler
    from urllib2 import urlopen, build_opener, install_opener
    from urlparse import urlparse
    from httplib import HTTPSConnection
else:
    from urllib.request import Request, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, HTTPSHandler
    from urllib.request import urlopen, build_opener, install_opener
    from urllib.parse import urlparse
    from http.client import HTTPSConnection
class HttpTransport(object):
    """Base transport: holds connection settings for a WinRM endpoint."""
    def __init__(self, endpoint, username, password):
        self.endpoint = endpoint
        self.username = username
        self.password = password
        self.user_agent = 'Python WinRM client'
        # WinRM manages its own operation timeouts, so keep the raw HTTP
        # timeout very large to stay out of the way.
        self.timeout = 3600

    def basic_auth_only(self):
        """Placeholder: would strip all non-Basic authentication handlers."""
        pass

    def no_sspi_auth(self):
        """Placeholder: would remove Negotiate/NTLM handlers, keeping credentials."""
        pass
class HttpPlaintext(HttpTransport):
    """Plain (non-TLS) HTTP transport using urllib with Basic authentication."""
    def __init__(self, endpoint, username='', password='', disable_sspi=True, basic_auth_only=True):
        super(HttpPlaintext, self).__init__(endpoint, username, password)
        if disable_sspi:
            self.no_sspi_auth()
        if basic_auth_only:
            self.basic_auth_only()
        self._headers = {'Content-Type': 'application/soap+xml;charset=UTF-8',
                         'User-Agent': 'Python WinRM client'}
    def _setup_opener(self):
        # Install a global urllib opener that answers Basic-auth challenges
        # for this endpoint with the stored credentials.
        password_manager = HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, self.endpoint, self.username, self.password)
        auth_manager = HTTPBasicAuthHandler(password_manager)
        opener = build_opener(auth_manager)
        install_opener(opener)
    def send_message(self, message):
        """POST a SOAP *message* to the endpoint and return the response body.

        NOTE(review): on Python 3 urllib expects ``data`` to be bytes and
        ``ex.read()`` returns bytes, while the ``in message`` /
        ``in response_text`` checks below assume str — confirm callers
        encode/decode consistently.
        """
        headers = self._headers.copy()
        headers['Content-Length'] = str(len(message))
        self._setup_opener()
        request = Request(self.endpoint, data=message, headers=headers)
        try:
            response = urlopen(request, timeout=self.timeout)
            # Version 1.1 of WinRM adds the namespaces in the document instead of the envelope so we have to
            # add them ourselves here. This should have no effect on version 2.
            response_text = response.read()
            return response_text
        except HTTPError as ex:
            if ex.code == 401:
                raise UnauthorizedError(transport='plaintext', message=ex.msg)
            response_text = ex.read()
            # Per http://msdn.microsoft.com/en-us/library/cc251676.aspx rule 3,
            # should handle this 500 error and retry receiving command output.
            if 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive' in message and 'Code="2150858793"' in response_text:
                # TODO raise TimeoutError here instead of just return text
                return response_text
            error_message = 'Bad HTTP response returned from server. Code {0}'.format(ex.code)
            if ex.msg:
                error_message += ', {0}'.format(ex.msg)
            raise WinRMTransportError('http', error_message)
        except URLError as ex:
            raise WinRMTransportError('http', ex.reason)
class HTTPSClientAuthHandler(HTTPSHandler):
    """urllib HTTPS handler that presents a client certificate."""
    def __init__(self, cert, key):
        super(HTTPSClientAuthHandler, self).__init__()
        self.cert = cert
        self.key = key

    def https_open(self, req):
        # Route connection creation through our factory so the client
        # certificate and key are attached to every connection.
        return self.do_open(self.getConnection, req)

    def getConnection(self, host, timeout=300):
        return HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
class HttpSSL(HttpPlaintext):
    """Uses SSL to secure the transport."""
    def __init__(self, endpoint, username, password, ca_trust_path=None, disable_sspi=True, basic_auth_only=True,
                 cert_pem=None, cert_key_pem=None):
        # BUGFIX: forward the auth flags to the parent. The original called
        # super() without them, so the parent unconditionally applied its
        # True defaults (ignoring caller-supplied False) and the child then
        # re-applied the same calls a second time.
        super(HttpSSL, self).__init__(endpoint, username, password,
                                      disable_sspi=disable_sspi,
                                      basic_auth_only=basic_auth_only)
        self._cert_pem = cert_pem
        self._cert_key_pem = cert_key_pem
        # NOTE(review): ca_trust_path is accepted but currently unused
        # (kept for interface compatibility with the Ruby original).
        if self._cert_pem:
            # Mutual-auth security profile per DMTF WS-Man.
            self._headers['Authorization'] = "http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/https/mutual"

    def _setup_opener(self):
        """Use the client-certificate handler when a cert is configured."""
        if not self._cert_pem:
            super(HttpSSL, self)._setup_opener()
        else:
            opener = build_opener(HTTPSClientAuthHandler(self._cert_pem, self._cert_key_pem))
            install_opener(opener)
class KerberosTicket:
    """
    Single-use GSS-API client handshake for one HTTP request.
    Implementation based on http://ncoghlan_devs-python-notes.readthedocs.org/en/latest/python_kerberos.html
    """
    def __init__(self, service):
        # Start the client-side GSS handshake for the given service
        # principal (e.g. 'HTTP@host'); the first step uses an empty token.
        ignored_code, krb_context = kerberos.authGSSClientInit(service)
        kerberos.authGSSClientStep(krb_context, '')
        # TODO authGSSClientStep may raise following error:
        #GSSError: (('Unspecified GSS failure. Minor code may provide more information', 851968),
        # ("Credentials cache file '/tmp/krb5cc_1000' not found", -1765328189))
        self._krb_context = krb_context
        gss_response = kerberos.authGSSClientResponse(krb_context)
        # Value for the HTTP Authorization request header.
        self.auth_header = 'Negotiate {0}'.format(gss_response)
    def verify_response(self, auth_header):
        """Finish the handshake against the server's WWW-Authenticate header.

        Can only be called once per ticket; a second call raises RuntimeError.
        """
        # Handle comma-separated lists of authentication fields
        for field in auth_header.split(','):
            kind, ignored_space, details = field.strip().partition(' ')
            if kind.lower() == 'negotiate':
                auth_details = details.strip()
                break
        else:
            raise ValueError('Negotiate not found in {0}'.format(auth_header))
        # Finish the Kerberos handshake
        krb_context = self._krb_context
        if krb_context is None:
            raise RuntimeError('Ticket already used for verification')
        self._krb_context = None
        kerberos.authGSSClientStep(krb_context, auth_details)
        kerberos.authGSSClientClean(krb_context)
class HttpKerberos(HttpTransport):
    """Transport that authenticates each request with a fresh Kerberos ticket."""
    def __init__(self, endpoint, realm=None, service='HTTP', keytab=None):
        """
        Uses Kerberos/GSS-API to authenticate and encrypt messages
        @param string endpoint: the WinRM webservice endpoint
        @param string realm: the Kerberos realm we are authenticating to
        @param string service: the service name, default is HTTP
        @param string keytab: the path to a keytab file if you are using one
        """
        if not HAVE_KERBEROS:
            # NOTE(review): WinRMTransportError is called with one argument
            # here but two everywhere else — confirm the exception signature.
            raise WinRMTransportError('kerberos is not installed')
        super(HttpKerberos, self).__init__(endpoint, None, None)
        # Service principal, e.g. 'HTTP@host.example.com'.
        # NOTE(review): realm and keytab are currently unused — confirm intended.
        self.krb_service = '{0}@{1}'.format(service, urlparse(endpoint).hostname)
    def set_auth(self, username, password):
        # Kerberos uses the ticket cache, not a username/password pair.
        raise NotImplementedError
    def send_message(self, message):
        """POST *message* with Negotiate auth; verify the server's response header."""
        # TODO current implementation does negotiation on each HTTP request which is not efficient
        # TODO support kerberos session with message encryption
        krb_ticket = KerberosTicket(self.krb_service)
        headers = {'Authorization': krb_ticket.auth_header,
                   'Connection': 'Keep-Alive',
                   'Content-Type': 'application/soap+xml;charset=UTF-8',
                   'User-Agent': 'Python WinRM client'}
        request = Request(self.endpoint, data=message, headers=headers)
        try:
            response = urlopen(request, timeout=self.timeout)
            # Mutual authentication: check the server's Negotiate reply.
            krb_ticket.verify_response(response.headers['WWW-Authenticate'])
            response_text = response.read()
            return response_text
        except HTTPError as ex:
            response_text = ex.read()
            # Per http://msdn.microsoft.com/en-us/library/cc251676.aspx rule 3,
            # should handle this 500 error and retry receiving command output.
            if 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive' in message and 'Code="2150858793"' in response_text:
                return response_text
            #if ex.code == 401 and ex.headers['WWW-Authenticate'] == 'Negotiate, Basic realm="WSMAN"':
            error_message = 'Kerberos-based authentication was failed. Code {0}'.format(ex.code)
            if ex.msg:
                error_message += ', {0}'.format(ex.msg)
            raise WinRMTransportError('kerberos', error_message)
        except URLError as ex:
            raise WinRMTransportError('kerberos', ex.reason)
    def _winrm_encrypt(self, string):
        """
        @returns the encrypted request string
        @rtype string
        """
        raise NotImplementedError
    def _winrm_decrypt(self, string):
        # Not implemented: message-level decryption counterpart of _winrm_encrypt.
        raise NotImplementedError
|
# Ask the user for two numbers, add them, and print the equation.


def format_sum(num1, num2):
    """Return the equation string 'num1 + num2 = answer'.

    BUGFIX: integral results are printed without a trailing '.0' so that
    entering 4 and 6 prints '4 + 6 = 10' (as the original comment
    documented) instead of '4 + 6 = 10.0'.
    """
    answer = float(num1) + float(num2)
    # Show whole-number results as integers.
    result = int(answer) if answer.is_integer() else answer
    return num1 + ' + ' + num2 + ' = ' + str(result)


if __name__ == '__main__':
    # Ask a user to enter a number
    num1 = input('Enter a number: ')
    # Ask a user to enter a second number
    num2 = input('Enter a second number: ')
    # Print 'first number + second number = answer'
    print(format_sum(num1, num2))
from django.conf.urls import include, url
from . import views
# Route table for the lost-and-found app.
urlpatterns = [
    url(r'^$', views.lost, name="lost"),  # default landing page: lost items
    url(r'^found/', views.found, name="found"),
    # NOTE(review): duplicates the name "lost" used by '^$' above — confirm intended.
    url(r'^lost/', views.lost, name="lost"),
    url(r'^postItem/', views.postItem, name="postItem")
]
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ONNX export metrics."""
import os
import tempfile
import torch
from modnas.registry.metrics import build, register
from modnas.metrics.base import MetricsBase
@register
class OnnxExportMetrics(MetricsBase):
    """ONNX export metrics class.

    Exports a node's module to an ONNX file (cached per cache key) and
    evaluates the wrapped metrics on the exported model description.
    """
    def __init__(self, metrics, head=None, export_dir=None, verbose=False):
        super().__init__()
        self.metrics = build(metrics)
        if head is None:
            head = ['name']
        elif isinstance(head, str):
            # BUGFIX: a bare string would be iterated character by character
            # in __call__ when building the cache key; wrap it in a list.
            head = [head]
        self.head = head
        if export_dir is None:
            export_dir = tempfile.gettempdir()
        os.makedirs(export_dir, exist_ok=True)
        self.export_dir = export_dir
        self.verbose = verbose
        # cache key -> onnx_info dict, so each module is exported only once
        self.exported = {}

    def __call__(self, node):
        """Return metrics output for *node*, exporting it to ONNX on first use."""
        key = '#'.join([str(node[k]) for k in self.head if node[k] is not None])
        onnx_info = self.exported.get(key, None)
        if onnx_info is not None:
            return self.metrics(onnx_info)
        in_shape = node['in_shape']
        module = node.module
        plist = list(module.parameters())
        # place the dummy input on the module's device (None for parameter-free modules)
        device = None if len(plist) == 0 else plist[0].device
        export_dir = os.path.join(self.export_dir, key)
        os.makedirs(export_dir, exist_ok=True)
        model_path = os.path.join(export_dir, 'model.onnx')
        dummy_input = torch.randn(in_shape).to(device=device)
        input_names = ['input']
        input_shapes = [tuple(dummy_input.shape)]
        output_names = ['output']
        output_shapes = [tuple()]
        with torch.no_grad():
            torch.onnx.export(module,
                              dummy_input,
                              model_path,
                              verbose=self.verbose,
                              input_names=input_names,
                              output_names=output_names)
        onnx_info = {
            'model_path': model_path,
            'input_names': input_names,
            'input_shapes': input_shapes,
            'output_names': output_names,
            'output_shapes': output_shapes
        }
        self.exported[key] = onnx_info
        ret = self.metrics(onnx_info)
        self.logger.info('onnx export: {}: {}'.format(key, ret))
        return ret
|
import pygame, random
import shared, circle
class Pow():
    """A falling power-up sprite: randomly either a 'shield' or a 'gun'."""
    def __init__(self, images: dict, center: tuple):
        ''' dictionary of 2 images passed, gun and shield '''
        # Pick which power-up this instance represents.
        self._key = random.choice(['shield', 'gun'])
        self.image = images[self._key]
        self.image.set_colorkey(shared.BLACK)  # black pixels are transparent
        self._rect = self.image.get_rect()
        self._rect.center = center
        # Bounding circle used for collision tests.
        self._circle = circle.Circle(self._rect.centerx, self._rect.centery, self._rect.width / 2)
        # Fall speed in pixels per second (slower when debugging).
        self.speedy = 250 if shared.debug else 500
        self.active = True

    @property
    def key(self):
        return self._key

    @property
    def rect(self):
        return self._rect

    @property
    def circle(self):
        return self._circle

    def update(self, dt):
        """Move down by speedy*dt; deactivate once fully below the screen."""
        self._rect.y += self.speedy * dt
        self._circle.center = self._rect.center
        if self._rect.top > shared.HEIGHT:
            self.active = False
        return self.active

    def draw(self) -> None:
        """Blit the sprite; in debug mode also outline the collision circle."""
        shared.screen.blit(self.image, self._rect)
        if shared.debug:
            pygame.draw.circle(shared.screen, shared.RED, self._circle.center, self._circle.radius, 1)
# -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 4229 $
# Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Galician-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps Galician directive names (mostly unicode-escaped) to the canonical
# English directive names registered in docutils' directives/__init__.py.
directives = {
    # language-dependent: fixed
    u'atenci\u00f3n': 'attention',
    u'advertencia': 'caution',
    u'code (translation required)': 'code',
    u'perigo': 'danger',
    u'erro': 'error',
    u'pista': 'hint',
    u'importante': 'important',
    u'nota': 'note',
    u'consello': 'tip',
    u'aviso': 'warning',
    u'admonici\u00f3n': 'admonition',
    u'barra lateral': 'sidebar',
    u't\u00f3pico': 'topic',
    u'bloque-li\u00f1a': 'line-block',
    u'literal-analizado': 'parsed-literal',
    u'r\u00fabrica': 'rubric',
    u'ep\u00edgrafe': 'epigraph',
    u'realzados': 'highlights',
    u'coller-citaci\u00f3n': 'pull-quote',
    u'compor': 'compound',
    u'recipiente': 'container',
    #'questions': 'questions',
    u't\u00e1boa': 'table',
    u't\u00e1boa-csv': 'csv-table',
    u't\u00e1boa-listaxe': 'list-table',
    #'qa': 'questions',
    #'faq': 'questions',
    u'meta': 'meta',
    'math (translation required)': 'math',
    #'imagemap': 'imagemap',
    u'imaxe': 'image',
    u'figura': 'figure',
    u'inclu\u00edr': 'include',
    u'cru': 'raw',
    u'substitu\u00edr': 'replace',
    u'unicode': 'unicode',
    u'data': 'date',
    u'clase': 'class',
    u'regra': 'role',
    u'regra-predeterminada': 'default-role',
    u't\u00edtulo': 'title',
    u'contido': 'contents',
    u'seccnum': 'sectnum',
    u'secci\u00f3n-numerar': 'sectnum',
    u'cabeceira': 'header',
    u'p\u00e9 de p\u00e1xina': 'footer',
    #'footnotes': 'footnotes',
    #'citations': 'citations',
    u'notas-destino': 'target-notes',
    u'texto restruturado-proba-directiva': 'restructuredtext-test-directive'}
"""Galician name to registered (in directives/__init__.py) directive name
mapping."""
# Maps Galician role names (and their common abbreviations) to the
# canonical English role names used for interpreted text.
roles = {
    # language-dependent: fixed
    u'abreviatura': 'abbreviation',
    u'ab': 'abbreviation',
    u'acr\u00f3nimo': 'acronym',
    u'ac': 'acronym',
    u'code (translation required)': 'code',
    u'\u00edndice': 'index',
    u'i': 'index',
    u'sub\u00edndice': 'subscript',
    u'sub': 'subscript',
    u'super\u00edndice': 'superscript',
    u'sup': 'superscript',
    u'referencia t\u00edtulo': 'title-reference',
    u't\u00edtulo': 'title-reference',
    u't': 'title-reference',
    u'referencia-pep': 'pep-reference',
    u'pep': 'pep-reference',
    u'referencia-rfc': 'rfc-reference',
    u'rfc': 'rfc-reference',
    u'\u00e9nfase': 'emphasis',
    u'forte': 'strong',
    u'literal': 'literal',
    'math (translation required)': 'math',
    u'referencia-nome': 'named-reference',
    u'referencia-an\u00f3nimo': 'anonymous-reference',
    u'referencia-nota ao p\u00e9': 'footnote-reference',
    u'referencia-citaci\u00f3n': 'citation-reference',
    u'referencia-substituci\u00f3n': 'substitution-reference',
    u'destino': 'target',
    u'referencia-uri': 'uri-reference',
    u'uri': 'uri-reference',
    u'url': 'uri-reference',
    u'cru': 'raw',}
"""Mapping of Galician role names to canonical role names for interpreted text.
"""
|
from uuid import uuid4
import requests
from flask import Blueprint, abort, jsonify, request, url_for
from flask_security.core import current_user
from flask_security.decorators import login_required
from urlobject import URLObject as URL
from ..models import RunToken, db
from ..utils.redis import get_redis_client
# Seconds a completed token reply stays readable in redis.
_REDIS_TOKEN_TTL = 15 * 60
# Seconds a pending token request stays valid in redis.
_REDIS_REQUEST_TTL = 5 * 60
blueprint = Blueprint('runtoken', __name__)
@blueprint.route('/runtoken/request/new')
@blueprint.route('/runtoken/request/<request_id>')
def runtoken_request(request_id=None):
    """Create a new token request (no id given) or report an existing one."""
    if request_id is None:
        request_id = _create_new_runtoken_request()
    return _get_runtoken_request_status(request_id)
@blueprint.route('/runtoken/request/<request_id>/complete', methods=['POST'])
@login_required
def complete_runtoken_request(request_id):
    """Attach a freshly created run token to a pending token request."""
    redis = get_redis_client()
    key = _get_request_key(request_id)
    # The request must still exist (not expired) before it can be completed.
    if redis.get(key) is None:
        abort(requests.codes.not_found) # pylint: disable=no-member
    token = create_new_runtoken(current_user)
    db.session.commit()
    # set reply
    redis.set(name=key, value=token, ex=_REDIS_TOKEN_TTL)
    return 'success'
def create_new_runtoken(user):
    """Create and persist a new run token for *user*; caller must commit."""
    token = f'{user.id}:{uuid4()}'
    db.session.add(RunToken(user_id=user.id, token=token))
    return token
def _create_new_runtoken_request():
    """Register a new pending token request in redis and return its id.

    The value stays empty until complete_runtoken_request fills in a token.
    """
    request_id = str(uuid4())
    redis = get_redis_client()
    # BUGFIX: pending requests use the (shorter) request TTL; the original
    # used _REDIS_TOKEN_TTL, leaving _REDIS_REQUEST_TTL defined but unused.
    redis.set(name=_get_request_key(request_id), value='', ex=_REDIS_REQUEST_TTL)
    return request_id
def _get_request_key(request_id):
return 'request:{}'.format(request_id)
def _get_runtoken_request_status(request_id):
    """Return the JSON status of a pending/completed token request.

    Aborts with 404 when the request id is unknown or has expired.
    """
    # CONSISTENCY: reuse the shared key helper instead of re-building the
    # key format inline, so it is defined in exactly one place.
    value = get_redis_client().get(_get_request_key(request_id))
    if value is None:
        abort(requests.codes.not_found)  # pylint: disable=no-member
    return jsonify({
        'token': value.decode('utf-8'),
        'url': URL(request.host_url).add_path(url_for('runtoken.runtoken_request', request_id=request_id)),
        'complete': request.host_url + '#/runtoken/' + request_id + '/authorize',
    })
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from os.path import join as opj
from datalad.tests.utils import skip_if_scrapy_without_selector
skip_if_scrapy_without_selector()
from ..nodes.crawl_url import crawl_url
from ..nodes.matches import *
from ..pipeline import run_pipeline, FinishPipeline
from ..nodes.misc import Sink, assign, range_node, interrupt_if
from ..nodes.annex import Annexificator
from ..pipeline import load_pipeline_from_module
from ...support.stats import ActivityStats
from ...tests.utils import with_tree
from ...tests.utils import eq_, ok_, assert_raises
from ...tests.utils import assert_in
from ...tests.utils import skip_if_no_module
from ...tests.utils import with_tempfile
from ...tests.utils import skip_if_no_network
from ...tests.utils import use_cassette
from logging import getLogger
# Module-level logger for these crawler tests.
lgr = getLogger('datalad.crawl.tests')
class AssertOrder(object):
    """Helper to verify that nodes executed in correct order.

    Each generated node increments a shared counter on invocation and
    asserts the counter falls within the allowed set of call numbers.
    """
    def __init__(self):
        self._call = 0

    def __call__(self, numbers):
        # Normalize a single call number into a one-element set.
        allowed = {numbers} if isinstance(numbers, int) else numbers

        def _assert_order(data):
            self._call += 1
            lgr.debug("#%d invocation of %s " % (self._call, self))
            assert_in(self._call, allowed)
            yield data
        return _assert_order
@with_tree(tree={
    'pipeline.py': 'pipeline = lambda: [1]',
    'pipeline2.py': 'pipeline = lambda x: [2*x]',
})
def test_load_pipeline_from_script(d):
    """Pipelines load from module files, honor kwargs, and missing files raise."""
    eq_(load_pipeline_from_module(opj(d, 'pipeline.py')), [1])
    eq_(load_pipeline_from_module(opj(d, 'pipeline2.py'), kwargs=dict(x=2)), [4])
    assert_raises(RuntimeError, load_pipeline_from_module, opj(d, 'unlikelytobethere.py'))
# Baseline pipeline output: a single entry carrying only fresh activity stats.
DEFAULT_OUTPUT = [{'datalad_stats': ActivityStats()}]
def _out(ld):
    """Adjust output entries to include the default outputs as well.

    Returns shallow copies of the entries in *ld*, each filled in with any
    missing keys from DEFAULT_OUTPUT.
    """
    results = []
    for entry in ld:
        merged = entry.copy()
        results.append(merged)
        for key, default in DEFAULT_OUTPUT[0].items():
            merged.setdefault(key, default)
    return results
def test_pipeline_linear_simple():
    """A flat pipeline yields the cross-product of its range nodes into the sink."""
    sink = Sink()
    pipeline = [
        range_node(2, "out1"),
        range_node(3, "out2"),
        sink
    ]
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, DEFAULT_OUTPUT) # by default 'input' is output and input is made empty dict if not provided
    eq_(sink.data, [{'out1': 0, 'out2': 0}, {'out1': 0, 'out2': 1}, {'out1': 0, 'out2': 2},
                    {'out1': 1, 'out2': 0}, {'out1': 1, 'out2': 1}, {'out1': 1, 'out2': 2}])
    # if we extend pipeline with matching interrupt_if, the entire pipeline should
    # stop at that matching point, but otherwise there should be no crash etc
    sink.clean()
    pipeline_output = run_pipeline(pipeline + [interrupt_if({'out1': 0, 'out2': 1})])
    eq_(pipeline_output, DEFAULT_OUTPUT)
    eq_(sink.data, [{'out1': 0, 'out2': 0}, {'out1': 0, 'out2': 1}])
def test_pipeline_unknown_opts():
    """An unrecognized pipeline option dict raises ValueError."""
    assert_raises(ValueError, run_pipeline, [{'xxx': 1}])
def test_pipeline_linear_nested_order():
    """Nodes of a nested pipeline run in the expected interleaved order.

    The AssertOrder nodes assert their own invocation order as the pipeline
    runs, so no output checks are needed afterwards.
    """
    sink = Sink()
    sink2 = Sink()
    assert_order = AssertOrder()
    pipeline = [
        assert_order(1),
        range_node(2, "out1"),
        assert_order({2, 5}),
        [
            assert_order({3, 6}),
            range_node(3, "out2"),
            sink,
        ],
        assert_order({4, 7}),
        sink2
    ]
    pipeline_output = run_pipeline(pipeline)
def test_pipeline_linear_nested():
    """Output propagation in/out of a nested pipeline under each 'output' mode."""
    sink = Sink()
    sink2 = Sink()
    assert_order = AssertOrder()
    pipeline = [
        range_node(2, "out1"),
        [
            range_node(3, "out2"),
            sink,
        ],
        sink2
    ]
    all_pairs = [{'out1': 0, 'out2': 0}, {'out1': 0, 'out2': 1}, {'out1': 0, 'out2': 2},
                 {'out1': 1, 'out2': 0}, {'out1': 1, 'out2': 1}, {'out1': 1, 'out2': 2}]
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, DEFAULT_OUTPUT)
    eq_(sink.data, all_pairs)
    # and output is not seen outside of the nested pipeline
    eq_(sink2.data, [{'out1': 0}, {'out1': 1}])
    # Let's make nested pipeline yield all
    sink.clean()
    sink2.clean()
    pipeline[1].insert(0, {'output': 'outputs'})
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, DEFAULT_OUTPUT) # by default no output produced
    eq_(sink.data, all_pairs)
    # and output was passed outside from the nested pipeline
    eq_(sink2.data, all_pairs)
    # Let's make it yield the last-output one
    sink2.clean()
    pipeline[1][0] = {'output': 'last-output'}
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, DEFAULT_OUTPUT) # by default no output produced
    # only the last output from the nested pipeline appeared outside
    eq_(sink2.data, [{'out1': 0, 'out2': 2}, {'out1': 1, 'out2': 2}])
    # Let's now add output to the top-most pipeline
    pipeline.insert(0, {'output': 'outputs'})
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, _out([{'out1': 0, 'out2': 2},
                               {'out1': 1, 'out2': 2}]))
    # and if we ask only for the last one
    pipeline[0] = {'output': 'last-output'}
    pipeline_output = run_pipeline(pipeline)
    eq_(pipeline_output, _out([{'out1': 1, 'out2': 2}]))
def test_pipeline_recursive():
    """A looping pipeline re-feeds its output until a node stops yielding."""
    def less3(data):
        """a little helper which would not yield whenever input x>3"""
        if data['x'] < 3:
            yield updated(data, dict(x=data['x']+1))
    pipeline = [
        {'loop': True, 'output': 'outputs'},
        less3,
    ]
    pipeline_output = run_pipeline(pipeline, dict(x=0))
    eq_(pipeline_output, _out([{'x': 1}, {'x': 2}, {'x': 3}]))
def test_pipeline_looping():
    """Loop termination when a node stops yielding, with various output modes."""
    # count[0]: invocations of count_threetimes, count[1]: of add_count
    count = [0, 0]
    def count_threetimes(data):
        """helper to not yield anything if done it 3 times by now"""
        if count[0] >= 3:
            return
        count[0] += 1
        for i in range(count[0]):
            yield updated(data, dict(somevar=(i, count[0])))
    def add_count(data):
        count[1] += 1
        yield updated(data, {'count': count[0]})
    def passthrough(data):
        yield data
    pipeline_output = run_pipeline([{'loop': True}, count_threetimes], dict(x=0))
    eq_(pipeline_output, _out([{'x': 0}]))
    eq_(count, [3, 0])
    # and even if the node not yielding is not the first node
    pipeline_output = run_pipeline([{'loop': True}, passthrough, count_threetimes], dict(x=0))
    eq_(pipeline_output, _out([{'x': 0}]))
    eq_(count, [3, 0])
    count[0] = 0
    # Let's rerun with explicit last-output, which would also affect output of this pipeline
    pipeline_output = run_pipeline([{'loop': True, 'output': 'last-output'}, count_threetimes], dict(x=0))
    eq_(pipeline_output, _out([{'x': 0, 'somevar': (2, 3)}]))
    eq_(count, [3, 0])
    # and if pipeline is composite, i.e. more than a single step, so we could make sure everything is called
    count[0] = 0
    pipeline_output = run_pipeline([{'loop': True}, count_threetimes, add_count], dict(x=0))
    eq_(pipeline_output, _out([{'x': 0}]))
    eq_(count, [3, 6])
    count[0] = count[1] = 0
    # Let's rerun with explicit last-output, which would also affect output of this pipeline
    pipeline_output = run_pipeline([{'loop': True, 'output': 'last-output'}, count_threetimes, add_count], dict(x=0))
    eq_(pipeline_output, _out([{'x': 0, 'somevar': (2, 3), 'count': 3}]))
    eq_(count, [3, 6])
def test_pipeline_linear_top_isnested_pipeline():
    """A trailing node must still execute even when the nested sub-pipeline
    in front of it generates no data for it."""
    was_called = []
    run_pipeline([
        [range_node(1, "out2")],
        lambda d: was_called.append('yes'),
    ])
    eq_(was_called, ['yes'])
def test_pipeline_updated_stats():
    """Stats emitted by a later node merge with those accumulated earlier."""
    def bump_git(data):
        data['datalad_stats'].increment('add_git')
        yield data
    def fresh_stats(data):
        # deliberately replaces the stats object instead of updating it
        data = data.copy()
        data['datalad_stats'] = ActivityStats(files=2)
        data['out'] = 1
        yield data
    eq_(run_pipeline([{'output': 'outputs'}, bump_git, fresh_stats]),
        [{'datalad_stats': ActivityStats(files=2, add_git=1), 'out': 1}])
def test_pipeline_dropped_stats():
    """A node that yields brand-new dicts drops upstream stats from its own
    record, yet the pipeline-level stats still accumulate."""
    def bump_git(data):
        data['datalad_stats'].increment('add_git')
        yield data
    def fresh_dict(data):
        # ignores the incoming data entirely
        yield {'out': 1}
    eq_(run_pipeline([{'output': 'outputs'}, bump_git, fresh_dict]),
        [{'datalad_stats': ActivityStats(add_git=1), 'out': 1}])
def test_pipeline_stats_persist():
    """Stats must survive arbitrary nesting of sub-pipelines."""
    def git_node(data):
        data['datalad_stats'].increment('add_git')
        yield data
    def ident(data):
        yield data
    def annex_node(data):
        data['datalad_stats'].increment('add_annex')
        yield data
    target_stats = ActivityStats(add_git=1, add_annex=1)
    # every layout below must yield the same combined stats
    for layout in ([git_node, annex_node],
                   [git_node, [annex_node]],
                   [[git_node], [annex_node]],
                   [git_node, [annex_node, ident]],
                   [[git_node], annex_node],
                   [[git_node, ident], annex_node]):
        eq_(run_pipeline(layout), [{'datalad_stats': target_stats}])
|
from geolite2 import geolite2
import requests
class bColors:
    """ANSI escape sequences used to colour terminal output."""
    GREEN = '\033[92m'   # success / values
    YELLOW = '\033[93m'  # nested field labels
    RED = '\033[91m'     # banner art / separators
    BLUE = '\033[94m'    # top-level field labels
def banner():
    """Print the green title line followed by the red ASCII-art logo.

    NOTE(review): the art's leading whitespace appears to have been lost in
    this copy of the file — confirm alignment against the original source.
    """
    print(bColors.GREEN + '<<< IP-TRACKER v1.0>>>')
    print(bColors.RED + r'''
_
| |
| |___
| _ \ _ _
| |_) | | (_) |
\____/ \__, |
__/ |
|___/
_ _
| | (_)
____ ____ ___| | ___ _ ______ ______ ___ _ ______ ______ _ _ ____
/ ___\ / \ / _ | / _ | | / _____| / _____| / _ | | / _____| / _____| | | | | | \
| |____ | () | | (_| | | (_|| | \______\ \______\ | (_|| | \______\ \______\ | | | | | |
\____/ \____/ \____/ \___|_| |______/ |______/ \___|_| |______/ |______/ |_| |_| |_|
''')
def ipLocation(ipTrack):
    """Geolocate *ipTrack* via the local GeoLite2 database and print a report.

    Args:
        ipTrack (str): the IP address to look up.

    BUG FIX: the report previously printed the module-global ``ip`` instead of
    the ``ipTrack`` parameter, so calling this with any other address would
    still display the caller's own public IP.

    NOTE(review): assumes the GeoLite2 record contains 'city', 'postal',
    'subdivisions' etc. — these keys are absent for some addresses; a lookup
    of such an address raises KeyError/TypeError. Confirm intended handling.
    """
    docReader = geolite2.reader()
    trackLocation = docReader.get(ipTrack)
    # Pull the fields we report out of the GeoLite2 record
    city = (trackLocation['city']['names']['en'])
    continent = (trackLocation['continent']['names']['en'])
    country = (trackLocation['country']['names']['en'])
    location = (trackLocation['location'])
    locationAccuracy = location['accuracy_radius']
    locationLatitude = location['latitude']
    locationLongitude = location['longitude']
    locationTimeZone = location['time_zone']
    postal = (trackLocation['postal'])
    postalCode = postal['code']
    registeredCountry = (trackLocation['registered_country']['names']['en'])
    subdivisions = (trackLocation['subdivisions'][0]['names']['en'])
    # colour shorthands
    r = str(bColors.RED)
    g = str(bColors.GREEN)
    b = str(bColors.BLUE)
    y = str(bColors.YELLOW)
    # single print so the whole report shares one colour state machine
    print(r + '* ' + b + 'public_ip: ' + g + ipTrack + r + '\n* ' + b + 'city: ' + g + city + r + '\n* ' + b + 'continent: ' + g +
          continent + r + '\n* ' + b + 'country: ' + g + country + r + '\n* ' + b + 'location: ' + r + '\n\t↪ ' + y +
          'accuracy_radius: ' + g + str(locationAccuracy) + r + '\n\t↪ ' + y + 'latitude: ' + g + str(locationLatitude)
          + r + '\n\t↪ ' + y + 'longitude: ' + g + str(locationLongitude) + r + '\n\t↪ ' + y + 'time_zone: ' + g +
          locationTimeZone + r + '\n\t↪ ' + y + 'map: ' + g +
          f'https://www.google.co.in/maps/@{locationLatitude},{locationLongitude},15z?hl=en' + r + '\n* ' + b +
          'postal_code: ' + g + str(postalCode) + r + '\n* ' + b + 'registered_country: ' + g + registeredCountry + r +
          '\n* ' + b + 'subdivisions: ' + g + subdivisions)
# Determine this machine's public IP via the ipify web service,
# then show the banner and geolocate/print that address.
ip = requests.get('https://api.ipify.org').text
banner()
ipLocation(ip)
|
#
# Copyright (c) 2011 Thomas Rampelberg
#
"""
Make doing janky cross domain communication easy.
"""
__author__ = 'Thomas Rampelberg'
__author_email__ = 'thomas@saunter.org'

# Packaging metadata for the janky_post distribution (setuptools).
setup(
    name="janky_post",
    version="0.5",
    author=__author__,
    author_email=__author_email__,
    url="http://saunter.org/janky.post/",
    description="Makes janky cross-domain communication easy",
    packages=find_packages(),
)
|
"""
Main module for Flambda APP
Version: 1.0.0
"""
import os
def load_projectrc(projectrc_filepath):
    """Read a ``.projectrc`` file and return its key/value pairs as a dict."""
    # imported lazily so merely importing this module does not require dotenv
    from dotenv import dotenv_values
    values = dotenv_values(projectrc_filepath)
    return values
# Resolve the project root directory: when this file is imported as part of a
# package, strip the trailing package-directory segment from its path.
# NOTE(review): the replace() assumes '/'-separated paths — confirm on Windows.
if __package__:
    current_path = os.path.abspath(os.path.dirname(__file__)).replace('/' + str(__package__), '', 1)
else:
    current_path = os.path.abspath(os.path.dirname(__file__))
env_vars = {}
projectrc_file = os.path.join(current_path, '.projectrc')
# inside of a docker the name of folder is app
PROJECT_NAME = os.path.basename(current_path).replace('_', '-')
# normalise to a trailing slash (after PROJECT_NAME was derived from the basename)
if not current_path[-1] == '/':
    current_path += '/'
# optional .projectrc overrides
if os.path.exists(projectrc_file):
    env_vars = load_projectrc(projectrc_file)
# App metadata: taken from .projectrc when present, otherwise defaults
APP_NAME = env_vars['APP_NAME'] if 'APP_NAME' in env_vars else PROJECT_NAME
APP_VERSION = env_vars['APP_VERSION'] if 'APP_VERSION' in env_vars else '1.0.0'
APP_ARCH_VERSION = env_vars['APP_ARCH_VERSION'] if 'APP_ARCH_VERSION' in env_vars else 'v1'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-20 02:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine books.publication_date as a plain DateField."""

    # must be applied after the previous auto-migration of the books app
    dependencies = [
        ('books', '0003_auto_20200518_1637'),
    ]

    operations = [
        migrations.AlterField(
            model_name='books',
            name='publication_date',
            # DateField with default options (no null/blank/auto_now)
            field=models.DateField(),
        ),
    ]
|
# https://stepik.org/lesson/3369/step/11?unit=952
# Sample Input:
# 5
# Sample Output:
# 1 2 3 4 5
# 16 17 18 19 6
# 15 24 25 20 7
# 14 23 22 21 8
# 13 12 11 10 9
def position(a):
    """Yield (row, col) coordinates tracing a clockwise inward spiral
    over an a*a grid, starting at the top-left corner."""
    row = col = 0
    # row 0 / col 0 are consumed by the first two legs, hence the asymmetric bounds
    lo_row, hi_row = 1, a - 1
    lo_col, hi_col = 0, a - 1
    heading = 1  # 1 = right, 2 = down, 3 = left, 4 = up
    for _ in range(a * a):
        yield row, col
        if heading == 1:          # moving right along the top edge
            col += 1
            if col == hi_col:
                heading += 1
                hi_col -= 1
        elif heading == 2:        # moving down the right edge
            row += 1
            if row == hi_row:
                heading += 1
                hi_row -= 1
        elif heading == 3:        # moving left along the bottom edge
            col -= 1
            if col == lo_col:
                heading += 1
                lo_col += 1
        elif heading == 4:        # moving up the left edge, then turn right again
            row -= 1
            if row == lo_row:
                heading = 1
                lo_row += 1
def PrintHelix(z):
    """Print the square matrix *z* row by row, each cell prefixed by a space."""
    size = len(z)
    for r in range(size):
        print("".join(" " + z[r][c] for c in range(size)))
# Read the grid size from stdin, fill an a*a grid by walking the spiral
# (cell visited N-th gets the string "N"), then print it.
a = int(input())
z = [[0]*a for x in range(a)]
for index, (x, y) in enumerate(position(a), 1):
    z[x][y] = str(index)
PrintHelix(z)
|
"""The ue_smart_radio component."""
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
from sys import platform
extra_compile_args = []
extra_link_args = []
# Cython is required to build the nms extension; bail out with a hint if absent.
try:
    from Cython.Distutils.build_ext import build_ext
except ImportError:
    print('Error: Cython not installed. please install by running "conda install cython". exiting')
    exit()
if platform == "linux" or platform == "linux2":
    # linux: OpenMP threading, fast (non-IEEE-strict) math, SSE vectorisation
    extra_compile_args.append('-fopenmp')
    extra_compile_args.append('-ffast-math')
    extra_compile_args.append('-msse')
    extra_compile_args.append('-msse2')
    extra_compile_args.append('-msse3')
    extra_compile_args.append('-msse4')
    extra_compile_args.append('-s')
    extra_compile_args.append('-std=c99')
    extra_link_args.append('-fopenmp')
elif platform == "darwin":
    # OS X: same flag set, but compile with gcc-6
    # NOTE(review): presumably because Apple clang lacks -fopenmp — confirm
    # that gcc-6 is actually installed on target machines.
    extra_compile_args.append('-fopenmp')
    extra_compile_args.append('-ffast-math')
    extra_compile_args.append('-msse')
    extra_compile_args.append('-msse2')
    extra_compile_args.append('-msse3')
    extra_compile_args.append('-msse4')
    extra_compile_args.append('-s')
    extra_compile_args.append('-std=c99')
    extra_link_args.append('-fopenmp')
    import os
    os.environ["CC"] = "gcc-6"
    os.environ["CXX"] = "gcc-6"
elif platform == "win32":
    # Windows: rely on the MSVC defaults, no extra flags
    pass
extensions = [
    Extension("nms", ["nms.pyx"],
              extra_compile_args=extra_compile_args,
              extra_link_args=extra_link_args)
]
# cythonize the .pyx sources and expose the NumPy headers to the C compiler
setup(
    ext_modules=cythonize(extensions),
    include_dirs=[numpy.get_include()]
)
|
'''
Problem 7
@author: Kevin Ji
'''
# Cache of primes found so far, in ascending order; grows across calls.
primes_list = [2, 3]

def get_nth_prime(term):
    """Return the *term*-th prime (1-based), memoised in ``primes_list``.

    Args:
        term: 1-based index of the prime wanted (1 -> 2, 2 -> 3, ...).

    Returns:
        The term-th prime number.

    Improvement over the original: trial division stops once prime*prime
    exceeds the candidate — a composite must have a factor <= its square
    root, so the extra divisions the original performed were redundant.
    """
    term -= 1  # map 1-based term to 0-based list index
    # already computed? return the cached value
    if term < len(primes_list):
        return primes_list[term]
    # continue sieving odd candidates from just past the largest known prime
    candidate = primes_list[-1] + 2
    while len(primes_list) <= term:
        is_prime = True
        for prime in primes_list:
            if prime * prime > candidate:
                break  # no factor <= sqrt(candidate) found: candidate is prime
            if candidate % prime == 0:
                is_prime = False
                break
        if is_prime:
            primes_list.append(candidate)
        candidate += 2  # only odd numbers can be prime beyond 2
    # loop exits exactly when the wanted prime has been appended
    return primes_list[-1]
# Sanity checks against known primes, then the Project Euler #7 answer (10001st prime).
print( get_nth_prime( 6 ) ) # 13
print( get_nth_prime( 100 ) ) # 541
#print( primes_list )
print( get_nth_prime( 10001 ) )
|
import syft as sy
import torch
def test_plan_module_tracing():
    """Tracing a two-op plan (rand + add) should record exactly two actions."""
    @sy.func2plan(args_shape=[(1,)])
    def plan_test(x, torch=torch):
        # torch is passed as a default argument
        # NOTE(review): presumably so the tracer can hook the framework handle —
        # confirm against the syft func2plan documentation.
        y = torch.rand([1])
        return x + y
    p = plan_test(torch.tensor([3]))
    # one action for torch.rand, one for the addition
    assert len(plan_test.role.actions) == 2
|
class ICharacterComponent():
    """Interface for anything that can report its equipment."""

    def equip(self):
        # Intentionally a no-op; concrete components provide the behaviour.
        pass
class CharacterConcreteComponent(ICharacterComponent):
    """A named character holding a (possibly empty) list of equipment strings."""

    def __init__(self, name):
        self.name = name
        self.equipamento = []

    def equip(self):
        """Return a textual summary of this character's equipment."""
        if self.equipamento:
            return f'{self.name} equipment:{"".join(self.equipamento)}'
        return f'{self.name} equipment: Empty'
class ArmorConcreteDecorator(CharacterConcreteComponent):
    """Decorator adding an armor entry to the wrapped character's equipment."""

    def __init__(self, character: CharacterConcreteComponent):
        self.character = character
        self.name = character.name
        # Appends to the wrapped character's list in place (shared, not copied).
        character.equipamento.append('\nArmor: Yes')
        self.equipamento = character.equipamento

    def equip(self):
        # Delegate reporting to the wrapped component.
        return self.character.equip()
class SwordConcreteDecorator(CharacterConcreteComponent):
    """Decorator adding a sword entry to the wrapped character's equipment."""

    def __init__(self, character: CharacterConcreteComponent):
        self.character = character
        self.name = character.name
        # Appends to the wrapped character's list in place (shared, not copied).
        character.equipamento.append('\nSword: Yes')
        self.equipamento = character.equipamento

    def equip(self):
        # Delegate reporting to the wrapped component.
        return self.character.equip()
class HelmetConcreteDecorator(CharacterConcreteComponent):
    """Decorator adding a helmet entry to the wrapped character's equipment."""

    def __init__(self, character: CharacterConcreteComponent):
        self.character = character
        self.name = character.name
        # Appends to the wrapped character's list in place (shared, not copied).
        character.equipamento.append('\nHelmet: Yes')
        self.equipamento = character.equipamento

    def equip(self):
        # Delegate reporting to the wrapped component.
        return self.character.equip()
class BootsConcreteDecorator(CharacterConcreteComponent):
    """Decorator adding a boots entry to the wrapped character's equipment."""

    def __init__(self, character: CharacterConcreteComponent):
        self.character = character
        self.name = character.name
        # Appends to the wrapped character's list in place (shared, not copied).
        character.equipamento.append('\nBoots: Yes')
        self.equipamento = character.equipamento

    def equip(self):
        # Delegate reporting to the wrapped component.
        return self.character.equip()
#!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import fire
import json
import os
import numpy as np
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
import model, sample, encoder
def interact_model(
        model_name='poetry-pgclean-117m',
        seed=None,
        length=50,
        temperature=0.8,
        top_k=40,
        top_p=1,
        models_dir='models',
):
    """
    Interactively run the model, accumulating the conversation: each new
    prompt is appended to all previous text before sampling.

    :model_name='poetry-pgclean-117m' : String, which model subfolder to use
    :seed=None : Integer seed for random number generators, fix seed to
     reproduce results
    :length=50 : Number of tokens in generated text, if None is determined
     by model hyperparameters (n_ctx // 2)
    :temperature=0.8 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As
     the temperature approaches zero, the model will become deterministic
     and repetitive. Higher temperature results in more random completions.
    :top_k=40 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token); 0 is a special setting meaning no
     restrictions. 40 generally is a good value.
    :top_p=1 : Float, nucleus-sampling probability mass (1 disables it)
    :models_dir='models' : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)

    BUG FIX: the trailing-fragment trimming loop dereferenced text[-1]
    without checking for an empty string, so a completion that was empty or
    consisted entirely of alphanumerics/spaces raised IndexError; the loop
    is now guarded with ``while text and ...``.
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length is None:
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    with tf.Session(graph=tf.Graph()) as sess:
        # single-sequence placeholder: batch of 1, variable token count
        context = tf.placeholder(tf.int32, [1, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=1,
            temperature=temperature, top_k=top_k, top_p=top_p
        )
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        prev_text = None  # running transcript of prompts + completions
        while True:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            if prev_text is not None:
                prev_text += "\n" + raw_text
            else:
                prev_text = raw_text
            context_tokens = enc.encode(prev_text)
            # strip the echoed context; keep only newly generated tokens
            out = sess.run(output, feed_dict={
                context: [context_tokens]
            })[:, len(context_tokens):]
            text = enc.decode(out[0])
            # trim a trailing partial word / spaces so the transcript ends cleanly
            while text and (text[-1].isalnum() or text[-1] == " "):
                text = text[:-1]
            prev_text += text
            print("=" * 80)
            print(prev_text)
            print("=" * 80)
# CLI entry point: python-fire exposes interact_model's keyword args as flags.
if __name__ == '__main__':
    fire.Fire(interact_model)
|
from semseg.loader.cityscapes_loader import cityscapesLoader
def get_loader(name):
    """Map a dataset name to its loader class.

    :param name: dataset key, e.g. "cityscapes" (KeyError for unknown names)
    """
    loaders = {
        "cityscapes": cityscapesLoader,
    }
    return loaders[name]
from kpm.commands.deploy import DeployCmd
class RemoveCmd(DeployCmd):
    """CLI command that removes a deployed package from kubernetes.

    Reuses DeployCmd's argument parsing and setup; only the final action differs.
    """
    name = 'remove'
    help_message = "remove a package from kubernetes"

    def _call(self):
        # Same flags as deploy (force/dry-run/proxy/output), but delete instead.
        self.status = self.kub().delete(dest=self.tmpdir, force=self.force, dry=self.dry_run,
                                        proxy=self.api_proxy, fmt=self.output)
|
#!/usr/bin/env python
# coding: utf-8
# # Tone generator
#
# Required: `pip install tones`
# https://pypi.org/project/tones/
# In[53]:
from tones.mixer import Mixer
notes = ['c','d','e','f','g','a','b']
octave = 3
# Render each natural note of octave 3 to its own one-second WAV file.
for note in notes:
    mixer = Mixer(44100,0.5)  # 44.1 kHz sample rate, 0.5 amplitude
    mixer.create_track(0)
    mixer.add_note(0,note=note,octave=octave,duration=1.0)
    mixer.write_wav(f'../database/musical-tones/{note}-{octave}.wav')
    del mixer  # fresh mixer per file so tracks don't accumulate
# In[106]:
# multiple outputs
# Make every expression in a notebook cell display, not just the last one.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython.display import Audio
import os
bdir = '../database/musical-tones/'
files = [name for name in os.listdir('../database/musical-tones/') if name.endswith('3.wav')]
# natural order: rotate the alphabetical list two places so playback starts
# at 'c' rather than 'a' (presumably c-d-e-f-g-a-b ordering — verify)
files = (sorted(files) + sorted(files)[0:2] )[2:]
for f in files:
    Audio(os.path.join(bdir,f))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 11:01:50 2017
@author: slauniai
******************************************************************************
CanopyGrid:
Gridded canopy and snow hydrology model for SpaFHy -integration
Based on simple schemes for computing water flows and storages within vegetation
canopy and snowpack at daily or sub-daily timesteps.
(C) Samuli Launiainen, 2016-
Modifications made to snowpack equations (based on Ala-aho et al.)
- Short- and longwave radiation to snowpack from global radiation
- Advection to snowpack from rainfall
- Sensible heat exchange between air and snowpack
- Heat to and from snowpack from convective vapour exchange
- Snowpack energy and according to that mass balance equations
(C) Jari-Pekka Nousu, 2020-
******************************************************************************
"""
import numpy as np
from numpy import log as ln
eps = np.finfo(float).eps
class CanopyGrid():
def __init__(self, cpara, state, outputs=False):
    """
    initializes CanopyGrid -object

    Args:
        cpara - parameter dict with keys 'loc', 'physpara', 'phenopara',
            'spec_para', 'interc', 'snow', 'flow'
        state - dict of initial state grids ('hc', 'cf', 'LAI_<ptype>', 'w',
            'swe', 'd_nosnow', 'd_snow', 'Tsnow', 'Wice', 'Wliq', 'alb', 'emAir')
        outputs - True saves output grids to list at each timestep
    Returns:
        self - object
    NOTE:
        Currently the initialization assumes simulation start 1st Jan,
        and sets self._relative_lai and self.X equal to minimum values.
        Also leaf-growth & senescence parameters are intialized to zero.

    BUG FIX: self.Tmax was read from cpara['snow']['Tmin'], which made
    Tmax == Tmin and disabled the mixed rain/snow temperature range.
    It now reads 'Tmax', falling back to 'Tmin' only if the key is absent
    (keeps old configs working).
    """
    epsi = 0.01  # small number avoiding zero canopy height / closure
    self.Lat = cpara['loc']['lat']
    self.Lon = cpara['loc']['lon']
    # physiology: transpi + floor evap
    self.physpara = cpara['physpara']
    # phenology & LAI cycle
    self.phenopara = cpara['phenopara']
    # canopy parameters and state
    self.hc = state['hc'] + epsi
    self.cf = state['cf'] + epsi
    # self.cf = 0.1939 * ba / (0.1939 * ba + 1.69) + epsi
    # canopy closure [-] as function of basal area ba m2ha-1;
    # fitted to Korhonen et al. 2007 Silva Fennica Fig.2
    # plant types: per-type maximum LAI comes from the state grids
    spec_para = cpara['spec_para']
    ptypes = {}
    for pt in list(spec_para.keys()):
        ptypes[pt] = spec_para[pt]
        ptypes[pt]['LAImax'] = state['LAI_' + pt]
    self.ptypes = ptypes
    # compute gridcell average LAI and photosynthesis-stomatal conductance parameters:
    LAI = 0.0
    Amax = 0.0
    q50 = 0.0
    g1 = 0.0
    for pt in self.ptypes.keys():
        if self.ptypes[pt]['lai_cycle']:
            # deciduous types start the year (1st Jan) at minimum relative LAI
            pt_lai = self.ptypes[pt]['LAImax'] * self.phenopara['lai_decid_min']
        else:
            pt_lai = self.ptypes[pt]['LAImax']
        LAI += pt_lai
        Amax += pt_lai * ptypes[pt]['amax']
        q50 += pt_lai * ptypes[pt]['q50']
        g1 += pt_lai * ptypes[pt]['g1']
    self.LAI = LAI + epsi
    # LAI-weighted averages of the photosynthesis parameters
    self.physpara.update({'Amax': Amax / self.LAI, 'q50': q50 / self.LAI, 'g1': g1 / self.LAI})
    del Amax, q50, g1, pt, LAI, pt_lai
    # - compute start day of senescence: starts at first doy when daylength < self.phenopara['sdl']
    doy = np.arange(1, 366)
    dl = daylength(self.Lat, self.Lon, doy)
    ix = np.max(np.where(dl > self.phenopara['sdl']))
    self.phenopara['sso'] = doy[ix]  # this is onset date for senescence
    del ix
    # snow model parameters
    self.wmax = cpara['interc']['wmax']
    self.wmaxsnow = cpara['interc']['wmaxsnow']
    self.Kmelt = cpara['snow']['kmelt']
    self.Kfreeze = cpara['snow']['kfreeze']
    self.R = cpara['snow']['r']  # max fraction of liquid water in snow
    self.Tmin = cpara['snow']['Tmin']  # threshold below which all precipitation is snow
    # threshold above which all precipitation is water (in between: mixed rain/snow)
    self.Tmax = cpara['snow'].get('Tmax', cpara['snow']['Tmin'])
    self.albpow = cpara['snow']['albpow']
    self.albground = cpara['snow']['albground']
    self.cAtten = cpara['snow']['cAtten']
    self.RDthres = cpara['snow']['RDthres']
    # --- for computing aerodynamic resistances
    self.zmeas = cpara['flow']['zmeas']
    self.zground = cpara['flow']['zground']  # reference height above ground [m]
    self.zo_ground = cpara['flow']['zo_ground']  # ground roughness length [m]
    self.gsoil = self.physpara['gsoil']
    # --- state variables
    self.W = np.minimum(state['w'], self.wmax*self.LAI)  # canopy storage, capped at capacity
    self.SWE = state['swe']  # snow water equivalent (mm)
    self.SWEi = self.SWE  # ice part of SWE
    self.SWEl = np.zeros(np.shape(self.SWE))  # liquid part of SWE
    self.d_nosnow = state['d_nosnow']
    self.d_snow = state['d_snow']
    self.Tsnow = state['Tsnow']
    self.tau0 = np.exp(-self.cAtten * self.LAI)  # canopy radiation transmissivity
    self.Wice = state['Wice']
    self.Wliq = state['Wliq']
    self.alb = state['alb']
    self.emAir = state['emAir']
    # deciduous leaf growth stage
    # NOTE: this assumes simulations start 1st Jan each year !!!
    self.DDsum = 0.0
    self.X = 0.0
    self._relative_lai = self.phenopara['lai_decid_min']
    self._growth_stage = 0.0
    self._senesc_stage = 0.0
    # phenological state
    self.fPheno = self.phenopara['fmin']
    # create dictionary of empty lists for saving results
    if outputs:
        self.results = {'PotInf': [], 'Trfall': [], 'Interc': [], 'Evap': [],
                        'ET': [], 'Transpi': [], 'Efloor': [], 'SWE': [],
                        'LAI': [], 'Mbe': [], 'LAIfract': [], 'Unload': []
                        }
def run_timestep(self, doy, dt, Ta, Prec, Rg, RH, Par, VPD, U=2.0, CO2=380.0,
                 Rew=1.0, beta=1.0, P=101300.0):
    """
    Runs CanopyGrid instance for one timestep
    IN:
        doy - day of year
        dt - timestep [s]
        Ta - air temperature [degC], scalar or (n x m) -matrix
        Prec - precipitatation rate [mm/s]
        Rg - global radiation [Wm-2], scalar or matrix
        RH - relative humidity [%], scalar or matrix
        Par - photos. act. radiation [Wm-2], scalar or matrix
        VPD - vapor pressure deficit [kPa], scalar or matrix
        U - mean wind speed at ref. height above canopy top [ms-1], scalar or matrix
        CO2 - atm. CO2 mixing ratio [ppm]
        Rew - relative extractable water [-], scalar or matrix
        beta - term for soil evaporation resistance (Wliq/FC) [-]
        P - pressure [Pa], scalar or matrix
    OUT:
        updated CanopyGrid instance state variables
        flux grids PotInf, Trfall, Interc, Evap, ET, Transpi, Efloor, MBE [mm]

    BUG FIX: the call to canopy_water_snow was positional and passed
    (Rn, RH, VPD, Rg) into its (AE, D, Rg, RH) parameter slots, i.e. RH was
    used as vapour-pressure deficit, VPD as global radiation and Rg as
    relative humidity. Keyword arguments now map each forcing to its proper
    parameter, and the hard-coded U=2.0 is replaced by this method's own U.
    """
    # Rn = 0.7 * Rg  # net radiation
    # net radiation fraction from Launiainen et al. 2016 GCB, fit to Fig 2a
    Rn = np.maximum(2.57 * self.LAI / (2.57 * self.LAI + 0.57) - 0.2,
                    0.55) * Rg
    # --- update grid-cell phenology, LAI and average Amax, g1 and q50: self.ddsum & self.X ---
    self.update_daily(Ta, doy)
    # --- aerodynamic conductances ---
    Ra, Rb, Ras, ustar, Uh, Ug = aerodynamics(self.LAI, self.hc, U, w=0.01, zm=self.zmeas,
                                              zg=self.zground, zos=self.zo_ground)
    # --- interception, evaporation and snowpack ---
    PotInf, Trfall, Evap, Interc, MBE, unload = self.canopy_water_snow(
        dt, Ta, Prec, Rn, D=VPD, Rg=Rg, RH=RH, Ra=Ra, U=U)
    # --- dry-canopy evapotranspiration [mm s-1] ---
    Transpi, Efloor, Gc = self.dry_canopy_et(VPD, Par, Rn, Ta, Ra=Ra, Ras=Ras,
                                             CO2=CO2, Rew=Rew, beta=beta, fPheno=self.fPheno)
    # convert rates to per-timestep totals [mm]
    Transpi = Transpi * dt
    Efloor = Efloor * dt
    ET = Transpi + Efloor + Evap
    # append results to lists; use only for testing small grids!
    if hasattr(self, 'results'):
        self.results['PotInf'].append(PotInf)
        self.results['Trfall'].append(Trfall)
        self.results['Interc'].append(Interc)
        self.results['Evap'].append(Evap)
        self.results['ET'].append(ET)
        self.results['Transpi'].append(Transpi)
        self.results['Efloor'].append(Efloor)
        self.results['SWE'].append(self.SWE)
        self.results['LAI'].append(self.LAI)
        self.results['Mbe'].append(np.nanmax(MBE))
        self.results['LAIfract'].append(self._relative_lai)
        self.results['Unload'].append(unload)
    return PotInf, Trfall, Interc, Evap, ET, Transpi, Efloor, MBE
def update_daily(self, T, doy):
    """
    Advance temperature sum, acclimation and leaf-area phenology by one day,
    then refresh the grid-cell effective photosynthesis parameters.

    Args:
        T - daily mean temperature (degC)
        doy - day of year
    Returns:
        None
    """
    self._degreeDays(T, doy)
    self._photoacclim(T)
    # deciduous relative leaf-area index
    self._lai_dynamics(doy)
    # re-derive grid-cell totals as LAI-weighted means over plant types
    lai_total = 0.0
    amax_sum = 0.0
    q50_sum = 0.0
    g1_sum = 0.0
    for props in self.ptypes.values():
        # types with a seasonal LAI cycle are scaled by today's relative LAI
        scale = self._relative_lai if props['lai_cycle'] else 1.0
        pt_lai = props['LAImax'] * scale
        lai_total += pt_lai
        amax_sum += pt_lai * props['amax']
        q50_sum += pt_lai * props['q50']
        g1_sum += pt_lai * props['g1']
    self.LAI = lai_total + eps
    # print(doy, lai_total, amax_sum / self.LAI, g1_sum / self.LAI)
    self.physpara.update({'Amax': amax_sum / self.LAI,
                          'q50': q50_sum / self.LAI,
                          'g1': g1_sum / self.LAI})
def _degreeDays(self, T, doy):
"""
Calculates and updates degree-day sum from the current mean Tair.
INPUT:
T - daily mean temperature (degC)
doy - day of year 1...366 (integer)
"""
To = 5.0 # threshold temperature
if doy == 1: # reset in the beginning of the year
self.DDsum = 0.
else:
self.DDsum += np.maximum(0.0, T - To)
def _photoacclim(self, T):
"""
computes new stage of temperature acclimation and phenology modifier.
Peltoniemi et al. 2015 Bor.Env.Res.
IN: object, T = daily mean air temperature
OUT: None, updates object state
"""
self.X = self.X + 1.0 / self.phenopara['tau'] * (T - self.X) # degC
S = np.maximum(self.X - self.phenopara['xo'], 0.0)
fPheno = np.maximum(self.phenopara['fmin'],
np.minimum(S / self.phenopara['smax'], 1.0))
self.fPheno = fPheno
def _lai_dynamics(self, doy):
"""
Seasonal cycle of deciduous leaf area
Args:
self - object
doy - day of year
Returns:
none, updates state variables self._relative_lai, self._growth_stage,
self._senec_stage
"""
lai_min = self.phenopara['lai_decid_min']
ddo = self.phenopara['ddo']
ddur = self.phenopara['ddur']
sso = self.phenopara['sso']
sdur = self.phenopara['sdur']
# growth phase
if self.DDsum <= ddo:
f = lai_min
self._growth_stage = 0.
self._senesc_stage = 0.
elif self.DDsum > ddo:
self._growth_stage += 1.0 / ddur
f = np. minimum(1.0, lai_min + (1.0 - lai_min) * self._growth_stage)
# senescence phase
if doy > sso:
self._growth_stage = 0.
self._senesc_stage += 1.0 / sdur
f = 1.0 - (1.0 - lai_min) * np.minimum(1.0, self._senesc_stage)
self._relative_lai = f
def dry_canopy_et(self, D, Qp, AE, Ta, Ra=25.0, Ras=250.0, CO2=380.0,
                  Rew=1.0, beta=1.0, fPheno=1.0):
    """
    Computes ET from 2-layer canopy in absense of intercepted precipitiation,
    i.e. in dry-canopy conditions
    IN:
        self - object
        D - vpd in kPa
        Qp - PAR in Wm-2
        AE - available energy in Wm-2
        Ta - air temperature degC
        Ra - aerodynamic resistance (s/m)
        Ras - soil aerodynamic resistance (s/m)
        CO2 - atm. CO2 mixing ratio (ppm)
        Rew - relative extractable water [-]
        beta - relative soil conductance for evaporation [-]
        fPheno - phenology modifier [-]
    OUT:
        Tr - transpiration rate (mm s-1)
        Efloor - forest floor evaporation rate (mm s-1)
        Gc - canopy conductance (integrated stomatal conductance) (m s-1)
    SOURCES:
        Launiainen et al. (2016). Do the energy fluxes and surface conductance
        of boreal coniferous forests in Europe scale with leaf area?
        Global Change Biol.
        Modified from: Leuning et al. 2008. A Simple surface conductance model
        to estimate regional evaporation using MODIS leaf area index and the
        Penman-Montheith equation. Water. Resources. Res., 44, W10419
        Original idea Kelliher et al. (1995). Maximum conductances for
        evaporation from global vegetation types. Agric. For. Met 85, 135-147
    Samuli Launiainen, Luke
    Last edit: 13.6.2018: TESTING UPSCALING
    """
    # ---Amax and g1 as LAI -weighted average of conifers and decid.
    rhoa = 101300.0 / (8.31 * (Ta + 273.15))  # air molar density, mol m-3
    Amax = self.physpara['Amax']
    g1 = self.physpara['g1']
    kp = self.physpara['kp']  # (-) attenuation coefficient for PAR
    q50 = self.physpara['q50']  # Wm-2, half-sat. of leaf light response
    rw = self.physpara['rw']  # rew parameter
    rwmin = self.physpara['rwmin']  # rew parameter
    tau = np.exp(-kp * self.LAI)  # fraction of Qp at ground relative to canopy top
    # --- canopy conductance Gc (integrated stomatal conductance) ---
    # fQ: Saugier & Katerji, 1991 Agric. For. Met., eq. 4. Leaf light response = Qp / (Qp + q50)
    fQ = 1./ kp * np.log((kp*Qp + q50) / (kp*Qp*np.exp(-kp * self.LAI) + q50 + eps) )
    # the next formulation is from Leuning et al., 2008 WRR for daily Gc; they refer to
    # Kelliher et al. 1995 AFM but the resulting equation is not exact integral of K95.
    # fQ = 1./ kp * np.log((Qp + q50) / (Qp*np.exp(-kp*self.LAI) + q50))
    # soil moisture response: Lagergren & Lindroth, xxxx
    fRew = np.minimum(1.0, np.maximum(Rew / rw, rwmin))
    # fRew = 1.0
    # CO2 -response of canopy conductance, derived from APES-simulations
    # (Launiainen et al. 2016, Global Change Biology). relative to 380 ppm
    fCO2 = 1.0 - 0.387 * np.log(CO2 / 380.0)
    # leaf level light-saturated gs (m/s)
    gs = 1.6*(1.0 + g1 / np.sqrt(D)) * Amax / CO2 / rhoa
    # canopy conductance: light, soil moisture, CO2 and phenology modifiers
    Gc = gs * fQ * fRew * fCO2 * fPheno
    Gc[np.isnan(Gc)] = eps  # guard against NaN from D == 0
    # --- transpiration rate, driven by the canopy share of available energy ---
    Tr = penman_monteith((1.-tau)*AE, 1e3*D, Ta, Gc, 1./Ra, units='mm')
    Tr[Tr < 0] = 0.0  # no negative transpiration
    # --- forest floor evaporation rate ---
    # soil conductance is function of relative water availability
    # gcs = 1. / self.soilrp * beta**2.0
    # beta = Wliq / FC; Best et al., 2011 Geosci. Model. Dev. JULES
    Gcs = self.gsoil
    Efloor = beta * penman_monteith(tau * AE, 1e3*D, Ta, Gcs, 1./Ras, units='mm')
    Efloor[self.SWE > 0] = 0.0  # no evaporation from floor if snow on ground or beta == 0
    return Tr, Efloor, Gc
def canopy_water_snow(self, dt, T, Prec, AE, D, Rg, RH, Ra=25.0, U=2.0):
"""
Calculates canopy water interception and SWE during timestep dt
Args:
self - object
dt - timestep [s]
T - air temperature (degC)
Prec - precipitation rate during (mm d-1)
AE - available energy (~net radiation) (Wm-2)
D - vapor pressure deficit (kPa)
Ra - canopy aerodynamic resistance (s m-1)
Returns:
self - updated state W, Wf, SWE, SWEi, SWEl
PotInf - potential infiltration to soil profile (mm)
Trfall - throughfall to snow / soil surface (mm)
Evap - evaporation / sublimation from canopy store (mm)
Interc - interception of canopy (mm)
MBE - mass balance error (mm)
Unload - undloading from canopy storage (mm)
Samuli Launiainen & Ari Laurén 2014 - 2017
Last edit 12 / 2017
"""
## JP EDIT 22.1
# new constants for energy snow
SBc = 4.89E-9 # Stefan-Boltzmann constant {MJ/day*m2*K^4)
rooW = 1000.0 # density of water [kg/m3]
Cw = 4.2E-3 # specific heat capacity of water [MJ/kg*C]
cair = 1.29E-3 # heat capacity of air [MJ/m3*C]
ds = 0.0 # zero-plane dispalcement for snow [m] (Walter et al 2005)
zms = 0.001 # momentum roughness for snow [m] (Walter et al 2005)
zhs = 0.0002 # heat and vapour roughness parameter for snow [m] (Walter et al 2005)
k = 0.41 # von Karman's constant
Rv = 4.63E-3 # gas constant for water vapour
Rt = 0.4615 # thermodynamic vapour constant [kJ/kg*K]
lamv = 2.800 # latent heat of vaporization [MJ/kg]
lamf = 0.333 # latent heat of fusion [MJ/kg]
Ci = 2.03E-3 # specific heat capacity of ice [MJ/kg*C]
# quality of precipitation
Tmin = self.Tmin # 'C, equal or below all is snow
Tmax = self.Tmax # 'C, above all is water
Tmelt = 0.0 # 'C, T when melting starts
# storage capacities mm
Wmax = self.wmax * self.LAI # # storage capacity for rain (mm)
Wmaxsnow = self.wmaxsnow * self.LAI # storage capacity for snow (mm)
# melting/freezing coefficients mm/s
Kmelt = self.Kmelt - 1.64 * self.cf / dt # Kuusisto E, 'Lumi Suomessa'
Kfreeze = self.Kfreeze # freezing coefficient (mm/s)
kp = self.physpara['kp'] # canopy light attenuation parameter (-)
''' sama kuin tau0? '''
tau = np.exp(-kp*self.LAI) # fraction of Rn at ground
# inputs to arrays, needed for indexing later in the code
gridshape = np.shape(self.LAI) # rows, cols
if np.shape(T) != gridshape:
T = np.ones(gridshape) * T
Prec = np.ones(gridshape) * Prec
AE = np.ones(gridshape) * AE
D = np.ones(gridshape) * D
Ra = np.ones(gridshape) * Ra
RH = np.ones(gridshape) * RH
Rg = np.ones(gridshape) * Rg
U = np.ones(gridshape) * U
# reduction of windspeed due to vegetation (Tarboton and Luke 1996) (from Ala-aho et al.)
''' sama kuih aerodynamic functions ?'''
WS = U * (1 - (0.8 * self.cf))
Prec = Prec * dt # mm
# latent heat of vaporization (Lv) and sublimation (Ls) J kg-1
Lv = 1e3 * (3147.5 - 2.37 * (T + 273.15))
Ls = Lv + 3.3e5
# compute 'potential' evaporation / sublimation rates for each grid cell
erate = np.zeros(gridshape)
ixs = np.where((Prec == 0) & (T <= Tmin))
ixr = np.where((Prec == 0) & (T > Tmin))
Ga = 1. / Ra # aerodynamic conductance
# resistance for snow sublimation adopted from:
# Pomeroy et al. 1998 Hydrol proc; Essery et al. 2003 J. Climate;
# Best et al. 2011 Geosci. Mod. Dev.
Ce = 0.01*((self.W + eps) / Wmaxsnow)**(-0.4) # exposure coeff (-)
Sh = (1.79 + 3.0*U**0.5) # Sherwood numbner (-)
gi = Sh*self.W*Ce / 7.68 + eps # m s-1
erate[ixs] = dt / Ls[ixs] * penman_monteith((1.0 - tau[ixs])*AE[ixs],
1e3*D[ixs], T[ixs], gi[ixs],
Ga[ixs], units='W')
# evaporation of intercepted water, mm
gs = 1e6
erate[ixr] = dt / Lv[ixr] * penman_monteith((1.0 - tau[ixr])*AE[ixr],
1e3*D[ixr], T[ixr], gs,
Ga[ixr], units='W')
# ---proportion of state of precipitation [as water (fW) or as snow(fS)]
fW = np.zeros(gridshape)
fS = np.zeros(gridshape)
fW[T >= Tmax] = 1.0
fS[T <= Tmin] = 1.0
ix = np.where((T > Tmin) & (T < Tmax))
fW[ix] = (T[ix] - Tmin) / (Tmax - Tmin)
fS[ix] = 1.0 - fW[ix]
del ix
# --- Local fluxes (mm)
Unload = np.zeros(gridshape) # snow unloading
Interc = np.zeros(gridshape) # interception
Melt = np.zeros(gridshape) # melting
Freeze = np.zeros(gridshape) # freezing
Evap = np.zeros(gridshape) # evaporation
Subl = np.zeros(gridshape) # sublimation from snowpack
Meltrate = np.zeros(gridshape) # rate of melting/refreezing [m/d]
Freezerate = np.zeros(gridshape) # rate of melting/refreezing [m/d]
Qm = np.zeros(gridshape) # melt energy content
Qc = np.zeros(gridshape) # cold content
""" --- initial conditions for calculating mass balance error --"""
Wo = self.W # canopy storage
SWEo = self.SWE # Snow water equivalent mm
""" --------- Canopy water storage change -----"""
# snow unloading from canopy, ensures also that seasonal LAI development does
# not mess up computations
ix = (T >= Tmax)
Unload[ix] = np.maximum(self.W[ix] - Wmax[ix], 0.0)
self.W = self.W - Unload
del ix
# dW = self.W - Wo
# Interception of rain or snow: asymptotic approach of saturation.
# Hedstrom & Pomeroy 1998. Hydrol. Proc 12, 1611-1625;
# Koivusalo & Kokkonen 2002 J.Hydrol. 262, 145-164.
ix = (T < Tmin)
Interc[ix] = (Wmaxsnow[ix] - self.W[ix]) \
* (1.0 - np.exp(-(self.cf[ix] / Wmaxsnow[ix]) * Prec[ix]))
del ix
# above Tmin, interception capacity equals that of liquid precip
ix = (T >= Tmin)
Interc[ix] = np.maximum(0.0, (Wmax[ix] - self.W[ix]))\
* (1.0 - np.exp(-(self.cf[ix] / Wmax[ix]) * Prec[ix]))
del ix
self.W = self.W + Interc # new canopy storage, mm
Trfall = Prec + Unload - Interc # Throughfall to field layer or snowpack
Trfallice = Trfall * fS / 1000
Trfallliq = Trfall * fW / 1000
# evaporate from canopy and update storage
Evap = np.minimum(erate, self.W) # mm
self.W = self.W - Evap
#print(self.SWE)
""" Snowpack (in case no snow, all Trfall routed to floor) """
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
SWE_m = np.ones(gridshape) # temp variable for SWE energy (m)
SWE_m = self.SWE * 0.001
SWE_m0 = SWE_m
#Wliq_m = np.ones(gridshape) # temp variable for liquid water in snowpack (m)
#Wice_m = np.zeros(gridshape) # temp variable for liquid water in snowpack (m)
# Albedo where there is very old snow
olds = np.where((SWE_m > 0) & (self.d_snow > 100))
self.alb[olds] = (0.94**self.d_nosnow[olds]**0.58)**self.albpow
# Albedo where there is newer snow
news = np.where((SWE_m > 0) & (self.d_snow < 100))
self.alb[news] = 0.94**self.d_nosnow[news]**0.58
# Ground albedo
nos = np.where(SWE_m <= 0)
self.alb[nos] = self.albground
#print(self.alb[1])
del olds, news, nos
# INCOMING SHORTWAVE RADIATION
# [MJ*d/m2] passing through canopy and transmitted by canopy (Wigmosta et al 1996)
''' Common with AE ?'''
radRs = Rg * 1E-6 * dt # W/m2 to MJ/d/m2
radRss = radRs * (1-self.alb) * (self.tau0 * self.cf + (1-self.cf)) # radRss = MJ/d*m2
# NET LONGWAVE RADIATION in the snowpack emitted by atmosphere, overstorey and lost by snowpack (Wigmosta et al. 1996)
# atmosphere emissivity, different for cloudy and clear days (Walter et al 2005/Campbell and Norman
ix = np.where(Prec > self.RDthres)
self.emAir[ix] = (0.72 + 0.005 * T[ix]) * (1 - 0.84) + 0.84
ax = np.where(Prec <= self.RDthres)
self.emAir[ax] = 0.72 + 0.005 * T[ax]
# Atmospheric longwave radiation
Ld = self.emAir * SBc * (273.15 + T)**4
# Longwave emitted by overstorey, assuming emissivity of unity
L0 = SBc * (273.15 + T)**4
# longwave emitted by snow, emissivity 0.97 from (Walter et al 2005)
Lss = 0.97 * SBc * (273.15 + self.Tsnow)**4
# Net longwave radiation [MJ/d*m2]
radLs = L0 * self.cf + (Ld * (1 - self.cf)) - Lss
# net TOTAL radation on the SNOWPACK
radRns = radRss + radLs # MJ/d*m2
# Advection from precipitation
# Heat from rain, both liquid and solid (Wigmosta et al. 1994), conversion from mm to m and kj to MJ
Qp = rooW * Cw * T * (Trfallice + 0.5 * Trfallice)
# Sensible heat exchange in the snowpack ...snow temperature from previous timestep is taken as input
# resistance to heat transfer (Walter et al 2005)
ras = ((ln((self.zmeas - ds + zms) / zms) * ln((self.zmeas - ds + zhs) / zhs)) / (k**2 * WS)) / dt
# Sensible heat transfer by turbulent convection [MJ/d*m2]
Qs = cair * (T - self.Tsnow) / ras
# HEAT from convective VAPOUR EXCHANGE (evaporation and condensation) ...snow temperature from previous timestep is takes as input
# saturation vapour pressure in a given air temperature (Allen et al 2000), converted to mbbar and scaled to actual with relative humidity data
pVap = 0.6108 * np.exp((17.27 * T) / (T + 237.3)) * 10 * RH / 100
# vapour density of air (Dingman 1993, eq D-7a) converted to [kg/m3]
rooA = (pVap / ((T + 273.15) * Rv)) / 1000
# vapour density at the snow surface (Walter et al 2005)
rooSA = np.exp((16.78 * self.Tsnow - 116.8) / (self.Tsnow + 273.3)) * (1 / ((273.15 + self.Tsnow) * Rt))
# latent heat flux [MJ/d*m2] (Walter et al 2005)
Ql = lamv * ((rooA - rooSA) / ras)
# sum of ENERGY INPUT/OUTPUT which will results in melting/refreezing and heating/cooling the snowpack
# positive fluxes add energy to the snowpack and negative remove energy from snowpack
Esum = radRns + Qp + Qs + Ql # MJ/dt*m2
#print('Esum = ', Esum)
# Energy for melt with different conditions
# if there is no snow, there can be no melt or refreezing
nos = np.where(SWE_m == 0)
Qm[nos] = 0
# If there is snow and energy to melt it
ix = np.where((SWE_m > 0) & (Esum > 0) & (self.Tsnow < 0))
# what is available for melt after heating the snowpack.
# If more cold content than heat, nothing left for melt (add 0.00001 for numerical stability)
Qm[ix] = np.maximum(0, Esum[ix] - (0 - self.Tsnow[ix]) * (1 / (Ci * (SWE_m[ix] + 0.00001) * rooW)))
# Snowpack isothermal, all energy is diverted to snowmelt !
ax = np.where((SWE_m > 0) & (Esum <= 0) & (self.Tsnow >= 0))
Qm[ax] = Esum[ax]
# If Esum negative but no water to freeze -> Qm = 0
ex = np.where((SWE_m > 0) & (Esum <= 0) & (self.Wliq == 0))
Qm[ex] = 0
# Esum negative and Wliq to freeze
yx = np.where((SWE_m > 0) & (Esum <= 0) & (self.Wliq != 0))
Qm[yx] = np.maximum(-rooW * lamf * self.Wliq[yx], Esum[yx])
del ix, ax, ex, yx
# snowpack cold content change
Qc = Esum - Qm
#print('Qc = ', Qc)
#print('Wliq', self.Wliq)
# mass balance formulation concept from (Wigmosta 1994) except that sublimation/deposition takes place from ice phase
# store values from previous timestep
Wliq_o = self.Wliq
Wice_o = self.Wice
self.Wice = np.maximum(0.0, self.Wice + Trfallice)
#print('Qm = ', Qm)
ix = np.where(Qm < 0)
Freezerate[ix] = np.maximum(Qm[ix] / (rooW * lamf), -self.Wliq[ix]) # rate of freezing
ax = np.where(Qm >= 0)
Meltrate[ax] = np.minimum(Qm[ax] / (rooW * lamf), self.Wice[ax]) # rate of melting
del ix, ax
self.Wice = np.maximum(0, self.Wice - Meltrate - Freezerate)
# calculate and limit evaporation/deposition
# sublimation/deposition to the solid ice phase, assuming there is ice left !! no evaporation from liquid phase...!
ix = np.where(self.Wice > 0)
Subl[ix] = np.maximum(Ql[ix] / (rooW * lamv), -self.Wice[ix])
del ix
# update the mass balance of the ice phase
# update snow ice content after sublimation, cannot go negative
self.Wice = np.maximum(0, self.Wice + Subl)
# mass balance for liquid phase
# liquid water in the snowpack [m], cannot exceed water retention capacity or go negative
# water is added via rain and added/removed via melting/freezing
self.Wliq = np.maximum(0, np.minimum(self.R * self.Wice, self.Wliq + Trfallliq + Meltrate))
# water flow out of the snowpack, a certain volume retained
self.Wliqout = np.maximum(0, (Wliq_o + Trfallliq + Meltrate) - self.R * self.Wice)
# COMFORM to variable naming and convert to [mm/d]
#Melt = self.Wliqout * 1000
self.Wliq = np.maximum(0, self.Wliq - self.Wliqout)
#print('Wliq out = ', self.Wliqout)
#print('Wliq = ', self.Wliq)
#print('Wice = ', self.Wice)
# total water content in snowpack
# total snow water equivalent a sum of liquid and ice fraction
SWE_m = self.Wice + self.Wliq
# save the change in SWE
self.deltaSWE = SWE_m - SWEo
# update snow temperature according to energy and mass balance
self.Tsnow0 = self.Tsnow
# constrain snow temperature
# with shallow snow depths snow temp equals air temp
ix = np.where(SWE_m < 0.05)
self.Tsnow[ix] = T[ix]
# excess energy from melting
ax = np.where(SWE_m >= 0.05)
self.Tsnow[ax] = self.Tsnow0[ax] + (Qc[ax] / (Ci * SWE_m[ax] * rooW)) # excess energy from melting
# during melt Tsnow = 0
ex = np.where(Qm > 0)
self.Tsnow[ex] = 0.0
# cannot go too cold, miniimise to AT. This breaks the energy conservation so this could be improved. Perhaps introduce soil energy store where excess energy/cold content is diverted
# if snow temperature tries to go below -4, set lower limit to air temperature
yx = np.where(self.Tsnow < -4.0)
self.Tsnow[yx] = np.maximum(self.Tsnow[yx], T[yx])
self.Tsnow = np.minimum(self.Tsnow, 0.0) # cannot be positive
# set the internal energy flux variables to 0 as long as the SWE is 0
xx = np.where(SWE_m == 0)
Ql[xx] = 0.0
Qs[xx] = 0.0
Qc[xx] = 0.0
Qm[xx] = 0.0
del ix, ax, ex, yx
self.SWE = SWE_m * 100
PotInf = np.maximum(0.0, self.Wliq - self.Wice * self.R)
PotInf = PotInf * 1000
# mass-balance error mm
MBE = (self.W + self.SWE) - (Wo + SWEo) - (Prec - Evap - PotInf)
#print('ESUM = ', Esum)
#print('Potinf =', PotInf)
#print('Wliq = ', self.Wliq)
#print('Wice =', self.Wice)
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
###################################################################################################################################################################
'''
ix = np.where(T >= Tmelt)
Melt[ix] = np.minimum(self.SWEi[ix], Kmelt[ix] * dt * (T[ix] - Tmelt)) # mm
del ix
ix = np.where(T < Tmelt)
Freeze[ix] = np.minimum(self.SWEl[ix], Kfreeze * dt * (Tmelt - T[ix])) # mm
del ix
# amount of water as ice and liquid in snowpack
Sice = np.maximum(0.0, self.SWEi + fS * Trfall + Freeze - Melt)
Sliq = np.maximum(0.0, self.SWEl + fW * Trfall - Freeze + Melt)
PotInf = np.maximum(0.0, Sliq - Sice * self.R) # mm
Sliq = np.maximum(0.0, Sliq - PotInf) # mm, liquid water in snow
# update Snowpack state variables
self.SWEl = Sliq
self.SWEi = Sice
self.SWE = self.SWEl + self.SWEi
'''
ix = np.where(Trfallice < 0.001)
self.d_nosnow[ix] = np.minimum(30, self.d_nosnow[ix] + 1)
ax = np.where(Trfallice > 0.001)
self.d_nosnow[ax] = 1
ex = np.where(self.SWE > 0)
self.d_snow[ex] = self.d_snow[ex] + 1
yx = np.where(self.SWE == 0)
self.d_snow[yx] = 0
del ix, ax, ex, yx
# mass-balance error mm
#MBE = (self.W + self.SWE) - (Wo + SWEo) - (Prec - Evap - PotInf)
return PotInf, Trfall, Evap, Interc, MBE, Unload
""" *********** utility functions ******** """
# @staticmethod
def degreeDays(dd0, T, Tbase, doy):
    """Accumulate the running degree-day sum with today's mean temperature.

    The sum resets to zero on the first day of the year and otherwise grows
    by the positive excess of T over the base temperature.

    INPUT:
        dd0 - previous degree-day sum (degC)
        T - daily mean temperature (degC)
        Tbase - base temperature at which accumulation starts (degC)
        doy - day of year 1...366 (integer)
    OUTPUT:
        x - updated degree-day sum (degC)
    """
    carry = 0. if doy == 1 else dd0  # annual reset on Jan 1st
    return carry + max(0, T - Tbase)
# @staticmethod
def eq_evap(AE, T, P=101300.0, units='W'):
    """Equilibrium evaporation after McNaughton & Spriggs (1986).

    INPUT:
        AE - Available energy (Wm-2)
        T - air temperature (degC)
        P - pressure (Pa)
        units - W (Wm-2), mm (mms-1=kg m-2 s-1), mol (mol m-2 s-1)
    OUTPUT:
        equilibrium evaporation rate in the requested units, floored at zero
    """
    MW = 18e-3  # molar mass of water, kg mol-1
    if T < 0:
        # latent heat of sublimation [J/kg]
        L = 1e3 * (2834.1 - 0.29 * T - 0.004 * T ** 2)
    else:
        # latent heat of vaporization of water [J/kg]
        L = 1e3 * (2500.8 - 2.36 * T + 1.6e-3 * T ** 2 - 6e-5 * T ** 3)
    _, s, g = e_sat(T, P)
    # equilibrium evaporation, Wm-2 = Js-1m-2
    rate = np.divide(AE * s, s + g)
    if units == 'mm':
        rate = rate / L  # kg m-2 s-1 = mm s-1
    elif units == 'mol':
        rate = rate / L / MW  # mol m-2 s-1
    return np.maximum(rate, 0.0)
# @staticmethod
def e_sat(T, P=101300):
    """Saturation vapor pressure and related psychrometric quantities.

    IN:
        T - air temperature (degC)
        P - ambient pressure (Pa)
    OUT:
        esa - saturation vapor pressure (Pa)
        s - slope of the saturation vapor pressure curve (Pa K-1)
        g - psychrometric constant (Pa K-1)
    """
    ZERO_C = 273.15      # K
    CP_AIR = 1004.67     # specific heat of air, J/kg/K
    # latent heat of vaporization [J/kg]
    lat_heat = 1e3 * (3147.5 - 2.37 * (T + ZERO_C))
    # Magnus-type saturation vapor pressure, Pa
    esa = 1e3 * (0.6112 * np.exp((17.67 * T) / (T + 273.16 - 29.66)))
    # slope of the saturation curve, Pa K-1
    s = 17.502 * 240.97 * esa / ((240.97 + T) ** 2)
    # psychrometric constant, Pa K-1
    g = P * CP_AIR / (0.622 * lat_heat)
    return esa, s, g
# @staticmethod
def penman_monteith(AE, D, T, Gs, Ga, P=101300.0, units='W'):
    """Latent heat flux LE (Wm-2), i.e. evapotranspiration rate, from the
    Penman-Monteith combination equation.

    INPUT:
        AE - available energy [Wm-2]
        D - vapor pressure deficit [Pa]
        T - ambient air temperature [degC]
        Gs - surface conductance [ms-1]
        Ga - aerodynamic conductance [ms-1]
        P - ambient pressure [Pa]
        units - W (Wm-2), mm (mms-1=kg m-2 s-1), mol (mol m-2 s-1)
    OUTPUT:
        x - evaporation rate in 'units', floored at zero
    """
    CP = 1004.67    # specific heat of air, J kg-1 K-1
    RHO = 1.25      # air density, kg m-3
    MW = 18e-3      # molar mass of water, kg mol-1
    # slope of sat. vapor pressure curve and psychrometric constant
    _, s, g = e_sat(T, P)
    # latent heat of vaporization [J/kg]
    L = 1e3 * (3147.5 - 2.37 * (T + 273.15))
    # Penman-Monteith combination equation, Wm-2
    flux = (s * AE + RHO * CP * Ga * D) / (s + g * (1.0 + Ga / Gs))
    if units == 'mm':
        flux = flux / L  # kgm-2s-1 = mms-1
    elif units == 'mol':
        flux = flux / L / MW  # mol m-2 s-1
    return np.maximum(flux, 0.0)
# @staticmethod
def aerodynamics(LAI, hc, Uo, w=0.01, zm=2.0, zg=0.5, zos=0.01):
    """Wind speed at the ground and canopy top plus boundary-layer conductances.

    Assumes a logarithmic wind profile above the canopy and an exponential
    profile within it.

    Args:
        LAI - one-sided leaf-area /plant area index (m2m-2)
        hc - canopy height (m)
        Uo - mean wind speed at height zm above the canopy (ms-1)
        w - leaf length scale (m)
        zm - wind speed measurement height above canopy (m)
        zg - height above ground where Ug is computed (m)
        zos - forest floor roughness length, ~ 0.1*roughness element height (m)
    Returns:
        ra - canopy aerodynamic resistance incl. rb (sm-1)
        rb - canopy boundary layer resistance (sm-1)
        ras - forest floor aerod. resistance (sm-1)
        ustar - friction velocity (ms-1)
        Uh - wind speed at hc (ms-1)
        Ug - wind speed at zg (ms-1)
    SOURCE:
        Cammalleri et al. 2010 Hydrol. Earth Syst. Sci
        Massman 1987, BLM 40, 179 - 197.
        Magnani et al. 1998 Plant Cell Env.
    """
    KV = 0.4        # von Karman constant (-)
    BETA = 285.0    # s/m, from Campbell & Norman eq. (7.33) x 42.0 molm-3
    zm = hc + zm                       # measurement height above ground, m
    zg = np.minimum(zg, 0.1 * hc)      # keep zg inside the trunk space
    alpha = LAI / 2.0                  # wind attenuation coeff (Yi, 2008 eq. 23)
    d = 0.66 * hc                      # displacement height, m
    zom = 0.123 * hc                   # momentum roughness length, m
    zov = 0.1 * zom                    # vapor roughness length, m
    zosv = 0.1 * zos                   # floor vapor roughness length, m
    # friction velocity and canopy-top wind speed from the log profile
    ustar = Uo * KV / np.log((zm - d) / zom)
    Uh = ustar / KV * np.log((hc - d) / zom)
    # exponential decay inside the canopy; zground can't be above canopy top
    zn = np.minimum(zg / hc, 1.0)
    Ug = Uh * np.exp(alpha * (zn - 1.0))
    # canopy aerodynamic & boundary-layer resistances (sm-1),
    # Magnani et al. 1998 PCE eq. B1 & B5
    ra = 1. / (KV ** 2.0 * Uo) * np.log((zm - d) / zom) * np.log((zm - d) / zov)
    rb = 1. / LAI * BETA * ((w / Uh) * (alpha / (1.0 - np.exp(-alpha / 2.0)))) ** 0.5
    # soil (forest floor) aerodynamic resistance (sm-1)
    ras = 1. / (KV ** 2.0 * Ug) * (np.log(zg / zos)) * np.log(zg / (zosv))
    ra = ra + rb  # total canopy resistance includes the boundary layer
    return ra, rb, ras, ustar, Uh, Ug
def wind_profile(LAI, hc, Uo, z, zm=2.0, zg=0.2):
    """
    Computes wind speed at heights z assuming logarithmic profile above and
    hyperbolic cosine profile within canopy.

    INPUT:
        LAI - one-sided leaf-area /plant area index (m2m-2)
        hc - canopy height (m)
        Uo - mean wind speed at height zm above the canopy (ms-1)
        z - heights above ground where U is computed (m), array
        zm - wind speed measurement height above canopy (m)
        zg - unused; kept for backward compatibility of the signature
    OUTPUT:
        U - wind speed at each height in z (ms-1)
        ustar - friction velocity (ms-1)
        Uh - wind speed at hc (ms-1)
    SOURCE:
        Cammalleri et al. 2010 Hydrol. Earth Syst. Sci
        Massman 1987, BLM 40, 179 - 197.
    """
    k = 0.4       # von Karman const
    Cd = 0.2      # drag coeff
    alpha = 1.5   # (-)
    zm = zm + hc          # measurement height above ground
    d = 0.66 * hc         # displacement height, m
    zom = 0.123 * hc      # momentum roughness length, m
    beta = 4.0 * Cd * LAI / (k ** 2.0 * alpha ** 2.0)
    # solve ustar and U(hc) from log-profile above canopy
    ustar = Uo * k / np.log((zm - d) / zom)  # m/s
    # np.nan (np.NaN was removed in NumPy 2.0) marks heights left unassigned
    U = np.ones(len(z)) * np.nan
    # above canopy top wind profile is logarithmic
    U[z >= hc] = ustar / k * np.log((z[z >= hc] - d) / zom)
    # at canopy top, match log and hyperbolic-cosine profiles
    Uh = ustar / k * np.log((hc - d) / zom)  # m/s
    # within canopy: hyperbolic cosine profile
    U[z <= hc] = Uh * (np.cosh(beta * z[z <= hc] / hc) / np.cosh(beta)) ** 0.5
    return U, ustar, Uh
def daylength(LAT, LON, DOY):
    """
    Computes daylength from location and day of year.

    Args:
        LAT, LON - in deg, float or arrays of floats. LON is accepted for
            backward compatibility but daylength does not depend on it.
        DOY - day of year, float or arrays of floats
    Returns:
        dl - daylength (hours), float or arrays of floats; 0.0 during polar
            night and 24.0 during polar day.
    """
    CF = np.pi / 180.0  # conversion deg -->rad
    LAT = LAT * CF
    # ---> compute solar declination angle (rad)
    xx = 278.97 + 0.9856 * DOY + 1.9165 * np.sin((356.6 + 0.9856 * DOY) * CF)
    DECL = np.arcsin(0.39785 * np.sin(xx * CF))
    del xx
    # --- day length is the period when the sun is above the horizon
    # (zenith angle 90 deg, cosZEN = 0); neglects civil twilight conditions
    cosZEN = 0.0
    # cosine of the sunrise/sunset hour angle. NOTE: the previous version
    # applied cosZEN outside the division, which happened to give the same
    # result only because cosZEN == 0; the parenthesization is now correct.
    cosHA = (cosZEN - np.sin(LAT) * np.sin(DECL)) / (np.cos(LAT) * np.cos(DECL))
    # clip to the arccos domain so polar night/day yield 0 h / 24 h, not NaN
    cosHA = np.clip(cosHA, -1.0, 1.0)
    dl = 2.0 * np.arccos(cosHA) / CF / 15.0  # hours
    return dl
#def read_ini(inifile):
# """read_ini(inifile): reads canopygrid.ini parameter file into pp dict"""
#
# cfg = configparser.ConfigParser()
# cfg.read(inifile)
#
# pp = {}
# for s in cfg.sections():
# section = s.encode('ascii', 'ignore')
# pp[section] = {}
# for k, v in cfg.items(section):
# key = k.encode('ascii', 'ignore')
# val = v.encode('ascii', 'ignore')
# if section == 'General': # 'general' section
# pp[section][key] = val
# else:
# pp[section][key] = float(val)
#
# pp['General']['dt'] = float(pp['General']['dt'])
#
# pgen = pp['General']
# cpara = pp['CanopyGrid']
# return pgen, cpara |
load("//internal/starlark:util.bzl", "run_starlark_executor")
def _terraform_locals_impl(ctx):
    """Implementation of the `terraform_locals` rule.

    Evaluates `src` with the Starlark executor, wraps the dict returned by
    the file's main() into a Terraform `locals` object, and writes the
    JSON-encoded result to `<name>_locals.tf.json`.
    """
    output = ctx.actions.declare_file(ctx.label.name + "_locals.tf.json")
    # The triple-quoted snippet below is prelude code executed by the
    # Starlark executor (not by Bazel); the final expression argument is
    # what the executor evaluates and writes to `output`.
    run_starlark_executor(
        ctx,
        output,
        ctx.file.src,
        ctx.files.deps,
        ctx.executable._starlark_executor,
        """
# Create local variable definitions for .tf.json file
def wrap_locals(x):
    assert_type(x, "dict")
    return { "locals": x }
""",
        "encode_indent(wrap_locals(main()))",
    )
    return [DefaultInfo(files = depset([output]))]
# Public rule: evaluates `src` with the Starlark executor and emits a
# `<name>_locals.tf.json` file whose top-level object is {"locals": {...}}.
terraform_locals = rule(
    implementation = _terraform_locals_impl,
    doc = "Creates a .tf.json file defining local variables from a Starlark dict",
    attrs = {
        "src": attr.label(
            doc = "Source Starlark file to execute",
            mandatory = True,
            allow_single_file = True,
        ),
        "deps": attr.label_list(
            doc = "Files needed to execute Starlark",
        ),
        # Hidden attribute: the host tool that evaluates the Starlark
        # source; built for the exec configuration, not user-settable.
        "_starlark_executor": attr.label(
            default = Label("//internal/starlark"),
            allow_single_file = True,
            executable = True,
            cfg = "exec",
        ),
    },
)
|
# -*- coding: utf8 -*-
# Example script: create an HTTPS listener with mutual SSL authentication on
# an existing Tencent Cloud load balancer through the QcloudApi SDK.
from QcloudApi.qcloudapi import QcloudApi
from tce.tcloud.utils.config import global_config
# Cloud API module (product) to load
module = 'lb'
# Action name of the API call; see the wiki page of the corresponding API
action = 'CreateLoadBalancerListeners'
# Region and credentials come from the shared configuration
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain =params['domain']
# Common parameters of the cloud API
config = {
    'Region': region,
    'secretId': secretId,
    'secretKey': secretKey,
    'method': 'GET',
    'SignatureMethod': 'HmacSHA1'
}
# Action-specific parameters; fill in according to the actual use case, JSON supported
# e.g. an array:      "ArrayExample": ["1","2","3"]
# e.g. a dictionary:  "DictExample": {"key1": "value1", "key2": "values2"}
action_params = {
    'loadBalancerId':'lb-4buhw4ug',
    'listeners.0.loadBalancerPort':443,
    'listeners.0.instancePort':443,
    'listeners.0.protocol':4,
    'listeners.0.SSLMode':'mutual',
    'listeners.0.certName':'00',
    'listeners.0.certContent':'-----BEGIN CERTIFICATE-----'
    'MIICwDCCAagCAQEwDQYJKoZIhvcNAQEEBQAwaDELMAkGA1UEBhMCVFgxCzAJBgNVBAgMAlRYMQswCQYDVQQHDAJUWDELMAkGA1UECgwCVFgxCzAJBgNVBAsMAlRYMQsw'
    'CQYDVQQDDAJUWDEYMBYGCSqGSIb3DQEJARYJVFhAcXEuY29tMB4XDTE5MDEyMjAzMDYzMVoXDTIwMDEyMjAzMDYzMVowaDELMAkGA1UEBhMCVFgxCzAJBgNVBAgMAlRY'
    'MQswCQYDVQQHDAJUWDELMAkGA1UECgwCVFgxCzAJBgNVBAsMAlRYMQswCQYDVQQDDAJUWDEYMBYGCSqGSIb3DQEJARYJVFhAcXEuY29tMIGfMA0GCSqGSIb3DQEBAQUA'
    'A4GNADCBiQKBgQDsiSwcKelWmjH/2oTcKCuq19qE7bd9qBpRdLDRCF/WJrGYpQm9J6oikJ55Xhivcy/APcX2C4KtXcUD/MCbZ+nb1J0daWOGmcSkoKhRp/Chp8VGTMJW'
    'd6prOC2if/QUbncIVCni6dQE6V86lF6hH8W9ZuncRpgyWMWg9mxdekfQpQIDAQABMA0GCSqGSIb3DQEBBAUAA4IBAQBSVkcG/lIu0O6bEkGj+FBysl45rLa9dt+EKHLL'
    '+GKun1lDH6Qz5f7D97SujfH1m8lr1RYsczYjvR1gOr2aRnp7xqJj9pt9Z/VMnXqR9djSZHfcnGDCgHlDqXqsIu+L0l9wb/BcjytDpnD5ISEphBGvdj2bFnyva6y/fPwb'
    'Xk+cdDLY72Xuk8lrk3CBq2qsMg17zDI40Ut0nqcNPPe0BIWip1ernVrW6IhXgD/T'
    'znwMEWAV9jRtaZcWDkJRfW9YovKsSegu7Y0qQnhtasEyBs1lrdD/W1ohwdS7vc06'
    'GlicjYBlpKxelPsvv38Z3v0QPLw9H4e+XPFVAQJOUmR+2PrM-----END CERTIFICATE-----',
    'listeners.0.certKey':'-----BEGIN RSA PRIVATE KEY-----your own key-----END RSA PRIVATE KEY-----',
    'listeners.0.certCaContent':'-----BEGIN CERTIFICATE-----'
    'MIICwDCCAagCAQEwDQYJKoZIhvcNAQEEBQAwaDELMAkGA1UEBhMCVFgxCzAJBgNVBAgMAlRYMQswCQYDVQQHDAJUWDELMAkGA1UECgwCVFgxCzAJBgNVBAsMAlRYMQsw'
    'CQYDVQQDDAJUWDEYMBYGCSqGSIb3DQEJARYJVFhAcXEuY29tMB4XDTE5MDEyMjAzMDYzMVoXDTIwMDEyMjAzMDYzMVowaDELMAkGA1UEBhMCVFgxCzAJBgNVBAgMAlRY'
    'MQswCQYDVQQHDAJUWDELMAkGA1UECgwCVFgxCzAJBgNVBAsMAlRYMQswCQYDVQQDDAJUWDEYMBYGCSqGSIb3DQEJARYJVFhAcXEuY29tMIGfMA0GCSqGSIb3DQEBAQUA'
    'A4GNADCBiQKBgQDsiSwcKelWmjH/2oTcKCuq19qE7bd9qBpRdLDRCF/WJrGYpQm9J6oikJ55Xhivcy/APcX2C4KtXcUD/MCbZ+nb1J0daWOGmcSkoKhRp/Chp8VGTMJW'
    'd6prOC2if/QUbncIVCni6dQE6V86lF6hH8W9ZuncRpgyWMWg9mxdekfQpQIDAQABMA0GCSqGSIb3DQEBBAUAA4IBAQBSVkcG/lIu0O6bEkGj+FBysl45rLa9dt+EKHLL'
    '+GKun1lDH6Qz5f7D97SujfH1m8lr1RYsczYjvR1gOr2aRnp7xqJj9pt9Z/VMnXqR9djSZHfcnGDCgHlDqXqsIu+L0l9wb/BcjytDpnD5ISEphBGvdj2bFnyva6y/fPwb'
    'Xk+cdDLY72Xuk8lrk3CBq2qsMg17zDI40Ut0nqcNPPe0BIWip1ernVrW6IhXgD/T'
    'znwMEWAV9jRtaZcWDkJRfW9YovKsSegu7Y0qQnhtasEyBs1lrdD/W1ohwdS7vc06'
    'GlicjYBlpKxelPsvv38Z3v0QPLw9H4e+XPFVAQJOUmR+2PrM-----END CERTIFICATE-----',
    'listeners.0.certCaName':'00'
}
try:
    service = QcloudApi(module, config)
    # Before sending, secretId/secretKey/Region/method/SignatureMethod can be
    # reset with the corresponding setter methods, e.g.:
    # service.setRegion('shanghai')
    # Print the generated request URL without sending the request
    print(service.generateUrl(action, action_params))
    # Call the API, send the request and print the response
    print(service.call(action, action_params))
except Exception as e:
    import traceback
    print('traceback.format_exc():\n%s' % traceback.format_exc())
import great_expectations as ge
import pytest
# Spark-backed data context pointed at the shared CSV test fixtures.
context = ge.get_data_context('SparkCSV', './tests/test_sets')
# header=True: the first CSV row provides the column names.
titanic_dataset = context.get_dataset('Titanic.csv', header=True)
strf_dataset = context.get_dataset('strf_test.csv', header=True)
def test_expect_column_values_to_be_unique():
    """Uniqueness expectation on unique, non-unique and mostly-unique columns."""
    # _c0 is the row index, so every value is distinct.
    assert titanic_dataset.expect_column_values_to_be_unique('_c0')['success']
    # Passenger ages repeat, so uniqueness fails.
    assert not titanic_dataset.expect_column_values_to_be_unique('Age')['success']
    # Names collide too, and the duplicate is reported in the result payload.
    res = titanic_dataset.expect_column_values_to_be_unique('Name')
    assert not res['success']
    assert 'Kelly, Mr James' in res['result']['partial_unexpected_list']
    # With a 95% tolerance the expectation passes despite the duplicates.
    assert titanic_dataset.expect_column_values_to_be_unique('Name', mostly=0.95)['success']
def test_expect_column_values_to_match_strftime_format():
    """strftime-format expectation on matching and non-matching columns."""
    cases = [
        (strf_dataset, 'date', '%Y-%m-%d', True),     # ISO dates match
        (strf_dataset, 'date', '%Y%m%d', False),      # separator-less format does not
        (titanic_dataset, 'Age', '%Y-%m-%d', False),  # ages are not dates
    ]
    for dataset, column, fmt, should_pass in cases:
        result = dataset.expect_column_values_to_match_strftime_format(column, fmt)
        if should_pass:
            assert result['success']
        else:
            assert not result['success']
|
def test_request_items_runner_fixture(testdir):
    """Make sure that pytest accepts our fixture."""
    # Generate a throwaway test module that only checks the fixture resolves.
    testdir.makepyfile(
        """
        def test_exists(request_items_runner):
            assert request_items_runner
        """
    )
    # Run pytest on the generated module with verbose output.
    outcome = testdir.runpytest("-v")
    # fnmatch_lines asserts internally that the test was collected and passed.
    outcome.stdout.fnmatch_lines(["*::test_exists PASSED*"])
    # A '0' exit code means the generated test suite ran cleanly.
    assert outcome.ret == 0
def test_help_message(testdir):
    """The plugin's CLI options are listed in pytest's --help output."""
    outcome = testdir.runpytest("--help")
    # Every option must appear under the "requests:" group; fnmatch_lines
    # performs the assertion internally.
    for option in ("*--requests-baseurl*", "*--requests-timeout*", "*--requests-extra-vars*"):
        outcome.stdout.fnmatch_lines(["requests:", option])
|
# coding=utf-8
"""Survey model definition.
"""
from django.db import models
from django.utils import timezone
from bims.models import LocationSite
class Survey(models.Model):
    """Survey model."""
    # Date the survey was carried out; timezone.now is passed as a callable
    # so the default is evaluated per record, not at class definition time.
    date = models.DateField(
        default=timezone.now
    )
    # Location sites covered by this survey (many-to-many relation).
    sites = models.ManyToManyField(LocationSite)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.