Dataset schema, one row per source file (column: type and range; nullable columns may be null):

hexsha: string (length 40)
size: int64 (1 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 239)
max_stars_repo_name: string (length 5 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 239)
max_issues_repo_name: string (length 5 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 239)
max_forks_repo_name: string (length 5 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 1 to 1.03M)
avg_line_length: float64 (1 to 958k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
hexsha: 7952b76a0e355dbd7466342c0d34c676ea5a52f1 | size: 1,850 | ext: py | lang: Python
path: sdk/python/pulumi_aws/waf/get_rate_based_rule.py | repo: JakeGinnivan/pulumi-aws | head_hexsha: c91ef78932964ac74eda7f5da81f65b0f1798c93 | licenses: ["ECL-2.0", "Apache-2.0"] (identical across the stars, issues, and forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null

content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetRateBasedRuleResult:
"""
A collection of values returned by getRateBasedRule.
"""
def __init__(__self__, id=None, name=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
The provider-assigned unique ID for this managed resource.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
class AwaitableGetRateBasedRuleResult(GetRateBasedRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRateBasedRuleResult(
id=self.id,
name=self.name)
def get_rate_based_rule(name=None,opts=None):
"""
    Retrieves the Resource ID of a WAF Rate Based Rule (`waf.RateBasedRule`).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.waf.get_rate_based_rule(name="tfWAFRateBasedRule")
```
:param str name: The name of the WAF rate based rule.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:waf/getRateBasedRule:getRateBasedRule', __args__, opts=opts).value
return AwaitableGetRateBasedRuleResult(
id=__ret__.get('id'),
name=__ret__.get('name'))
avg_line_length: 28.030303 | max_line_length: 107 | alphanum_fraction: 0.657297
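The data source above already includes a lookup example in its docstring; for completeness, here is a minimal sketch of consuming the returned `AwaitableGetRateBasedRuleResult` inside a Pulumi program. The rule name and export label are illustrative, and the code only runs within a Pulumi stack whose AWS account already contains such a rule:

```python
import pulumi
import pulumi_aws as aws

# Look up an existing rate based rule by name (illustrative name; the rule
# must already exist in the AWS account targeted by the Pulumi stack).
rule = aws.waf.get_rate_based_rule(name="tfWAFRateBasedRule")

# GetRateBasedRuleResult exposes the provider-assigned `id` and the `name`
# that was passed in; export the id as a stack output.
pulumi.export("rate_based_rule_id", rule.id)
```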
hexsha: 7952b866b5554bd0bd823fe81e42316f23fa7b9d | size: 9,267 | ext: py | lang: Python
path: myanimelist/media_list.py | repo: Zenrac/python3-mal | head_hexsha: 025444816ba23a0a9192265e87b7768d4f5488bf | licenses: ["WTFPL"] (identical across the stars, issues, and forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null

content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
import collections
import collections.abc
from lxml import etree as et
import decimal
import datetime
import urllib.request, urllib.parse, urllib.error
from . import utilities
from .base import Base, MalformedPageError, InvalidBaseError, loadable
class MalformedMediaListPageError(MalformedPageError):
pass
class InvalidMediaListError(InvalidBaseError):
pass
class MediaList(Base, collections.abc.Mapping, metaclass=abc.ABCMeta):
__id_attribute = "username"
def __getitem__(self, media):
return self.list[media]
def __contains__(self, media):
return media in self.list
def __len__(self):
return len(self.list)
def __iter__(self):
for media in self.list:
yield media
def __init__(self, session, user_name):
super(MediaList, self).__init__(session)
self.username = user_name
if not isinstance(self.username, str) or len(self.username) < 1:
raise InvalidMediaListError(self.username)
self._list = None
self._stats = None
# subclasses must define a list type, ala "anime" or "manga"
@abc.abstractproperty
def type(self):
pass
# a list verb ala "watch", "read", etc
@abc.abstractproperty
def verb(self):
pass
# a list with status ints as indices and status texts as values.
@property
def user_status_terms(self):
statuses = collections.defaultdict(lambda: 'Unknown')
statuses[1] = self.verb.capitalize() + 'ing'
statuses[2] = 'Completed'
statuses[3] = 'On-Hold'
statuses[4] = 'Dropped'
statuses[6] = 'Plan to ' + self.verb.capitalize()
return statuses
def parse_entry_media_attributes(self, soup):
"""
Args:
soup: a lxml.html.HtmlElement containing a row from the current media list
Return a dict of attributes of the media the row is about.
"""
row_info = {}
try:
start = utilities.parse_profile_date(soup.find('.//series_start').text)
except ValueError:
start = None
except:
if not self.session.suppress_parse_exceptions:
raise
if start is not None:
try:
row_info['aired'] = (start, utilities.parse_profile_date(soup.find('.//series_end').text))
except ValueError:
row_info['aired'] = (start, None)
except:
if not self.session.suppress_parse_exceptions:
raise
# look up the given media type's status terms.
status_terms = getattr(self.session, self.type)(1)._status_terms
try:
row_info['id'] = int(soup.find('.//series_' + self.type + 'db_id').text)
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['title'] = soup.find('.//series_title').text
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['status'] = status_terms[int(soup.find('.//series_status').text)]
except:
if not self.session.suppress_parse_exceptions:
raise
try:
row_info['picture'] = soup.find('.//series_image').text
except:
if not self.session.suppress_parse_exceptions:
raise
return row_info
def parse_entry(self, soup):
"""
Given:
soup: a lxml.html.HtmlElement containing a row from the current media list
Return a tuple:
(media object, dict of this row's parseable attributes)
"""
# parse the media object first.
media_attrs = self.parse_entry_media_attributes(soup)
media_id = media_attrs['id']
del media_attrs['id']
media = getattr(self.session, self.type)(media_id).set(media_attrs)
entry_info = {}
try:
entry_info['started'] = utilities.parse_profile_date(soup.find('.//my_start_date').text)
except ValueError:
entry_info['started'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['finished'] = utilities.parse_profile_date(soup.find('.//my_finish_date').text)
except ValueError:
entry_info['finished'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['status'] = self.user_status_terms[int(soup.find('.//my_status').text)]
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['score'] = int(soup.find('.//my_score').text)
# if user hasn't set a score, set it to None to indicate as such.
if entry_info['score'] == 0:
entry_info['score'] = None
except:
if not self.session.suppress_parse_exceptions:
raise
try:
entry_info['last_updated'] = datetime.datetime.fromtimestamp(int(soup.find('.//my_last_updated').text))
except:
if not self.session.suppress_parse_exceptions:
raise
return media, entry_info
def parse_stats(self, soup):
"""
Given:
soup: a lxml.etree element containing the current media list's stats
Return a dict of this media list's stats.
"""
stats = {}
for row in soup.getchildren():
try:
key = row.tag.replace('user_', '')
if key == 'id':
stats[key] = int(row.text)
elif key == 'name':
stats[key] = row.text
elif key == self.verb + 'ing':
try:
stats[key] = int(row.text)
except ValueError:
stats[key] = 0
elif key == 'completed':
try:
stats[key] = int(row.text)
except ValueError:
stats[key] = 0
elif key == 'onhold':
try:
stats['on_hold'] = int(row.text)
except ValueError:
stats[key] = 0
elif key == 'dropped':
try:
stats[key] = int(row.text)
except ValueError:
stats[key] = 0
elif key == 'planto' + self.verb:
try:
stats['plan_to_' + self.verb] = int(row.text)
except ValueError:
stats[key] = 0
# for some reason, MAL doesn't substitute 'read' in for manga for the verb here
elif key == 'days_spent_watching':
try:
stats['days_spent'] = decimal.Decimal(row.text)
except decimal.InvalidOperation:
stats[key] = decimal.Decimal(0)
except:
if not self.session.suppress_parse_exceptions:
raise
return stats
def parse(self, xml):
list_info = {}
list_page = et.fromstring(xml.encode())
primary_elt = list_page
if primary_elt is None:
raise MalformedMediaListPageError(self.username, xml,
message="Could not find root XML element in " + self.type + " list")
bad_username_elt = list_page.find('.//error')
if bad_username_elt is not None:
raise InvalidMediaListError(self.username, message="Invalid username when fetching " + self.type + " list")
stats_elt = list_page.find('.//myinfo')
if stats_elt is None and not utilities.check_if_mal_response_is_empty(list_page):
raise MalformedMediaListPageError(self.username, xml,
message="Could not find stats element in " + self.type + " list")
if utilities.check_if_mal_response_is_empty(list_page):
raise InvalidMediaListError(self.username, message="Empty result set when fetching " + self.type + " list")
list_info['stats'] = self.parse_stats(stats_elt)
list_info['list'] = {}
for row in list_page.findall(".//%s" % self.type):
(media, entry) = self.parse_entry(row)
list_info['list'][media] = entry
return list_info
def load(self):
media_list = self.session.session.get('https://myanimelist.net/malappinfo.php?' + urllib.parse.urlencode(
{'u': self.username, 'status': 'all', 'type': self.type})).text
self.set(self.parse(media_list))
return self
@property
@loadable('load')
def list(self):
return self._list
@property
@loadable('load')
def stats(self):
return self._stats
def section(self, status):
return {k: self.list[k] for k in self.list if self.list[k]['status'] == status}
avg_line_length: 33.576087 | max_line_length: 119 | alphanum_fraction: 0.553038
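`MediaList` is abstract: a concrete subclass only has to supply `type` and `verb`, and it inherits the mapping behaviour (`__getitem__`, iteration, `section`, lazy `load`) shown above. Below is a minimal sketch of such a subclass; the class name and the commented usage lines are illustrative and not part of this file, and the session object is assumed to come from the surrounding python3-mal library:

```python
from myanimelist.media_list import MediaList


class AnimeList(MediaList):
    """Hypothetical subclass: MAL anime lists use media type 'anime' and the
    list verb 'watch', so user statuses become 'Watching', 'Completed', etc."""

    @property
    def type(self):
        return "anime"

    @property
    def verb(self):
        return "watch"


# `session` would be a python3-mal Session object (not defined in this file);
# MediaList expects it to expose `.anime(...)`, `.suppress_parse_exceptions`,
# and a requests-like `.session` used by load().
#
#   anime_list = AnimeList(session, "some_username")
#   completed = anime_list.section("Completed")
#   for anime, entry in completed.items():
#       print(anime, entry["score"])
```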
hexsha: 7952b885f3aef57c07eba85abd55ff979228c601 | size: 40,477 | ext: py | lang: Python
max_stars: path onmt/opts.py | repo biomed-AI/PROTAC-RL | head d682c0d517a2ff8362e5a4a67bb297773daaf5bf | licenses ["MIT"] | count 1 | events 2022-02-15T00:54:34.000Z to 2022-02-15T00:54:34.000Z
max_issues: path onmt/opts.py | repo TanYoh/PROTAC-RL | head a8074af6afa478404a3a04da878eb0704e3ec984 | licenses ["MIT"] | count null | event datetimes null
max_forks: path onmt/opts.py | repo TanYoh/PROTAC-RL | head a8074af6afa478404a3a04da878eb0704e3ec984 | licenses ["MIT"] | count null | event datetimes null

content:
""" Implementation of all available options """
from __future__ import print_function
import argparse
from onmt.models.sru import CheckSRU
def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Embedding Options
group = parser.add_argument_group('Model-Embeddings')
group.add_argument('-src_word_vec_size', type=int, default=500,
help='Word embedding size for src.')
group.add_argument('-tgt_word_vec_size', type=int, default=500,
help='Word embedding size for tgt.')
group.add_argument('-word_vec_size', type=int, default=-1,
help='Word embedding size for src and tgt.')
group.add_argument('-share_decoder_embeddings', action='store_true',
help="""Use a shared weight matrix for the input and
output word embeddings in the decoder.""")
group.add_argument('-share_embeddings', action='store_true',
help="""Share the word embeddings between encoder
and decoder. Need to use shared dictionary for this
option.""")
group.add_argument('-position_encoding', action='store_true',
help="""Use a sin to mark relative words positions.
Necessary for non-RNN style models.
""")
group = parser.add_argument_group('Model-Embedding Features')
group.add_argument('-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help="""Merge action for incorporating features embeddings.
Options [concat|sum|mlp].""")
group.add_argument('-feat_vec_size', type=int, default=-1,
help="""If specified, feature embedding sizes
will be set to this. Otherwise, feat_vec_exponent
will be used.""")
group.add_argument('-feat_vec_exponent', type=float, default=0.7,
help="""If -feat_merge_size is not set, feature
embedding sizes will be set to N^feat_vec_exponent
where N is the number of values the feature takes.""")
# Encoder-Decoder Options
group = parser.add_argument_group('Model- Encoder-Decoder')
group.add_argument('-model_type', default='text',
help="""Type of source model to use. Allows
the system to incorporate non-text inputs.
Options are [text|img|audio].""")
group.add_argument('-encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="""Type of encoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|brnn|mean|transformer|cnn].""")
group.add_argument('-decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help="""Type of decoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|transformer|cnn].""")
group.add_argument('-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
group.add_argument('-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
group.add_argument('-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add_argument('-rnn_size', type=int, default=-1,
help="""Size of rnn hidden states. Overwrites
enc_rnn_size and dec_rnn_size""")
group.add_argument('-enc_rnn_size', type=int, default=500,
help="""Size of encoder rnn hidden states.
Must be equal to dec_rnn_size except for
speech-to-text.""")
group.add_argument('-dec_rnn_size', type=int, default=500,
help="""Size of decoder rnn hidden states.
Must be equal to enc_rnn_size except for
speech-to-text.""")
group.add_argument('-audio_enc_pooling', type=str, default='1',
help="""The amount of pooling of audio encoder,
either the same amount of pooling across all layers
indicated by a single number, or different amounts of
pooling per layer separated by comma.""")
group.add_argument('-cnn_kernel_width', type=int, default=3,
help="""Size of windows in the cnn, the kernel_size is
(cnn_kernel_width, 1) in conv layer""")
group.add_argument('-input_feed', type=int, default=1,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
group.add_argument('-bridge', action="store_true",
help="""Have an additional layer between the last encoder
state and the first decoder state""")
group.add_argument('-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="""The gate type to use in the RNNs""")
# group.add_argument('-residual', action="store_true",
# help="Add residual connections between RNN layers.")
group.add_argument('-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
group.add_argument('-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="""Type of context gate to use.
Do not select for no context gate.""")
# Attention options
group = parser.add_argument_group('Model- Attention')
group.add_argument('-global_attention', type=str, default='general',
choices=['dot', 'general', 'mlp'],
help="""The attention type to use:
dotprod or general (Luong) or MLP (Bahdanau)""")
group.add_argument('-global_attention_function', type=str,
default="softmax", choices=["softmax", "sparsemax"])
group.add_argument('-self_attn_type', type=str, default="scaled-dot",
help="""Self attention type in Transformer decoder
layer -- currently "scaled-dot" or "average" """)
group.add_argument('-heads', type=int, default=8,
help='Number of heads for transformer self-attention')
group.add_argument('-transformer_ff', type=int, default=2048,
help='Size of hidden transformer feed-forward')
# Generator and loss options.
group.add_argument('-copy_attn', action="store_true",
help='Train copy attention layer.')
group.add_argument('-generator_function', default="log_softmax",
choices=["log_softmax", "sparsemax"],
help="""Which function to use for generating
probabilities over the target vocabulary (choices:
log_softmax, sparsemax)""")
group.add_argument('-copy_attn_force', action="store_true",
help='When available, train to copy.')
group.add_argument('-reuse_copy_attn', action="store_true",
help="Reuse standard attention for copy")
group.add_argument('-copy_loss_by_seqlength', action="store_true",
help="Divide copy loss by length of sequence")
group.add_argument('-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
group.add_argument('-lambda_coverage', type=float, default=1,
help='Lambda value for coverage.')
def preprocess_opts(parser):
""" Pre-procesing options """
# Data options
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="""Type of the source input.
Options are [text|img].""")
group.add_argument('-train_src', required=True,
help="Path to the training source data")
group.add_argument('-train_tgt', required=True,
help="Path to the training target data")
group.add_argument('-valid_src', required=True,
help="Path to the validation source data")
group.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
group.add_argument('-src_dir', default="",
help="Source directory for image or audio files.")
group.add_argument('-save_data', required=True,
help="Output file for the prepared data")
group.add_argument('-max_shard_size', type=int, default=0,
help="""Deprecated use shard_size instead""")
group.add_argument('-shard_size', type=int, default=1000000,
help="""Divide src_corpus and tgt_corpus into
                       smaller multiple src_corpus and tgt_corpus files, then
build shards, each shard will have
opt.shard_size samples except last shard.
shard_size=0 means no segmentation
shard_size>0 means segment dataset into multiple shards,
each shard has shard_size samples""")
# Dictionary options, for text corpus
group = parser.add_argument_group('Vocab')
group.add_argument('-src_vocab', default="",
help="""Path to an existing source vocabulary. Format:
one word per line.""")
group.add_argument('-tgt_vocab', default="",
help="""Path to an existing target vocabulary. Format:
one word per line.""")
group.add_argument('-features_vocabs_prefix', type=str, default='',
help="Path prefix to existing features vocabularies")
group.add_argument('-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
group.add_argument('-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
group.add_argument('-src_words_min_frequency', type=int, default=0)
group.add_argument('-tgt_words_min_frequency', type=int, default=0)
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
# Truncation options, for text corpus
group = parser.add_argument_group('Pruning')
group.add_argument('-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
group.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
group.add_argument('-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
group.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
group.add_argument('-lower', action='store_true', help='lowercase data')
# Data processing options
group = parser.add_argument_group('Random')
group.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
group.add_argument('-seed', type=int, default=3435,
help="Random seed")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
# Options most relevant to speech
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add_argument('-window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add_argument('-window', default='hamming',
help="Window type for spectrogram generation.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""")
def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add_argument('-data', required=True,
help="""Path prefix to the ".train.pt" and
".valid.pt" file path from preprocess.py""")
group.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_N.pt where N is the number
of steps""")
group.add_argument('-save_checkpoint_steps', type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add_argument('-keep_checkpoint', type=int, default=-1,
help="""Keep X checkpoints (negative: keep all)""")
# GPU
group.add_argument('-gpuid', default=[], nargs='+', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,
help="list of ranks of each process.")
group.add_argument('-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add_argument('-gpu_backend', default='nccl', nargs='+', type=str,
help="Type of torch distributed backend")
group.add_argument('-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add_argument('-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add_argument('-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add_argument('-seed', type=int, default=-1,
help="""Random seed used for the experiments
reproducibility.""")
# Init options
group = parser.add_argument_group('Initialization')
group.add_argument('-param_init', type=float, default=0.1,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init).
Use 0 to not use initialization""")
group.add_argument('-param_init_glorot', action='store_true',
help="""Init parameters with xavier_uniform.
                       Required for transformer.""")
group.add_argument('-train_from', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model's state_dict.""")
# Pretrained word vectors
group.add_argument('-pre_word_vecs_enc',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the encoder side.
See README for specific formatting instructions.""")
group.add_argument('-pre_word_vecs_dec',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the decoder side.
See README for specific formatting instructions.""")
# Fixed word vectors
group.add_argument('-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add_argument('-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add_argument('-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add_argument('-batch_type', default='sents',
choices=["sents", "tokens"],
help="""Batch grouping for batch_size. Standard
is sents. Tokens will do dynamic batching""")
group.add_argument('-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add_argument('-accum_count', type=int, default=1,
help="""Accumulate gradient this many times.
Approximately equivalent to updating
batch_size * accum_count batches at once.
Recommended for Transformer.""")
group.add_argument('-valid_steps', type=int, default=10000,
                       help='Perform validation every X steps')
group.add_argument('-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add_argument('-max_generator_batches', type=int, default=32,
help="""Maximum batches of words in a sequence to run
the generator on in parallel. Higher is faster, but
uses more memory.""")
group.add_argument('-train_steps', type=int, default=100000,
help='Number of training steps')
group.add_argument('-epochs', type=int, default=0,
help='Deprecated epochs see train_steps')
group.add_argument('-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam'],
help="""Optimization method.""")
group.add_argument('-adagrad_accumulator_init', type=float, default=0,
help="""Initializes the accumulator values in adagrad.
Mirrors the initial_accumulator_value option
in the tensorflow adagrad (use 0.1 for their default).
""")
group.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to
max_grad_norm""")
group.add_argument('-dropout', type=float, default=0.3,
help="Dropout probability; applied in LSTM stacks.")
group.add_argument('-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add_argument('-adam_beta1', type=float, default=0.9,
help="""The beta1 parameter used by Adam.
Almost without exception a value of 0.9 is used in
the literature, seemingly giving good results,
so we would discourage changing this value from
the default without due consideration.""")
group.add_argument('-adam_beta2', type=float, default=0.999,
help="""The beta2 parameter used by Adam.
Typically a value of 0.999 is recommended, as this is
the value suggested by the original paper describing
Adam, and is also the value adopted in other frameworks
                       such as TensorFlow and Keras, i.e. see:
https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/ .
Whereas recently the paper "Attention is All You Need"
suggested a value of 0.98 for beta2, this parameter may
not work well for normal models / default
baselines.""")
group.add_argument('-label_smoothing', type=float, default=0.0,
help="""Label smoothing value epsilon.
Probabilities of all non-true labels
will be smoothed by epsilon / (vocab_size - 1).
Set to zero to turn off label smoothing.
For more detailed information, see:
https://arxiv.org/abs/1512.00567""")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate.
Recommended settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
group.add_argument('-learning_rate_decay', type=float, default=0.5,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) steps have gone past
start_decay_steps""")
group.add_argument('-start_decay_steps', type=int, default=50000,
help="""Start decaying every decay_steps after
start_decay_steps""")
group.add_argument('-decay_steps', type=int, default=10000,
help="""Decay every decay_steps""")
group.add_argument('-decay_method', type=str, default="",
choices=['noam', 'fixed'], help="Use a custom decay rate.")
group.add_argument('-warmup_steps', type=int, default=4000,
help="""Number of warmup steps for custom decay.""")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=50,
help="Print stats at this interval.")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-exp_host', type=str, default="",
help="Send logs to this crayon server.")
group.add_argument('-exp', type=str, default="",
help="Name of the experiment for logging.")
# Use TensorboardX for visualization during training
group.add_argument('-tensorboard', action="store_true",
help="""Use tensorboardX for visualization during training.
Must have the library tensorboardX.""")
group.add_argument("-tensorboard_log_dir", type=str,
default="runs/onmt",
help="""Log directory for Tensorboard.
This is also the name of the run.
""")
group = parser.add_argument_group('Speech')
# Options most relevant to speech
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""")
def translate_opts(parser):
""" Translation / inference options """
group = parser.add_argument_group('Model')
group.add_argument('-model', dest='models', metavar='MODEL',
nargs='+', type=str, default=[], required=True,
help='Path to model .pt file(s). '
'Multiple models can be specified, '
'for ensemble decoding.')
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="Type of the source input. Options: [text|img].")
group.add_argument('-src', required=True,
help="""Source sequence to decode (one line per
sequence)""")
group.add_argument('-src_dir', default="",
help='Source directory for image or audio files')
group.add_argument('-tgt',
help='True target sequence (optional)')
group.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                       be the decoded sequence)""")
group.add_argument('-log_probs', action='store_true',
help="Save log probs of predictions, use -output + '_log_probs' ")
group.add_argument('-report_bleu', action='store_true',
help="""Report bleu score after translation,
call tools/multi-bleu.perl on command line""")
group.add_argument('-report_rouge', action='store_true',
help="""Report rouge 1/2/3/L/SU4 score after translation
call tools/test_rouge.py on command line""")
# Options most relevant to summarization.
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
group = parser.add_argument_group('Beam')
group.add_argument('-fast', action="store_true",
help="""Use fast beam search (some features may not be
supported!)""")
group.add_argument('-beam_size', type=int, default=5,
help='Beam size')
group.add_argument('-min_length', type=int, default=0,
help='Minimum prediction length')
group.add_argument('-max_length', type=int, default=100,
help='Maximum prediction length.')
group.add_argument('-max_sent_length', action=DeprecateAction,
help="Deprecated, use `-max_length` instead")
group.add_argument('-mask_from', default='',
help="""Path to mask.
Help to predict only valid tokens""")
# Alpha and Beta values for Google Length + Coverage penalty
# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
group.add_argument('-stepwise_penalty', action='store_true',
help="""Apply penalty at every decoding step.
Helpful for summary penalty.""")
group.add_argument('-length_penalty', default='none',
choices=['none', 'wu', 'avg'],
help="""Length Penalty to use.""")
group.add_argument('-coverage_penalty', default='none',
choices=['none', 'wu', 'summary'],
help="""Coverage Penalty to use.""")
group.add_argument('-alpha', type=float, default=0.,
help="""Google NMT length penalty parameter
(higher = longer generation)""")
group.add_argument('-beta', type=float, default=-0.,
help="""Coverage penalty parameter""")
group.add_argument('-block_ngram_repeat', type=int, default=0,
help='Block repetition of ngrams during decoding.')
group.add_argument('-ignore_when_blocking', nargs='+', type=str,
default=[],
help="""Ignore these strings when blocking repeats.
You want to block sentence delimiters.""")
group.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the
source token that had highest attention weight. If
phrase_table is provided, it will lookup the
identified source token and give the corresponding
target token. If it is not provided(or the identified
source token does not exist in the table) then it
will copy the source token""")
group = parser.add_argument_group('Logging')
group.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-attn_debug', action="store_true",
help='Print best attn for each word')
group.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
group.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
group = parser.add_argument_group('Efficiency')
group.add_argument('-batch_size', type=int, default=30,
help='Batch size')
group.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to speech.
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help='Window size for spectrogram in seconds')
group.add_argument('-window_stride', type=float, default=.01,
help='Window stride for spectrogram in seconds')
group.add_argument('-window', default='hamming',
help='Window type for spectrogram generation')
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""")
def agent_opts(parser):
""" Train Agent / inference options """
group = parser.add_argument_group('Agent Model')
group.add_argument('-pred_rewrite', action='store_true',
default=False,
help="""Use a sin to mark relative words positions.
Necessary for non-RNN style models.
""")
group.add_argument('-src_type', default='L', type=str,
help="L or N,which help us cut src without L_* using for score function.")
group.add_argument('-scoring_function', type=str, default="tanimoto",
help='Define scoring function.')
parser.add_argument('-score_function_num_processes', type=int, default=0,
help='multi-thread number for score calculation')
parser.add_argument('-score_para_k', type=float, default=0.8,
help='multi-thread number for score calculation')
parser.add_argument('-score_para_w', type=float, default=0.0,
help='multi-thread number for score calculation')
parser.add_argument('-score_para_clf', type=str, default='./data/clf_jak3_active.pkl',
help='multi-thread number for score calculation')
group.add_argument('-sigma', type=int, default=-1,
help='scalar sigma for scoring function')
group.add_argument('-goal', type=float, default=0.0,
help='goal for scoring function')
group.add_argument('-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam'],
help="""Optimization method.""")
group.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_N.pt where N is the number
of steps""")
group.add_argument('-adam_beta1', type=float, default=0.9,
help="""The beta1 parameter used by Adam.
Almost without exception a value of 0.9 is used in
the literature, seemingly giving good results,
so we would discourage changing this value from
the default without due consideration.""")
group.add_argument('-adam_beta2', type=float, default=0.999,
help="""The beta2 parameter used by Adam.
Typically a value of 0.999 is recommended, as this is
the value suggested by the original paper describing
Adam, and is also the value adopted in other frameworks
                       such as TensorFlow and Keras, i.e. see:
https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/ .
Whereas recently the paper "Attention is All You Need"
suggested a value of 0.98 for beta2, this parameter may
not work well for normal models / default
baselines.""")
group.add_argument('-label_smoothing', type=float, default=0.0,
help="""Label smoothing value epsilon.
Probabilities of all non-true labels
will be smoothed by epsilon / (vocab_size - 1).
Set to zero to turn off label smoothing.
For more detailed information, see:
https://arxiv.org/abs/1512.00567""")
group.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate.
Recommended settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
group.add_argument('-decay_method', type=str, default="",
choices=['noam', 'fixed'], help="Use a custom decay rate.")
group.add_argument('-warmup_steps', type=int, default=4000,
help="""Number of warmup steps for custom decay.""")
group.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add_argument('-save_checkpoint_steps', type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add_argument('-keep_checkpoint', type=int, default=-1,
help="""Keep X checkpoints (negative: keep all)""")
group.add_argument('-train_steps', type=int, default=100000,
help='Number of training steps')
group.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to
max_grad_norm""")
group.add_argument('-start_decay_steps', type=int, default=1000,
help="""Start decaying every decay_steps after""")
group.add_argument('-decay_steps', type=int, default=500,
help="""Decay every decay_steps""")
group.add_argument('-learning_rate_decay', type=float, default=0.5,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) steps have gone past
start_decay_steps""")
group.add_argument('-adagrad_accumulator_init', type=float, default=0,
help="""Initializes the accumulator values in adagrad.
Mirrors the initial_accumulator_value option
in the tensorflow adagrad (use 0.1 for their default).
""")
group.add_argument('-rnn_size', type=int, default=-1,
help="""Size of rnn hidden states. Overwrites
enc_rnn_size and dec_rnn_size""")
group.add_argument('-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add_argument('-max_generator_batches', type=int, default=32,
help="""Maximum batches of words in a sequence to run
the generator on in parallel. Higher is faster, but
uses more memory.""")
group.add_argument('-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,
help="list of ranks of each process.")
group.add_argument('-seed', type=int, default=1024,
help="""Random seed used for the experiments
reproducibility.""")
def add_md_help_argument(parser):
""" md help parser """
parser.add_argument('-md', action=MarkdownHelpAction,
help='print Markdown-formatted help text and exit.')
# MARKDOWN boilerplate
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class MarkdownHelpFormatter(argparse.HelpFormatter):
"""A really bare-bones argparse help formatter that generates valid markdown.
This will generate something like:
usage
# **section heading**:
## **--argument-one**
```
argument-one help text
```
"""
def _format_usage(self, usage, actions, groups, prefix):
return ""
def format_help(self):
print(self._prog)
self._root_section.heading = '# Options: %s' % self._prog
return super(MarkdownHelpFormatter, self).format_help()
def start_section(self, heading):
super(MarkdownHelpFormatter, self) \
.start_section('### **%s**' % heading)
def _format_action(self, action):
if action.dest == "help" or action.dest == "md":
return ""
lines = []
lines.append('* **-%s %s** ' % (action.dest,
"[%s]" % action.default
if action.default else "[]"))
if action.help:
help_text = self._expand_help(action)
lines.extend(self._split_lines(help_text, 80))
lines.extend(['', ''])
return '\n'.join(lines)
class MarkdownHelpAction(argparse.Action):
""" MD help action """
def __init__(self, option_strings,
dest=argparse.SUPPRESS, default=argparse.SUPPRESS,
**kwargs):
super(MarkdownHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser.formatter_class = MarkdownHelpFormatter
parser.print_help()
parser.exit()
class DeprecateAction(argparse.Action):
""" Deprecate action """
def __init__(self, option_strings, dest, help=None, **kwargs):
super(DeprecateAction, self).__init__(option_strings, dest, nargs=0,
help=help, **kwargs)
def __call__(self, parser, namespace, values, flag_name):
        help = self.help if self.help is not None else ""
msg = "Flag '%s' is deprecated. %s" % (flag_name, help)
raise argparse.ArgumentTypeError(msg)
avg_line_length: 53.329381 | max_line_length: 97 | alphanum_fraction: 0.572226
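These option groups are designed to be composed onto an ordinary `argparse.ArgumentParser`: a script registers only the groups it needs and then parses as usual. A minimal sketch, assuming the file is importable as `onmt.opts` with the rest of the OpenNMT package installed; the argument values are illustrative:

```python
import argparse

import onmt.opts as opts

parser = argparse.ArgumentParser(description="train.py")

# Register the option groups defined above; model_opts and train_opts
# define disjoint flags, so they can share one parser.
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)

# -data is the only required training flag; everything else falls back to
# the defaults declared in the groups.
opt = parser.parse_args(["-data", "demo", "-save_model", "demo-model"])
print(opt.encoder_type, opt.enc_rnn_size, opt.train_steps)
```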
hexsha: 7952b8a671b44307164f1eff8fdc3c70ecbe3a59 | size: 18,263 | ext: py | lang: Python
path: openfold/model/template.py | repo: aqlaboratory/openfold | head_hexsha: d6b36a80e3ee82eee8ac09215fce553d822f86a3 | licenses: ["Apache-2.0"] (identical across the stars, issues, and forks column groups)
max_stars_count: 789 | events 2021-11-12T16:12:21.000Z to 2022-03-28T05:45:19.000Z
max_issues_count: 84 | events 2021-11-12T22:23:50.000Z to 2022-03-29T01:06:06.000Z
max_forks_count: 114 | events 2021-11-12T16:00:57.000Z to 2022-03-27T21:32:31.000Z

content:
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import math
from typing import Optional, List
import torch
import torch.nn as nn
from openfold.model.primitives import Linear, LayerNorm, Attention
from openfold.model.dropout import (
DropoutRowwise,
DropoutColumnwise,
)
from openfold.model.pair_transition import PairTransition
from openfold.model.triangular_attention import (
TriangleAttentionStartingNode,
TriangleAttentionEndingNode,
)
from openfold.model.triangular_multiplicative_update import (
TriangleMultiplicationOutgoing,
TriangleMultiplicationIncoming,
)
from openfold.utils.checkpointing import checkpoint_blocks
from openfold.utils.feats import (
build_template_angle_feat,
build_template_pair_feat,
)
from openfold.utils.tensor_utils import (
add,
chunk_layer,
ChunkSizeTuner,
permute_final_dims,
flatten_final_dims,
tensor_tree_map,
)
class TemplatePointwiseAttention(nn.Module):
"""
Implements Algorithm 17.
"""
def __init__(self, c_t, c_z, c_hidden, no_heads, inf, **kwargs):
"""
Args:
c_t:
Template embedding channel dimension
c_z:
Pair embedding channel dimension
c_hidden:
Hidden channel dimension
"""
super(TemplatePointwiseAttention, self).__init__()
self.c_t = c_t
self.c_z = c_z
self.c_hidden = c_hidden
self.no_heads = no_heads
self.inf = inf
self.mha = Attention(
self.c_z,
self.c_t,
self.c_t,
self.c_hidden,
self.no_heads,
gating=False,
)
def _chunk(self,
z: torch.Tensor,
t: torch.Tensor,
biases: List[torch.Tensor],
chunk_size: int,
use_lma: bool = False,
) -> torch.Tensor:
mha_inputs = {
"q_x": z,
"kv_x": t,
"biases": biases,
}
return chunk_layer(
partial(self.mha, use_lma=use_lma),
mha_inputs,
chunk_size=chunk_size,
no_batch_dims=len(z.shape[:-2]),
)
def forward(self,
t: torch.Tensor,
z: torch.Tensor,
template_mask: Optional[torch.Tensor] = None,
# This module suffers greatly from a small chunk size
chunk_size: Optional[int] = 256,
use_lma: bool = False,
) -> torch.Tensor:
"""
Args:
t:
[*, N_templ, N_res, N_res, C_t] template embedding
z:
[*, N_res, N_res, C_t] pair embedding
template_mask:
[*, N_templ] template mask
Returns:
[*, N_res, N_res, C_z] pair embedding update
"""
if template_mask is None:
template_mask = t.new_ones(t.shape[:-3])
bias = self.inf * (template_mask[..., None, None, None, None, :] - 1)
# [*, N_res, N_res, 1, C_z]
z = z.unsqueeze(-2)
# [*, N_res, N_res, N_temp, C_t]
t = permute_final_dims(t, (1, 2, 0, 3))
# [*, N_res, N_res, 1, C_z]
biases = [bias]
if chunk_size is not None and not self.training:
z = self._chunk(z, t, biases, chunk_size, use_lma=use_lma)
else:
z = self.mha(q_x=z, kv_x=t, biases=biases, use_lma=use_lma)
# [*, N_res, N_res, C_z]
z = z.squeeze(-2)
return z
class TemplatePairStackBlock(nn.Module):
def __init__(
self,
c_t: int,
c_hidden_tri_att: int,
c_hidden_tri_mul: int,
no_heads: int,
pair_transition_n: int,
dropout_rate: float,
inf: float,
**kwargs,
):
super(TemplatePairStackBlock, self).__init__()
self.c_t = c_t
self.c_hidden_tri_att = c_hidden_tri_att
self.c_hidden_tri_mul = c_hidden_tri_mul
self.no_heads = no_heads
self.pair_transition_n = pair_transition_n
self.dropout_rate = dropout_rate
self.inf = inf
self.dropout_row = DropoutRowwise(self.dropout_rate)
self.dropout_col = DropoutColumnwise(self.dropout_rate)
self.tri_att_start = TriangleAttentionStartingNode(
self.c_t,
self.c_hidden_tri_att,
self.no_heads,
inf=inf,
)
self.tri_att_end = TriangleAttentionEndingNode(
self.c_t,
self.c_hidden_tri_att,
self.no_heads,
inf=inf,
)
self.tri_mul_out = TriangleMultiplicationOutgoing(
self.c_t,
self.c_hidden_tri_mul,
)
self.tri_mul_in = TriangleMultiplicationIncoming(
self.c_t,
self.c_hidden_tri_mul,
)
self.pair_transition = PairTransition(
self.c_t,
self.pair_transition_n,
)
def forward(self,
z: torch.Tensor,
mask: torch.Tensor,
chunk_size: Optional[int] = None,
use_lma: bool = False,
_mask_trans: bool = True,
_inplace: bool = False,
):
single_templates = [
t.unsqueeze(-4) for t in torch.unbind(z, dim=-4)
]
single_templates_masks = [
m.unsqueeze(-3) for m in torch.unbind(mask, dim=-3)
]
for i in range(len(single_templates)):
single = single_templates[i]
single_mask = single_templates_masks[i]
single = add(single,
self.dropout_row(
self.tri_att_start(
single,
chunk_size=chunk_size,
mask=single_mask,
use_lma=use_lma,
)
),
_inplace,
)
single = add(single,
self.dropout_col(
self.tri_att_end(
single,
chunk_size=chunk_size,
mask=single_mask,
use_lma=use_lma,
)
),
_inplace,
)
tmu_update = self.tri_mul_out(
single,
mask=single_mask,
_inplace=_inplace,
_add_with_inplace=True,
)
if(not _inplace):
single = single + self.dropout_row(tmu_update)
else:
single = tmu_update
del tmu_update
tmu_update = self.tri_mul_in(
single,
mask=single_mask,
_inplace=_inplace,
_add_with_inplace=True,
)
if(not _inplace):
single = single + self.dropout_row(tmu_update)
else:
single = tmu_update
del tmu_update
single = add(single,
self.pair_transition(
single,
mask=single_mask if _mask_trans else None,
chunk_size=chunk_size,
),
_inplace,
)
if(not _inplace):
single_templates[i] = single
if(not _inplace):
z = torch.cat(single_templates, dim=-4)
return z
class TemplatePairStack(nn.Module):
"""
Implements Algorithm 16.
"""
def __init__(
self,
c_t,
c_hidden_tri_att,
c_hidden_tri_mul,
no_blocks,
no_heads,
pair_transition_n,
dropout_rate,
blocks_per_ckpt,
tune_chunk_size: bool = False,
inf=1e9,
**kwargs,
):
"""
Args:
c_t:
Template embedding channel dimension
c_hidden_tri_att:
Per-head hidden dimension for triangular attention
            c_hidden_tri_mul:
Hidden dimension for triangular multiplication
no_blocks:
Number of blocks in the stack
pair_transition_n:
Scale of pair transition (Alg. 15) hidden dimension
dropout_rate:
Dropout rate used throughout the stack
blocks_per_ckpt:
Number of blocks per activation checkpoint. None disables
activation checkpointing
"""
super(TemplatePairStack, self).__init__()
self.blocks_per_ckpt = blocks_per_ckpt
self.blocks = nn.ModuleList()
for _ in range(no_blocks):
block = TemplatePairStackBlock(
c_t=c_t,
c_hidden_tri_att=c_hidden_tri_att,
c_hidden_tri_mul=c_hidden_tri_mul,
no_heads=no_heads,
pair_transition_n=pair_transition_n,
dropout_rate=dropout_rate,
inf=inf,
)
self.blocks.append(block)
self.layer_norm = LayerNorm(c_t)
self.tune_chunk_size = tune_chunk_size
self.chunk_size_tuner = None
if(tune_chunk_size):
self.chunk_size_tuner = ChunkSizeTuner()
def forward(
self,
t: torch.tensor,
mask: torch.tensor,
chunk_size: int,
use_lma: bool = False,
_mask_trans: bool = True,
):
"""
Args:
t:
[*, N_templ, N_res, N_res, C_t] template embedding
mask:
[*, N_templ, N_res, N_res] mask
Returns:
[*, N_templ, N_res, N_res, C_t] template embedding update
"""
if(mask.shape[-3] == 1):
expand_idx = list(mask.shape)
expand_idx[-3] = t.shape[-4]
mask = mask.expand(*expand_idx)
blocks = [
partial(
b,
mask=mask,
chunk_size=chunk_size,
use_lma=use_lma,
_mask_trans=_mask_trans,
_inplace=not (self.training or torch.is_grad_enabled()),
)
for b in self.blocks
]
if(chunk_size is not None and self.chunk_size_tuner is not None):
chunk_size = self.chunk_size_tuner.tune_chunk_size(
representative_fn=blocks[0],
args=(t,),
min_chunk_size=chunk_size,
)
blocks = [partial(b, chunk_size=chunk_size) for b in blocks]
t, = checkpoint_blocks(
blocks=blocks,
args=(t,),
blocks_per_ckpt=self.blocks_per_ckpt if self.training else None,
)
t = self.layer_norm(t)
return t
def embed_templates_offload(
model,
batch,
z,
pair_mask,
templ_dim,
template_chunk_size=256,
):
"""
Args:
model:
An AlphaFold model object
batch:
An AlphaFold input batch. See documentation of AlphaFold.
z:
A [*, N, N, C_z] pair embedding
pair_mask:
A [*, N, N] pair mask
templ_dim:
The template dimension of the template tensors in batch
template_chunk_size:
Integer value controlling how quickly the offloaded pair embedding
tensor is brought back into GPU memory. In dire straits, can be
lowered to reduce memory consumption of this function even more.
Returns:
A dictionary of template pair and angle embeddings.
A version of the "embed_templates" method of the AlphaFold class that
offloads the large template pair tensor to CPU. Slower but more frugal
with GPU memory than the original. Useful for long-sequence inference.
"""
inplace_safe = not (model.training or torch.is_grad_enabled())
# Embed the templates one at a time (with a poor man's vmap)
pair_embeds_cpu = []
n = z.shape[-2]
n_templ = batch["template_aatype"].shape[templ_dim]
for i in range(n_templ):
idx = batch["template_aatype"].new_tensor(i)
single_template_feats = tensor_tree_map(
lambda t: torch.index_select(t, templ_dim, idx),
batch,
)
# [*, N, N, C_t]
t = build_template_pair_feat(
single_template_feats,
use_unit_vector=model.config.template.use_unit_vector,
inf=model.config.template.inf,
eps=model.config.template.eps,
**model.config.template.distogram,
).to(z.dtype)
t = model.template_pair_embedder(t)
# [*, 1, N, N, C_z]
t = model.template_pair_stack(
t,
pair_mask.unsqueeze(-3).to(dtype=z.dtype),
chunk_size=model.globals.chunk_size,
use_lma=model.globals.use_lma,
_mask_trans=model.config._mask_trans,
)
pair_embeds_cpu.append(t.cpu())
del t
# Preallocate the output tensor
t = z.new_zeros(z.shape)
for i in range(0, n, template_chunk_size):
pair_chunks = [
p[..., i: i + template_chunk_size, :, :] for p in pair_embeds_cpu
]
pair_chunk = torch.cat(pair_chunks, dim=templ_dim).to(device=z.device)
z_chunk = z[..., i: i + template_chunk_size, :, :]
att_chunk = model.template_pointwise_att(
pair_chunk,
z_chunk,
template_mask=batch["template_mask"].to(dtype=z.dtype),
use_lma=model.globals.use_lma,
)
t[..., i: i + template_chunk_size, :, :] = att_chunk
del pair_chunks
if(inplace_safe):
t = t * (torch.sum(batch["template_mask"], dim=-1) > 0)
else:
t *= (torch.sum(batch["template_mask"], dim=-1) > 0)
ret = {}
if model.config.template.embed_angles:
template_angle_feat = build_template_angle_feat(
batch,
)
# [*, N, C_m]
a = model.template_angle_embedder(template_angle_feat)
ret["template_angle_embedding"] = a
ret.update({"template_pair_embedding": t})
return ret
def embed_templates_average(
model,
batch,
z,
pair_mask,
templ_dim,
templ_group_size=2,
):
"""
Args:
model:
An AlphaFold model object
batch:
An AlphaFold input batch. See documentation of AlphaFold.
z:
A [*, N, N, C_z] pair embedding
pair_mask:
A [*, N, N] pair mask
templ_dim:
The template dimension of the template tensors in batch
templ_group_size:
Granularity of the approximation. Larger values trade memory for
greater proximity to the original function
Returns:
A dictionary of template pair and angle embeddings.
A memory-efficient approximation of the "embed_templates" method of the
AlphaFold class. Instead of running pointwise attention over pair
embeddings for all of the templates at the same time, it splits templates
into groups of size templ_group_size, computes embeddings for each group
normally, and then averages the group embeddings. In our experiments, this
approximation has a minimal effect on the quality of the resulting
embedding, while its low memory footprint allows the number of templates
to scale almost indefinitely.
"""
inplace_safe = not (model.training or torch.is_grad_enabled())
# Embed the templates one at a time (with a poor man's vmap)
n = z.shape[-2]
n_templ = batch["template_aatype"].shape[templ_dim]
out_tensor = z.new_zeros(z.shape)
for i in range(0, n_templ, templ_group_size):
def slice_template_tensor(t):
s = [slice(None) for _ in t.shape]
s[templ_dim] = slice(i, i + templ_group_size)
return t[s]
template_feats = tensor_tree_map(
slice_template_tensor,
batch,
)
# [*, N, N, C_t]
t = build_template_pair_feat(
template_feats,
use_unit_vector=model.config.template.use_unit_vector,
inf=model.config.template.inf,
eps=model.config.template.eps,
**model.config.template.distogram,
).to(z.dtype)
# [*, S_t, N, N, C_z]
t = model.template_pair_embedder(t)
t = model.template_pair_stack(
t,
pair_mask.unsqueeze(-3).to(dtype=z.dtype),
chunk_size=model.globals.chunk_size,
use_lma=model.globals.use_lma,
_mask_trans=model.config._mask_trans,
)
t = model.template_pointwise_att(
t,
z,
template_mask=template_feats["template_mask"].to(dtype=z.dtype),
use_lma=model.globals.use_lma,
)
denom = math.ceil(n_templ / templ_group_size)
if(inplace_safe):
t /= denom
else:
t = t / denom
if(inplace_safe):
out_tensor += t
else:
out_tensor = out_tensor + t
del t
if(inplace_safe):
out_tensor *= (torch.sum(batch["template_mask"], dim=-1) > 0)
else:
out_tensor = out_tensor * (torch.sum(batch["template_mask"], dim=-1) > 0)
ret = {}
if model.config.template.embed_angles:
template_angle_feat = build_template_angle_feat(
batch,
)
# [*, N, C_m]
a = model.template_angle_embedder(template_angle_feat)
ret["template_angle_embedding"] = a
ret.update({"template_pair_embedding": out_tensor})
return ret
avg_line_length: 29.7443 | max_line_length: 81 | alphanum_fraction: 0.562503
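The shape comments in the docstrings above are enough to exercise the template stack end to end. Below is a minimal smoke-test sketch with random tensors, assuming `openfold` and its dependencies are installed; all channel sizes and sequence lengths are arbitrary:

```python
import torch

from openfold.model.template import (
    TemplatePairStack,
    TemplatePointwiseAttention,
)

n_templ, n_res, c_t, c_z = 2, 16, 8, 16

# Algorithm 16: refine the per-template pair representation.
stack = TemplatePairStack(
    c_t=c_t,
    c_hidden_tri_att=4,
    c_hidden_tri_mul=4,
    no_blocks=2,
    no_heads=2,
    pair_transition_n=2,
    dropout_rate=0.1,
    blocks_per_ckpt=None,
)
# Algorithm 17: attend over templates pointwise to update the pair embedding.
point_att = TemplatePointwiseAttention(
    c_t=c_t, c_z=c_z, c_hidden=4, no_heads=2, inf=1e9
)

t = torch.rand(n_templ, n_res, n_res, c_t)   # [N_templ, N_res, N_res, C_t]
mask = torch.ones(n_templ, n_res, n_res)     # [N_templ, N_res, N_res]
z = torch.rand(n_res, n_res, c_z)            # [N_res, N_res, C_z]

t = stack(t, mask, chunk_size=None)
z_update = point_att(t, z, template_mask=torch.ones(n_templ))
print(z_update.shape)                        # torch.Size([16, 16, 16])
```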
hexsha: 7952ba3874e8098d26cad37114917cd1079b06b4 | size: 990 | ext: py | lang: Python
path: multi_timer.py | repo: emguse/env_logger | head_hexsha: 8db848e8231f995cc056d51d3c6868b382f0ef39 | licenses: ["MIT"] (identical across the stars, issues, and forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null

content:
import time
class multi_timer():
def __init__(self, interval):
self.last_time = 0.0
self.up_state = False
self.interval = interval
def timer(self):
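        # Latch up_state to True once `interval` seconds have passed since the
        # last trigger; the caller checks up_state and resets it to False.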
if self.last_time + self.interval <= time.time():
self.last_time = time.time()
self.up_state = True
def main():
INTERVAL_1s = float(1.0) # Enter the interval time in seconds
INTERVAL_10s = float(10.0) # Enter the interval time in seconds
timer_1s = multi_timer(INTERVAL_1s)
timer_10s = multi_timer(INTERVAL_10s)
while True:
timer_1s.timer()
        if timer_1s.up_state:
timer_1s.up_state = False
# Write the process here
print("1sec: " + str(time.time()))
timer_10s.timer()
        if timer_10s.up_state:
timer_10s.up_state = False
# Write the process here
print("10sec: " + str(time.time()))
return
if __name__ == "__main__":
main()
avg_line_length: 28.285714 | max_line_length: 68 | alphanum_fraction: 0.586869
hexsha: 7952ba6189b49ef4456cd9944dc2e1df35c70bb3 | size: 3,708 | ext: py | lang: Python
path: src/statick_web/plugins/tool/jshint_tool_plugin.py | repo: xydesa/statick-web | head_hexsha: 600f655c128e40a8c1439d912a3d9ab4df7c813a | licenses: ["CC0-1.0"] (identical across the stars, issues, and forks column groups)
max_stars_count: 2 | events 2020-02-07T00:13:53.000Z to 2020-02-07T00:58:26.000Z
max_issues_count: 19 | events 2020-02-14T23:47:25.000Z to 2022-02-28T21:38:01.000Z
max_forks_count: 3 | events 2020-02-12T16:16:54.000Z to 2020-02-14T00:53:46.000Z

content:
"""Apply jshint tool and gather results."""
import logging
import re
import subprocess
from typing import List, Match, Optional, Pattern
from statick_tool.issue import Issue
from statick_tool.package import Package
from statick_tool.tool_plugin import ToolPlugin
class JSHintToolPlugin(ToolPlugin): # type: ignore
"""Apply jshint tool and gather results."""
def get_name(self) -> str:
"""Get name of tool."""
return "jshint"
# pylint: disable=too-many-locals
def scan(self, package: Package, level: str) -> Optional[List[Issue]]:
"""Run tool and gather output."""
tool_bin = "jshint"
tool_config = ".jshintrc"
user_config = self.plugin_context.config.get_tool_config(
self.get_name(), level, "config"
)
if user_config is not None:
tool_config = user_config
format_file_name = self.plugin_context.resources.get_file(tool_config)
flags: List[str] = []
if format_file_name is not None:
flags += ["-c", format_file_name]
flags += ["-e", ".js,.html", "--extract", "auto", "--reporter", "unix"]
user_flags = self.get_user_flags(level)
flags += user_flags
files: List[str] = []
if "html_src" in package:
files += package["html_src"]
if "javascript_src" in package:
files += package["javascript_src"]
total_output: List[str] = []
for src in files:
try:
exe = [tool_bin] + flags + [src]
output = subprocess.check_output(
exe, stderr=subprocess.STDOUT, universal_newlines=True
)
except subprocess.CalledProcessError as ex:
if ex.returncode == 2: # jshint returns 2 upon linting errors
total_output.append(ex.output)
else:
logging.warning(
"%s failed! Returncode = %s", tool_bin, ex.returncode
)
logging.warning("%s exception: %s", self.get_name(), ex.output)
return None
except OSError as ex:
logging.warning("Couldn't find %s! (%s)", tool_bin, ex)
return None
for output in total_output:
logging.debug("%s", output)
with open(self.get_name() + ".log", "w", encoding="utf8") as fid:
for output in total_output:
fid.write(output)
issues: List[Issue] = self.parse_output(total_output)
return issues
# pylint: enable=too-many-locals
def parse_output(self, total_output: List[str]) -> List[Issue]:
"""Parse tool output and report issues."""
jshint_re = r"(.+):(\d+):(\d+):\s(.+)"
parse: Pattern[str] = re.compile(jshint_re)
issues: List[Issue] = []
for output in total_output:
lines = output.split("\n")
for line in lines:
match: Optional[Match[str]] = parse.match(line)
if match:
filename = match.group(1)
line_number = match.group(2)
issue_type = "jshint"
severity = 5
message = match.group(4)
issues.append(
Issue(
filename,
line_number,
self.get_name(),
issue_type,
severity,
message,
None,
)
)
return issues
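# --- Illustrative note (not part of the original file) ---
# The regex above targets jshint's "unix" reporter format. A hypothetical line:
#     src/app.js:12:5: Missing semicolon.
# yields group(1) = "src/app.js" (filename), group(2) = "12" (line),
# group(3) = "5" (column) and group(4) = "Missing semicolon." (message),
# which is how parse_output builds each Issue.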
| 34.018349
| 83
| 0.515372
|
7952bba8abe54583f998a8961c47562a40ef4443
| 6,767
|
py
|
Python
|
pylearn2/cross_validation/tests/test_cross_validation.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 2,045
|
2015-01-01T14:07:52.000Z
|
2022-03-08T08:56:41.000Z
|
pylearn2/cross_validation/tests/test_cross_validation.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 305
|
2015-01-02T13:18:24.000Z
|
2021-08-20T18:03:28.000Z
|
pylearn2/cross_validation/tests/test_cross_validation.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 976
|
2015-01-01T17:08:51.000Z
|
2022-03-25T19:53:17.000Z
|
"""
Tests for cross-validation module.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_train_cv():
"""Test TrainCV class."""
skip_if_no_sklearn()
handle, layer0_filename = tempfile.mkstemp()
handle, layer1_filename = tempfile.mkstemp()
handle, layer2_filename = tempfile.mkstemp()
# train the first hidden layer (unsupervised)
# (test for TrainCV)
trainer = yaml_parse.load(test_yaml_layer0 %
{'layer0_filename': layer0_filename})
trainer.main_loop()
# train the second hidden layer (unsupervised)
# (test for TransformerDatasetCV)
trainer = yaml_parse.load(test_yaml_layer1 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename})
trainer.main_loop()
# train the third hidden layer (unsupervised)
# (test for StackedBlocksCV)
trainer = yaml_parse.load(test_yaml_layer2 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename,
'layer2_filename': layer2_filename})
trainer.main_loop()
# train the full model (supervised)
# (test for PretrainedLayerCV)
trainer = yaml_parse.load(test_yaml_layer3 %
{'layer0_filename': layer0_filename,
'layer1_filename': layer1_filename,
'layer2_filename': layer2_filename})
trainer.main_loop()
# clean up
    os.remove(layer0_filename)
    os.remove(layer1_filename)
    os.remove(layer2_filename)
test_yaml_layer0 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer0_filename)s,
}
"""
test_yaml_layer1 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !pkl: %(layer0_filename)s,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 8,
nhid: 6,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer1_filename)s,
}
"""
test_yaml_layer2 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !obj:pylearn2.cross_validation.blocks.StackedBlocksCV {
layers: [
!pkl: %(layer0_filename)s,
!pkl: %(layer1_filename)s,
],
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 6,
nhid: 4,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer2_filename)s,
}
"""
test_yaml_layer3 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h0',
layer_content: !pkl: %(layer0_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h1',
layer_content: !pkl: %(layer1_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h2',
layer_content: !pkl: %(layer2_filename)s,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 2,
irange: 0.,
},
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
| 31.919811
| 78
| 0.577213
|
7952bc3d09b6d4ebbc2cd05122e182bdc8d497b5
| 3,127
|
py
|
Python
|
msgraph-cli-extensions/v1_0/schemaextensions_v1_0/azext_schemaextensions_v1_0/vendored_sdks/schemaextensions/_schema_extensions.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/schemaextensions_v1_0/azext_schemaextensions_v1_0/vendored_sdks/schemaextensions/_schema_extensions.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph-cli-extensions/v1_0/schemaextensions_v1_0/azext_schemaextensions_v1_0/vendored_sdks/schemaextensions/_schema_extensions.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import SchemaExtensionsConfiguration
from .operations import SchemaExtensionsSchemaExtensionOperations
from . import models
class SchemaExtensions(object):
"""SchemaExtensions.
:ivar schema_extensions_schema_extension: SchemaExtensionsSchemaExtensionOperations operations
:vartype schema_extensions_schema_extension: schema_extensions.operations.SchemaExtensionsSchemaExtensionOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
search=None, # type: Optional[str]
filter=None, # type: Optional[str]
count=None, # type: Optional[bool]
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://graph.microsoft.com/v1.0'
self._config = SchemaExtensionsConfiguration(credential, top, skip, search, filter, count, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.schema_extensions_schema_extension = SchemaExtensionsSchemaExtensionOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> SchemaExtensions
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
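# --- Illustrative usage sketch (not part of the generated file) ---
# The client is designed to be used as a context manager with any azure.core
# TokenCredential implementation. The credential class below (from the
# azure-identity package) is one plausible choice, shown as an assumption
# rather than a requirement of this client.
#
#     from azure.identity import InteractiveBrowserCredential
#
#     with SchemaExtensions(credential=InteractiveBrowserCredential(), top=5) as client:
#         operations = client.schema_extensions_schema_extension
#         # ... call the generated schema extension operations on `operations` ...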
| 38.134146
| 119
| 0.668052
|
7952bc3d985dfacdb5896735e8f111f36c8b2254
| 10,203
|
py
|
Python
|
brian2/sphinxext/briandoc.py
|
chbehrens/brian2
|
46b5264caa5375ae13084508b5c1049e0c9e019e
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/sphinxext/briandoc.py
|
chbehrens/brian2
|
46b5264caa5375ae13084508b5c1049e0c9e019e
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/sphinxext/briandoc.py
|
chbehrens/brian2
|
46b5264caa5375ae13084508b5c1049e0c9e019e
|
[
"BSD-2-Clause"
] | null | null | null |
"""
========
briandoc
========
Sphinx extension that handles docstrings in the Numpy standard format with some
brian-specific tweaks. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
from __future__ import absolute_import
import re
import pydoc
import inspect
from docutils import statemachine
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.roles import XRefRole
from sphinx.domains.python import PyXRefRole
if sphinx.__version__ < '1.0.1':
raise RuntimeError("Sphinx 1.0.1 or newer is required")
from brian2.core.preferences import prefs
from .docscrape_sphinx import get_doc_object, SphinxDocString
class BrianPrefsDirective(Directive):
'''
A sphinx 'Directive' for automatically generated documentation of Brian preferences.
The directive takes an optional argument, the basename of the preferences
to document. In addition, you can specify a `nolinks` option which means
that no target links for the references are added. Do this if you document
    preferences in more than one place.
Examples
--------
Document one category of preferences and generate links::
.. document_brian_prefs:: core
Document all preferences without generating links::
.. document_brian_prefs::
:nolinks:
'''
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'nolinks': directives.flag}
has_content = False
def run(self):
# The section that should be documented
if len(self.arguments):
section = self.arguments[0]
else:
section = None
link_targets = not ('nolinks' in self.options)
rawtext = prefs.get_documentation(section, link_targets)
include_lines = statemachine.string2lines(rawtext,
convert_whitespace=True)
self.state_machine.insert_input(include_lines, 'Brian preferences')
return []
def brianobj_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
'''
A Sphinx role, used as a wrapper for the default `py:obj` role, allowing
us to use the simple backtick syntax for brian classes/functions without
having to qualify the package for classes/functions that are available after
    a `from brian2 import *`, e.g. `NeuronGroup`.
    Also allows linking directly to preference names using the same syntax.
'''
if text in prefs:
linktext = text.replace('_', '-').replace('.', '-')
text = '%s <brian-pref-%s>' % (text, linktext)
# Use sphinx's cross-reference role
xref = XRefRole(warn_dangling=True)
return xref('std:ref', rawtext, text, lineno, inliner, options, content)
else:
if text and (not '~' in text):
try:
# A simple class or function name
if not '.' in text:
module = __import__('brian2', fromlist=[str(text)])
imported = getattr(module, str(text), None)
if hasattr(imported, '__module__'):
text = '~' + imported.__module__ + '.' + text
if inspect.isfunction(imported):
text += '()'
# Possibly a method/classmethod/attribute name
elif len(text.split('.')) == 2:
classname, attrname = text.split('.')
# Remove trailing parentheses (will be readded for display)
if attrname.endswith('()'):
attrname = attrname[:-2]
module = __import__('brian2', fromlist=[str(classname)])
imported = getattr(module, str(classname), None)
if hasattr(imported, '__module__'):
# Add trailing parentheses only for methods not for
# attributes
if inspect.ismethod(getattr(imported,
str(attrname),
None)):
parentheses = '()'
else:
parentheses = ''
text = ('{classname}.{attrname}{parentheses} '
'<{modname}.{classname}.{attrname}>').format(classname=classname,
attrname=attrname,
modname=imported.__module__,
parentheses=parentheses)
except ImportError:
pass
role = 'py:obj'
py_role = PyXRefRole()
return py_role(role, rawtext, text, lineno, inliner, options, content)
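# --- Illustrative note (not part of the original file) ---
# In reST source, this role lets plain backticks resolve Brian names, e.g.
#     `NeuronGroup`      -> cross-reference to the class (roughly
#                           brian2.groups.neurongroup.NeuronGroup)
#     `codegen.target`   -> link to the matching preference entry
# The exact targets depend on what `from brian2 import *` exports and on the
# registered preferences; the examples above are only indicative.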
def mangle_docstrings(app, what, name, obj, options, lines,
reference_offset=[0]):
cfg = dict()
if what == 'module':
# Strip top title
title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
re.I | re.S)
lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
exported_members = getattr(obj, '__all__', None)
if exported_members:
lines.append('*Exported members:* ')
# do not print more than 25 members
lines.append(', '.join(['`%s`' % member for
member in exported_members[:25]]))
if len(exported_members) > 25:
lines.append('... (%d more members)' % (len(exported_members) - 25))
lines.append('')
else:
doc = get_doc_object(obj, what, u"\n".join(lines), name=name,
config=cfg)
lines[:] = unicode(doc).split(u"\n")
# replace reference numbers so that there are no duplicates
references = []
for line in lines:
line = line.strip()
m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
if m:
references.append(m.group(1))
# start renaming from the longest string, to avoid overwriting parts
references.sort(key=lambda x:-len(x))
if references:
for i, line in enumerate(lines):
for r in references:
if re.match(ur'^\d+$', r):
new_r = u"R%d" % (reference_offset[0] + int(r))
else:
new_r = u"%s%d" % (r, reference_offset[0])
lines[i] = lines[i].replace(u'[%s]_' % r,
u'[%s]_' % new_r)
lines[i] = lines[i].replace(u'.. [%s]' % r,
u'.. [%s]' % new_r)
reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
(not hasattr(obj, '__init__') or
'initializes x; see ' in pydoc.getdoc(obj.__init__))):
return '', ''
if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
return
if not hasattr(obj, '__doc__'):
return
doc = SphinxDocString(pydoc.getdoc(obj))
if doc['Signature']:
sig = re.sub(u"^[^(]*", u"", doc['Signature'])
return sig, u''
def setup(app, get_doc_object_=get_doc_object):
global get_doc_object
get_doc_object = get_doc_object_
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('autodoc-process-signature', mangle_signature)
# Extra mangling domains
app.add_domain(NumpyPythonDomain)
app.add_domain(NumpyCDomain)
directives.register_directive('document_brian_prefs', BrianPrefsDirective)
# provide the brianobj role with a link to the Python domain
app.add_role('brianobj', brianobj_role)
#------------------------------------------------------------------------------
# Docstring-mangling domains
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
from sphinx.domains.c import CDomain
from sphinx.domains.python import PythonDomain
class ManglingDomainBase(object):
directive_mangling_map = {}
def __init__(self, *a, **kw):
super(ManglingDomainBase, self).__init__(*a, **kw)
self.wrap_mangling_directives()
def wrap_mangling_directives(self):
for name, objtype in self.directive_mangling_map.items():
self.directives[name] = wrap_mangling_directive(
self.directives[name], objtype)
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
name = 'np'
directive_mangling_map = {
'function': 'function',
'class': 'class',
'exception': 'class',
'method': 'function',
'classmethod': 'function',
'staticmethod': 'function',
'attribute': 'attribute',
}
class NumpyCDomain(ManglingDomainBase, CDomain):
name = 'np-c'
directive_mangling_map = {
'function': 'function',
'member': 'attribute',
'macro': 'function',
'type': 'class',
'var': 'object',
}
def wrap_mangling_directive(base_directive, objtype):
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
self.content = ViewList(lines, self.content.parent)
return base_directive.run(self)
return directive
| 36.701439
| 105
| 0.559541
|
7952bce0cd016ab73746766050ad45faabc7be49
| 872
|
py
|
Python
|
Generic/patterns/observer.py
|
hc-tec/redis-py
|
28172eb4de2bfc61b0ff1023dbf7b52d4f4ba47c
|
[
"MIT"
] | 1
|
2021-11-24T07:54:17.000Z
|
2021-11-24T07:54:17.000Z
|
Generic/patterns/observer.py
|
hc-tec/pydis
|
28172eb4de2bfc61b0ff1023dbf7b52d4f4ba47c
|
[
"MIT"
] | 1
|
2021-11-13T15:08:14.000Z
|
2021-11-13T15:08:14.000Z
|
Generic/patterns/observer.py
|
hc-tec/pydis
|
28172eb4de2bfc61b0ff1023dbf7b52d4f4ba47c
|
[
"MIT"
] | 1
|
2021-11-24T07:54:28.000Z
|
2021-11-24T07:54:28.000Z
|
from typing import List
from abc import abstractmethod
class AbstractObserver:
@abstractmethod
def update(self, *args, **kwargs): ...
class AbstractSubject:
@abstractmethod
def attach(self, observer: AbstractObserver): ...
@abstractmethod
def detach(self, observer: AbstractObserver): ...
@abstractmethod
def notify(self, *args, **kwargs): ...
class Observer(AbstractObserver):
def update(self, *args, **kwargs):
pass
class Subject(AbstractSubject):
def __init__(self):
self._observers: List[Observer] = []
def attach(self, observer: Observer):
self._observers.append(observer)
def detach(self, observer: Observer):
self._observers.remove(observer)
def notify(self, *args, **kwargs):
for observer in self._observers:
observer.update(*args, **kwargs)
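# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical concrete observer wired to a Subject:
#
#     class PrintObserver(Observer):
#         def update(self, *args, **kwargs):
#             print("notified with", args, kwargs)
#
#     subject = Subject()
#     subject.attach(PrintObserver())
#     subject.notify("key expired", key="foo")   # every attached observer's update() runs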
| 19.818182
| 53
| 0.66055
|
7952be95f94342b709705ee79481164113b97434
| 2,034
|
py
|
Python
|
hyperlpr/e2e.py
|
lillin-bug/Vehicle-Detection
|
4fe7a376788033e67ffec5a0ab584a6b44c0c000
|
[
"Apache-2.0"
] | 13
|
2018-06-20T08:48:15.000Z
|
2019-03-07T01:50:07.000Z
|
hyperlpr/e2e.py
|
617475jordan/hyperDemo
|
2b6eb60b900090034373d058646f2d4253a49b90
|
[
"Apache-2.0"
] | null | null | null |
hyperlpr/e2e.py
|
617475jordan/hyperDemo
|
2b6eb60b900090034373d058646f2d4253a49b90
|
[
"Apache-2.0"
] | 5
|
2018-06-20T09:13:18.000Z
|
2020-04-10T15:24:45.000Z
|
#coding=utf-8
from keras import backend as K
from keras.models import load_model
from keras.layers import *
import numpy as np
import random
import string
import cv2
import e2emodel as model
chars = [u"京", u"沪", u"津", u"渝", u"冀", u"晋", u"蒙", u"辽", u"吉", u"黑", u"苏", u"浙", u"皖", u"闽", u"赣", u"鲁", u"豫", u"鄂", u"湘", u"粤", u"桂",
         u"琼", u"川", u"贵", u"云", u"藏", u"陕", u"甘", u"青", u"宁", u"新", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"A",
         u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"J", u"K", u"L", u"M", u"N", u"P", u"Q", u"R", u"S", u"T", u"U", u"V", u"W", u"X",
         u"Y", u"Z", u"港", u"学", u"使", u"警", u"澳", u"挂", u"军", u"北", u"南", u"广", u"沈", u"兰", u"成", u"济", u"海", u"民", u"航", u"空"
         ];
pred_model = model.construct_model("./model/ocr_plate_all_w_rnn_2.h5",)
import time
def fastdecode(y_pred):
results = ""
confidence = 0.0
table_pred = y_pred.reshape(-1, len(chars)+1)
res = table_pred.argmax(axis=1)
for i,one in enumerate(res):
if one<len(chars) and (i==0 or (one!=res[i-1])):
results+= chars[one]
confidence+=table_pred[i][one]
confidence/= len(results)
return results,confidence
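# --- Illustrative note (not part of the original file) ---
# fastdecode() is a greedy CTC decode: argmax per timestep, then collapse
# repeated labels and drop the blank class (index len(chars)). For example, with
# a hypothetical argmax sequence [5, 5, blank, 5, 7], the decoded string is
# chars[5] + chars[5] + chars[7]: the first run of 5s collapses to one character,
# the 5 after the blank starts a new character, and the blank itself is skipped.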
def recognizeOne(src):
# x_tempx= cv2.imread(src)
x_tempx = src
# x_tempx = cv2.bitwise_not(x_tempx)
x_temp = cv2.resize(x_tempx,( 160,40))
x_temp = x_temp.transpose(1, 0, 2)
t0 = time.time()
y_pred = pred_model.predict(np.array([x_temp]))
y_pred = y_pred[:,2:,:]
# plt.imshow(y_pred.reshape(16,66))
# plt.show()
#
# cv2.imshow("x_temp",x_tempx)
# cv2.waitKey(0)
return fastdecode(y_pred)
#
#
# import os
#
# path = "/Users/yujinke/PycharmProjects/HyperLPR_Python_web/cache/finemapping"
# for filename in os.listdir(path):
# if filename.endswith(".png") or filename.endswith(".jpg") or filename.endswith(".bmp"):
# x = os.path.join(path,filename)
# recognizeOne(x)
# # print time.time() - t0
#
# # cv2.imshow("x",x)
# # cv2.waitKey()
| 31.78125
| 138
| 0.555064
|
7952beab10b4a44cd448cd627097ea34b8e3125c
| 7,308
|
py
|
Python
|
composer/algorithms/colout/colout.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/algorithms/colout/colout.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/algorithms/colout/colout.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""Core ColOut classes and functions."""
from __future__ import annotations
import logging
import textwrap
import weakref
from typing import TypeVar
import torch
from PIL.Image import Image as PillowImage
from torchvision.datasets import VisionDataset
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.core import Algorithm, Event, Logger, State
from composer.core.types import Tensor
from composer.datasets.utils import add_vision_dataset_transform
log = logging.getLogger(__name__)
ImgT = TypeVar("ImgT", torch.Tensor, PillowImage)
__all__ = ["ColOut", "ColOutTransform", "colout_batch"]
def colout_batch(X: ImgT, p_row: float = 0.15, p_col: float = 0.15) -> ImgT:
"""Applies ColOut augmentation to a batch of images, dropping the same random rows and columns from all images in a
batch.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms.colout import colout_batch
new_X = colout_batch(
X=X_example,
p_row=0.15,
p_col=0.15
)
Args:
X: :class:`PIL.Image.Image` or :class:`torch.Tensor` of image data. In
the latter case, must be a single image of shape ``CHW`` or a batch
of images of shape ``NCHW``.
p_row: Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col: Fraction of columns to drop (drop along W). Default: ``0.15``.
Returns:
torch.Tensor: Input batch tensor with randomly dropped columns and rows.
"""
# Convert image to Tensor if needed
X_tensor = image_as_type(X, torch.Tensor)
# Get the dimensions of the image
row_size = X_tensor.shape[-2]
col_size = X_tensor.shape[-1]
# Determine how many rows and columns to keep
kept_row_size = int((1 - p_row) * row_size)
kept_col_size = int((1 - p_col) * col_size)
# Randomly choose indices to keep. Must be sorted for slicing
kept_row_idx = sorted(torch.randperm(row_size)[:kept_row_size].numpy())
kept_col_idx = sorted(torch.randperm(col_size)[:kept_col_size].numpy())
# Keep only the selected row and columns
X_colout = X_tensor[..., kept_row_idx, :]
X_colout = X_colout[..., :, kept_col_idx]
# convert back to same type as input, and strip added batch dim if needed;
# we can't just reshape to input shape because we've reduced the spatial size
if not isinstance(X, torch.Tensor) or (X.ndim < X_colout.ndim):
X_colout = X_colout.reshape(X_colout.shape[-3:])
X_colout = image_as_type(X_colout, type(X))
return X_colout
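# --- Illustrative shape check (not part of the original file) ---
# A quick, self-contained way to see the effect of the row/column dropping above
# (sizes are hypothetical):
#
#     import torch
#     x = torch.rand(8, 3, 32, 32)             # NCHW batch
#     y = colout_batch(x, p_row=0.25, p_col=0.25)
#     # y.shape == (8, 3, 24, 24): 25% of rows and columns were dropped.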
class ColOutTransform:
"""Torchvision-like transform for performing the ColOut augmentation, where random rows and columns are dropped from
a single image.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from torchvision import datasets, transforms
from composer.algorithms.colout import ColOutTransform
colout_transform = ColOutTransform(p_row=0.15, p_col=0.15)
transforms = transforms.Compose([colout_transform, transforms.ToTensor()])
Args:
p_row (float): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float): Fraction of columns to drop (drop along W). Default: ``0.15``.
"""
def __init__(self, p_row: float = 0.15, p_col: float = 0.15):
self.p_row = p_row
self.p_col = p_col
def __call__(self, img: ImgT) -> ImgT:
"""Drops random rows and columns from a single image.
Args:
img (torch.Tensor or PIL Image): An input image as a torch.Tensor or PIL image
Returns:
torch.Tensor or PIL Image: A smaller image with rows and columns dropped
"""
return colout_batch(img, self.p_row, self.p_col)
class ColOut(Algorithm):
"""Drops a fraction of the rows and columns of an input image. If the fraction of rows/columns dropped isn't too
large, this does not significantly alter the content of the image, but reduces its size and provides extra
variability.
    If ``batch`` is True (the default), this algorithm runs on :attr:`Event.AFTER_DATALOADER` to modify the batch
    directly. Otherwise, if ``batch`` is False, it runs on :attr:`Event.FIT_START` to insert a dataset transformation,
    which is a no-op if this algorithm has already applied itself to the :attr:`State.train_dataloader.dataset`.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms import ColOut
from composer.trainer import Trainer
colout_algorithm = ColOut(p_row=0.15, p_col=0.15, batch=True)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[colout_algorithm],
optimizers=[optimizer]
)
Args:
p_row (float): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float): Fraction of columns to drop (drop along W). Default: ``0.15``.
batch (bool): Run ColOut at the batch level. Default: ``True``.
"""
def __init__(self, p_row: float = 0.15, p_col: float = 0.15, batch: bool = True):
if not (0 <= p_col <= 1):
raise ValueError("p_col must be between 0 and 1")
if not (0 <= p_row <= 1):
raise ValueError("p_row must be between 0 and 1")
self.p_row = p_row
self.p_col = p_col
self.batch = batch
self._transformed_datasets = weakref.WeakSet()
def match(self, event: Event, state: State) -> bool:
if self.batch:
return event == Event.AFTER_DATALOADER
else:
return event == Event.FIT_START and state.train_dataloader.dataset not in self._transformed_datasets
def _apply_sample(self, state: State) -> None:
"""Add the ColOut dataset transform to the dataloader."""
dataset = state.train_dataloader.dataset
transform = ColOutTransform(p_row=self.p_row, p_col=self.p_col)
if not isinstance(dataset, VisionDataset):
raise TypeError(
textwrap.dedent(f"""\
To use {type(self).__name__}, the dataset must be a
{VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
add_vision_dataset_transform(dataset, transform, is_tensor_transform=False)
self._transformed_datasets.add(dataset)
def _apply_batch(self, state: State) -> None:
"""Transform a batch of images using the ColOut augmentation."""
inputs, labels = state.batch_pair
assert isinstance(inputs, Tensor), "Multiple Tensors not supported yet for ColOut"
new_inputs = colout_batch(inputs, p_row=self.p_row, p_col=self.p_col)
state.batch = (new_inputs, labels)
def apply(self, event: Event, state: State, logger: Logger) -> None:
if self.batch:
self._apply_batch(state)
else:
self._apply_sample(state)
| 37.476923
| 120
| 0.655446
|
7952c318602472bf666621ed5e15363ef9620813
| 59
|
py
|
Python
|
nnsvs/__init__.py
|
oatsu-gh/nnsvs
|
510f37bc1d1f15282646e4d34435b5d63686cf40
|
[
"MIT"
] | 298
|
2020-05-03T04:45:59.000Z
|
2022-03-31T14:38:52.000Z
|
nnsvs/__init__.py
|
oatsu-gh/nnsvs
|
510f37bc1d1f15282646e4d34435b5d63686cf40
|
[
"MIT"
] | 54
|
2020-05-02T16:14:30.000Z
|
2022-03-30T18:04:11.000Z
|
nnsvs/__init__.py
|
oatsu-gh/nnsvs
|
510f37bc1d1f15282646e4d34435b5d63686cf40
|
[
"MIT"
] | 39
|
2020-05-08T04:50:17.000Z
|
2022-03-21T18:46:14.000Z
|
# coding: utf-8
from .version import version as __version__
| 29.5
| 43
| 0.79661
|
7952c5430ec778869f3f63341b3815069c7cf637
| 1,826
|
py
|
Python
|
hooks/pre_gen_project.py
|
ma7modx/cookiecutter-django
|
c29ed729acc973977809311bdaa493342dfd38c6
|
[
"BSD-3-Clause"
] | null | null | null |
hooks/pre_gen_project.py
|
ma7modx/cookiecutter-django
|
c29ed729acc973977809311bdaa493342dfd38c6
|
[
"BSD-3-Clause"
] | 2
|
2018-09-27T10:51:31.000Z
|
2018-10-04T17:16:42.000Z
|
hooks/pre_gen_project.py
|
ma7modx/cookiecutter-django
|
c29ed729acc973977809311bdaa493342dfd38c6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
NOTE:
    the code below is to be maintained as Python 2.x-compatible,
    as the whole Cookiecutter Django project initialization
    can potentially be run in a Python 2.x environment.
TODO: ? restrict Cookiecutter Django project initialization to Python 3.x environments only
"""
from __future__ import print_function
import sys
TERMINATOR = "\x1b[0m"
WARNING = "\x1b[1;33m [WARNING]: "
INFO = "\x1b[1;33m [INFO]: "
HINT = "\x1b[3;33m"
SUCCESS = "\x1b[1;32m [SUCCESS]: "
project_slug = "{{ cookiecutter.project_slug }}"
if hasattr(project_slug, "isidentifier"):
assert project_slug.isidentifier(), "'{}' project slug is not a valid Python identifier.".format(
project_slug
)
assert "\\" not in "{{ cookiecutter.author_name }}", "Don't include backslashes in author name."
python_major_version = sys.version_info[0]
if python_major_version == 2:
print(
WARNING + "Cookiecutter Django does not support Python 2. "
"Stability is guaranteed with Python 3.6+ only, "
"are you sure you want to proceed (y/n)? " + TERMINATOR
)
yes_options, no_options = frozenset(["y"]), frozenset(["n"])
while True:
choice = raw_input().lower()
if choice in yes_options:
break
elif choice in no_options:
print(INFO + "Generation process stopped as requested." + TERMINATOR)
sys.exit(1)
else:
print(
HINT
+ "Please respond with {} or {}: ".format(
", ".join(
["'{}'".format(o) for o in yes_options if not o == ""]
),
", ".join(
["'{}'".format(o) for o in no_options if not o == ""]
),
)
+ TERMINATOR
)
| 32.607143
| 101
| 0.572289
|
7952c55667949ed2ef22e3aaff632db7c22d313e
| 19,460
|
py
|
Python
|
beets/autotag/mb.py
|
adammillerio/beets
|
c1d93165f05ba87387c61ae2eda3c93a77efa956
|
[
"MIT"
] | null | null | null |
beets/autotag/mb.py
|
adammillerio/beets
|
c1d93165f05ba87387c61ae2eda3c93a77efa956
|
[
"MIT"
] | null | null | null |
beets/autotag/mb.py
|
adammillerio/beets
|
c1d93165f05ba87387c61ae2eda3c93a77efa956
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
from __future__ import division, absolute_import, print_function
import musicbrainzngs
import re
import traceback
from six.moves.urllib.parse import urljoin
from beets import logging
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED:
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
FIELDS_TO_MB_KEYS = {
'catalognum': 'catno',
'country': 'country',
'label': 'label',
'media': 'format',
'year': 'date',
}
musicbrainzngs.set_useragent('beets', beets.__version__,
'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
"""
def __init__(self, reason, verb, query, tb=None):
self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = u'MusicBrainz not reachable'
super(MusicBrainzAPIError, self).__init__(reason, verb, tb)
def get_message(self):
return u'{0} in {1} with query {2}'.format(
self._reasonstr(), self.verb, repr(self.query)
)
log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
RELEASE_INCLUDES += ['genres']
def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid)
def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname)
musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int),
)
def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select
    and return the user's preferred alias, or None if no matching
alias is found.
"""
if not aliases:
return
# Only consider aliases that have locales set.
aliases = [a for a in aliases if 'locale' in a]
# Search configured locales in order.
for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale.
matches = [a for a in aliases
if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
def _preferred_release_event(release):
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
for country in countries:
for event in release.get('release-event-list', {}):
try:
if country in event['area']['iso-3166-1-code-list']:
return country, event['date']
except KeyError:
pass
return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, six.string_types):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
alias = _preferred_alias(el['artist'].get('alias-list', ()))
# An artist.
if alias:
cur_artist_name = alias['alias']
else:
cur_artist_name = el['artist']['name']
artist_parts.append(cur_artist_name)
# Artist sort name.
if alias:
artist_sort_parts.append(alias['sort-name'])
elif 'sort-name' in el['artist']:
artist_sort_parts.append(el['artist']['sort-name'])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if 'name' in el:
artist_credit_parts.append(el['name'])
else:
artist_credit_parts.append(cur_artist_name)
return (
''.join(artist_parts),
''.join(artist_sort_parts),
''.join(artist_credit_parts),
)
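# --- Illustrative note (not part of the original file) ---
# A hypothetical (not live) artist-credit block and its flattened result:
#
#     credit = [
#         {'artist': {'id': '...', 'name': 'Artist A', 'sort-name': 'A, Artist'}},
#         ' feat. ',
#         {'artist': {'id': '...', 'name': 'Artist B', 'sort-name': 'B, Artist'}},
#     ]
#     _flatten_artist_credit(credit)
#     # -> ('Artist A feat. Artist B', 'A, Artist feat. B, Artist', 'Artist A feat. Artist B')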
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
title=recording['title'],
track_id=recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
info.decode()
return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
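# --- Illustrative note (not part of the original file) ---
# _set_date_str accepts partial dates: e.g. a (hypothetical) call
# _set_date_str(info, '2001-05') sets info.year = 2001 and info.month = 5 and
# leaves the day untouched; with original=True the same values go to
# info.original_year and info.original_month instead.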
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Get artist name using join phrases.
artist_name, artist_sort_name, artist_credit_name = \
_flatten_artist_credit(release['artist-credit'])
# Basic info.
track_infos = []
index = 0
for medium in release['medium-list']:
disctitle = medium.get('title')
format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list']
if ('data-track-list' in medium
and not config['match']['ignore_data_tracks']):
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording.
index += 1
ti = track_info(
track['recording'],
index,
int(medium['position']),
int(track['position']),
track_count,
)
ti.release_track_id = track['id']
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data.
if track.get('title'):
ti.title = track['title']
if track.get('artist-credit'):
# Get the artist names.
ti.artist, ti.artist_sort, ti.artist_credit = \
_flatten_artist_credit(track['artist-credit'])
ti.artist_id = track['artist-credit'][0]['artist']['id']
if track.get('length'):
ti.length = int(track['length']) / (1000.0)
track_infos.append(ti)
info = beets.autotag.hooks.AlbumInfo(
album=release['title'],
album_id=release['id'],
artist=artist_name,
artist_id=release['artist-credit'][0]['artist']['id'],
tracks=track_infos,
mediums=len(release['medium-list']),
artist_sort=artist_sort_name,
artist_credit=artist_credit_name,
data_source=u'MusicBrainz',
data_url=album_url(release['id']),
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].as_str()
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.albumstatus = release.get('status')
# Get the disambiguation strings at the release and release group level.
if release['release-group'].get('disambiguation'):
info.releasegroupdisambig = \
release['release-group'].get('disambiguation')
if release.get('disambiguation'):
info.albumdisambig = release.get('disambiguation')
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Log the new-style "primary" and "secondary" release types.
# Eventually, we'd like to actually store this data, but we just log
# it for now to help understand the differences.
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join(
[secondarytype.lower() for secondarytype in
release['release-group']['secondary-type-list']]))
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get('label-info-list'):
label_info = release['label-info-list'][0]
if label_info.get('label'):
label = label_info['label']['name']
if label != '[no label]':
info.label = label
info.catalognum = label_info.get('catalog-number')
# Text representation data.
if release.get('text-representation'):
rep = release['text-representation']
info.script = rep.get('script')
info.language = rep.get('language')
# Media (format).
if release['medium-list']:
first_medium = release['medium-list'][0]
info.media = first_medium.get('format')
genres = release.get('genre-list')
if config['musicbrainz']['genres'] and genres:
info.genre = ';'.join(g['name'] for g in genres)
info.decode()
return info
def match_album(artist, album, tracks=None, extra_tags=None):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError.
The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album and any other extra tags.
"""
# Build search criteria.
criteria = {'release': album.lower().strip()}
if artist is not None:
criteria['artist'] = artist.lower().strip()
else:
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = six.text_type(tracks)
# Additional search cues from existing metadata.
if extra_tags:
for tag in extra_tags:
key = FIELDS_TO_MB_KEYS[tag]
value = six.text_type(extra_tags.get(tag, '')).lower().strip()
if key == 'catno':
value = value.replace(u' ', '')
if value:
criteria[key] = value
# Abort if we have no search terms.
if not any(criteria.values()):
return
try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'release search', criteria,
traceback.format_exc())
for release in res['release-list']:
# The search result is missing some data (namely, the tracks),
# so we just use the ID and fetch the rest of the information.
albuminfo = album_for_id(release['id'])
if albuminfo is not None:
yield albuminfo
def match_track(artist, title):
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
"""
criteria = {
'artist': artist.lower().strip(),
'recording': title.lower().strip(),
}
if not any(criteria.values()):
return
try:
res = musicbrainzngs.search_recordings(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'recording search', criteria,
traceback.format_exc())
for recording in res['recording-list']:
yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
log.debug(u'Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid)
if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Album ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
traceback.format_exc())
return album_info(res['release'])
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
trackid = _parse_id(releaseid)
if not trackid:
log.debug(u'Invalid MBID ({0}).', releaseid)
return
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
log.debug(u'Track ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
traceback.format_exc())
return track_info(res['recording'])
| 35.641026
| 77
| 0.616084
|
7952c5a014fd6436c3d7ad00c4de38a77f55cd98
| 230
|
py
|
Python
|
public/app/models/all_transactions.py
|
iconation/ICONScan
|
3fb2f364a678a0e8d2b8a4a0d2275fbe8e2e3b50
|
[
"Apache-2.0"
] | null | null | null |
public/app/models/all_transactions.py
|
iconation/ICONScan
|
3fb2f364a678a0e8d2b8a4a0d2275fbe8e2e3b50
|
[
"Apache-2.0"
] | null | null | null |
public/app/models/all_transactions.py
|
iconation/ICONScan
|
3fb2f364a678a0e8d2b8a4a0d2275fbe8e2e3b50
|
[
"Apache-2.0"
] | null | null | null |
class AllTransactions:
    def __init__(self, db):
        # Use a context manager so the SQL file handle is closed promptly
        with open("app/sql/all_icx_transactions.sql", "rb") as sql_file:
            db.sql.execute(sql_file.read())
        self.result = db.sql.fetchall()

    def process(db):
        return AllTransactions(db).result
| 25.555556
| 79
| 0.66087
|
7952c6bcb478498482cddbee36d74677a502ecd2
| 5,403
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
afiniel/but
|
ac5a1d9d05c3b4cb7c6087bcfb19da218e40f8c3
|
[
"MIT"
] | 13
|
2021-04-24T14:07:48.000Z
|
2021-12-29T18:10:08.000Z
|
contrib/seeds/makeseeds.py
|
afiniel/but
|
ac5a1d9d05c3b4cb7c6087bcfb19da218e40f8c3
|
[
"MIT"
] | 13
|
2021-04-22T17:44:59.000Z
|
2022-02-22T10:38:44.000Z
|
contrib/seeds/makeseeds.py
|
afiniel/but
|
ac5a1d9d05c3b4cb7c6087bcfb19da218e40f8c3
|
[
"MIT"
] | 22
|
2021-06-05T00:58:07.000Z
|
2022-02-12T16:46:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from "protx list valid 1"
#
NSEEDS=512
MAX_SEEDS_PER_ASN=4
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
}
import re
import sys
import dns.resolver
import collections
import json
import time
import multiprocessing
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
def parseip(ip):
    m = PATTERN_IPV4.match(ip)
    sortkey = None
    # Keep the input string intact; accumulate the numeric IPv4 value separately
    # (previously the parameter was overwritten with None, which broke the
    # IPv6/onion matches below).
    ipnum = None
    if m is None:
        m = PATTERN_IPV6.match(ip)
        if m is None:
            m = PATTERN_ONION.match(ip)
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ipnum = 0
        for i in range(0, 4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ipnum = ipnum + (int(m.group(i+2)) << (8*(3-i)))
        if ipnum == 0:
            return None
        net = 'ipv4'
        sortkey = ipnum
        ipstr = m.group(1)
        port = int(m.group(6))
    return {
        "net": net,
        "ip": ipstr,
        "port": port,
        "ipnum": ipnum,
        "sortkey": sortkey
    }
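# --- Illustrative note (not part of the original file) ---
# Example (hypothetical input): parseip("10.2.3.4:9999") returns
#     {'net': 'ipv4', 'ip': '10.2.3.4', 'port': 9999,
#      'ipnum': 0x0a020304, 'sortkey': 0x0a020304}
# while "[2001:db8::1]:9999" takes the IPv6 branch and a 16-character v2 onion
# address takes the onion branch, where 'ipnum' stays None and 'sortkey' is the
# address string.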
def filtermulticollateralhash(mns):
'''Filter out MNs sharing the same collateral hash'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['collateralHash']].append(mn)
return [mn for mn in mns if len(hist[mn['collateralHash']]) == 1]
def filtermulticollateraladdress(mns):
'''Filter out MNs sharing the same collateral address'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['collateralAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['collateralAddress']]) == 1]
def filtermultipayoutaddress(mns):
'''Filter out MNs sharing the same payout address'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['state']['payoutAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1]
def resolveasn(resolver, ip):
asn = int([x.to_text() for x in resolver.query('.'.join(reversed(ip.split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
my_resolver = dns.resolver.Resolver()
pool = multiprocessing.Pool(processes=16)
# OpenDNS servers
my_resolver.nameservers = ['208.67.222.222', '208.67.220.220']
# Resolve ASNs in parallel
asns = [pool.apply_async(resolveasn, args=(my_resolver, ip['ip'])) for ip in ips_ipv4]
# Filter IPv4 by ASN
result = []
asn_count = {}
for i in range(len(ips_ipv4)):
ip = ips_ipv4[i]
if len(result) == max_total:
break
try:
asn = asns[i].get()
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
# This expects a json as outputted by "protx list valid 1"
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as f:
mns = json.load(f)
else:
mns = json.load(sys.stdin)
# Skip PoSe banned MNs
mns = [mn for mn in mns if mn['state']['PoSeBanHeight'] == -1]
    # Skip MNs with fewer than 100 confirmations
mns = [mn for mn in mns if mn['confirmations'] >= 100]
# Filter out MNs which are definitely from the same person/operator
mns = filtermulticollateralhash(mns)
mns = filtermulticollateraladdress(mns)
mns = filtermultipayoutaddress(mns)
# Extract IPs
ips = [parseip(mn['state']['service']) for mn in mns]
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 31.596491
| 168
| 0.585601
|
7952c88d5ecff2e02c14d66029c5f34017d324d0
| 5,468
|
py
|
Python
|
awx_collection/plugins/modules/tower_receive.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 1
|
2021-04-15T18:50:58.000Z
|
2021-04-15T18:50:58.000Z
|
awx_collection/plugins/modules/tower_receive.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | 35
|
2021-03-01T06:34:26.000Z
|
2022-03-01T01:18:42.000Z
|
awx_collection/plugins/modules/tower_receive.py
|
bhyunki/awx
|
ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, John Westcott IV <john.westcott.iv@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_receive
deprecated:
removed_in: "14.0.0"
why: Deprecated in favor of upcoming C(_export) module.
alternative: Once published, use M(tower_export) instead.
author: "John Westcott IV (@john-westcott-iv)"
short_description: Receive assets from Ansible Tower.
description:
- Receive assets from Ansible Tower. See
U(https://www.ansible.com/tower) for an overview.
options:
all:
description:
- Export all assets
type: bool
default: 'False'
organization:
description:
- List of organization names to export
default: []
type: list
elements: str
user:
description:
- List of user names to export
default: []
type: list
elements: str
team:
description:
- List of team names to export
default: []
type: list
elements: str
credential_type:
description:
- List of credential type names to export
default: []
type: list
elements: str
credential:
description:
- List of credential names to export
default: []
type: list
elements: str
notification_template:
description:
- List of notification template names to export
default: []
type: list
elements: str
inventory_script:
description:
- List of inventory script names to export
default: []
type: list
elements: str
inventory:
description:
- List of inventory names to export
default: []
type: list
elements: str
project:
description:
- List of project names to export
default: []
type: list
elements: str
job_template:
description:
- List of job template names to export
default: []
type: list
elements: str
workflow:
description:
- List of workflow names to export
default: []
type: list
elements: str
requirements:
- "ansible-tower-cli >= 3.3.0"
notes:
- Specifying a name of "all" for any asset type will export all items of that asset type.
extends_documentation_fragment: awx.awx.auth_legacy
'''
EXAMPLES = '''
- name: Export all tower assets
tower_receive:
all: True
tower_config_file: "~/tower_cli.cfg"
- name: Export all inventories
tower_receive:
inventory:
- all
- name: Export a job template named "My Template" and all Credentials
tower_receive:
job_template:
- "My Template"
credential:
- all
'''
RETURN = '''
assets:
description: The exported assets
returned: success
type: dict
sample: [ {}, {} ]
'''
from ..module_utils.tower_legacy import TowerLegacyModule, tower_auth_config, HAS_TOWER_CLI
try:
from tower_cli.cli.transfer.receive import Receiver
from tower_cli.cli.transfer.common import SEND_ORDER
from tower_cli.utils.exceptions import TowerCLIError
from tower_cli.conf import settings
TOWER_CLI_HAS_EXPORT = True
except ImportError:
TOWER_CLI_HAS_EXPORT = False
def main():
argument_spec = dict(
all=dict(type='bool', default=False),
credential=dict(type='list', default=[], elements='str'),
credential_type=dict(type='list', default=[], elements='str'),
inventory=dict(type='list', default=[], elements='str'),
inventory_script=dict(type='list', default=[], elements='str'),
job_template=dict(type='list', default=[], elements='str'),
notification_template=dict(type='list', default=[], elements='str'),
organization=dict(type='list', default=[], elements='str'),
project=dict(type='list', default=[], elements='str'),
team=dict(type='list', default=[], elements='str'),
user=dict(type='list', default=[], elements='str'),
workflow=dict(type='list', default=[], elements='str'),
)
module = TowerLegacyModule(argument_spec=argument_spec, supports_check_mode=False)
module.deprecate(msg="This module is deprecated and will be replaced by the AWX CLI export command.", version="awx.awx:14.0.0")
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
if not TOWER_CLI_HAS_EXPORT:
module.fail_json(msg='ansible-tower-cli version does not support export')
export_all = module.params.get('all')
assets_to_export = {}
for asset_type in SEND_ORDER:
assets_to_export[asset_type] = module.params.get(asset_type)
result = dict(
assets=None,
changed=False,
message='',
)
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
try:
receiver = Receiver()
result['assets'] = receiver.export_assets(all=export_all, asset_input=assets_to_export)
module.exit_json(**result)
except TowerCLIError as e:
result['message'] = e.message
module.fail_json(msg='Receive Failed', **result)
if __name__ == '__main__':
main()
| 27.34
| 131
| 0.645208
|
7952c9600d46fc478a084344b6b070cb5976121f
| 972
|
py
|
Python
|
promgen/util.py
|
kfdm/promgen
|
2fdff588fbee7cc74fdffec35f18c64e8ec420e6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
promgen/util.py
|
kfdm/promgen
|
2fdff588fbee7cc74fdffec35f18c64e8ec420e6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
promgen/util.py
|
kfdm/promgen
|
2fdff588fbee7cc74fdffec35f18c64e8ec420e6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import requests.sessions
from promgen.version import __version__
# Wrappers around request api to ensure we always attach our user agent
# https://github.com/requests/requests/blob/master/requests/api.py
def post(url, data=None, json=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.post(url, data=data, json=json, **kwargs)
def get(url, params=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.get(url, params=params, **kwargs)
def delete(url, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.delete(url, **kwargs)
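# --- Editor's illustrative sketch (not part of promgen) ---
# These wrappers behave like requests.post/get/delete but always send a
# "promgen/<version>" User-Agent header. Example usage (URL is a placeholder):
#
# from promgen import util
# response = util.get('https://prometheus.example.com/api/v1/targets')
# response.raise_for_status()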
| 34.714286
| 76
| 0.708848
|
7952c977de00561db80ce8d8b1efbcdbbfd6764c
| 2,412
|
py
|
Python
|
Software/Python/grove_dht_pro.py
|
yasu-kun/GrovePi
|
1d583cfee9397290587282750bdc25dd910b0018
|
[
"MIT"
] | null | null | null |
Software/Python/grove_dht_pro.py
|
yasu-kun/GrovePi
|
1d583cfee9397290587282750bdc25dd910b0018
|
[
"MIT"
] | null | null | null |
Software/Python/grove_dht_pro.py
|
yasu-kun/GrovePi
|
1d583cfee9397290587282750bdc25dd910b0018
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Temperature & Humidity Sensor Pro
# (http://www.seeedstudio.com/wiki/Grove_-_Temperature_and_Humidity_Sensor_Pro)
#
# The GrovePi connects the Raspberry Pi and Grove sensors.
# You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import grovepi
import math
# Connect the Grove Temperature & Humidity Sensor Pro to digital port D4
# This example uses the white colored sensor (the Pro version).
# SIG,NC,VCC,GND
sensor = 4 # The Sensor goes on digital port 4.
# temp_humidity_sensor_type
# Grove Base Kit comes with the blue sensor.
blue = 0 # The Blue colored sensor.
white = 1 # The White colored sensor.
while True:
try:
        # This example uses the white colored sensor.
# The first parameter is the port, the second parameter is the type of sensor.
[temp,humidity] = grovepi.dht(sensor,white)
if math.isnan(temp) == False and math.isnan(humidity) == False:
print("temp = %.02f C humidity =%.02f%%"%(temp, humidity))
except IOError:
print ("Error")
| 40.881356
| 107
| 0.752488
|
7952c97ca185c8c1f29d623c1376ff5ca796fd56
| 3,711
|
py
|
Python
|
heatmap/guided_backprop.py
|
MickeyZeng/Data-Visualization
|
c7005d1096545d7a5eb96dd0c9bc13e874d42fa4
|
[
"MIT"
] | null | null | null |
heatmap/guided_backprop.py
|
MickeyZeng/Data-Visualization
|
c7005d1096545d7a5eb96dd0c9bc13e874d42fa4
|
[
"MIT"
] | null | null | null |
heatmap/guided_backprop.py
|
MickeyZeng/Data-Visualization
|
c7005d1096545d7a5eb96dd0c9bc13e874d42fa4
|
[
"MIT"
] | null | null | null |
"""
Created on Thu Oct 26 11:23:47 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import torch
from torch.nn import ReLU
class GuidedBackprop():
"""
Produces gradients generated with guided back propagation from the given image
"""
def __init__(self, model):
self.model = model
self.gradients = None
self.forward_relu_outputs = []
# Put model in evaluation mode
self.model.eval()
self.update_relus()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
# Register hook to the first layer
first_layer = list(self.model.features._modules.items())[0][1]
first_layer.register_backward_hook(hook_function)
def update_relus(self):
"""
Updates relu activation functions so that
1- stores output in forward pass
2- imputes zero for gradient values that are less than zero
"""
def relu_backward_hook_function(module, grad_in, grad_out):
"""
If there is a negative gradient, change it to zero
"""
# Get last forward output
corresponding_forward_output = self.forward_relu_outputs[-1]
corresponding_forward_output[corresponding_forward_output > 0] = 1
modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)
del self.forward_relu_outputs[-1] # Remove last forward output
return (modified_grad_out,)
def relu_forward_hook_function(module, ten_in, ten_out):
"""
Store results of forward pass
"""
self.forward_relu_outputs.append(ten_out)
# Loop through layers, hook up ReLUs
for pos, module in self.model.features._modules.items():
if isinstance(module, ReLU):
module.register_backward_hook(relu_backward_hook_function)
module.register_forward_hook(relu_forward_hook_function)
def generate_gradients(self, input_image, target_class):
# Forward pass
model_output = self.model(input_image)
# Zero gradients
self.model.zero_grad()
# Target for backprop
one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
one_hot_output[0][target_class] = 1
# Backward pass
model_output.backward(gradient=one_hot_output)
# Convert Pytorch variable to numpy array
        # [0] to drop the batch dimension: (1,3,224,224) -> (3,224,224)
gradients_as_arr = self.gradients.data.numpy()[0]
return gradients_as_arr
# if __name__ == '__main__':
# Snake
# target_example = 0
# (original_image, prep_img, target_class, file_name_to_export, pretrained_model) = \
# get_example_params(target_example)
#
# # Guided backprop
# GBP = GuidedBackprop(pretrained_model)
# # Get gradients
# guided_grads = GBP.generate_gradients(prep_img, target_class)
# # Save colored gradients
# save_gradient_images(guided_grads, file_name_to_export + '_Guided_BP_color')
# # Convert to grayscale
# grayscale_guided_grads = convert_to_grayscale(guided_grads)
# # Save grayscale gradients
# save_gradient_images(grayscale_guided_grads, file_name_to_export + '_Guided_BP_gray')
# # Positive and negative saliency maps
# pos_sal, neg_sal = get_positive_negative_saliency(guided_grads)
# save_gradient_images(pos_sal, file_name_to_export + '_pos_sal')
# save_gradient_images(neg_sal, file_name_to_export + '_neg_sal')
# print('Guided backprop completed')
| 37.484848
| 95
| 0.663164
|
7952c9f24c36ca687a698eb5f5a58215c97c6724
| 1,800
|
py
|
Python
|
skl2onnx/shape_calculators/svd.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | 1
|
2021-04-12T12:38:20.000Z
|
2021-04-12T12:38:20.000Z
|
skl2onnx/shape_calculators/svd.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | null | null | null |
skl2onnx/shape_calculators/svd.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | 1
|
2020-10-01T09:26:27.000Z
|
2020-10-01T09:26:27.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ..common._registration import register_shape_calculator
from ..common.data_types import (
FloatTensorType, Int64TensorType, DoubleTensorType
)
from ..common.utils import check_input_and_output_numbers
from ..common.utils import check_input_and_output_types
def calculate_sklearn_truncated_svd_output_shapes(operator):
"""
Allowed input/output patterns are
1. [N, C] ---> [N, K]
Transform feature dimension from C to K
"""
check_input_and_output_numbers(operator, input_count_range=1,
output_count_range=1)
check_input_and_output_types(
operator, good_input_types=[
FloatTensorType, Int64TensorType, DoubleTensorType],
good_output_types=[FloatTensorType, DoubleTensorType])
if len(operator.inputs[0].type.shape) != 2:
raise RuntimeError('Only 2-D tensor(s) can be input(s).')
N = operator.inputs[0].type.shape[0]
K = (operator.raw_operator.n_components
if operator.type == 'SklearnTruncatedSVD'
else operator.raw_operator.n_components_)
operator.outputs[0].type.shape = [N, K]
register_shape_calculator('SklearnIncrementalPCA',
calculate_sklearn_truncated_svd_output_shapes)
register_shape_calculator('SklearnPCA',
calculate_sklearn_truncated_svd_output_shapes)
register_shape_calculator('SklearnTruncatedSVD',
calculate_sklearn_truncated_svd_output_shapes)
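# --- Editor's note (illustrative, not part of skl2onnx) ---
# The rule above maps a [N, C] input to a [N, K] output, where K is the fitted
# model's n_components (TruncatedSVD) or n_components_ (PCA/IncrementalPCA).
# A rough end-to-end sketch, assuming skl2onnx's public to_onnx helper:
#
# import numpy as np
# from sklearn.decomposition import TruncatedSVD
# from skl2onnx import to_onnx
# X = np.random.rand(100, 20).astype(np.float32)   # N=100, C=20
# model = TruncatedSVD(n_components=5).fit(X)      # K=5
# onx = to_onnx(model, X)                          # ONNX output shape: [N, 5]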
| 39.130435
| 76
| 0.655556
|
7952caa64d8b156eb59cb505b7bc2793a2d3ee09
| 19,226
|
py
|
Python
|
py/riscv/AssemblyHelperRISCV.py
|
jeremybennett/force-riscv
|
a5222a3b3fa8a0b9464204056ddca148f16b7e49
|
[
"Apache-2.0"
] | null | null | null |
py/riscv/AssemblyHelperRISCV.py
|
jeremybennett/force-riscv
|
a5222a3b3fa8a0b9464204056ddca148f16b7e49
|
[
"Apache-2.0"
] | null | null | null |
py/riscv/AssemblyHelperRISCV.py
|
jeremybennett/force-riscv
|
a5222a3b3fa8a0b9464204056ddca148f16b7e49
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base.AssemblyHelper import AssemblyHelper
from riscv.PrivilegeLevel import PrivilegeLevelRISCV
## This class facilitates generating common RISC-V instructions using common defaults. It provides a
# simpler, higher-level alternative to using the Sequence.genInstruction() command with less
# flexibility.
class AssemblyHelperRISCV(AssemblyHelper):
## Generate instructions to advance the system register containing the exception return address
# to the next instruction address. This will generally result in skipping the instruction that
# triggered the exception.
#
# @param aScratchRegIndex The index of a register that can be freely modified.
# @param aPrivLevelRegIndex The index of a register containing a value representing the current
# privilege level.
def genIncrementExceptionReturnAddress(self, aScratchRegIndex, aPrivLevelRegIndex):
for priv_level in self.genPrivilegeLevelInstructions(aPrivLevels=tuple(PrivilegeLevelRISCV)[1:], aInstrCountPerLevel=3, aScratchRegIndex=aScratchRegIndex, aPrivLevelRegIndex=aPrivLevelRegIndex):
self.genReadSystemRegister(aScratchRegIndex, ('%sepc' % priv_level.name.lower()))
self.genAddImmediate(aScratchRegIndex, 4)
self.genWriteSystemRegister(('%sepc' % priv_level.name.lower()), aScratchRegIndex)
## Generate instructions to set the system register containing the exception return address
# to the provided recovery address. This will generally result in skipping the instruction that
# triggered the exception and resuming execution at the recovery address.
#
# @param aScratchRegIndex The index of a register that can be freely modified.
# @param aRecoveryRegIndex The index of a register that contains the desired recovery address.
# @param aPrivLevelRegIndex The index of a register containing a value representing the current
# privilege level.
def genProvidedExceptionReturnAddress(self, aScratchRegIndex, aRecoveryRegIndex, aPrivLevelRegIndex):
for priv_level in self.genPrivilegeLevelInstructions(aPrivLevels=tuple(PrivilegeLevelRISCV)[1:], aInstrCountPerLevel=1, aScratchRegIndex=aScratchRegIndex, aPrivLevelRegIndex=aPrivLevelRegIndex):
self.genWriteSystemRegister(('%sepc' % priv_level.name.lower()), aRecoveryRegIndex)
## Generate a relative branch instruction to the specified address.
#
# @param aTargetAddr The target address of the branch.
def genRelativeBranchToAddress(self, aTargetAddr):
br_offset = self.getBranchOffset(aTargetAddr, 20)
self.genRelativeBranch(br_offset)
## Generate a relative branch instruction.
#
# @param aBrOffset The branch offset.
def genRelativeBranch(self, aBrOffset):
self.mSequence.genInstruction('JAL##RISCV', {'rd': 0, 'simm20': aBrOffset, 'NoRestriction': 1})
## Generate a relative branch instruction targeting a labeled address. Record the branch for
# later verification that the expected address was targeted via the addLabel() method.
#
# @param aBrOffset The branch offset.
# @param aLabel The label the branch is targeting.
def genRelativeBranchToLabel(self, aBrOffset, aLabel):
self.recordBranchToLabel(aLabel, aBrOffset)
self.genRelativeBranch(aBrOffset)
## Generate a relative branch with link instruction to the specified address.
#
# @param aTargetAddr The target address of the branch.
def genRelativeBranchWithLinkToAddress(self, aTargetAddr):
br_offset = self.getBranchOffset(aTargetAddr, 20)
self.genRelativeBranchWithLink(br_offset)
## Generate a relative branch with link instruction.
#
# @param aBrOffset The branch offset.
def genRelativeBranchWithLink(self, aBrOffset):
# We use the conventional link register x1
# TODO(Noah): Provide a mechanism for using alternate link registers when there is time to
# do so if it is deemed valuable.
self.mSequence.genInstruction('JAL##RISCV', {'rd': 1, 'simm20': aBrOffset, 'NoRestriction': 1})
## Generate an instruction to return to the address contained in the default link register.
def genReturn(self):
# We use the conventional link register x1
# TODO(Noah): Provide a mechanism for using alternate link registers when there is time to
# do so if it is deemed valuable.
self.mSequence.genInstruction('JALR##RISCV', {'rd': 0, 'rs1': 1, 'simm12': 0, 'NoRestriction': 1})
## Generate an instruction to load the specified register with a small immediate value.
#
# @param aRegIndex The index of the register to load.
# @param aImmVal The immediate value to load.
def genMoveImmediate(self, aRegIndex, aImmVal):
self.genAddImmediate(aRegIndex, aImmVal, aSrcRegIndex=0)
## Generate an instruction to shift the specified register to the left.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aShiftAmount The number of bits to shift.
# @param aSrcRegIndex The index of the register containing the value to shift. May be omitted
# if the destination register is also the source register.
def genShiftLeftImmediate(self, aDestRegIndex, aShiftAmount, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('SLLI#RV64I#RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'shamt': aShiftAmount})
## Generate an instruction to shift the specified register to the right.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aShiftAmount The number of bits to shift.
# @param aSrcRegIndex The index of the register containing the value to shift. May be omitted
# if the destination register is also the source register.
def genShiftRightImmediate(self, aDestRegIndex, aShiftAmount, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('SRLI#RV64I#RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'shamt': aShiftAmount})
## Generate an instruction to AND a specified register with an immediate value.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aImmVal The immediate value to AND.
# @param aSrcRegIndex The index of the register containing the value to AND. May be omitted if
# the destination register is also the source register.
def genAndImmediate(self, aDestRegIndex, aImmVal, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('ANDI##RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'simm12': aImmVal})
## Generate an instruction to OR a specified register with an immediate value.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aImmVal The immediate value to OR.
# @param aSrcRegIndex The index of the register containing the value to OR. May be omitted if
# the destination register is also the source register.
def genOrImmediate(self, aDestRegIndex, aImmVal, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('ORI##RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'simm12': aImmVal})
## Generate an instruction to XOR a specified register with an immediate value.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aImmVal The immediate value to XOR.
# @param aSrcRegIndex The index of the register containing the value to XOR. May be omitted if
# the destination register is also the source register.
def genXorImmediate(self, aDestRegIndex, aImmVal, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('XORI##RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'simm12': aImmVal})
## Generate an instruction to add an immediate value to a specified register.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aImmVal The immediate value to add to the register value.
# @param aSrcRegIndex The index of the register containing the value to be added to. May be
# omitted if the destination register is also the source register.
def genAddImmediate(self, aDestRegIndex, aImmVal, aSrcRegIndex=None):
src_reg_index = aSrcRegIndex if aSrcRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('ADDI##RISCV', {'rd': aDestRegIndex, 'rs1': src_reg_index, 'simm12': aImmVal})
## Generate an instruction to load the specified register with the value in a different
# register.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSrcRegIndex The index of the register containing the value to copied.
def genMoveRegister(self, aDestRegIndex, aSrcRegIndex):
self.genAddRegister(aDestRegIndex, aSrcRegIndex, aSrcRegIndex2=0)
## Generate an instruction to AND two registers.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSrcRegIndex1 The index of the register containing one of the values to AND.
# @param aSrcRegIndex2 The index of the register containing one of the values to AND. May be
# omitted if the destination register is also one of the source registers.
def genAndRegister(self, aDestRegIndex, aSrcRegIndex1, aSrcRegIndex2=None):
src_reg_index_2 = aSrcRegIndex2 if aSrcRegIndex2 is not None else aDestRegIndex
self.mSequence.genInstruction('AND##RISCV', {'rd': aDestRegIndex, 'rs1': aSrcRegIndex1, 'rs2': src_reg_index_2})
## Generate an instruction to OR two registers.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSrcRegIndex1 The index of the register containing one of the values to OR.
# @param aSrcRegIndex2 The index of the register containing one of the values to OR. May be
# omitted if the destination register is also one of the source registers.
def genOrRegister(self, aDestRegIndex, aSrcRegIndex1, aSrcRegIndex2=None):
src_reg_index_2 = aSrcRegIndex2 if aSrcRegIndex2 is not None else aDestRegIndex
self.mSequence.genInstruction('OR##RISCV', {'rd': aDestRegIndex, 'rs1': aSrcRegIndex1, 'rs2': src_reg_index_2})
## Generate an instruction to take the one's complement of a register.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSrcRegIndex The index of the register containing the value to NOT. May be omitted if
# the destination register is also the source register.
def genNotRegister(self, aDestRegIndex, aSrcRegIndex=None):
self.genXorImmediate(aDestRegIndex, 0xFFF, aSrcRegIndex)
## Generate an instruction to add two registers.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSrcRegIndex1 The index of the register containing one of the values to add.
# @param aSrcRegIndex2 The index of the register containing one of the values to add. May be
# omitted if the destination register is also one of the source registers.
def genAddRegister(self, aDestRegIndex, aSrcRegIndex1, aSrcRegIndex2=None):
src_reg_index_2 = aSrcRegIndex2 if aSrcRegIndex2 is not None else aDestRegIndex
self.mSequence.genInstruction('ADD##RISCV', {'rd': aDestRegIndex, 'rs1': aSrcRegIndex1, 'rs2': src_reg_index_2})
## Generate an instruction to subtract one register from another.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSubtrahendRegIndex The index of the register containing the value to subtract from
# the minuend.
# @param aMinuendRegIndex The index of the register containing the value to be subtracted from.
# May be omitted if the destination register is also used as the minuend.
def genSubRegister(self, aDestRegIndex, aSubtrahendRegIndex, aMinuendRegIndex=None):
minuend_reg_index = aMinuendRegIndex if aMinuendRegIndex is not None else aDestRegIndex
self.mSequence.genInstruction('SUB##RISCV', {'rd': aDestRegIndex, 'rs1': minuend_reg_index, 'rs2': aSubtrahendRegIndex})
## Generate an instruction to load a register with the value in the specified system register.
#
# @param aDestRegIndex The index of the register to write the result to.
# @param aSysRegName The name of the system register to read.
def genReadSystemRegister(self, aDestRegIndex, aSysRegName):
self.mSequence.genInstruction('CSRRS#register#RISCV', {'rd': aDestRegIndex, 'rs1': 0, 'csr': self.mSequence.getRegisterIndex(aSysRegName)})
## Generate an instruction to write a system register with the value contained in another
# register.
#
# @param aSysRegName The name of the system register to write to.
# @param aSrcRegIndex The index of the register containing the value to write to the system
# register.
def genWriteSystemRegister(self, aSysRegName, aSrcRegIndex):
self.mSequence.genInstruction('CSRRW#register#RISCV', {'rd': 0, 'rs1': aSrcRegIndex, 'csr': self.mSequence.getRegisterIndex(aSysRegName)})
## Generate an instruction to load a value from a memory location defined in a GPR w/ a 12 bit
# immediate offset. Value loaded into GPR.
#
# @param aDestRegIndex The index of the register to be written with the value from memory.
# @param aAddrRegIndex The index of the register containing the address to load from memory
# @param aImmOffsetVal The 12b signed immediate offset applied to the address in aAddrRegIndex
def genLoadMemory(self, aDestRegIndex, aAddrRegIndex, aImmOffsetVal):
self.mSequence.genInstruction('LD##RISCV', {'rd': aDestRegIndex, 'rs1': aAddrRegIndex, 'simm12':aImmOffsetVal, 'NoRestriction': 1})
## Generate a conditional branch instruction to the specified address.
#
# @param aLhRegIndex The index of the register to use for the left-hand side of the comparison.
# @param aRhRegIndex The index of the register to use for the right-hand side of the
# comparison.
# @param aTargetAddr The target address of the branch.
# @param aCondition A two-letter string encoding the branch condition.
def genConditionalBranchToAddress(self, aLhRegIndex, aRhRegIndex, aTargetAddr, aCondition):
br_offset = self.getBranchOffset(aTargetAddr, 12)
self.genConditionalBranch(aLhRegIndex, aRhRegIndex, br_offset, aCondition)
## Generate a conditional branch instruction targeting a labeled address. Record the branch for
# later verification that the expected address was targeted via the addLabel() method.
#
# @param aLhRegIndex The index of the register to use for the left-hand side of the comparison.
# @param aRhRegIndex The index of the register to use for the right-hand side of the
# comparison.
# @param aBrOffset The branch offset.
# @param aCondition A two-letter string encoding the branch condition.
# @param aLabel The label the branch is targeting.
def genConditionalBranchToLabel(self, aLhRegIndex, aRhRegIndex, aBrOffset, aCondition, aLabel):
self.recordBranchToLabel(aLabel, aBrOffset)
self.genConditionalBranch(aLhRegIndex, aRhRegIndex, aBrOffset, aCondition)
## Generate a conditional branch instruction.
#
# @param aLhRegIndex The index of the register to use for the left-hand side of the comparison.
# @param aRhRegIndex The index of the register to use for the right-hand side of the
# comparison.
# @param aBrOffset The branch offset.
# @param aCondition A two-letter string encoding the branch condition.
def genConditionalBranch(self, aLhRegIndex, aRhRegIndex, aBrOffset, aCondition):
CONDITIONS = {'EQ': 0, 'NE': 1, 'LT': 2, 'LTU': 3, 'GE': 4, 'GEU': 5}
instr = 'B%s##RISCV' % aCondition
self.mSequence.genInstruction(instr, {'rs1': aLhRegIndex, 'rs2': aRhRegIndex, 'simm12': aBrOffset, 'NoRestriction': 1})
## Generate an instruction to return from an exception.
#
    # @param aPrivLevel The privilege level in which the exception return will be executed.
def genExceptionReturn(self, aPrivLevel):
self.mSequence.genInstruction('%sRET##RISCV' % aPrivLevel.name, {'NoRestriction': 1})
## Generate branch instructions to determine the current privilege level. This method yields at
# the appropriate locations in the branch instruction structure to allow the caller to generate
# specific instructions for the privilege level. It is assumed that each privilege level
# executes the same number of instructions, as specified by aInstrCountPerLevel.
#
# @param aPrivLevels A list of privilege levels for which to generate instructions.
# @param aInstrCountPerLevel The number of custom instructions to be generated for each
# privilege level.
# @param aScratchRegIndex The index of a register that can be freely modified.
# @param aPrivLevelRegIndex The index of a register containing a value representing the current
# privilege level.
def genPrivilegeLevelInstructions(self, aPrivLevels, aInstrCountPerLevel, aScratchRegIndex, aPrivLevelRegIndex):
# Loop through all privilege levels except the last one; the last privilege level doesn't
        # need the surrounding branch instructions.
for (i, priv_level) in enumerate(aPrivLevels[:-1]):
self.genMoveImmediate(aScratchRegIndex, priv_level.value)
cond_branch_offset = (2 + aInstrCountPerLevel) * 2
self.genConditionalBranch(aPrivLevelRegIndex, aScratchRegIndex, cond_branch_offset, 'NE')
yield priv_level
rel_branch_offset = ((3 + aInstrCountPerLevel) * (len(aPrivLevels) - i - 1) - 2) * 2
self.genRelativeBranch(rel_branch_offset)
# Generate code for the last privilege level.
yield aPrivLevels[-1]
## Return the shift amount for PC-relative branch instructions.
def getBranchShift(self):
return 1
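# --- Editor's note (illustrative sketch) ---
# The helper is meant to be driven from a test Sequence; the constructor and
# the Sequence API live in the base classes, which are not part of this file,
# so the exact calls below are assumptions rather than documented usage.
#
# asm_helper = AssemblyHelperRISCV(self)            # 'self' being a Sequence
# asm_helper.genMoveImmediate(10, 0x1)              # addi x10, x0, 1
# asm_helper.genShiftLeftImmediate(10, 4)           # slli x10, x10, 4
# asm_helper.genAddRegister(11, 10, 0)              # add  x11, x10, x0
# asm_helper.genReturn()                            # jalr x0, x1, 0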
| 60.649842
| 202
| 0.731041
|
7952cabc78198b5ff97e744c979d3651ab8906ec
| 6,404
|
py
|
Python
|
src/datadog_api_client/v1/model/widget_display_type.py
|
mrhwick/datadog-api-client-python
|
9f57bf378b80b7558070087182722f4ca88d630d
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/model/widget_display_type.py
|
mrhwick/datadog-api-client-python
|
9f57bf378b80b7558070087182722f4ca88d630d
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/model/widget_display_type.py
|
mrhwick/datadog-api-client-python
|
9f57bf378b80b7558070087182722f4ca88d630d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class WidgetDisplayType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'AREA': "area",
'BARS': "bars",
'LINE': "line",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, value, *args, **kwargs):
"""WidgetDisplayType - a model defined in OpenAPI
Args:
value (str): Type of display to use for the request.., must be one of ["area", "bars", "line", ] # noqa: E501
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
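# --- Editor's note (illustrative, not part of the generated file) ---
# The model wraps a single enum value and validates it against allowed_values;
# anything other than "area", "bars" or "line" is rejected by the base class.
#
# display = WidgetDisplayType("line")
# print(display.value)        # -> "line"
# WidgetDisplayType("pie")    # raises a validation error from model_utils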
| 38.119048
| 122
| 0.574329
|
7952cbd7eb2c048813931c881ec351c93bfb8214
| 81
|
py
|
Python
|
web_server/web_server/models/__init__.py
|
mahonda/project-vending-machine
|
8180163047a14ec61cbbedc5a63702f7b05c4c29
|
[
"MIT"
] | null | null | null |
web_server/web_server/models/__init__.py
|
mahonda/project-vending-machine
|
8180163047a14ec61cbbedc5a63702f7b05c4c29
|
[
"MIT"
] | null | null | null |
web_server/web_server/models/__init__.py
|
mahonda/project-vending-machine
|
8180163047a14ec61cbbedc5a63702f7b05c4c29
|
[
"MIT"
] | null | null | null |
from .Doggo import Doggo
from .Owner import Owner
from .invmodel import invmodel
| 20.25
| 30
| 0.814815
|
7952cc0e4763474e66bcd6758d30899b7a38a931
| 13,385
|
py
|
Python
|
featexp/base.py
|
luobaozhu/featexp
|
c371494e9cfd08dababdfa5226929b1b7f0c8f73
|
[
"MIT"
] | 1
|
2020-11-26T07:46:58.000Z
|
2020-11-26T07:46:58.000Z
|
featexp/base.py
|
luobaozhu/featexp
|
c371494e9cfd08dababdfa5226929b1b7f0c8f73
|
[
"MIT"
] | null | null | null |
featexp/base.py
|
luobaozhu/featexp
|
c371494e9cfd08dababdfa5226929b1b7f0c8f73
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def get_grouped_data(input_data, feature, target_col, bins, cuts=0):
"""
Bins continuous features into equal sample size buckets and returns the target mean in each bucket. Separates out
nulls into another bucket.
    :param input_data: dataframe containing features and target column
:param feature: feature column name
:param target_col: target column
:param bins: Number bins required
:param cuts: if buckets of certain specific cuts are required. Used on test data to use cuts from train.
:return: If cuts are passed only grouped data is returned, else cuts and grouped data is returned
"""
has_null = pd.isnull(input_data[feature]).sum() > 0
if has_null == 1:
data_null = input_data[pd.isnull(input_data[feature])]
input_data = input_data[~pd.isnull(input_data[feature])]
input_data.reset_index(inplace=True, drop=True)
is_train = 0
if cuts == 0:
is_train = 1
prev_cut = min(input_data[feature]) - 1
cuts = [prev_cut]
reduced_cuts = 0
for i in range(1, bins + 1):
next_cut = np.percentile(input_data[feature], i * 100 / bins)
            if next_cut > prev_cut + .000001:  # float numbers should be compared with some threshold!
cuts.append(next_cut)
else:
reduced_cuts = reduced_cuts + 1
prev_cut = next_cut
# if reduced_cuts>0:
# print('Reduced the number of bins due to less variation in feature')
cut_series = pd.cut(input_data[feature], cuts)
else:
cut_series = pd.cut(input_data[feature], cuts)
grouped = input_data.groupby([cut_series], as_index=True).agg(
{target_col: [np.size, np.mean], feature: [np.mean]})
grouped.columns = ['_'.join(cols).strip() for cols in grouped.columns.values]
grouped[grouped.index.name] = grouped.index
grouped.reset_index(inplace=True, drop=True)
grouped = grouped[[feature] + list(grouped.columns[0:3])]
grouped = grouped.rename(index=str, columns={target_col + '_size': 'Samples_in_bin'})
grouped = grouped.reset_index(drop=True)
corrected_bin_name = '[' + str(min(input_data[feature])) + ', ' + str(grouped.loc[0, feature]).split(',')[1]
grouped[feature] = grouped[feature].astype('category')
grouped[feature] = grouped[feature].cat.add_categories(corrected_bin_name)
grouped.loc[0, feature] = corrected_bin_name
if has_null == 1:
grouped_null = grouped.loc[0:0, :].copy()
grouped_null[feature] = grouped_null[feature].astype('category')
grouped_null[feature] = grouped_null[feature].cat.add_categories('Nulls')
grouped_null.loc[0, feature] = 'Nulls'
grouped_null.loc[0, 'Samples_in_bin'] = len(data_null)
grouped_null.loc[0, target_col + '_mean'] = data_null[target_col].mean()
grouped_null.loc[0, feature + '_mean'] = np.nan
grouped[feature] = grouped[feature].astype('str')
grouped = pd.concat([grouped_null, grouped], axis=0)
grouped.reset_index(inplace=True, drop=True)
grouped[feature] = grouped[feature].astype('str').astype('category')
if is_train == 1:
return (cuts, grouped)
else:
return (grouped)
def draw_plots(input_data, feature, target_col, trend_correlation=None):
"""
Draws univariate dependence plots for a feature
    :param input_data: grouped data containing bins of feature and target mean.
:param feature: feature column name
:param target_col: target column
:param trend_correlation: correlation between train and test trends of feature wrt target
:return: Draws trend plots for feature
"""
trend_changes = get_trend_changes(grouped_data=input_data, feature=feature, target_col=target_col)
plt.figure(figsize=(12, 5))
ax1 = plt.subplot(1, 2, 1)
ax1.plot(input_data[target_col + '_mean'], marker='o')
ax1.set_xticks(np.arange(len(input_data)))
ax1.set_xticklabels((input_data[feature]).astype('str'))
plt.xticks(rotation=45)
ax1.set_xlabel('Bins of ' + feature)
ax1.set_ylabel('Average of ' + target_col)
comment = "Trend changed " + str(trend_changes) + " times"
if trend_correlation == 0:
comment = comment + '\n' + 'Correlation with train trend: NA'
elif trend_correlation != None:
comment = comment + '\n' + 'Correlation with train trend: ' + str(int(trend_correlation * 100)) + '%'
props = dict(boxstyle='round', facecolor='wheat', alpha=0.3)
ax1.text(0.05, 0.95, comment, fontsize=12, verticalalignment='top', bbox=props, transform=ax1.transAxes)
plt.title('Average of ' + target_col + ' wrt ' + feature)
ax2 = plt.subplot(1, 2, 2)
ax2.bar(np.arange(len(input_data)), input_data['Samples_in_bin'], alpha=0.5)
ax2.set_xticks(np.arange(len(input_data)))
ax2.set_xticklabels((input_data[feature]).astype('str'))
plt.xticks(rotation=45)
ax2.set_xlabel('Bins of ' + feature)
ax2.set_ylabel('Bin-wise sample size')
plt.title('Samples in bins of ' + feature)
plt.tight_layout()
plt.show()
def get_trend_changes(grouped_data, feature, target_col, threshold=0.03):
"""
Calculates number of times the trend of feature wrt target changed direction.
:param grouped_data: grouped dataset
:param feature: feature column name
:param target_col: target column
:param threshold: minimum % difference required to count as trend change
    :return: number of trend changes for the feature
"""
grouped_data = grouped_data.loc[grouped_data[feature] != 'Nulls', :].reset_index(drop=True)
target_diffs = grouped_data[target_col + '_mean'].diff()
target_diffs = target_diffs[~np.isnan(target_diffs)].reset_index(drop=True)
max_diff = grouped_data[target_col + '_mean'].max() - grouped_data[target_col + '_mean'].min()
target_diffs_mod = target_diffs.fillna(0).abs()
low_change = target_diffs_mod < threshold * max_diff
target_diffs_norm = target_diffs.divide(target_diffs_mod)
target_diffs_norm[low_change] = 0
target_diffs_norm = target_diffs_norm[target_diffs_norm != 0]
target_diffs_lvl2 = target_diffs_norm.diff()
changes = target_diffs_lvl2.fillna(0).abs() / 2
tot_trend_changes = int(changes.sum()) if ~np.isnan(changes.sum()) else 0
return (tot_trend_changes)
def get_trend_correlation(grouped, grouped_test, feature, target_col):
"""
Calculates correlation between train and test trend of feature wrt target.
:param grouped: train grouped data
:param grouped_test: test grouped data
:param feature: feature column name
:param target_col: target column name
:return: trend correlation between train and test
"""
grouped = grouped[grouped[feature] != 'Nulls'].reset_index(drop=True)
grouped_test = grouped_test[grouped_test[feature] != 'Nulls'].reset_index(drop=True)
if grouped_test.loc[0, feature] != grouped.loc[0, feature]:
grouped_test[feature] = grouped_test[feature].cat.add_categories(grouped.loc[0, feature])
grouped_test.loc[0, feature] = grouped.loc[0, feature]
grouped_test_train = grouped.merge(grouped_test[[feature, target_col + '_mean']], on=feature, how='left',
suffixes=('', '_test'))
nan_rows = pd.isnull(grouped_test_train[target_col + '_mean']) | pd.isnull(
grouped_test_train[target_col + '_mean_test'])
grouped_test_train = grouped_test_train.loc[~nan_rows, :]
if len(grouped_test_train) > 1:
trend_correlation = np.corrcoef(grouped_test_train[target_col + '_mean'],
grouped_test_train[target_col + '_mean_test'])[0, 1]
else:
trend_correlation = 0
print("Only one bin created for " + feature + ". Correlation can't be calculated")
return (trend_correlation)
def univariate_plotter(feature, data, target_col, bins=10, data_test=0):
"""
    Calls the draw-plot function and prints the headers and separators around the plots
:param feature: feature column name
:param data: dataframe containing features and target columns
:param target_col: target column name
:param bins: number of bins to be created from continuous feature
:param data_test: test data which has to be compared with input data for correlation
:return: grouped data if only train passed, else (grouped train data, grouped test data)
"""
print(' {:^100} '.format('Plots for ' + feature))
if data[feature].dtype == 'O':
print('Categorical feature not supported')
else:
cuts, grouped = get_grouped_data(input_data=data, feature=feature, target_col=target_col, bins=bins)
has_test = type(data_test) == pd.core.frame.DataFrame
if has_test:
grouped_test = get_grouped_data(input_data=data_test.reset_index(drop=True), feature=feature,
target_col=target_col, bins=bins, cuts=cuts)
trend_corr = get_trend_correlation(grouped, grouped_test, feature, target_col)
print(' {:^100} '.format('Train data plots'))
draw_plots(input_data=grouped, feature=feature, target_col=target_col)
print(' {:^100} '.format('Test data plots'))
draw_plots(input_data=grouped_test, feature=feature, target_col=target_col, trend_correlation=trend_corr)
else:
draw_plots(input_data=grouped, feature=feature, target_col=target_col)
print(
'--------------------------------------------------------------------------------------------------------------')
print('\n')
if has_test:
return (grouped, grouped_test)
else:
return (grouped)
def get_univariate_plots(data, target_col, features_list=0, bins=10, data_test=0):
"""
Creates univariate dependence plots for features in the dataset
:param data: dataframe containing features and target columns
:param target_col: target column name
:param features_list: by default creates plots for all features. If list passed, creates plots of only those features.
:param bins: number of bins to be created from continuous feature
:param data_test: test data which has to be compared with input data for correlation
:return: Draws univariate plots for all columns in data
"""
if type(features_list) == int:
features_list = list(data.columns)
features_list.remove(target_col)
for cols in features_list:
if cols != target_col and data[cols].dtype == 'O':
print(cols + ' is categorical. Categorical features not supported yet.')
elif cols != target_col and data[cols].dtype != 'O':
univariate_plotter(feature=cols, data=data, target_col=target_col, bins=bins, data_test=data_test)
def get_trend_stats(data, target_col, features_list=0, bins=10, data_test=0):
"""
Calculates trend changes and correlation between train/test for list of features
:param data: dataframe containing features and target columns
:param target_col: target column name
:param features_list: by default creates plots for all features. If list passed, creates plots of only those features.
:param bins: number of bins to be created from continuous feature
:param data_test: test data which has to be compared with input data for correlation
:return: dataframe with trend changes and trend correlation (if test data passed)
"""
if type(features_list) == int:
features_list = list(data.columns)
features_list.remove(target_col)
stats_all = []
has_test = type(data_test) == pd.core.frame.DataFrame
ignored = []
for feature in features_list:
if data[feature].dtype == 'O' or feature == target_col:
ignored.append(feature)
else:
cuts, grouped = get_grouped_data(input_data=data, feature=feature, target_col=target_col, bins=bins)
trend_changes = get_trend_changes(grouped_data=grouped, feature=feature, target_col=target_col)
if has_test:
grouped_test = get_grouped_data(input_data=data_test.reset_index(drop=True), feature=feature,
target_col=target_col, bins=bins, cuts=cuts)
trend_corr = get_trend_correlation(grouped, grouped_test, feature, target_col)
trend_changes_test = get_trend_changes(grouped_data=grouped_test, feature=feature,
target_col=target_col)
stats = [feature, trend_changes, trend_changes_test, trend_corr]
else:
stats = [feature, trend_changes]
stats_all.append(stats)
stats_all_df = pd.DataFrame(stats_all)
stats_all_df.columns = ['Feature', 'Trend_changes'] if has_test == False else ['Feature', 'Trend_changes',
'Trend_changes_test',
'Trend_correlation']
if len(ignored) > 0:
print('Categorical features ' + str(ignored) + ' ignored. Categorical features not supported yet.')
print('Returning stats for all numeric features')
return (stats_all_df)
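# --- Editor's note (illustrative usage sketch) ---
# Typical calls to the two public entry points above, assuming the package
# re-exports them from featexp.base; column names below are placeholders.
#
# from featexp import get_univariate_plots, get_trend_stats
# get_univariate_plots(data=train_df, target_col='target',
#                      features_list=['feature_1'], bins=10, data_test=test_df)
# stats = get_trend_stats(data=train_df, target_col='target', data_test=test_df)
# stats.sort_values('Trend_correlation')   # low correlation hints at noisy features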
| 49.758364
| 125
| 0.665446
|
7952ccc66a9215c56e45a434eec475746810d3ac
| 1,033
|
py
|
Python
|
setup.py
|
krzpiesiewicz/timeseries-pytorch
|
1e543caf9ea3918fc65fa9d715a75273be99c8ec
|
[
"MIT"
] | null | null | null |
setup.py
|
krzpiesiewicz/timeseries-pytorch
|
1e543caf9ea3918fc65fa9d715a75273be99c8ec
|
[
"MIT"
] | null | null | null |
setup.py
|
krzpiesiewicz/timeseries-pytorch
|
1e543caf9ea3918fc65fa9d715a75273be99c8ec
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="timeseries-pytorch",
version="0.0.1",
author="Krzysztof Piesiewicz",
author_email="krz.piesiewicz@gmail.com",
description="A pytorch extension for timeseries package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/krzpiesiewicz/timeseries-pytorch",
packages=setuptools.find_packages(exclude=['tests', 'examples']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
dependency_links=[
"https://github.com/krzpiesiewicz/pytorch-fit#egg=pytorch_fit",
"https://github.com/krzpiesiewicz/timeseries#egg=timeseries",
],
install_requires=[
"pandas>=1.0.5",
"numpy~=1.19.0",
],
    tests_require=["pytest>=6.2.0"],
python_requires='>=3.6',
)
| 32.28125
| 71
| 0.659245
|
7952ceaaed122da5fb26882fa23ff22ff2e25b08
| 4,281
|
py
|
Python
|
Gaurav/Assignment 7/faster thread.py
|
Gaurav3963/AGS-intern-files
|
ee4db12755cf680c0575b3c1a6a6146ee415743a
|
[
"MIT"
] | null | null | null |
Gaurav/Assignment 7/faster thread.py
|
Gaurav3963/AGS-intern-files
|
ee4db12755cf680c0575b3c1a6a6146ee415743a
|
[
"MIT"
] | null | null | null |
Gaurav/Assignment 7/faster thread.py
|
Gaurav3963/AGS-intern-files
|
ee4db12755cf680c0575b3c1a6a6146ee415743a
|
[
"MIT"
] | 1
|
2021-07-26T05:45:53.000Z
|
2021-07-26T05:45:53.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 16:07:39 2021
@author: patil
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 17:47:33 2021
@author: patil
"""
import pyodbc as db
import pandas as pd
import time as t
import _thread
conn = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')
c = conn.cursor()
def checktable(table_name):
c = conn.cursor()
try:
#c.execute("SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = N'"+table_name+"'")
c.execute("SELECT * from "+table_name)
except:
        print("Table doesn't exist.\n\n")
new_table = conn.cursor()
new_table.execute("CREATE TABLE "+table_name+"(NAME varchar(50),ID bigint,Price float,Discount float);");
new_table.commit()
new_table.close()
finally:
c.close()
#checktable("Table_1")
#checktable("Table_2")
#checktable("Table_3")
def executeNew1(dataset) :
con = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')
print("Thread 1 starting time : ",t.time())
data1 = pd.read_csv("C:\Office\AGS - Internship\AGS-intern-files\Gaurav\Assignment 6\\"+dataset+".csv")
cursor = con.cursor()
old = t.time()
for row in data1.itertuples():
b = str(row.ID)
if(b[0]=='4'):
cursor.execute("INSERT into Table_1 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.02)
elif(b[0]=='5'):
cursor.execute("INSERT into Table_2 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.03)
else:
cursor.execute("INSERT into Table_3 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.04)
cursor.commit()
cursor.close()
required = t.time()-old
'''
print("\n\nTable 1")
sql_query = pd.read_sql_query('select * from Table_1',conn)
print(sql_query)
print("\n\nTable 2")
sql_query = pd.read_sql_query('select * from Table_2',conn)
print(sql_query)
print("\n\nTable 3")
sql_query = pd.read_sql_query('select * from Table_3',conn)
print(sql_query)
'''
print("\n\nTime Required for Thread 1 : ",required)
con.close()
def executeNew2(dataset) :
conn = db.connect('Driver={SQL Server};''Server=DESKTOP-VI5MRAI\GAURAVPATIL;''Database=sample;''Trusted_Connection=yes;')
print("Thread 2 starting time : ",t.time())
data = pd.read_csv("C:\Office\AGS - Internship\AGS-intern-files\Gaurav\Assignment 6\\"+dataset+".csv")
curso = conn.cursor()
old = t.time()
for row in data.itertuples():
b = str(row.ID)
if(b[0]=='4'):
curso.execute("INSERT into Table_1 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.02)
elif(b[0]=='5'):
curso.execute("INSERT into Table_2 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.03)
else:
curso.execute("INSERT into Table_3 values(?,?,?,?)",row.NAME,row.ID,row.Total_Amount,row.Total_Amount*0.04)
curso.commit()
curso.close()
required = t.time()-old
'''
print("\n\nTable 1")
sql_query = pd.read_sql_query('select * from Table_1',conn)
print(sql_query)
print("\n\nTable 2")
sql_query = pd.read_sql_query('select * from Table_2',conn)
print(sql_query)
print("\n\nTable 3")
sql_query = pd.read_sql_query('select * from Table_3',conn)
print(sql_query)
'''
print("\n\nTime Required for Thread 2: ",required)
conn.close()
e = t.time()
#t1 = td.Thread(target=executeNew1("Book1"))
#t2 = td.Thread(target=executeNew2("Book2"))
_thread.start_new_thread( executeNew1,("Book1",) )
_thread.start_new_thread( executeNew2,("Book2",) )
#p1 = multiprocessing.Process(target=executeNew1("Book1"))
#p2 = multiprocessing.Process(target=executeNew2, args=("Book2"))
# starting thread 1
#t1.start()
# starting thread 2
#t2.start()
# wait until thread 1 is completely executed
#t1.join()
# wait until thread 2 is completely executed
#t2.join()
print("time needed is ",t.time()-e)
conn.close()
# both threads completely executed
print("Done!")
| 32.431818
| 125
| 0.636066
|
7952cefda21df3ff6c858300dc512504405e1924
| 40,265
|
py
|
Python
|
ui/Ui_UpSite.py
|
osDanielLee/UpSite
|
99da7c321dde2df52c2b886263541b30dd998f67
|
[
"BSD-3-Clause"
] | 1
|
2016-11-25T11:10:44.000Z
|
2016-11-25T11:10:44.000Z
|
ui/Ui_UpSite.py
|
osDanielLee/UpSite
|
99da7c321dde2df52c2b886263541b30dd998f67
|
[
"BSD-3-Clause"
] | null | null | null |
ui/Ui_UpSite.py
|
osDanielLee/UpSite
|
99da7c321dde2df52c2b886263541b30dd998f67
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\workspace\UpSite\ui\UpSite.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CTH(object):
def setupUi(self, CTH):
CTH.setObjectName(_fromUtf8("CTH"))
CTH.resize(700, 580)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(CTH.sizePolicy().hasHeightForWidth())
CTH.setSizePolicy(sizePolicy)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/ICO/bug.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
CTH.setWindowIcon(icon)
self.centralWidget = QtGui.QWidget(CTH)
self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
self.formLayout_2 = QtGui.QFormLayout(self.centralWidget)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.tabWidget = QtGui.QTabWidget(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.tabWidget.setAutoFillBackground(False)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.baseTab = QtGui.QWidget()
self.baseTab.setObjectName(_fromUtf8("baseTab"))
self.gridLayout_8 = QtGui.QGridLayout(self.baseTab)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.widget_2 = QtGui.QWidget(self.baseTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
self.widget_2.setSizePolicy(sizePolicy)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.verticalLayout = QtGui.QVBoxLayout(self.widget_2)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.basicParaWidget = QtGui.QWidget(self.widget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.basicParaWidget.sizePolicy().hasHeightForWidth())
self.basicParaWidget.setSizePolicy(sizePolicy)
self.basicParaWidget.setObjectName(_fromUtf8("basicParaWidget"))
self.gridLayout = QtGui.QGridLayout(self.basicParaWidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.updatePara = QtGui.QPushButton(self.basicParaWidget)
self.updatePara.setObjectName(_fromUtf8("updatePara"))
self.gridLayout.addWidget(self.updatePara, 7, 1, 1, 1)
self.label_7 = QtGui.QLabel(self.basicParaWidget)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 0, 0, 1, 1)
self.widget_7 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_7.sizePolicy().hasHeightForWidth())
self.widget_7.setSizePolicy(sizePolicy)
self.widget_7.setObjectName(_fromUtf8("widget_7"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget_7)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.num = QtGui.QLineEdit(self.widget_7)
self.num.setInputMethodHints(QtCore.Qt.ImhDigitsOnly)
self.num.setObjectName(_fromUtf8("num"))
self.horizontalLayout_2.addWidget(self.num)
self.gridLayout.addWidget(self.widget_7, 0, 1, 1, 1)
self.widget_6 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_6.sizePolicy().hasHeightForWidth())
self.widget_6.setSizePolicy(sizePolicy)
self.widget_6.setObjectName(_fromUtf8("widget_6"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.widget_6)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.name = QtGui.QLineEdit(self.widget_6)
self.name.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
self.name.setInputMask(_fromUtf8(""))
self.name.setMaxLength(32767)
self.name.setObjectName(_fromUtf8("name"))
self.verticalLayout_6.addWidget(self.name)
self.gridLayout.addWidget(self.widget_6, 2, 1, 1, 1)
self.widget_8 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_8.sizePolicy().hasHeightForWidth())
self.widget_8.setSizePolicy(sizePolicy)
self.widget_8.setObjectName(_fromUtf8("widget_8"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.widget_8)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.alarmValue = QtGui.QLineEdit(self.widget_8)
self.alarmValue.setInputMethodHints(QtCore.Qt.ImhDigitsOnly)
self.alarmValue.setObjectName(_fromUtf8("alarmValue"))
self.horizontalLayout_3.addWidget(self.alarmValue)
self.gridLayout.addWidget(self.widget_8, 5, 1, 1, 1)
self.widget_4 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_4.sizePolicy().hasHeightForWidth())
self.widget_4.setSizePolicy(sizePolicy)
self.widget_4.setObjectName(_fromUtf8("widget_4"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.widget_4)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.recordTime = QtGui.QLineEdit(self.widget_4)
self.recordTime.setInputMethodHints(QtCore.Qt.ImhDigitsOnly)
self.recordTime.setObjectName(_fromUtf8("recordTime"))
self.verticalLayout_5.addWidget(self.recordTime)
self.gridLayout.addWidget(self.widget_4, 3, 1, 1, 1)
self.widget_10 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_10.sizePolicy().hasHeightForWidth())
self.widget_10.setSizePolicy(sizePolicy)
self.widget_10.setObjectName(_fromUtf8("widget_10"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.widget_10)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.handComboBox = QtGui.QComboBox(self.widget_10)
self.handComboBox.setObjectName(_fromUtf8("handComboBox"))
self.handComboBox.addItem(_fromUtf8(""))
self.handComboBox.addItem(_fromUtf8(""))
self.horizontalLayout_5.addWidget(self.handComboBox)
self.gridLayout.addWidget(self.widget_10, 6, 1, 1, 1)
self.widget_5 = QtGui.QWidget(self.basicParaWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_5.sizePolicy().hasHeightForWidth())
self.widget_5.setSizePolicy(sizePolicy)
self.widget_5.setObjectName(_fromUtf8("widget_5"))
self.horizontalLayout = QtGui.QHBoxLayout(self.widget_5)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.sampleInterval = QtGui.QLineEdit(self.widget_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sampleInterval.sizePolicy().hasHeightForWidth())
self.sampleInterval.setSizePolicy(sizePolicy)
self.sampleInterval.setInputMethodHints(QtCore.Qt.ImhDigitsOnly)
self.sampleInterval.setObjectName(_fromUtf8("sampleInterval"))
self.horizontalLayout.addWidget(self.sampleInterval)
self.sampleInvBox = QtGui.QComboBox(self.widget_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sampleInvBox.sizePolicy().hasHeightForWidth())
self.sampleInvBox.setSizePolicy(sizePolicy)
self.sampleInvBox.setObjectName(_fromUtf8("sampleInvBox"))
self.sampleInvBox.addItem(_fromUtf8(""))
self.sampleInvBox.addItem(_fromUtf8(""))
self.sampleInvBox.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.sampleInvBox)
self.gridLayout.addWidget(self.widget_5, 4, 1, 1, 1)
self.refreshPara = QtGui.QPushButton(self.basicParaWidget)
self.refreshPara.setObjectName(_fromUtf8("refreshPara"))
self.gridLayout.addWidget(self.refreshPara, 9, 1, 1, 1)
self.calibrationClockButton = QtGui.QPushButton(self.basicParaWidget)
self.calibrationClockButton.setObjectName(_fromUtf8("calibrationClockButton"))
self.gridLayout.addWidget(self.calibrationClockButton, 8, 1, 1, 1)
self.label_12 = QtGui.QLabel(self.basicParaWidget)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout.addWidget(self.label_12, 5, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.basicParaWidget)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout.addWidget(self.label_14, 6, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.basicParaWidget)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 4, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.basicParaWidget)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout.addWidget(self.label_10, 3, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.basicParaWidget)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 2, 0, 1, 1)
self.stopRecord = QtGui.QPushButton(self.basicParaWidget)
self.stopRecord.setObjectName(_fromUtf8("stopRecord"))
self.gridLayout.addWidget(self.stopRecord, 10, 1, 1, 1)
self.verticalLayout.addWidget(self.basicParaWidget)
self.gridLayout_8.addWidget(self.widget_2, 0, 1, 2, 1)
self.displayBasicInfoWidget = QtGui.QWidget(self.baseTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.displayBasicInfoWidget.sizePolicy().hasHeightForWidth())
self.displayBasicInfoWidget.setSizePolicy(sizePolicy)
self.displayBasicInfoWidget.setObjectName(_fromUtf8("displayBasicInfoWidget"))
self.gridLayout_10 = QtGui.QGridLayout(self.displayBasicInfoWidget)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.label_9 = QtGui.QLabel(self.displayBasicInfoWidget)
font = QtGui.QFont()
font.setPointSize(12)
self.label_9.setFont(font)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_10.addWidget(self.label_9, 2, 0, 1, 1)
self.humidity = QtGui.QLCDNumber(self.displayBasicInfoWidget)
self.humidity.setObjectName(_fromUtf8("humidity"))
self.gridLayout_10.addWidget(self.humidity, 1, 1, 1, 1)
self.temperature_c = QtGui.QLCDNumber(self.displayBasicInfoWidget)
self.temperature_c.setObjectName(_fromUtf8("temperature_c"))
self.gridLayout_10.addWidget(self.temperature_c, 2, 1, 1, 1)
self.co2concentration = QtGui.QLCDNumber(self.displayBasicInfoWidget)
self.co2concentration.setObjectName(_fromUtf8("co2concentration"))
self.gridLayout_10.addWidget(self.co2concentration, 0, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.displayBasicInfoWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_10.addWidget(self.label_6, 3, 0, 1, 1)
self.label_5 = QtGui.QLabel(self.displayBasicInfoWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_10.addWidget(self.label_5, 1, 0, 1, 1)
self.temperature_f = QtGui.QLCDNumber(self.displayBasicInfoWidget)
self.temperature_f.setObjectName(_fromUtf8("temperature_f"))
self.gridLayout_10.addWidget(self.temperature_f, 3, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.displayBasicInfoWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_10.addWidget(self.label_4, 0, 0, 1, 1)
self.label_13 = QtGui.QLabel(self.displayBasicInfoWidget)
font = QtGui.QFont()
font.setPointSize(12)
self.label_13.setFont(font)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_10.addWidget(self.label_13, 4, 0, 1, 1)
self.pressure = QtGui.QLCDNumber(self.displayBasicInfoWidget)
self.pressure.setObjectName(_fromUtf8("pressure"))
self.gridLayout_10.addWidget(self.pressure, 4, 1, 1, 1)
self.gridLayout_8.addWidget(self.displayBasicInfoWidget, 0, 0, 2, 1)
self.tabWidget.addTab(self.baseTab, _fromUtf8(""))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_3 = QtGui.QGridLayout(self.tab)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.deleteFileButton = QtGui.QPushButton(self.tab)
self.deleteFileButton.setObjectName(_fromUtf8("deleteFileButton"))
self.gridLayout_3.addWidget(self.deleteFileButton, 1, 1, 1, 1)
self.refreshFiles = QtGui.QPushButton(self.tab)
self.refreshFiles.setObjectName(_fromUtf8("refreshFiles"))
self.gridLayout_3.addWidget(self.refreshFiles, 2, 1, 1, 1)
self.receiveFiles = QtGui.QPushButton(self.tab)
self.receiveFiles.setObjectName(_fromUtf8("receiveFiles"))
self.gridLayout_3.addWidget(self.receiveFiles, 0, 1, 1, 1)
self.fileOperTableWidget = QtGui.QTableWidget(self.tab)
self.fileOperTableWidget.setObjectName(_fromUtf8("fileOperTableWidget"))
self.fileOperTableWidget.setColumnCount(3)
self.fileOperTableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.fileOperTableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.fileOperTableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.fileOperTableWidget.setHorizontalHeaderItem(2, item)
self.gridLayout_3.addWidget(self.fileOperTableWidget, 0, 0, 7, 1)
self.tranferFileButton = QtGui.QPushButton(self.tab)
self.tranferFileButton.setObjectName(_fromUtf8("tranferFileButton"))
self.gridLayout_3.addWidget(self.tranferFileButton, 3, 1, 1, 1)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.recordTab = QtGui.QWidget()
self.recordTab.setObjectName(_fromUtf8("recordTab"))
self.gridLayout_4 = QtGui.QGridLayout(self.recordTab)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.recordrangeBox = QtGui.QGroupBox(self.recordTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.recordrangeBox.sizePolicy().hasHeightForWidth())
self.recordrangeBox.setSizePolicy(sizePolicy)
self.recordrangeBox.setObjectName(_fromUtf8("recordrangeBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.recordrangeBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.queryButton = QtGui.QPushButton(self.recordrangeBox)
self.queryButton.setObjectName(_fromUtf8("queryButton"))
self.gridLayout_2.addWidget(self.queryButton, 7, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.recordrangeBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 1, 2, 1, 1)
self.afterDateEdit = QtGui.QDateEdit(self.recordrangeBox)
self.afterDateEdit.setObjectName(_fromUtf8("afterDateEdit"))
self.gridLayout_2.addWidget(self.afterDateEdit, 6, 1, 1, 1)
self.beginDateEdit = QtGui.QDateEdit(self.recordrangeBox)
self.beginDateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2001, 1, 1), QtCore.QTime(0, 0, 0)))
self.beginDateEdit.setMaximumTime(QtCore.QTime(23, 59, 59))
self.beginDateEdit.setObjectName(_fromUtf8("beginDateEdit"))
self.gridLayout_2.addWidget(self.beginDateEdit, 5, 1, 1, 1)
self.radioButton = QtGui.QRadioButton(self.recordrangeBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radioButton.sizePolicy().hasHeightForWidth())
self.radioButton.setSizePolicy(sizePolicy)
self.radioButton.setObjectName(_fromUtf8("radioButton"))
self.gridLayout_2.addWidget(self.radioButton, 5, 0, 1, 1)
self.label = QtGui.QLabel(self.recordrangeBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 6, 0, 1, 1)
self.alldateRadioButton = QtGui.QRadioButton(self.recordrangeBox)
self.alldateRadioButton.setChecked(True)
self.alldateRadioButton.setObjectName(_fromUtf8("alldateRadioButton"))
self.gridLayout_2.addWidget(self.alldateRadioButton, 0, 0, 1, 1)
self.beforedayRadioButton = QtGui.QRadioButton(self.recordrangeBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.beforedayRadioButton.sizePolicy().hasHeightForWidth())
self.beforedayRadioButton.setSizePolicy(sizePolicy)
self.beforedayRadioButton.setObjectName(_fromUtf8("beforedayRadioButton"))
self.gridLayout_2.addWidget(self.beforedayRadioButton, 1, 0, 1, 1)
self.beforeMonthRadioButton = QtGui.QRadioButton(self.recordrangeBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.beforeMonthRadioButton.sizePolicy().hasHeightForWidth())
self.beforeMonthRadioButton.setSizePolicy(sizePolicy)
self.beforeMonthRadioButton.setObjectName(_fromUtf8("beforeMonthRadioButton"))
self.gridLayout_2.addWidget(self.beforeMonthRadioButton, 3, 0, 1, 1)
self.beforeMonthBox = QtGui.QSpinBox(self.recordrangeBox)
self.beforeMonthBox.setObjectName(_fromUtf8("beforeMonthBox"))
self.gridLayout_2.addWidget(self.beforeMonthBox, 3, 1, 1, 1)
self.beforeDayBox = QtGui.QSpinBox(self.recordrangeBox)
self.beforeDayBox.setObjectName(_fromUtf8("beforeDayBox"))
self.gridLayout_2.addWidget(self.beforeDayBox, 1, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.recordrangeBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 3, 2, 1, 1)
self.gridLayout_4.addWidget(self.recordrangeBox, 0, 0, 1, 1)
self.widget_3 = QtGui.QWidget(self.recordTab)
self.widget_3.setObjectName(_fromUtf8("widget_3"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.widget_3)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.displayDetailInfoTableWidget = QtGui.QTableWidget(self.widget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.displayDetailInfoTableWidget.sizePolicy().hasHeightForWidth())
self.displayDetailInfoTableWidget.setSizePolicy(sizePolicy)
self.displayDetailInfoTableWidget.setObjectName(_fromUtf8("displayDetailInfoTableWidget"))
self.displayDetailInfoTableWidget.setColumnCount(6)
self.displayDetailInfoTableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.displayDetailInfoTableWidget.setHorizontalHeaderItem(5, item)
self.verticalLayout_2.addWidget(self.displayDetailInfoTableWidget)
self.gridLayout_4.addWidget(self.widget_3, 0, 1, 1, 1)
self.fileOperWidget = QtGui.QWidget(self.recordTab)
self.fileOperWidget.setObjectName(_fromUtf8("fileOperWidget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.fileOperWidget)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.fileOperBox = QtGui.QGroupBox(self.fileOperWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.fileOperBox.sizePolicy().hasHeightForWidth())
self.fileOperBox.setSizePolicy(sizePolicy)
self.fileOperBox.setObjectName(_fromUtf8("fileOperBox"))
self.gridLayout_6 = QtGui.QGridLayout(self.fileOperBox)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.dirTreeView = QtGui.QTreeView(self.fileOperBox)
self.dirTreeView.setObjectName(_fromUtf8("dirTreeView"))
self.gridLayout_6.addWidget(self.dirTreeView, 0, 0, 1, 2)
self.loadinFileButton = QtGui.QPushButton(self.fileOperBox)
self.loadinFileButton.setObjectName(_fromUtf8("loadinFileButton"))
self.gridLayout_6.addWidget(self.loadinFileButton, 1, 1, 1, 1)
self.verticalLayout_4.addWidget(self.fileOperBox)
self.gridLayout_4.addWidget(self.fileOperWidget, 2, 0, 1, 1)
self.drawCurveWidget = QtGui.QWidget(self.recordTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.drawCurveWidget.sizePolicy().hasHeightForWidth())
self.drawCurveWidget.setSizePolicy(sizePolicy)
self.drawCurveWidget.setObjectName(_fromUtf8("drawCurveWidget"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.drawCurveWidget)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.drawCurveLayout = QtGui.QVBoxLayout()
self.drawCurveLayout.setObjectName(_fromUtf8("drawCurveLayout"))
self.horizontalLayout_6.addLayout(self.drawCurveLayout)
self.gridLayout_4.addWidget(self.drawCurveWidget, 1, 1, 2, 1)
self.groupBox = QtGui.QGroupBox(self.recordTab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_5 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.tempCheckBox = QtGui.QCheckBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tempCheckBox.sizePolicy().hasHeightForWidth())
self.tempCheckBox.setSizePolicy(sizePolicy)
self.tempCheckBox.setChecked(True)
self.tempCheckBox.setObjectName(_fromUtf8("tempCheckBox"))
self.gridLayout_5.addWidget(self.tempCheckBox, 3, 0, 1, 1)
self.co2CheckBox = QtGui.QCheckBox(self.groupBox)
self.co2CheckBox.setChecked(True)
self.co2CheckBox.setObjectName(_fromUtf8("co2CheckBox"))
self.gridLayout_5.addWidget(self.co2CheckBox, 0, 0, 1, 1)
self.humiCheckBox = QtGui.QCheckBox(self.groupBox)
self.humiCheckBox.setChecked(True)
self.humiCheckBox.setObjectName(_fromUtf8("humiCheckBox"))
self.gridLayout_5.addWidget(self.humiCheckBox, 4, 0, 1, 1)
self.co2Line = QtGui.QLabel(self.groupBox)
self.co2Line.setText(_fromUtf8(""))
self.co2Line.setObjectName(_fromUtf8("co2Line"))
self.gridLayout_5.addWidget(self.co2Line, 0, 1, 1, 1)
self.tempLine = QtGui.QLabel(self.groupBox)
self.tempLine.setText(_fromUtf8(""))
self.tempLine.setObjectName(_fromUtf8("tempLine"))
self.gridLayout_5.addWidget(self.tempLine, 3, 1, 1, 1)
self.humLine = QtGui.QLabel(self.groupBox)
self.humLine.setText(_fromUtf8(""))
self.humLine.setObjectName(_fromUtf8("humLine"))
self.gridLayout_5.addWidget(self.humLine, 4, 1, 1, 1)
self.atmospheriCheckBox = QtGui.QCheckBox(self.groupBox)
self.atmospheriCheckBox.setChecked(True)
self.atmospheriCheckBox.setObjectName(_fromUtf8("atmospheriCheckBox"))
self.gridLayout_5.addWidget(self.atmospheriCheckBox, 5, 0, 1, 1)
self.batteryCheckBox = QtGui.QCheckBox(self.groupBox)
self.batteryCheckBox.setChecked(True)
self.batteryCheckBox.setObjectName(_fromUtf8("batteryCheckBox"))
self.gridLayout_5.addWidget(self.batteryCheckBox, 6, 0, 1, 1)
self.atmoLine = QtGui.QLabel(self.groupBox)
self.atmoLine.setText(_fromUtf8(""))
self.atmoLine.setObjectName(_fromUtf8("atmoLine"))
self.gridLayout_5.addWidget(self.atmoLine, 5, 1, 1, 1)
self.battLine = QtGui.QLabel(self.groupBox)
self.battLine.setText(_fromUtf8(""))
self.battLine.setObjectName(_fromUtf8("battLine"))
self.gridLayout_5.addWidget(self.battLine, 6, 1, 1, 1)
self.gridLayout_4.addWidget(self.groupBox, 1, 0, 1, 1)
self.tabWidget.addTab(self.recordTab, _fromUtf8(""))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.tabWidget)
CTH.setCentralWidget(self.centralWidget)
self.statusBar = QtGui.QStatusBar(CTH)
self.statusBar.setToolTip(_fromUtf8(""))
self.statusBar.setStatusTip(_fromUtf8(""))
self.statusBar.setAutoFillBackground(True)
self.statusBar.setObjectName(_fromUtf8("statusBar"))
CTH.setStatusBar(self.statusBar)
self.menuBar = QtGui.QMenuBar(CTH)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 700, 23))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
self.fileMenu = QtGui.QMenu(self.menuBar)
self.fileMenu.setObjectName(_fromUtf8("fileMenu"))
self.helpMenu = QtGui.QMenu(self.menuBar)
self.helpMenu.setObjectName(_fromUtf8("helpMenu"))
CTH.setMenuBar(self.menuBar)
self.openAct = QtGui.QAction(CTH)
self.openAct.setObjectName(_fromUtf8("openAct"))
self.loadAct = QtGui.QAction(CTH)
self.loadAct.setObjectName(_fromUtf8("loadAct"))
self.exitAct = QtGui.QAction(CTH)
self.exitAct.setObjectName(_fromUtf8("exitAct"))
self.action_4 = QtGui.QAction(CTH)
self.action_4.setObjectName(_fromUtf8("action_4"))
self.setparaAct = QtGui.QAction(CTH)
self.setparaAct.setObjectName(_fromUtf8("setparaAct"))
self.refreshAct = QtGui.QAction(CTH)
self.refreshAct.setObjectName(_fromUtf8("refreshAct"))
self.lookpicAct = QtGui.QAction(CTH)
self.lookpicAct.setObjectName(_fromUtf8("lookpicAct"))
self.helpAct = QtGui.QAction(CTH)
self.helpAct.setObjectName(_fromUtf8("helpAct"))
self.aboutAct = QtGui.QAction(CTH)
self.aboutAct.setObjectName(_fromUtf8("aboutAct"))
self.actionSave = QtGui.QAction(CTH)
self.actionSave.setCheckable(False)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/image/16x16/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon1)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionHelp = QtGui.QAction(CTH)
self.actionHelp.setCheckable(False)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/image/16x16/info.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionHelp.setIcon(icon2)
self.actionHelp.setObjectName(_fromUtf8("actionHelp"))
self.actionExit = QtGui.QAction(CTH)
self.actionExit.setCheckable(False)
self.actionExit.setChecked(False)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/image/16x16/close-tab.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExit.setIcon(icon3)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionRefresh = QtGui.QAction(CTH)
self.actionRefresh.setCheckable(False)
self.actionRefresh.setChecked(False)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/image/16x16/refresh.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionRefresh.setIcon(icon4)
self.actionRefresh.setObjectName(_fromUtf8("actionRefresh"))
self.actionOpen = QtGui.QAction(CTH)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/image/16x16/folder.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon5)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.fileMenu.addAction(self.exitAct)
self.helpMenu.addAction(self.helpAct)
self.menuBar.addAction(self.fileMenu.menuAction())
self.menuBar.addAction(self.helpMenu.menuAction())
self.retranslateUi(CTH)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.exitAct, QtCore.SIGNAL(_fromUtf8("triggered()")), CTH.close)
QtCore.QMetaObject.connectSlotsByName(CTH)
def retranslateUi(self, CTH):
CTH.setWindowTitle(_translate("CTH", "CTH", None))
self.updatePara.setText(_translate("CTH", "ๆดๆนๅๆฐ", None))
self.label_7.setText(_translate("CTH", "ไปชๅจ็ผๅท", None))
self.handComboBox.setItemText(0, _translate("CTH", "็ซๅณ", None))
self.handComboBox.setItemText(1, _translate("CTH", "ๆๅจ", None))
self.sampleInvBox.setItemText(0, _translate("CTH", "็ง", None))
self.sampleInvBox.setItemText(1, _translate("CTH", "ๅ", None))
self.sampleInvBox.setItemText(2, _translate("CTH", "ๆถ", None))
self.refreshPara.setText(_translate("CTH", "ๅทๆฐ", None))
self.calibrationClockButton.setText(_translate("CTH", "ๆ กๅๆถ้", None))
self.label_12.setText(_translate("CTH", "ไบๆฐงๅ็ขณๆฅ่ญฆๅผ", None))
self.label_14.setText(_translate("CTH", "็ซๅณ/ๆๅจ", None))
self.label_11.setText(_translate("CTH", "้ๆ ท้ด้", None))
self.label_10.setText(_translate("CTH", "่ฎฐๅฝๆฌกๆฐ", None))
self.label_8.setText(_translate("CTH", "ๆต้ๅ็งฐ", None))
self.stopRecord.setText(_translate("CTH", "ๅๆญข่ฎฐๅฝ", None))
self.label_9.setText(_translate("CTH", "ๆๆฐๅบฆ(โ)", None))
self.label_6.setText(_translate("CTH", "ๅๆฐๅบฆ(โ)", None))
self.label_5.setText(_translate("CTH", "ๆนฟๅบฆ(%)", None))
self.label_4.setText(_translate("CTH", "ไบๆฐงๅ็ขณๆตๅบฆ(PPM)", None))
self.label_13.setText(_translate("CTH", "ๅคงๆฐๅ(hPa)", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.baseTab), _translate("CTH", "ๅบๆฌไฟกๆฏ", None))
self.deleteFileButton.setText(_translate("CTH", "ๅ ้คๆไปถ", None))
self.refreshFiles.setText(_translate("CTH", "ๅทๆฐ", None))
self.receiveFiles.setText(_translate("CTH", "ๆฅๆถๆไปถ", None))
item = self.fileOperTableWidget.horizontalHeaderItem(0)
item.setText(_translate("CTH", "ๆไปถๅ", None))
item = self.fileOperTableWidget.horizontalHeaderItem(1)
item.setText(_translate("CTH", "ๆไปถๅคงๅฐ", None))
item = self.fileOperTableWidget.horizontalHeaderItem(2)
item.setText(_translate("CTH", "ๆดๆฐๆถ้ด", None))
self.tranferFileButton.setText(_translate("CTH", "่ฝฌๆขๆไปถ", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("CTH", "ๆไปถๆไฝ", None))
self.recordrangeBox.setTitle(_translate("CTH", "่ฎฐๅฝๆฅ่ฏข่ๅด", None))
self.queryButton.setText(_translate("CTH", "ๅผๅงๆฅ่ฏข", None))
self.label_2.setText(_translate("CTH", "ๅคฉ", None))
self.radioButton.setText(_translate("CTH", "ไป", None))
self.label.setText(_translate("CTH", " ๅฐ", None))
self.alldateRadioButton.setText(_translate("CTH", "ๆๆๆฅๆ", None))
self.beforedayRadioButton.setText(_translate("CTH", "ๅ", None))
self.beforeMonthRadioButton.setText(_translate("CTH", "ๅ", None))
self.label_3.setText(_translate("CTH", "ๆ", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(0)
item.setText(_translate("CTH", "ๆถ้ด", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(1)
item.setText(_translate("CTH", "ไบๆฐงๅ็ขณๆตๅบฆ", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(2)
item.setText(_translate("CTH", "ๆธฉๅบฆ", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(3)
item.setText(_translate("CTH", "ๆนฟๅบฆ", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(4)
item.setText(_translate("CTH", "ๅคงๆฐๅ", None))
item = self.displayDetailInfoTableWidget.horizontalHeaderItem(5)
item.setText(_translate("CTH", "็ตๆฑ ็ตๅ", None))
self.fileOperBox.setTitle(_translate("CTH", "ๆไปถ็ฎก็", None))
self.loadinFileButton.setText(_translate("CTH", "ๅฏผๅ
ฅๆไปถ", None))
self.groupBox.setTitle(_translate("CTH", "ๆฒ็บฟๅพ", None))
self.tempCheckBox.setText(_translate("CTH", "ๆธฉๅบฆ", None))
self.co2CheckBox.setText(_translate("CTH", "ไบๆฐงๅ็ขณๆตๅบฆ", None))
self.humiCheckBox.setText(_translate("CTH", "ๆนฟๅบฆ", None))
self.atmospheriCheckBox.setText(_translate("CTH", "ๅคงๆฐๅ", None))
self.batteryCheckBox.setText(_translate("CTH", "็ตๆฑ ็ตๅ", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.recordTab), _translate("CTH", "่ฎฐๅฝๆฅ่ฏข", None))
self.fileMenu.setTitle(_translate("CTH", "ๆไปถ", None))
self.helpMenu.setTitle(_translate("CTH", "ๅธฎๅฉ", None))
self.openAct.setText(_translate("CTH", "ๆๅผ", None))
self.loadAct.setText(_translate("CTH", "ๅฏผๅ
ฅ", None))
self.exitAct.setText(_translate("CTH", "้ๅบ", None))
self.action_4.setText(_translate("CTH", "่ฏปๅๅๆฐ", None))
self.setparaAct.setText(_translate("CTH", "่ฎพ็ฝฎๅๆฐ", None))
self.refreshAct.setText(_translate("CTH", "ๅทๆฐ", None))
self.lookpicAct.setText(_translate("CTH", "ๆฅ็ๆฒ็บฟๅพ", None))
self.helpAct.setText(_translate("CTH", "ๆไฝ่ฏดๆ", None))
self.aboutAct.setText(_translate("CTH", "ๅ
ณไบ", None))
self.actionSave.setText(_translate("CTH", "save", None))
self.actionSave.setToolTip(_translate("CTH", "save", None))
self.actionHelp.setText(_translate("CTH", "help", None))
self.actionHelp.setToolTip(_translate("CTH", "help", None))
self.actionExit.setText(_translate("CTH", "exit", None))
self.actionExit.setToolTip(_translate("CTH", "exit", None))
self.actionRefresh.setText(_translate("CTH", "refresh", None))
self.actionRefresh.setToolTip(_translate("CTH", "refresh", None))
self.actionOpen.setText(_translate("CTH", "open", None))
self.actionOpen.setToolTip(_translate("CTH", "open", None))
import UpSite_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
CTH = QtGui.QMainWindow()
ui = Ui_CTH()
ui.setupUi(CTH)
CTH.show()
sys.exit(app.exec_())
| 60.186846
| 118
| 0.701229
|
7952cf6aeeaddde1a6cb8780b0d0c59ade36800c
| 50,779
|
py
|
Python
|
napalm/nxos/nxos.py
|
TomCos/napalm
|
81045d7e3a095b8d125505718ffe88fe40378be8
|
[
"Apache-2.0"
] | null | null | null |
napalm/nxos/nxos.py
|
TomCos/napalm
|
81045d7e3a095b8d125505718ffe88fe40378be8
|
[
"Apache-2.0"
] | null | null | null |
napalm/nxos/nxos.py
|
TomCos/napalm
|
81045d7e3a095b8d125505718ffe88fe40378be8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import unicode_literals
# import stdlib
from builtins import super
import os
import re
import time
import tempfile
import uuid
from collections import defaultdict
# import third party lib
from requests.exceptions import ConnectionError
from netaddr import IPAddress
from netaddr.core import AddrFormatError
from netmiko import file_transfer
from nxapi_plumbing import Device as NXOSDevice
from nxapi_plumbing import NXAPIAuthError, NXAPIConnectionError, NXAPICommandError
# import NAPALM Base
import napalm.base.helpers
from napalm.base import NetworkDriver
from napalm.base.utils import py23_compat
from napalm.base.exceptions import ConnectionException
from napalm.base.exceptions import MergeConfigException
from napalm.base.exceptions import CommandErrorException
from napalm.base.exceptions import ReplaceConfigException
from napalm.base.netmiko_helpers import netmiko_args
import napalm.base.constants as c
def ensure_netmiko_conn(func):
"""Decorator that ensures Netmiko connection exists."""
def wrap_function(self, filename=None, config=None):
try:
netmiko_object = self._netmiko_device
if netmiko_object is None:
raise AttributeError()
except AttributeError:
device_type = c.NETMIKO_MAP[self.platform]
netmiko_optional_args = self.netmiko_optional_args
if "port" in netmiko_optional_args:
netmiko_optional_args["port"] = 22
self._netmiko_open(
device_type=device_type, netmiko_optional_args=netmiko_optional_args
)
func(self, filename=filename, config=config)
return wrap_function
class NXOSDriverBase(NetworkDriver):
"""Common code shared between nx-api and nxos_ssh."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
if optional_args is None:
optional_args = {}
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
self.replace = True
self.loaded = False
self.changed = False
self.merge_candidate = ""
self.candidate_cfg = "candidate_config.txt"
self.rollback_cfg = "rollback_config.txt"
self._dest_file_system = optional_args.pop("dest_file_system", "bootflash:")
self.netmiko_optional_args = netmiko_args(optional_args)
self.device = None
@ensure_netmiko_conn
def load_replace_candidate(self, filename=None, config=None):
if not filename and not config:
raise ReplaceConfigException(
"filename or config parameter must be provided."
)
if not filename:
tmp_file = self._create_tmp_file(config)
filename = tmp_file
else:
if not os.path.isfile(filename):
raise ReplaceConfigException("File {} not found".format(filename))
try:
transfer_result = file_transfer(
self._netmiko_device,
source_file=filename,
dest_file=self.candidate_cfg,
file_system=self._dest_file_system,
direction="put",
overwrite_file=True,
)
if not transfer_result["file_exists"]:
raise ValueError()
except Exception:
msg = (
"Could not transfer file. There was an error "
"during transfer. Please make sure remote "
"permissions are set."
)
raise ReplaceConfigException(msg)
self.replace = True
self.loaded = True
if config and os.path.isfile(tmp_file):
os.remove(tmp_file)
def load_merge_candidate(self, filename=None, config=None):
if not filename and not config:
raise MergeConfigException("filename or config param must be provided.")
self.merge_candidate += "\n" # insert one extra line
if filename is not None:
with open(filename, "r") as f:
self.merge_candidate += f.read()
else:
self.merge_candidate += config
self.replace = False
self.loaded = True
def _send_command(self, command, raw_text=False):
raise NotImplementedError
def _commit_merge(self):
try:
output = self._send_config(self.merge_candidate)
if output and "Invalid command" in output:
raise MergeConfigException("Error while applying config!")
except Exception as e:
self.changed = True
self.rollback()
raise MergeConfigException(str(e))
self.changed = True
# clear the merge buffer
self.merge_candidate = ""
def _get_merge_diff(self):
"""
The merge diff is not necessarily what needs to be loaded.
For example, under NTP, even though the 'ntp commit' command might be
already configured, it is mandatory to be sent,
otherwise it won't take the new configuration - see:
https://github.com/napalm-automation/napalm-nxos/issues/59
Therefore this method returns the real diff (but not necessarily what is
being sent by load_merge_candidate()).
"""
diff = []
running_config = self.get_config(retrieve="running")["running"]
running_lines = running_config.splitlines()
for line in self.merge_candidate.splitlines():
if line not in running_lines and line:
if line[0].strip() != "!":
diff.append(line)
return "\n".join(diff)
def _get_diff(self):
"""Get a diff between running config and a proposed file."""
diff = []
self._create_sot_file()
diff_out = self._send_command(
"show diff rollback-patch file {} file {}".format(
"sot_file", self.candidate_cfg
),
raw_text=True,
)
try:
diff_out = (
diff_out.split("Generating Rollback Patch")[1]
.replace("Rollback Patch is Empty", "")
.strip()
)
for line in diff_out.splitlines():
if line:
if line[0].strip() != "!" and line[0].strip() != ".":
diff.append(line.rstrip(" "))
except (AttributeError, KeyError):
raise ReplaceConfigException(
"Could not calculate diff. It's possible the given file doesn't exist."
)
return "\n".join(diff)
def compare_config(self):
if self.loaded:
if not self.replace:
return self._get_merge_diff()
diff = self._get_diff()
return diff
return ""
def commit_config(self, message=""):
if message:
raise NotImplementedError(
"Commit message not implemented for this platform"
)
if self.loaded:
# Create checkpoint from current running-config
self._save_to_checkpoint(self.rollback_cfg)
if self.replace:
self._load_cfg_from_checkpoint()
else:
self._commit_merge()
self._copy_run_start()
self.loaded = False
else:
raise ReplaceConfigException("No config loaded.")
def discard_config(self):
if self.loaded:
# clear the buffer
self.merge_candidate = ""
if self.loaded and self.replace:
self._delete_file(self.candidate_cfg)
self.loaded = False
def _create_sot_file(self):
"""Create Source of Truth file to compare."""
# Bug on NX-OS 6.2.16 where overwriting sot_file would take an exceptionally long time
# (over 12 minutes); so just delete the sot_file
try:
self._delete_file(filename="sot_file")
except Exception:
pass
commands = [
"terminal dont-ask",
"checkpoint file sot_file",
"no terminal dont-ask",
]
self._send_command_list(commands)
def ping(
self,
destination,
source=c.PING_SOURCE,
ttl=c.PING_TTL,
timeout=c.PING_TIMEOUT,
size=c.PING_SIZE,
count=c.PING_COUNT,
vrf=c.PING_VRF,
):
"""
Execute ping on the device and return a dictionary with the result.
The output dictionary has one of the following keys:
* success
* error
In case of success, the inner dictionary will have the following keys:
* probes_sent (int)
* packet_loss (int)
* rtt_min (float)
* rtt_max (float)
* rtt_avg (float)
* rtt_stddev (float)
* results (list)
'results' is a list of dictionaries with the following keys:
* ip_address (str)
* rtt (float)
"""
ping_dict = {}
version = ""
try:
version = "6" if IPAddress(destination).version == 6 else ""
except AddrFormatError:
# Allow use of DNS names
pass
command = "ping{version} {destination}".format(
version=version, destination=destination
)
command += " timeout {}".format(timeout)
command += " packet-size {}".format(size)
command += " count {}".format(count)
if source != "":
command += " source {}".format(source)
if vrf != "":
command += " vrf {}".format(vrf)
output = self._send_command(command, raw_text=True)
if "connect:" in output:
ping_dict["error"] = output
elif "PING" in output:
ping_dict["success"] = {
"probes_sent": 0,
"packet_loss": 0,
"rtt_min": 0.0,
"rtt_max": 0.0,
"rtt_avg": 0.0,
"rtt_stddev": 0.0,
"results": [],
}
results_array = []
for line in output.splitlines():
fields = line.split()
if "icmp" in line:
if "Unreachable" in line:
if "(" in fields[2]:
results_array.append(
{
"ip_address": py23_compat.text_type(
fields[2][1:-1]
),
"rtt": 0.0,
}
)
else:
results_array.append(
{
"ip_address": py23_compat.text_type(fields[1]),
"rtt": 0.0,
}
)
elif "truncated" in line:
if "(" in fields[4]:
results_array.append(
{
"ip_address": py23_compat.text_type(
fields[4][1:-2]
),
"rtt": 0.0,
}
)
else:
results_array.append(
{
"ip_address": py23_compat.text_type(fields[3][:-1]),
"rtt": 0.0,
}
)
elif fields[1] == "bytes":
if version == "6":
m = fields[5][5:]
else:
m = fields[6][5:]
results_array.append(
{
"ip_address": py23_compat.text_type(fields[3][:-1]),
"rtt": float(m),
}
)
elif "packets transmitted" in line:
ping_dict["success"]["probes_sent"] = int(fields[0])
ping_dict["success"]["packet_loss"] = int(fields[0]) - int(
fields[3]
)
elif "min/avg/max" in line:
m = fields[3].split("/")
ping_dict["success"].update(
{
"rtt_min": float(m[0]),
"rtt_avg": float(m[1]),
"rtt_max": float(m[2]),
}
)
ping_dict["success"].update({"results": results_array})
return ping_dict
def traceroute(
self,
destination,
source=c.TRACEROUTE_SOURCE,
ttl=c.TRACEROUTE_TTL,
timeout=c.TRACEROUTE_TIMEOUT,
vrf=c.TRACEROUTE_VRF,
):
_HOP_ENTRY_PROBE = [
r"\s+",
r"(", # beginning of host_name (ip_address) RTT group
r"(", # beginning of host_name (ip_address) group only
r"([a-zA-Z0-9\.:-]*)", # hostname
r"\s+",
r"\(?([a-fA-F0-9\.:][^\)]*)\)?" # IP Address between brackets
r")?", # end of host_name (ip_address) group only
# also hostname/ip are optional -- they may or may not be specified;
# if not specified, it means the current probe followed the same path as the previous one
r"\s+",
r"(\d+\.\d+)\s+ms", # RTT
r"|\*", # OR *, when non responsive hop
r")", # end of host_name (ip_address) RTT group
]
_HOP_ENTRY = [r"\s?", r"(\d+)"]  # optional space before the hop index, then the hop index itself
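# Illustrative hop line the combined regex (hop index plus three probe groups) is meant
# to match -- formatting assumed, not captured from a device:
#   " 1  gw.example.net (10.0.0.1)  1.021 ms  0.998 ms  1.104 ms"
# a non-responsive probe shows up as "*" in place of the "host (ip) rtt ms" group.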
traceroute_result = {}
timeout = 5 # seconds
probes = 3 # 3 probes/hop and this cannot be changed on NXOS!
version = ""
try:
version = "6" if IPAddress(destination).version == 6 else ""
except AddrFormatError:
# Allow use of DNS names
pass
if source:
source_opt = "source {source}".format(source=source)
command = "traceroute{version} {destination} {source_opt}".format(
version=version, destination=destination, source_opt=source_opt
)
else:
command = "traceroute{version} {destination}".format(
version=version, destination=destination
)
try:
traceroute_raw_output = self._send_command(command, raw_text=True)
except CommandErrorException:
return {
"error": "Cannot execute traceroute on the device: {}".format(command)
}
hop_regex = "".join(_HOP_ENTRY + _HOP_ENTRY_PROBE * probes)
traceroute_result["success"] = {}
if traceroute_raw_output:
for line in traceroute_raw_output.splitlines():
hop_search = re.search(hop_regex, line)
if not hop_search:
continue
hop_details = hop_search.groups()
hop_index = int(hop_details[0])
previous_probe_host_name = "*"
previous_probe_ip_address = "*"
traceroute_result["success"][hop_index] = {"probes": {}}
for probe_index in range(probes):
host_name = hop_details[3 + probe_index * 5]
ip_address_raw = hop_details[4 + probe_index * 5]
ip_address = napalm.base.helpers.convert(
napalm.base.helpers.ip, ip_address_raw, ip_address_raw
)
rtt = hop_details[5 + probe_index * 5]
if rtt:
rtt = float(rtt)
else:
rtt = timeout * 1000.0
if not host_name:
host_name = previous_probe_host_name
if not ip_address:
ip_address = previous_probe_ip_address
if hop_details[1 + probe_index * 5] == "*":
host_name = "*"
ip_address = "*"
traceroute_result["success"][hop_index]["probes"][
probe_index + 1
] = {
"host_name": py23_compat.text_type(host_name),
"ip_address": py23_compat.text_type(ip_address),
"rtt": rtt,
}
previous_probe_host_name = host_name
previous_probe_ip_address = ip_address
return traceroute_result
def _get_checkpoint_file(self):
filename = "temp_cp_file_from_napalm"
self._set_checkpoint(filename)
command = "show file {}".format(filename)
output = self._send_command(command, raw_text=True)
self._delete_file(filename)
return output
def _set_checkpoint(self, filename):
commands = [
"terminal dont-ask",
"checkpoint file {}".format(filename),
"no terminal dont-ask",
]
self._send_command_list(commands)
def _save_to_checkpoint(self, filename):
"""Save the current running config to the given file."""
commands = [
"terminal dont-ask",
"checkpoint file {}".format(filename),
"no terminal dont-ask",
]
self._send_command_list(commands)
def _delete_file(self, filename):
commands = [
"terminal dont-ask",
"delete {}".format(filename),
"no terminal dont-ask",
]
self._send_command_list(commands)
@staticmethod
def _create_tmp_file(config):
tmp_dir = tempfile.gettempdir()
rand_fname = py23_compat.text_type(uuid.uuid4())
filename = os.path.join(tmp_dir, rand_fname)
with open(filename, "wt") as fobj:
fobj.write(config)
return filename
def _disable_confirmation(self):
self._send_command_list(["terminal dont-ask"])
def get_config(self, retrieve="all"):
config = {"startup": "", "running": "", "candidate": ""} # default values
if retrieve.lower() in ("running", "all"):
command = "show running-config"
config["running"] = py23_compat.text_type(
self._send_command(command, raw_text=True)
)
if retrieve.lower() in ("startup", "all"):
command = "show startup-config"
config["startup"] = py23_compat.text_type(
self._send_command(command, raw_text=True)
)
return config
def get_lldp_neighbors(self):
"""IOS implementation of get_lldp_neighbors."""
lldp = {}
neighbors_detail = self.get_lldp_neighbors_detail()
for intf_name, entries in neighbors_detail.items():
lldp[intf_name] = []
for lldp_entry in entries:
hostname = lldp_entry["remote_system_name"]
# Match IOS behaviour of taking remote chassis ID
# When lacking a system name (in show lldp neighbors)
if hostname == "N/A":
hostname = lldp_entry["remote_chassis_id"]
lldp_dict = {"port": lldp_entry["remote_port"], "hostname": hostname}
lldp[intf_name].append(lldp_dict)
return lldp
def get_lldp_neighbors_detail(self, interface=""):
lldp = {}
lldp_interfaces = []
if interface:
command = "show lldp neighbors interface {} detail".format(interface)
else:
command = "show lldp neighbors detail"
lldp_entries = self._send_command(command, raw_text=True)
lldp_entries = py23_compat.text_type(lldp_entries)
lldp_entries = napalm.base.helpers.textfsm_extractor(
self, "show_lldp_neighbors_detail", lldp_entries
)
if len(lldp_entries) == 0:
return {}
for idx, lldp_entry in enumerate(lldp_entries):
local_intf = lldp_entry.pop("local_interface") or lldp_interfaces[idx]
# Convert any 'not advertised' to an empty string
for field in lldp_entry:
if "not advertised" in lldp_entry[field]:
lldp_entry[field] = ""
# Add field missing on IOS
lldp_entry["parent_interface"] = ""
# Translate the capability fields
lldp_entry[
"remote_system_capab"
] = napalm.base.helpers.transform_lldp_capab(
lldp_entry["remote_system_capab"]
)
lldp_entry[
"remote_system_enable_capab"
] = napalm.base.helpers.transform_lldp_capab(
lldp_entry["remote_system_enable_capab"]
)
# Turn the interfaces into their long version
local_intf = napalm.base.helpers.canonical_interface_name(local_intf)
lldp.setdefault(local_intf, [])
lldp[local_intf].append(lldp_entry)
return lldp
class NXOSDriver(NXOSDriverBase):
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
super().__init__(
hostname, username, password, timeout=timeout, optional_args=optional_args
)
if optional_args is None:
optional_args = {}
# nxos_protocol is there for backwards compatibility, transport is the preferred method
self.transport = optional_args.get(
"transport", optional_args.get("nxos_protocol", "https")
)
if self.transport == "https":
self.port = optional_args.get("port", 443)
elif self.transport == "http":
self.port = optional_args.get("port", 80)
self.ssl_verify = optional_args.get("ssl_verify", False)
self.platform = "nxos"
def open(self):
try:
self.device = NXOSDevice(
host=self.hostname,
username=self.username,
password=self.password,
timeout=self.timeout,
port=self.port,
transport=self.transport,
verify=self.ssl_verify,
api_format="jsonrpc",
)
self._send_command("show hostname")
except (NXAPIConnectionError, NXAPIAuthError):
# unable to open connection
raise ConnectionException("Cannot connect to {}".format(self.hostname))
def close(self):
self.device = None
def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.device.show(command, raw_text=raw_text)
def _send_command_list(self, commands):
return self.device.config_list(commands)
def _send_config(self, commands):
if isinstance(commands, py23_compat.string_types):
# Has to be a list comprehension and not a generator expression (a generator is not JSON serializable)
commands = [command for command in commands.splitlines() if command]
return self.device.config_list(commands)
@staticmethod
def _compute_timestamp(stupid_cisco_output):
"""
Some fields, such as `uptime`, are returned as: 23week(s) 3day(s)
This method will determine the epoch of the event.
e.g.: 23week(s) 3day(s) -> 1462248287
"""
if not stupid_cisco_output or stupid_cisco_output == "never":
return -1.0
if "(s)" in stupid_cisco_output:
pass
elif ":" in stupid_cisco_output:
stupid_cisco_output = stupid_cisco_output.replace(":", "hour(s) ", 1)
stupid_cisco_output = stupid_cisco_output.replace(":", "minute(s) ", 1)
stupid_cisco_output += "second(s)"
else:
stupid_cisco_output = stupid_cisco_output.replace("d", "day(s) ")
stupid_cisco_output = stupid_cisco_output.replace("h", "hour(s)")
things = {
"second(s)": {"weight": 1},
"minute(s)": {"weight": 60},
"hour(s)": {"weight": 3600},
"day(s)": {"weight": 24 * 3600},
"week(s)": {"weight": 7 * 24 * 3600},
"year(s)": {"weight": 365.25 * 24 * 3600},
}
things_keys = things.keys()
for part in stupid_cisco_output.split():
for key in things_keys:
if key in part:
things[key]["count"] = napalm.base.helpers.convert(
int, part.replace(key, ""), 0
)
delta = sum(
[det.get("count", 0) * det.get("weight") for det in things.values()]
)
return time.time() - delta
@staticmethod
def _get_table_rows(parent_table, table_name, row_name):
"""
Inconsistent behavior:
{'TABLE_intf': [{'ROW_intf': {
vs
{'TABLE_mac_address': {'ROW_mac_address': [{
vs
{'TABLE_vrf': {'ROW_vrf': {'TABLE_adj': {'ROW_adj': {
"""
if parent_table is None:
return []
_table = parent_table.get(table_name)
_table_rows = []
if isinstance(_table, list):
_table_rows = [_table_row.get(row_name) for _table_row in _table]
elif isinstance(_table, dict):
_table_rows = _table.get(row_name)
if not isinstance(_table_rows, list):
_table_rows = [_table_rows]
return _table_rows
def _get_reply_table(self, result, table_name, row_name):
return self._get_table_rows(result, table_name, row_name)
def _get_command_table(self, command, table_name, row_name):
json_output = self._send_command(command)
return self._get_reply_table(json_output, table_name, row_name)
def is_alive(self):
if self.device:
return {"is_alive": True}
else:
return {"is_alive": False}
def _copy_run_start(self):
results = self.device.save(filename="startup-config")
if not results:
msg = "Unable to save running-config to startup-config!"
raise CommandErrorException(msg)
def _load_cfg_from_checkpoint(self):
commands = [
"terminal dont-ask",
"rollback running-config file {}".format(self.candidate_cfg),
"no terminal dont-ask",
]
try:
rollback_result = self._send_command_list(commands)
except ConnectionError:
# requests will raise an error with verbose warning output (don't fail on this).
return
finally:
self.changed = True
# For nx-api a list is returned so extract the result associated with the
# 'rollback' command.
rollback_result = rollback_result[1]
msg = (
rollback_result.get("msg")
if rollback_result.get("msg")
else rollback_result
)
error_msg = True if rollback_result.get("error") else False
if "Rollback failed." in msg or error_msg:
raise ReplaceConfigException(msg)
elif rollback_result == []:
raise ReplaceConfigException
def rollback(self):
if self.changed:
self.device.rollback(self.rollback_cfg)
self._copy_run_start()
self.changed = False
def get_facts(self):
facts = {}
facts["vendor"] = "Cisco"
show_version = self._send_command("show version")
facts["model"] = show_version.get("chassis_id", "")
facts["hostname"] = show_version.get("host_name", "")
facts["serial_number"] = show_version.get("proc_board_id", "")
facts["os_version"] = show_version.get("sys_ver_str", "")
uptime_days = show_version.get("kern_uptm_days", 0)
uptime_hours = show_version.get("kern_uptm_hrs", 0)
uptime_mins = show_version.get("kern_uptm_mins", 0)
uptime_secs = show_version.get("kern_uptm_secs", 0)
uptime = 0
uptime += uptime_days * 24 * 60 * 60
uptime += uptime_hours * 60 * 60
uptime += uptime_mins * 60
uptime += uptime_secs
facts["uptime"] = uptime
iface_cmd = "show interface"
interfaces_out = self._send_command(iface_cmd)
interfaces_body = interfaces_out["TABLE_interface"]["ROW_interface"]
interface_list = [intf_data["interface"] for intf_data in interfaces_body]
facts["interface_list"] = interface_list
hostname_cmd = "show hostname"
hostname = self._send_command(hostname_cmd).get("hostname")
if hostname:
facts["fqdn"] = hostname
return facts
def get_interfaces(self):
interfaces = {}
iface_cmd = "show interface"
interfaces_out = self._send_command(iface_cmd)
interfaces_body = interfaces_out["TABLE_interface"]["ROW_interface"]
for interface_details in interfaces_body:
interface_name = interface_details.get("interface")
interface_mtu = interface_details.get("eth_mtu", 0)
interface_mtu = int(interface_mtu)
# Earlier version of Nexus returned a list for 'eth_bw' (observed on 7.1(0)N1(1a))
interface_speed = interface_details.get("eth_bw", 0)
if isinstance(interface_speed, list):
interface_speed = interface_speed[0]
interface_speed = int(interface_speed / 1000)
if "admin_state" in interface_details:
is_up = interface_details.get("admin_state", "") == "up"
else:
is_up = interface_details.get("state", "") == "up"
interfaces[interface_name] = {
"is_up": is_up,
"is_enabled": (interface_details.get("state") == "up"),
"description": py23_compat.text_type(
interface_details.get("desc", "").strip('"')
),
"last_flapped": self._compute_timestamp(
interface_details.get("eth_link_flapped", "")
),
"speed": interface_speed,
"mtu": interface_mtu,
"mac_address": napalm.base.helpers.convert(
napalm.base.helpers.mac, interface_details.get("eth_hw_addr")
),
}
return interfaces
def get_bgp_neighbors(self):
results = {}
bgp_state_dict = {
"Idle": {"is_up": False, "is_enabled": True},
"Active": {"is_up": False, "is_enabled": True},
"Open": {"is_up": False, "is_enabled": True},
"Established": {"is_up": True, "is_enabled": True},
"Closing": {"is_up": True, "is_enabled": True},
"Shutdown": {"is_up": False, "is_enabled": False},
}
"""
af_name_dict = {
'af-id': {'safi': "af-name"},
'af-id': {'safi': "af-name"},
'af-id': {'safi': "af-name"}
}
"""
af_name_dict = {
1: {1: "ipv4", 128: "vpnv4"},
2: {1: "ipv6", 128: "vpnv6"},
25: {70: "l2vpn"},
}
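# For example, an af-id of 2 with safi 1 maps to "ipv6", and 1/128 to "vpnv4".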
try:
cmd = "show bgp all summary vrf all"
vrf_list = self._get_command_table(cmd, "TABLE_vrf", "ROW_vrf")
except NXAPICommandError:
vrf_list = []
for vrf_dict in vrf_list:
result_vrf_dict = {
"router_id": py23_compat.text_type(vrf_dict["vrf-router-id"]),
"peers": {},
}
af_list = vrf_dict.get("TABLE_af", {}).get("ROW_af", [])
if isinstance(af_list, dict):
af_list = [af_list]
for af_dict in af_list:
saf_dict = af_dict.get("TABLE_saf", {}).get("ROW_saf", {})
neighbors_list = saf_dict.get("TABLE_neighbor", {}).get(
"ROW_neighbor", []
)
if isinstance(neighbors_list, dict):
neighbors_list = [neighbors_list]
for neighbor_dict in neighbors_list:
neighborid = napalm.base.helpers.ip(neighbor_dict["neighborid"])
remoteas = napalm.base.helpers.as_number(
neighbor_dict["neighboras"]
)
state = py23_compat.text_type(neighbor_dict["state"])
bgp_state = bgp_state_dict[state]
afid_dict = af_name_dict[int(af_dict["af-id"])]
safi_name = afid_dict[int(saf_dict["safi"])]
result_peer_dict = {
"local_as": int(vrf_dict["vrf-local-as"]),
"remote_as": remoteas,
"remote_id": neighborid,
"is_enabled": bgp_state["is_enabled"],
"uptime": -1,
"description": "",
"is_up": bgp_state["is_up"],
"address_family": {
safi_name: {
"sent_prefixes": -1,
"accepted_prefixes": -1,
"received_prefixes": int(
neighbor_dict["prefixreceived"]
),
}
},
}
result_vrf_dict["peers"][neighborid] = result_peer_dict
vrf_name = vrf_dict["vrf-name-out"]
if vrf_name == "default":
vrf_name = "global"
results[vrf_name] = result_vrf_dict
return results
def cli(self, commands):
cli_output = {}
if type(commands) is not list:
raise TypeError("Please enter a valid list of commands!")
for command in commands:
command_output = self._send_command(command, raw_text=True)
cli_output[py23_compat.text_type(command)] = command_output
return cli_output
def get_arp_table(self, vrf=""):
if vrf:
msg = "VRF support has not been added for this getter on this platform."
raise NotImplementedError(msg)
arp_table = []
command = "show ip arp"
arp_table_vrf = self._get_command_table(command, "TABLE_vrf", "ROW_vrf")
arp_table_raw = self._get_table_rows(arp_table_vrf[0], "TABLE_adj", "ROW_adj")
for arp_table_entry in arp_table_raw:
raw_ip = arp_table_entry.get("ip-addr-out")
raw_mac = arp_table_entry.get("mac")
age = arp_table_entry.get("time-stamp")
if age == "-":
age_sec = -1.0
elif ":" not in age:
# Cisco sometimes returns a sub second arp time 0.411797
try:
age_sec = float(age)
except ValueError:
age_sec = -1.0
else:
fields = age.split(":")
if len(fields) == 3:
try:
fields = [float(x) for x in fields]
hours, minutes, seconds = fields
age_sec = 3600 * hours + 60 * minutes + seconds
except ValueError:
age_sec = -1.0
age_sec = round(age_sec, 1)
interface = py23_compat.text_type(arp_table_entry.get("intf-out"))
arp_table.append(
{
"interface": interface,
"mac": napalm.base.helpers.convert(
napalm.base.helpers.mac, raw_mac, raw_mac
),
"ip": napalm.base.helpers.ip(raw_ip),
"age": age_sec,
}
)
return arp_table
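# Worked example of the age parsing above (illustrative only): an ARP
# timestamp of "00:02:05" becomes 3600 * 0 + 60 * 2 + 5 = 125.0 seconds,
# "0.411797" rounds to 0.4, and "-" is reported as -1.0 (age unknown).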
def _get_ntp_entity(self, peer_type):
ntp_entities = {}
command = "show ntp peers"
ntp_peers_table = self._get_command_table(command, "TABLE_peers", "ROW_peers")
for ntp_peer in ntp_peers_table:
if ntp_peer.get("serv_peer", "").strip() != peer_type:
continue
peer_addr = napalm.base.helpers.ip(ntp_peer.get("PeerIPAddress").strip())
ntp_entities[peer_addr] = {}
return ntp_entities
def get_ntp_peers(self):
return self._get_ntp_entity("Peer")
def get_ntp_servers(self):
return self._get_ntp_entity("Server")
def get_ntp_stats(self):
ntp_stats = []
command = "show ntp peer-status"
ntp_stats_table = self._get_command_table(
command, "TABLE_peersstatus", "ROW_peersstatus"
)
for ntp_peer in ntp_stats_table:
peer_address = napalm.base.helpers.ip(ntp_peer.get("remote").strip())
syncmode = ntp_peer.get("syncmode")
stratum = int(ntp_peer.get("st"))
hostpoll = int(ntp_peer.get("poll"))
reachability = int(ntp_peer.get("reach"))
delay = float(ntp_peer.get("delay"))
ntp_stats.append(
{
"remote": peer_address,
"synchronized": (syncmode == "*"),
"referenceid": peer_address,
"stratum": stratum,
"type": "",
"when": "",
"hostpoll": hostpoll,
"reachability": reachability,
"delay": delay,
"offset": 0.0,
"jitter": 0.0,
}
)
return ntp_stats
def get_interfaces_ip(self):
interfaces_ip = {}
ipv4_command = "show ip interface"
ipv4_interf_table_vrf = self._get_command_table(
ipv4_command, "TABLE_intf", "ROW_intf"
)
for interface in ipv4_interf_table_vrf:
interface_name = py23_compat.text_type(interface.get("intf-name", ""))
addr_str = interface.get("prefix")
unnumbered = py23_compat.text_type(interface.get("unnum-intf", ""))
if addr_str:
address = napalm.base.helpers.ip(addr_str)
prefix = int(interface.get("masklen", ""))
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = {}
if "ipv4" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv4"] = {}
if address not in interfaces_ip[interface_name].get("ipv4"):
interfaces_ip[interface_name]["ipv4"][address] = {}
interfaces_ip[interface_name]["ipv4"][address].update(
{"prefix_length": prefix}
)
elif unnumbered:
for interf in ipv4_interf_table_vrf:
interf_name = py23_compat.text_type(interf.get("intf-name", ""))
if interf_name == unnumbered:
address = napalm.base.helpers.ip(interf.get("prefix"))
prefix = int(interf.get("masklen", ""))
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = {}
if "ipv4" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv4"] = {}
if address not in interfaces_ip[interface_name].get("ipv4"):
interfaces_ip[interface_name]["ipv4"][address] = {}
interfaces_ip[interface_name]["ipv4"][address].update(
{"prefix_length": prefix}
)
secondary_addresses = interface.get("TABLE_secondary_address", {}).get(
"ROW_secondary_address", []
)
if type(secondary_addresses) is dict:
secondary_addresses = [secondary_addresses]
for secondary_address in secondary_addresses:
secondary_address_ip = napalm.base.helpers.ip(
secondary_address.get("prefix1")
)
secondary_address_prefix = int(secondary_address.get("masklen1", ""))
if "ipv4" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv4"] = {}
if secondary_address_ip not in interfaces_ip[interface_name].get(
"ipv4"
):
interfaces_ip[interface_name]["ipv4"][secondary_address_ip] = {}
interfaces_ip[interface_name]["ipv4"][secondary_address_ip].update(
{"prefix_length": secondary_address_prefix}
)
ipv6_command = "show ipv6 interface"
ipv6_interf_table_vrf = self._get_command_table(
ipv6_command, "TABLE_intf", "ROW_intf"
)
for interface in ipv6_interf_table_vrf:
interface_name = py23_compat.text_type(interface.get("intf-name", ""))
if interface_name not in interfaces_ip.keys():
interfaces_ip[interface_name] = {}
if "ipv6" not in interfaces_ip[interface_name].keys():
interfaces_ip[interface_name]["ipv6"] = {}
if "addr" not in interface.keys():
# Handle nexus 9000 ipv6 interface output
addrs = [addr["addr"] for addr in interface["TABLE_addr"]["ROW_addr"]]
interface["addr"] = addrs
if type(interface.get("addr", "")) is list:
for ipv6_address in interface.get("addr", ""):
address = napalm.base.helpers.ip(ipv6_address.split("/")[0])
prefix = int(ipv6_address.split("/")[-1])
if address not in interfaces_ip[interface_name].get("ipv6"):
interfaces_ip[interface_name]["ipv6"][address] = {}
interfaces_ip[interface_name]["ipv6"][address].update(
{"prefix_length": prefix}
)
else:
address = napalm.base.helpers.ip(
interface.get("addr", "").split("/")[0]
)
prefix = interface.get("prefix", "").split("/")[-1]
if prefix:
prefix = int(interface.get("prefix", "").split("/")[-1])
else:
prefix = 128
if address not in interfaces_ip[interface_name].get("ipv6"):
interfaces_ip[interface_name]["ipv6"][address] = {}
interfaces_ip[interface_name]["ipv6"][address].update(
{"prefix_length": prefix}
)
return interfaces_ip
def get_mac_address_table(self):
mac_table = []
command = "show mac address-table"
mac_table_raw = self._get_command_table(
command, "TABLE_mac_address", "ROW_mac_address"
)
for mac_entry in mac_table_raw:
raw_mac = mac_entry.get("disp_mac_addr")
interface = py23_compat.text_type(mac_entry.get("disp_port"))
try:
vlan = int(mac_entry.get("disp_vlan"))
except ValueError:
vlan = 0
active = True
static = mac_entry.get("disp_is_static") != "0"
moves = 0
last_move = 0.0
mac_table.append(
{
"mac": napalm.base.helpers.mac(raw_mac),
"interface": interface,
"vlan": vlan,
"active": active,
"static": static,
"moves": moves,
"last_move": last_move,
}
)
return mac_table
def get_snmp_information(self):
snmp_information = {}
snmp_command = "show running-config"
snmp_raw_output = self.cli([snmp_command]).get(snmp_command, "")
snmp_config = napalm.base.helpers.textfsm_extractor(
self, "snmp_config", snmp_raw_output
)
if not snmp_config:
return snmp_information
snmp_information = {
"contact": py23_compat.text_type(""),
"location": py23_compat.text_type(""),
"community": {},
"chassis_id": py23_compat.text_type(""),
}
for snmp_entry in snmp_config:
contact = py23_compat.text_type(snmp_entry.get("contact", ""))
if contact:
snmp_information["contact"] = contact
location = py23_compat.text_type(snmp_entry.get("location", ""))
if location:
snmp_information["location"] = location
community_name = py23_compat.text_type(snmp_entry.get("community", ""))
if not community_name:
continue
if community_name not in snmp_information["community"].keys():
snmp_information["community"][community_name] = {
"acl": py23_compat.text_type(snmp_entry.get("acl", "")),
"mode": py23_compat.text_type(snmp_entry.get("mode", "").lower()),
}
else:
acl = py23_compat.text_type(snmp_entry.get("acl", ""))
if acl:
snmp_information["community"][community_name]["acl"] = acl
mode = py23_compat.text_type(snmp_entry.get("mode", "").lower())
if mode:
snmp_information["community"][community_name]["mode"] = mode
return snmp_information
def get_users(self):
_CISCO_TO_CISCO_MAP = {"network-admin": 15, "network-operator": 5}
_DEFAULT_USER_DICT = {"password": "", "level": 0, "sshkeys": []}
users = {}
command = "show running-config"
section_username_raw_output = self.cli([command]).get(command, "")
section_username_tabled_output = napalm.base.helpers.textfsm_extractor(
self, "users", section_username_raw_output
)
for user in section_username_tabled_output:
username = user.get("username", "")
if not username:
continue
if username not in users:
users[username] = _DEFAULT_USER_DICT.copy()
password = user.get("password", "")
if password:
users[username]["password"] = py23_compat.text_type(password.strip())
level = 0
role = user.get("role", "")
if role.startswith("priv"):
level = int(role.split("-")[-1])
else:
level = _CISCO_TO_CISCO_MAP.get(role, 0)
if level > users.get(username).get("level"):
# unfortunately on Cisco you can set different priv levels for the same user
# Good news though: the device will consider the highest level
users[username]["level"] = level
sshkeytype = user.get("sshkeytype", "")
sshkeyvalue = user.get("sshkeyvalue", "")
if sshkeytype and sshkeyvalue:
if sshkeytype not in ["ssh-rsa", "ssh-dsa"]:
continue
users[username]["sshkeys"].append(py23_compat.text_type(sshkeyvalue))
return users
def get_network_instances(self, name=""):
""" get_network_instances implementation for NX-OS """
# command 'show vrf detail' returns all VRFs with detailed information
# format: list of dictionaries with keys such as 'vrf_name' and 'rd'
command = "show vrf detail"
vrf_table_raw = self._get_command_table(command, "TABLE_vrf", "ROW_vrf")
# command 'show vrf interface' returns all interfaces including their assigned VRF
# format: list of dictionaries with keys 'if_name', 'vrf_name', 'vrf_id' and 'soo'
command = "show vrf interface"
intf_table_raw = self._get_command_table(command, "TABLE_if", "ROW_if")
# create a dictionary with key = 'vrf_name' and value = list of interfaces
vrf_intfs = defaultdict(list)
for intf in intf_table_raw:
vrf_intfs[intf["vrf_name"]].append(py23_compat.text_type(intf["if_name"]))
vrfs = {}
for vrf in vrf_table_raw:
vrf_name = py23_compat.text_type(vrf.get("vrf_name"))
vrfs[vrf_name] = {}
vrfs[vrf_name]["name"] = vrf_name
# differentiate between VRF type 'DEFAULT_INSTANCE' and 'L3VRF'
if vrf_name == "default":
vrfs[vrf_name]["type"] = "DEFAULT_INSTANCE"
else:
vrfs[vrf_name]["type"] = "L3VRF"
vrfs[vrf_name]["state"] = {
"route_distinguisher": py23_compat.text_type(vrf.get("rd"))
}
# convert list of interfaces (vrf_intfs[vrf_name]) to expected format
# format = dict with key = interface name and empty values
vrfs[vrf_name]["interfaces"] = {}
vrfs[vrf_name]["interfaces"]["interface"] = dict.fromkeys(
vrf_intfs[vrf_name], {}
)
# if name of a specific VRF was passed as an argument
# only return results for this particular VRF
if name:
if name in vrfs.keys():
return {py23_compat.text_type(name): vrfs[name]}
else:
return {}
# else return results for all VRFs
else:
return vrfs
| 38.911111
| 95
| 0.537801
|
7952cf6f77f4ac9948de6020fb529933a715bc93
| 337
|
py
|
Python
|
pythran/tests/openmp.legacy/omp_parallel_private.py
|
xmar/pythran
|
dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/openmp.legacy/omp_parallel_private.py
|
xmar/pythran
|
dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/openmp.legacy/omp_parallel_private.py
|
xmar/pythran
|
dbf2e8b70ed1e4d4ac6b5f26ead4add940a72592
|
[
"BSD-3-Clause"
] | 1
|
2017-03-12T20:32:36.000Z
|
2017-03-12T20:32:36.000Z
|
def omp_parallel_private():
sum = 0
num_threads = 0
if 'omp parallel':
sum1 = 7
'omp for'
for i in xrange(1, 1000):
sum1 += i
if 'omp critical':
sum += sum1
num_threads += 1
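# Each thread's private sum1 starts at 7, so the expected total is
# sum(1..999) plus 7 for every participating thread: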
known_sum = (999 * 1000) / 2 + 7 * num_threads
return known_sum == sum
| 18.722222
| 50
| 0.486647
|
7952cffea3bdbe7a056dbe492ad7a909cd23fe13
| 17,818
|
py
|
Python
|
models/external_functions.py
|
fyviezhao/dressing-in-order
|
63790663ad0420d9d2dabed22d5c56dd40422313
|
[
"BSD-3-Clause"
] | null | null | null |
models/external_functions.py
|
fyviezhao/dressing-in-order
|
63790663ad0420d9d2dabed22d5c56dd40422313
|
[
"BSD-3-Clause"
] | null | null | null |
models/external_functions.py
|
fyviezhao/dressing-in-order
|
63790663ad0420d9d2dabed22d5c56dd40422313
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Functions are modified on top of GFLA.
GFLA's license: https://github.com/RenYurui/Global-Flow-Local-Attention/blob/master/LICENSE.md
"""
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import os
import torchvision.transforms as transforms
import numpy as np
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
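# Minimal usage sketch (commented out; the tensor and network names are
# illustrative, not defined in this module). The loss builds its own target
# tensor sized like the discriminator output:
#
#   criterion = GANLoss('lsgan')             # or 'vanilla' / 'wgangp'
#   pred_fake = netD(fake_images)            # raw logits, no sigmoid
#   loss_G = criterion(pred_fake, True)      # generator wants "real"
#   loss_D = 0.5 * (criterion(netD(real_images), True)
#                   + criterion(pred_fake.detach(), False))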
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float)            -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
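# Hedged usage sketch for the WGAN-GP term above (variable names such as
# netD, real and fake are assumptions, not part of this module):
#
#   gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device,
#                                type='mixed', constant=1.0, lambda_gp=10.0)
#   loss_D = netD(fake.detach()).mean() - netD(real).mean() + gp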
class MultiAffineRegularizationLoss(nn.Module):
def __init__(self, kz_dic):
super(MultiAffineRegularizationLoss, self).__init__()
self.kz_dic=kz_dic
self.method_dic={}
for key in kz_dic:
instance = AffineRegularizationLoss(kz_dic[key])
self.method_dic[key] = instance
self.layers = sorted(kz_dic, reverse=True)
def __call__(self, flow_fields):
loss=0
for i in range(len(flow_fields)):
method = self.method_dic[self.layers[i]]
loss += method(flow_fields[i])
return loss
class AffineRegularizationLoss(nn.Module):
"""docstring for AffineRegularizationLoss"""
# kernel_size: kz
def __init__(self, kz):
super(AffineRegularizationLoss, self).__init__()
self.kz = kz
self.criterion = torch.nn.L1Loss()
from models.networks.block_extractor.block_extractor import BlockExtractor
from models.networks.local_attn_reshape.local_attn_reshape import LocalAttnReshape
self.extractor = BlockExtractor(kernel_size=kz)
self.reshape = LocalAttnReshape()
temp = np.arange(kz)
A = np.ones([kz*kz, 3])
A[:, 0] = temp.repeat(kz)
A[:, 1] = temp.repeat(kz).reshape((kz,kz)).transpose().reshape(kz**2)
AH = A.transpose()
k = np.dot(A, np.dot(np.linalg.inv(np.dot(AH, A)), AH)) - np.identity(kz**2) #K = (A((AH A)^-1)AH - I)
self.kernel = np.dot(k.transpose(), k)
self.kernel = torch.from_numpy(self.kernel).unsqueeze(1).view(kz**2, kz, kz).unsqueeze(1)
def __call__(self, flow_fields):
grid = self.flow2grid(flow_fields)
grid_x = grid[:,0,:,:].unsqueeze(1)
grid_y = grid[:,1,:,:].unsqueeze(1)
weights = self.kernel.type_as(flow_fields)
#import pdb; pdb.set_trace()
loss_x = self.calculate_loss(grid_x, weights)
loss_y = self.calculate_loss(grid_y, weights)
return loss_x+loss_y
def calculate_loss(self, grid, weights):
results = nn.functional.conv2d(grid, weights) # KH K B [b, kz*kz, w, h]
b, c, h, w = results.size()
kernels_new = self.reshape(results, self.kz)
f = torch.zeros(b, 2, h, w).type_as(kernels_new) + float(int(self.kz/2))
grid_H = self.extractor(grid, f)
result = torch.nn.functional.avg_pool2d(grid_H*kernels_new, self.kz, self.kz)
loss = torch.mean(result)*self.kz**2
return loss
def flow2grid(self, flow_field):
b,c,h,w = flow_field.size()
x = torch.arange(w).view(1, -1).expand(h, -1).type_as(flow_field).float()
y = torch.arange(h).view(-1, 1).expand(-1, w).type_as(flow_field).float()
grid = torch.stack([x,y], dim=0)
grid = grid.unsqueeze(0).expand(b, -1, -1, -1)
return flow_field+grid
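# Note on the construction above (our reading of the code, not an official
# derivation): for each kz x kz patch of the flow-induced sampling grid, the
# rows of A are the homogeneous patch coordinates (x, y, 1). The matrix
# k = A (A^T A)^-1 A^T - I removes the part of a patch that an affine map can
# explain, so convolving the grid with kernel = k^T k and pooling accumulates
# ||k g||^2 per patch, penalizing locally non-affine flow fields.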
class VGGLoss(nn.Module):
r"""
Perceptual loss, VGG-based
https://arxiv.org/abs/1603.08155
https://github.com/dxyang/StyleTransfer/blob/master/utils.py
"""
def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
super(VGGLoss, self).__init__()
self.add_module('vgg', VGG19())
self.criterion = torch.nn.L1Loss()
self.weights = weights
def compute_gram(self, x):
b, ch, h, w = x.size()
f = x.view(b, ch, w * h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (h * w * ch)
return G
def __call__(self, x, y, last_only=False, content_only=False):
# Compute features
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
if not last_only:
content_loss = 0.0
content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
if content_only:
return content_loss
# Compute loss
style_loss = 0.0
style_loss += self.criterion(self.compute_gram(x_vgg['relu2_2']), self.compute_gram(y_vgg['relu2_2']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu3_4']), self.compute_gram(y_vgg['relu3_4']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu4_4']), self.compute_gram(y_vgg['relu4_4']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu5_2']), self.compute_gram(y_vgg['relu5_2']))
else:
content_loss = self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
if content_only:
return content_loss
style_loss = self.criterion(self.compute_gram(x_vgg['relu5_2']), self.compute_gram(y_vgg['relu5_2']))
return content_loss, style_loss
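# Usage sketch (commented out; tensor names are placeholders): both inputs
# are image batches in the range VGG19 expects.
#
#   vgg_loss = VGGLoss()
#   content, style = vgg_loss(generated, target)
#   content_only = vgg_loss(generated, target, last_only=True, content_only=True)
#
# The Gram matrix used for the style term is G = f f^T / (h * w * ch), with
# f the (b, ch, h*w) feature map, so the style loss compares feature
# correlations rather than spatial layouts.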
class PerceptualCorrectness(nn.Module):
r"""
"""
def __init__(self, layer=['relu1_1','relu2_1','relu3_1','relu4_1']):
super(PerceptualCorrectness, self).__init__()
self.add_module('vgg', VGG19())
self.layer = layer
self.eps=1e-8
from models.networks.resample2d_package.resample2d import Resample2d
self.resample = Resample2d(4, 1, sigma=2)
def __call__(self, target, source, flow_list, used_layers, mask=None, use_bilinear_sampling=False):
used_layers=sorted(used_layers, reverse=True)
# self.target=target
# self.source=source
self.target_vgg, self.source_vgg = self.vgg(target), self.vgg(source)
loss = 0
for i in range(len(flow_list)):
loss += self.calculate_loss(flow_list[i], self.layer[used_layers[i]], mask, use_bilinear_sampling)
return loss
def calculate_loss(self, flow, layer, mask=None, use_bilinear_sampling=False):
target_vgg = self.target_vgg[layer]
source_vgg = self.source_vgg[layer]
[b, c, h, w] = target_vgg.shape
# maps = F.interpolate(maps, [h,w]).view(b,-1)
flow = F.interpolate(flow, [h,w])
target_all = target_vgg.view(b, c, -1) #[b C N2]
source_all = source_vgg.view(b, c, -1).transpose(1,2) #[b N2 C]
source_norm = source_all/(source_all.norm(dim=2, keepdim=True)+self.eps)
target_norm = target_all/(target_all.norm(dim=1, keepdim=True)+self.eps)
correction = torch.bmm(source_norm, target_norm) #[b N2 N2]
(correction_max,max_indices) = torch.max(correction, dim=1)
# interpolate with bilinear sampling
if use_bilinear_sampling:
input_sample = self.bilinear_warp(source_vgg, flow).view(b, c, -1)
else:
input_sample = self.resample(source_vgg, flow).view(b, c, -1)
correction_sample = F.cosine_similarity(input_sample, target_all) #[b 1 N2]
loss_map = torch.exp(-correction_sample/(correction_max+self.eps))
if mask is None:
loss = torch.mean(loss_map) - torch.exp(torch.tensor(-1).type_as(loss_map))
else:
mask=F.interpolate(mask, size=(target_vgg.size(2), target_vgg.size(3)))
mask=mask.view(-1, target_vgg.size(2)*target_vgg.size(3))
loss_map = loss_map - torch.exp(torch.tensor(-1).type_as(loss_map))
loss = torch.sum(mask * loss_map)/(torch.sum(mask)+self.eps)
# print(correction_sample[0,2076:2082])
# print(correction_max[0,2076:2082])
# coor_x = [32,32]
# coor = max_indices[0,32+32*64]
# coor_y = [int(coor%64), int(coor/64)]
# source = F.interpolate(self.source, [64,64])
# target = F.interpolate(self.target, [64,64])
# source_i = source[0]
# target_i = target[0]
# source_i = source_i.view(3, -1)
# source_i[:,coor]=-1
# source_i[0,coor]=1
# source_i = source_i.view(3,64,64)
# target_i[:,32,32]=-1
# target_i[0,32,32]=1
# lists = str(int(torch.rand(1)*100))
# img_numpy = util.tensor2im(source_i.data)
# util.save_image(img_numpy, 'source'+lists+'.png')
# img_numpy = util.tensor2im(target_i.data)
# util.save_image(img_numpy, 'target'+lists+'.png')
return loss
def bilinear_warp(self, source, flow):
[b, c, h, w] = source.shape
x = torch.arange(w).view(1, -1).expand(h, -1).type_as(source).float() / (w-1)
y = torch.arange(h).view(-1, 1).expand(-1, w).type_as(source).float() / (h-1)
grid = torch.stack([x,y], dim=0)
grid = grid.unsqueeze(0).expand(b, -1, -1, -1)
grid = 2*grid - 1
flow = 2*flow/torch.tensor([w, h]).view(1, 2, 1, 1).expand(b, -1, h, w).type_as(flow)
grid = (grid+flow).permute(0, 2, 3, 1)
input_sample = F.grid_sample(source, grid).view(b, c, -1)
return input_sample
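# bilinear_warp normalizes pixel coordinates to the [-1, 1] range expected by
# F.grid_sample, rescales the flow to the same range, and samples the source
# at grid + flow, so the returned tensor is the source feature map warped by
# the flow field.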
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
features = models.vgg19(pretrained=True).features
self.relu1_1 = torch.nn.Sequential()
self.relu1_2 = torch.nn.Sequential()
self.relu2_1 = torch.nn.Sequential()
self.relu2_2 = torch.nn.Sequential()
self.relu3_1 = torch.nn.Sequential()
self.relu3_2 = torch.nn.Sequential()
self.relu3_3 = torch.nn.Sequential()
self.relu3_4 = torch.nn.Sequential()
self.relu4_1 = torch.nn.Sequential()
self.relu4_2 = torch.nn.Sequential()
self.relu4_3 = torch.nn.Sequential()
self.relu4_4 = torch.nn.Sequential()
self.relu5_1 = torch.nn.Sequential()
self.relu5_2 = torch.nn.Sequential()
self.relu5_3 = torch.nn.Sequential()
self.relu5_4 = torch.nn.Sequential()
for x in range(2):
self.relu1_1.add_module(str(x), features[x])
for x in range(2, 4):
self.relu1_2.add_module(str(x), features[x])
for x in range(4, 7):
self.relu2_1.add_module(str(x), features[x])
for x in range(7, 9):
self.relu2_2.add_module(str(x), features[x])
for x in range(9, 12):
self.relu3_1.add_module(str(x), features[x])
for x in range(12, 14):
self.relu3_2.add_module(str(x), features[x])
for x in range(14, 16):
self.relu3_3.add_module(str(x), features[x])
for x in range(16, 18):
self.relu3_4.add_module(str(x), features[x])
for x in range(18, 21):
self.relu4_1.add_module(str(x), features[x])
for x in range(21, 23):
self.relu4_2.add_module(str(x), features[x])
for x in range(23, 25):
self.relu4_3.add_module(str(x), features[x])
for x in range(25, 27):
self.relu4_4.add_module(str(x), features[x])
for x in range(27, 30):
self.relu5_1.add_module(str(x), features[x])
for x in range(30, 32):
self.relu5_2.add_module(str(x), features[x])
for x in range(32, 34):
self.relu5_3.add_module(str(x), features[x])
for x in range(34, 36):
self.relu5_4.add_module(str(x), features[x])
# don't need the gradients, just want the features
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
relu1_1 = self.relu1_1(x)
relu1_2 = self.relu1_2(relu1_1)
relu2_1 = self.relu2_1(relu1_2)
relu2_2 = self.relu2_2(relu2_1)
relu3_1 = self.relu3_1(relu2_2)
relu3_2 = self.relu3_2(relu3_1)
relu3_3 = self.relu3_3(relu3_2)
relu3_4 = self.relu3_4(relu3_3)
relu4_1 = self.relu4_1(relu3_4)
relu4_2 = self.relu4_2(relu4_1)
relu4_3 = self.relu4_3(relu4_2)
relu4_4 = self.relu4_4(relu4_3)
relu5_1 = self.relu5_1(relu4_4)
relu5_2 = self.relu5_2(relu5_1)
relu5_3 = self.relu5_3(relu5_2)
relu5_4 = self.relu5_4(relu5_3)
out = {
'relu1_1': relu1_1,
'relu1_2': relu1_2,
'relu2_1': relu2_1,
'relu2_2': relu2_2,
'relu3_1': relu3_1,
'relu3_2': relu3_2,
'relu3_3': relu3_3,
'relu3_4': relu3_4,
'relu4_1': relu4_1,
'relu4_2': relu4_2,
'relu4_3': relu4_3,
'relu4_4': relu4_4,
'relu5_1': relu5_1,
'relu5_2': relu5_2,
'relu5_3': relu5_3,
'relu5_4': relu5_4,
}
return out
| 39.861298
| 143
| 0.602986
|
7952d2538afb2a1c560af0897c14f0bc2c2e1a03
| 124
|
py
|
Python
|
npm/apps.py
|
alexsilva/django-npm
|
8d5c55c0219fda074ceabdd93b3806e65a008d9e
|
[
"MIT"
] | null | null | null |
npm/apps.py
|
alexsilva/django-npm
|
8d5c55c0219fda074ceabdd93b3806e65a008d9e
|
[
"MIT"
] | null | null | null |
npm/apps.py
|
alexsilva/django-npm
|
8d5c55c0219fda074ceabdd93b3806e65a008d9e
|
[
"MIT"
] | 1
|
2019-10-17T15:13:13.000Z
|
2019-10-17T15:13:13.000Z
|
from django.apps import AppConfig
class NPMConfig(AppConfig):
name = 'npm'
verbose_name = "NPM package installer"
| 17.714286
| 42
| 0.725806
|
7952d2b747f6fc0fb56f9bda37d301b79d4f78ec
| 6,353
|
py
|
Python
|
seahub/api2/endpoints/repo_trash.py
|
weimens/seahub
|
5ecf78ed7a2ddc72a23961804ee41be21c24893f
|
[
"Apache-2.0"
] | 101
|
2021-05-16T06:00:03.000Z
|
2021-12-01T02:02:29.000Z
|
seahub/api2/endpoints/repo_trash.py
|
weimens/seahub
|
5ecf78ed7a2ddc72a23961804ee41be21c24893f
|
[
"Apache-2.0"
] | null | null | null |
seahub/api2/endpoints/repo_trash.py
|
weimens/seahub
|
5ecf78ed7a2ddc72a23961804ee41be21c24893f
|
[
"Apache-2.0"
] | 2
|
2021-10-19T05:22:40.000Z
|
2022-01-12T03:55:34.000Z
|
# Copyright (c) 2012-2016 Seafile Ltd.
import stat
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.utils import api_error
from seahub.signals import clean_up_repo_trash
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
from seahub.utils.repo import get_repo_owner
from seahub.views import check_folder_permission
from seahub.group.utils import is_group_admin
from seahub.api2.endpoints.group_owned_libraries import get_group_id_by_repo_owner
from seaserv import seafile_api
from pysearpc import SearpcError
from constance import config
logger = logging.getLogger(__name__)
class RepoTrash(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get_item_info(self, trash_item):
item_info = {
'parent_dir': trash_item.basedir,
'obj_name': trash_item.obj_name,
'deleted_time': timestamp_to_isoformat_timestr(trash_item.delete_time),
'scan_stat': trash_item.scan_stat,
'commit_id': trash_item.commit_id,
}
is_dir = stat.S_ISDIR(trash_item.mode)
item_info['is_dir'] = is_dir
item_info['size'] = trash_item.file_size if not is_dir else ''
item_info['obj_id'] = trash_item.obj_id if not is_dir else ''
return item_info
def get(self, request, repo_id, format=None):
""" Return deleted files/dirs of a repo/folder
Permission checking:
1. all authenticated user can perform this action.
"""
# argument check
path = request.GET.get('path', '/')
# resource check
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not dir_id:
error_msg = 'Folder %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if check_folder_permission(request, repo_id, path) is None:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
show_days = int(request.GET.get('show_days', '0'))
except ValueError:
show_days = 0
if show_days < 0:
error_msg = 'show_days invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
scan_stat = request.GET.get('scan_stat', None)
try:
# a list will be returned, with at least 1 item in it
# the last item is not a deleted entry, and it contains an attribute named 'scan_stat'
deleted_entries = seafile_api.get_deleted(repo_id,
show_days, path, scan_stat)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
scan_stat = deleted_entries[-1].scan_stat
more = scan_stat is not None
items = []
if len(deleted_entries) > 1:
entries_without_scan_stat = deleted_entries[0:-1]
# sort entry by delete time
entries_without_scan_stat.sort(
key=lambda x: x.delete_time, reverse=True)
for item in entries_without_scan_stat:
item_info = self.get_item_info(item)
items.append(item_info)
result = {
'data': items,
'more': more,
'scan_stat': scan_stat,
}
return Response(result)
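# Client-side pagination sketch (illustrative only, not part of seahub; the
# URL routing for this view lives outside this file): keep echoing back the
# returned 'scan_stat' until 'more' is False.
#
#   scan_stat = None
#   items = []
#   while True:
#       resp = get_repo_trash(repo_id, path='/', scan_stat=scan_stat)  # hypothetical helper
#       items += resp['data']
#       if not resp['more']:
#           break
#       scan_stat = resp['scan_stat']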
def delete(self, request, repo_id, format=None):
""" Clean library's trash.
Permission checking:
1. repo owner can perform this action.
2. is group admin.
"""
# argument check
try:
keep_days = int(request.data.get('keep_days', 0))
except ValueError:
error_msg = 'keep_days invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# resource check
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
username = request.user.username
repo_owner = get_repo_owner(request, repo_id)
if not config.ENABLE_USER_CLEAN_TRASH:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if '@seafile_group' in repo_owner:
group_id = get_group_id_by_repo_owner(repo_owner)
if not is_group_admin(group_id, username):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
else:
if username != repo_owner:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
seafile_api.clean_up_repo_history(repo_id, keep_days)
org_id = None if not request.user.org else request.user.org.org_id
clean_up_repo_trash.send(sender=None, org_id=org_id,
operator=username, repo_id=repo_id, repo_name=repo.name,
repo_owner=repo_owner, days=keep_days)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
| 35.294444
| 98
| 0.647096
|
7952d3806dbc35bce4dfeb2e6e4b9d9cca068fae
| 1,108
|
py
|
Python
|
tests/passive/test_all_ports.py
|
jonyboi396825/COM-Server
|
e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905
|
[
"MIT"
] | 4
|
2021-11-09T04:11:51.000Z
|
2022-01-30T01:03:16.000Z
|
tests/passive/test_all_ports.py
|
jonyboi396825/COM-Server
|
e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905
|
[
"MIT"
] | 55
|
2021-11-15T16:36:25.000Z
|
2022-03-10T04:48:08.000Z
|
tests/passive/test_all_ports.py
|
jonyboi396825/COM-Server
|
e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905
|
[
"MIT"
] | 1
|
2021-11-12T02:14:07.000Z
|
2021-11-12T02:14:07.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests that ports are listed properly. Needs an Arduino microcontroller to be plugged in (otherwise the test is skipped).
"""
import glob
import sys
import re
from com_server.tools import all_ports
import pytest
if sys.platform.startswith("linux"):
# usb and acm for linux only
_usb = glob.glob("/dev/ttyUSB[0-9]*")
_acm = glob.glob("/dev/ttyACM[0-9]*")
MATCH = "/dev/ttyUSB[0-9]*|/dev/ttyACM[0-9]*"
elif sys.platform.startswith("darwin"):
# mac; use cu.*, not tty.*
_usb = glob.glob("/dev/cu.usbserial*")
_acm = glob.glob("/dev/cu.usbmodem*")
MATCH = "/dev/cu.usb(serial|modem).*"
elif sys.platform.startswith("win"):
# windows
_usb = [f"COM{i+1}" for i in range(256)]
_acm = []
MATCH = "COM[0-9]+"
else:
# platform not supported for the test
_usb, _acm = [], []
@pytest.mark.skipif(len(_usb + _acm) <= 0, reason="port not connected")
def test_ports():
"""
Tests that `all_ports` lists ports properly.
"""
ports = [a for a, _, _ in all_ports() if re.match(MATCH, a)]
assert len(ports) > 0
| 24.622222
| 106
| 0.624549
|
7952d4429bcacf01ac83eb864d49b7b030f98b62
| 5,648
|
py
|
Python
|
core/webserver.py
|
RASSec/Mario-Mario-
|
7bc3e6fbf9548ec4be0864c47b250f5e1fbb0a28
|
[
"Apache-2.0"
] | 1
|
2021-11-09T08:55:10.000Z
|
2021-11-09T08:55:10.000Z
|
core/webserver.py
|
RASSec/Mario-Mario-
|
7bc3e6fbf9548ec4be0864c47b250f5e1fbb0a28
|
[
"Apache-2.0"
] | null | null | null |
core/webserver.py
|
RASSec/Mario-Mario-
|
7bc3e6fbf9548ec4be0864c47b250f5e1fbb0a28
|
[
"Apache-2.0"
] | 1
|
2021-11-09T08:55:12.000Z
|
2021-11-09T08:55:12.000Z
|
import time
import api.web
from api.mongo import evetomongo
from lib.data import config, clean_status
from api.mongo import clean_mongo, show_db
from api.logger import logger
import os
import json
from flask import Flask, request, redirect, url_for, jsonify, send_from_directory, make_response, Response
def webserver():
app = Flask(__name__)
@app.route('/api/map', methods=['GET'])
def map():
begintime = request.args.get("begintime")
endtime = request.args.get("endtime")
result = api.web.map(begintime=begintime, endtime=endtime)
return jsonify(result)
# @app.route('/api/upload', methods=['GET', 'POST'])
# def upload_file():
# if request.method == 'POST':
# file = request.files['file']
# result = api.web.upload_pcap(file)
# return result
# elif request.method == "GET":
# return '''
# <!doctype html>
# <title>Upload new File</title>
# <h1>Upload new File</h1>
# <form action="" method=post enctype=multipart/form-data>
# <p><input type=file name=file>
# <input type=submit value=Upload>
# </form>
# '''
@app.route('/api/evefile', methods=['GET', 'POST'])
def upload_evefile():
if request.method == 'POST':
config['client_ip'] = request.remote_addr
file = request.files['clientfile'].readlines()
evetomongo(eve_file=file)
return "upload eve.json success"
elif request.method == "GET":
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=clientfile>
<input type=submit value=Upload>
</form>
'''
@app.route('/api/cleanstatus',methods=['GET'])
def get_clean_status():
return clean_status['clean_db']
@app.route('/api/db', methods=['GET', 'POST'])
def clean_db():
if request.method == 'GET':
result = show_db()
return result
if request.method == 'POST':
logger.warning("{} ่ฏทๆฑๆธ
็ๆฐๆฎๅบ".format(request.remote_addr))
clean_status['clean_db'] = "waiting process"
return "start clean db"
@app.route('/api/rules', methods=['GET', 'POST'])
def set_rules():
if request.method == 'POST':
set_info = request.get_data().decode('utf-8')
set_rules_info = json.loads(set_info)
api.web.set_clientrules(set_rules_info['rules_info'])
return "ok"
elif request.method == 'GET':
server = request.args.get('server')
query = request.args.get('search')
allrules = {}
if query != None:
allrules['rules'] = api.web.get_allrules(server, query)
else:
allrules['rules'] = api.web.get_allrules(server)
return jsonify(allrules)
@app.route('/api/rules/del', methods=['POST', 'DELETE'])
def del_client_rules():
if request.method == 'POST':
del_info = request.get_data().decode('utf-8')
del_id = json.loads(del_info)['id']
del_result = api.web.del_rules(del_id)
return del_result
if request.method == 'DELETE':
del_result = api.web.del_rules("all")
return del_result
@app.route('/api/rules/change', methods=['POST'])
def change_client_rules():
if request.method == 'POST':
change_info = request.get_data().decode('utf-8')
change_id = json.loads(change_info)['id']
change_type = json.loads(change_info)['type']
api.web.change_rules(change_id, change_type)
return "{} changed to type {}".format(change_id, change_type)
# @app.route('/api/pcap', methods=['POST'])
# def analyze_pcap():
# filename = request.form.get('filename')
# result = api.web.analyze_pcap(filename)
# return result
# @app.route('/api/demo', methods=['POST'])
# def demo_information():
# num = request.form.get('demonum')
# generate_demo_information(int(num))
# result = {}
# result['newdatenum'] = num
# return jsonify(result)
@app.route('/api/vulsearch', methods=['POST'])
def vul_search():
ip = request.form.get('query').strip('\n').strip('\r').strip()
search_result = api.web.vul_search(ip)
result = {}
result['data'] = search_result
return jsonify(result)
@app.route('/install.sh')
def send_install_file():
logger.info("{} ๅผๅงๅฎ่ฃ
".format(request.remote_addr))
install_file = api.web.customization_install(request.remote_addr)
return install_file
@app.route('/local.rules')
def get_clientrules():
logger.info("ไธๅ้ฒๅพก็ญ็ฅ่ณ {}".format(request.remote_addr))
clientrules = open(
'./ThirPath/marioips/rules/local.rules', 'r')
rulesfile = clientrules.read()
clientrules.close()
return rulesfile
@app.route('/marioips.tar.gz')
def send_conf_tar():
logger.info("{} ไธ่ฝฝๅฎขๆท็ซฏไธป็จๅบ".format(request.remote_addr))
file_dir = os.getcwd() + "/ThirPath/marioips/"
api.web.make_tar()
response = make_response(send_from_directory(
file_dir, "marioips.tar.gz", as_attachment=True))
return response
app.config['JSON_AS_ASCII'] = False
app.run(host='0.0.0.0', port=5000, debug=True)
| 37.157895
| 106
| 0.578081
|
7952d445d9c100ea5f467f1fd717585ed0f0dc89
| 337
|
py
|
Python
|
testMonthbox.py
|
altmind/npyscreen
|
8ce31204e1de1fbd2939ffe2d8c3b3120e93a4d0
|
[
"BSD-2-Clause"
] | 442
|
2015-06-01T15:02:34.000Z
|
2022-02-23T12:40:58.000Z
|
testMonthbox.py
|
altmind/npyscreen
|
8ce31204e1de1fbd2939ffe2d8c3b3120e93a4d0
|
[
"BSD-2-Clause"
] | 79
|
2015-06-23T15:06:51.000Z
|
2021-09-06T20:29:16.000Z
|
testMonthbox.py
|
altmind/npyscreen
|
8ce31204e1de1fbd2939ffe2d8c3b3120e93a4d0
|
[
"BSD-2-Clause"
] | 138
|
2015-07-07T20:05:21.000Z
|
2022-01-21T17:09:42.000Z
|
#!/bin/env python
import npyscreen
class MainFm(npyscreen.Form):
def create(self):
self.mb = self.add(npyscreen.MonthBox,
use_datetime = True)
class TestApp(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm("MAIN", MainFm)
if __name__ == "__main__":
A = TestApp()
A.run()
| 18.722222
| 46
| 0.620178
|
7952d60c21430e41013b1b0ef3079780b0ec6f18
| 6,150
|
py
|
Python
|
PythonClient/car/lidar_occ_grid.py
|
VegaVK/AirSim
|
c7c1f55a7d5dc97f1fdd98fb1df191db2576da12
|
[
"MIT"
] | null | null | null |
PythonClient/car/lidar_occ_grid.py
|
VegaVK/AirSim
|
c7c1f55a7d5dc97f1fdd98fb1df191db2576da12
|
[
"MIT"
] | null | null | null |
PythonClient/car/lidar_occ_grid.py
|
VegaVK/AirSim
|
c7c1f55a7d5dc97f1fdd98fb1df191db2576da12
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import rospy
import setup_path
import airsim
import cv2
import numpy as np
import os
import sys
import math
import setup_path
import argparse
import pprint
import time
from geometry_msgs.msg import Point
from visualization_msgs.msg import Marker
# Use below in settings.json with blocks environment
# Overall gains and parameters:
TimeStep=0.01
TotalRunTime=60
# Simple Velocity Tracker, Proportional only
Kp=0.3
TargetVel=5 # m/s
AccelScale=4.0
BrakeScale=2.0
binSize=0.2 # in meters, binning size for occupancy grid, for use on lidar data
rospy.init_node('ramp_merge', anonymous=True)
RdrMarkerPub = rospy.Publisher("radar_markers_rviz",Marker,queue_size=100)
def Plant(CaccAccel, PlantID): # Converts CaccAccel into throttle and brake inputs for each vehicle
# Saturation Stuff
if CaccAccel>AccelScale:
CaccAccel=AccelScale
elif CaccAccel<-BrakeScale:
CaccAccel=-BrakeScale
# Now rescale to [0,1] and set them
if CaccAccel>=0:
car_controls.throttle=float(CaccAccel/AccelScale)
car_controls.brake = 0
elif CaccAccel<0:
car_controls.throttle=0
car_controls.brake = -1.0*float(CaccAccel/BrakeScale)
client.setCarControls(car_controls, PlantID)
# if PlantID=="CarLLV": # Debugging stuff
# print(CaccAccel)
# print(car_controls.throttle)
return 0
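# Worked example of the mapping above: with AccelScale=4.0 and BrakeScale=2.0,
# CaccAccel=2.0 gives throttle 0.5 / brake 0, CaccAccel=-1.0 gives throttle 0 /
# brake 0.5, and requests outside [-2.0, 4.0] saturate to full brake/throttle.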
def parse_lidarData(data):
# reshape array of floats to array of [X,Y,Z]
points = np.array(data.point_cloud, dtype=np.dtype('f4'))
points = np.reshape(points, (int(points.shape[0]/3), 3))
return points
def filter_and_bin(points):
# Filter out all points on the ground and then bin them to the required resolution.
toDelList=[]
for pointIdx in range(len(points)):
# print(points[pointIdx])
# print(points[pointIdx][2])
if (points[pointIdx][2]<=-2) or (points[pointIdx][2]>=0.8) or (points[pointIdx][0]>=30) \
or (points[pointIdx][1]>=30): # Z-axis from pointcloud is inverted
# print(points[pointIdx][2])
# print('APPENDING idx' + str(pointIdx))
toDelList.append(pointIdx)
# print(toDelList)
# print('Before Filtering: '+ str(len(points)))
points=np.delete(points,toDelList,axis=0)
# print('After: ' +str(len(points)))
scaleFactor=1/binSize
# First scale all points, floor them and then rescale
points=points*scaleFactor
points=np.floor(points)
points=points/scaleFactor
return points
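# Binning example: with binSize=0.2 a point at x=3.47 m is scaled to 17.35,
# floored to 17 and rescaled to 3.4 m, so all points inside the same 0.2 m
# cell collapse onto one grid coordinate.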
def RadarMarkerPublisher(InputList,RadarPublisher):
# MarkerArrayIn=visualization_msgs.msg.MarkerArray()
markerTemp=Marker()
markerTemp.header.frame_id = "map"
markerTemp.type = markerTemp.CUBE_LIST
markerTemp.action = markerTemp.ADD
markerTemp.scale.x = 0.4
markerTemp.scale.y = 0.4
markerTemp.scale.z = 0.2
markerTemp.color.r = 0.0
markerTemp.color.g = 1.0
markerTemp.color.b = 1.0
markerTemp.color.a = 1.0
for itemDxj in InputList:
tempPoint=Point()
tempPoint.x=itemDxj[0]
tempPoint.y=-itemDxj[1]
tempPoint.z=0
if any(np.isnan([itemDxj[0],itemDxj[1],itemDxj[2]])):
# print('found NaN')
pass
else:
markerTemp.points.append(tempPoint)
RadarPublisher.publish(markerTemp)
# Create all cars and setup
client = airsim.CarClient()
client.confirmConnection()
# client.enableApiControl(True, "CarRLV")
client.enableApiControl(True, "CarR1")
client.enableApiControl(True, "CarR2")
client.enableApiControl(True, "CarR3")
client.enableApiControl(True, "CarR4")
client.enableApiControl(True, "CarR5")
client.enableApiControl(True, "CarR6")
client.enableApiControl(True, "CarR7")
client.enableApiControl(True, "CarR8")
client.enableApiControl(True, "CarR9")
client.enableApiControl(True, "CarR10")
car_controls= airsim.CarControls()
car_controls.is_manual_gear = False
startTime=time.time()
RunTime=time.time()-startTime
while RunTime<TotalRunTime: # Max Run time;
RunTime=time.time()-startTime
# Get all states
StateRLV = client.getCarState("CarFPV")
# print(StateRLV)
StateR1 = client.getCarState("CarR1")
StateR2 = client.getCarState("CarR2")
StateR3 = client.getCarState("CarR3")
StateR4 = client.getCarState("CarR4")
StateR5 = client.getCarState("CarR5")
StateR6 = client.getCarState("CarR6")
StateR7 = client.getCarState("CarR7")
StateR8 = client.getCarState("CarR8")
StateR9 = client.getCarState("CarR9")
StateR10 = client.getCarState("CarR10")
accelReq= Kp*(TargetVel-StateR1.speed)
Plant(accelReq, "CarR1")
accelReq= Kp*(TargetVel-StateR2.speed)
Plant(accelReq, "CarR2")
accelReq= Kp*(TargetVel-StateR3.speed)
Plant(accelReq, "CarR3")
accelReq= Kp*(TargetVel-StateR4.speed)
Plant(accelReq, "CarR4")
accelReq= Kp*(TargetVel-StateR5.speed)
Plant(accelReq, "CarR5")
accelReq= Kp*(TargetVel-StateR6.speed)
Plant(accelReq, "CarR6")
accelReq= Kp*(TargetVel-StateR7.speed)
Plant(accelReq, "CarR7")
accelReq= Kp*(TargetVel-StateR8.speed)
Plant(accelReq, "CarR8")
accelReq= Kp*(TargetVel-StateR9.speed)
Plant(accelReq, "CarR9")
accelReq= Kp*(TargetVel-StateR10.speed)
Plant(accelReq, "CarR10")
# Now just sleep so the cars are allowed to move
time.sleep(TimeStep)
# Get Lidar Data:
lidarData = client.getLidarData( lidar_name = 'LidarSensor1', vehicle_name = 'CarFPV')
if (len(lidarData.point_cloud) < 3):
print("\tNo points received from Lidar data")
else:
points = parse_lidarData(lidarData)
points=filter_and_bin(points)
RadarMarkerPublisher(points,RdrMarkerPub)
# print("\tTime_stamp: %d number_of_points: %d" % (lidarData.time_stamp, len(points)))
# print(points[0])
# print("\t\tlidar position: %s" % (pprint.pformat(lidarData.pose.position)))
# print("\t\tlidar orientation: %s" % (pprint.pformat(lidarData.pose.orientation)))
#restore to original state
client.reset()
client.enableApiControl(False)
print('done')
| 32.198953
| 99
| 0.687967
|
7952d6b36661d93c315d7500c0c1c017f48fd367
| 9,728
|
py
|
Python
|
src/train.py
|
prabhudevguntur/voice_conversion
|
0f6799870d8dbd2a61639bfb0530e8979cdd1ff7
|
[
"MIT"
] | null | null | null |
src/train.py
|
prabhudevguntur/voice_conversion
|
0f6799870d8dbd2a61639bfb0530e8979cdd1ff7
|
[
"MIT"
] | null | null | null |
src/train.py
|
prabhudevguntur/voice_conversion
|
0f6799870d8dbd2a61639bfb0530e8979cdd1ff7
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import itertools
import sys
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models import *
from data_proc import DataProc
import torch.nn as nn
import torch.nn.functional as F
import torch
from utils import plot_batch_train
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=0, help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("--model_name", type=str, help="name of the model")
parser.add_argument("--dataset", type=str, help="path to dataset for training")
parser.add_argument("--n_spkrs", type=int, default=2, help="number of speakers for conversion")
parser.add_argument("--batch_size", type=int, default=4, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0001, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=50, help="epoch from which to start lr decay")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_height", type=int, default=128, help="size of image height")
parser.add_argument("--img_width", type=int, default=128, help="size of image width")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--plot_interval", type=int, default=1, help="epoch interval between saving plots (disable with -1)")
parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model checkpoints")
parser.add_argument("--n_downsample", type=int, default=2, help="number downsampling layers in encoder")
parser.add_argument("--dim", type=int, default=32, help="number of filters in first encoder layer")
opt = parser.parse_args()
print(opt)
cuda = True if torch.cuda.is_available() else False
# Create sample and checkpoint directories
os.makedirs("saved_models/%s" % opt.model_name, exist_ok=True)
# Create plot output directories
if opt.plot_interval != -1:
os.makedirs("out_train/%s/plot_A2B/" % opt.model_name, exist_ok=True)
os.makedirs("out_train/%s/plot_B2A/" % opt.model_name, exist_ok=True)
# Losses
criterion_GAN = torch.nn.MSELoss()
criterion_pixel = torch.nn.L1Loss()
input_shape = (opt.channels, opt.img_height, opt.img_width)
# Dimensionality (channel-wise) of image embedding
shared_dim = opt.dim * 2 ** opt.n_downsample
# Initialize generator and discriminator
encoder = Encoder(dim=opt.dim, in_channels=opt.channels, n_downsample=opt.n_downsample)
shared_G = ResidualBlock(features=shared_dim)
G1 = Generator(dim=opt.dim, out_channels=opt.channels, n_upsample=opt.n_downsample, shared_block=shared_G)
G2 = Generator(dim=opt.dim, out_channels=opt.channels, n_upsample=opt.n_downsample, shared_block=shared_G)
D1 = Discriminator(input_shape)
D2 = Discriminator(input_shape)
if cuda:
encoder = encoder.cuda()
G1 = G1.cuda()
G2 = G2.cuda()
D1 = D1.cuda()
D2 = D2.cuda()
criterion_GAN.cuda()
criterion_pixel.cuda()
if opt.epoch != 0:
# Load pretrained models
encoder.load_state_dict(torch.load("saved_models/%s/encoder_%02d.pth" % (opt.model_name, opt.epoch)))
G1.load_state_dict(torch.load("saved_models/%s/G1_%02d.pth" % (opt.model_name, opt.epoch)))
G2.load_state_dict(torch.load("saved_models/%s/G2_%02d.pth" % (opt.model_name, opt.epoch)))
D1.load_state_dict(torch.load("saved_models/%s/D1_%02d.pth" % (opt.model_name, opt.epoch)))
D2.load_state_dict(torch.load("saved_models/%s/D2_%02d.pth" % (opt.model_name, opt.epoch)))
else:
# Initialize weights
encoder.apply(weights_init_normal)
G1.apply(weights_init_normal)
G2.apply(weights_init_normal)
D1.apply(weights_init_normal)
D2.apply(weights_init_normal)
# Loss weights
lambda_0 = 10 # GAN
lambda_1 = 0.1 # KL (encoded spect)
lambda_2 = 100 # ID pixel-wise
lambda_3 = 0.1 # KL (encoded translated spect)
lambda_4 = 100 # Cycle pixel-wise
lambda_5 = 10 # latent space L1
# Optimizers
optimizer_G = torch.optim.Adam(
itertools.chain(encoder.parameters(), G1.parameters(), G2.parameters()),
lr=opt.lr,
betas=(opt.b1, opt.b2),
)
optimizer_D1 = torch.optim.Adam(D1.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D2 = torch.optim.Adam(D2.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Learning rate update schedulers
lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
lr_scheduler_D1 = torch.optim.lr_scheduler.LambdaLR(
optimizer_D1, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
lr_scheduler_D2 = torch.optim.lr_scheduler.LambdaLR(
optimizer_D2, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step
)
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
# Prepare dataloader
dataloader = torch.utils.data.DataLoader(
DataProc(opt, split='train'),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
pin_memory=True
)
def compute_kl(mu):
mu_2 = torch.pow(mu, 2)
loss = torch.mean(mu_2)
return loss
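# compute_kl returns mean(mu^2); in this UNIT-style shared latent space (our
# reading of the model) it acts as the KL term toward a zero-mean,
# unit-variance Gaussian prior with the variance treated as fixed, weighted by
# lambda_1 / lambda_3 below.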
# ----------
# Training
# ----------
for epoch in range(opt.epoch, opt.n_epochs):
losses = {'G': [],'D': []}
progress = tqdm(enumerate(dataloader),desc='',total=len(dataloader))
for i, batch in progress:
# Set model input
X1 = Variable(batch["A"].type(Tensor))
X2 = Variable(batch["B"].type(Tensor))
# Adversarial ground truths
valid = Variable(Tensor(np.ones((X1.size(0), *D1.output_shape))), requires_grad=False)
fake = Variable(Tensor(np.zeros((X1.size(0), *D1.output_shape))), requires_grad=False)
# -------------------------------
# Train Encoder and Generators
# -------------------------------
optimizer_G.zero_grad()
# Get shared latent representation
mu1, Z1 = encoder(X1)
mu2, Z2 = encoder(X2)
# Latent space feat
feat_1 = mu1.view(mu1.size()[0], -1).mean(dim=0)
feat_2 = mu2.view(mu2.size()[0], -1).mean(dim=0)
# Reconstruct speech
recon_X1 = G1(Z1)
recon_X2 = G2(Z2)
# Translate speech
fake_X1 = G1(Z2)
fake_X2 = G2(Z1)
# Cycle translation
mu1_, Z1_ = encoder(fake_X1)
mu2_, Z2_ = encoder(fake_X2)
cycle_X1 = G1(Z2_)
cycle_X2 = G2(Z1_)
# Losses
loss_GAN_1 = lambda_0 * criterion_GAN(D1(fake_X1), valid)
loss_GAN_2 = lambda_0 * criterion_GAN(D2(fake_X2), valid)
loss_KL_1 = lambda_1 * compute_kl(mu1)
loss_KL_2 = lambda_1 * compute_kl(mu2)
loss_ID_1 = lambda_2 * criterion_pixel(recon_X1, X1)
loss_ID_2 = lambda_2 * criterion_pixel(recon_X2, X2)
loss_KL_1_ = lambda_3 * compute_kl(mu1_)
loss_KL_2_ = lambda_3 * compute_kl(mu2_)
loss_cyc_1 = lambda_4 * criterion_pixel(cycle_X1, X1)
loss_cyc_2 = lambda_4 * criterion_pixel(cycle_X2, X2)
loss_feat = lambda_5 * criterion_pixel(feat_1, feat_2)
# Total loss
loss_G = (
loss_KL_1
+ loss_KL_2
+ loss_ID_1
+ loss_ID_2
+ loss_GAN_1
+ loss_GAN_2
+ loss_KL_1_
+ loss_KL_2_
+ loss_cyc_1
+ loss_cyc_2
+ loss_feat
)
loss_G.backward()
optimizer_G.step()
# -----------------------
# Train Discriminator 1
# -----------------------
optimizer_D1.zero_grad()
loss_D1 = criterion_GAN(D1(X1), valid) + criterion_GAN(D1(fake_X1.detach()), fake)
loss_D1.backward()
optimizer_D1.step()
# -----------------------
# Train Discriminator 2
# -----------------------
optimizer_D2.zero_grad()
loss_D2 = criterion_GAN(D2(X2), valid) + criterion_GAN(D2(fake_X2.detach()), fake)
loss_D2.backward()
optimizer_D2.step()
# --------------
# Log Progress
# --------------
losses['G'].append(loss_G.item())
losses['D'].append((loss_D1 + loss_D2).item())
# Update progress bar
progress.set_description("[Epoch %d/%d] [D loss: %f] [G loss: %f] "
% (epoch,opt.n_epochs,np.mean(losses['D']), np.mean(losses['G'])))
        # Plot the first batch of every plot_interval-th epoch
if opt.plot_interval != -1 and epoch % opt.plot_interval == 0 and i == 0:
plot_batch_train(opt.model_name, 'plot_A2B', epoch, X1, cycle_X1, fake_X2, X2)
plot_batch_train(opt.model_name, 'plot_B2A', epoch, X2, cycle_X2, fake_X1, X1)
# Update learning rates
lr_scheduler_G.step()
lr_scheduler_D1.step()
lr_scheduler_D2.step()
if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
# Save model checkpoints
torch.save(encoder.state_dict(), "saved_models/%s/encoder_%02d.pth" % (opt.model_name, epoch))
torch.save(G1.state_dict(), "saved_models/%s/G1_%02d.pth" % (opt.model_name, epoch))
torch.save(G2.state_dict(), "saved_models/%s/G2_%02d.pth" % (opt.model_name, epoch))
torch.save(D1.state_dict(), "saved_models/%s/D1_%02d.pth" % (opt.model_name, epoch))
torch.save(D2.state_dict(), "saved_models/%s/D2_%02d.pth" % (opt.model_name, epoch))
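# Post-training sketch (added for illustration, not part of the original script):
# after the loop above, translating a domain-A spectrogram to domain B only needs
# the shared encoder and the domain-B generator. Using the sampled latent Z (as the
# training loop does) rather than the mean mu is an assumption of this sketch, and
# the input batch is expected to already live on the same device as the models.
def translate_A2B(x1):
    """Translate a batch of domain-A spectrograms to domain B."""
    with torch.no_grad():
        _, z1 = encoder(x1)
        return G2(z1)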
| 36.709434
| 121
| 0.663343
|
7952d8d0d8ac3b4f74da68cb9d1e2efc59cfae5c
| 3,098
|
py
|
Python
|
opsdroid/memory.py
|
JiahnChoi/opsdroid.kr
|
0893456b0f9f6c70edf7c330a7593d87450538cc
|
[
"Apache-2.0"
] | 712
|
2016-08-09T21:30:07.000Z
|
2022-03-24T09:38:21.000Z
|
opsdroid/memory.py
|
JiahnChoi/opsdroid.kr
|
0893456b0f9f6c70edf7c330a7593d87450538cc
|
[
"Apache-2.0"
] | 1,767
|
2016-07-27T13:01:25.000Z
|
2022-03-29T04:25:10.000Z
|
opsdroid/memory.py
|
JiahnChoi/opsdroid.kr
|
0893456b0f9f6c70edf7c330a7593d87450538cc
|
[
"Apache-2.0"
] | 536
|
2016-07-31T14:23:41.000Z
|
2022-03-22T17:35:15.000Z
|
"""Class for persisting information in opsdroid."""
import logging
_LOGGER = logging.getLogger(__name__)
class Memory:
"""A Memory object.
An object to obtain, store and persist data outside of opsdroid.
Attributes:
databases (:obj:`list` of :obj:`Database`): List of database objects.
"""
def __init__(self):
"""Create object with minimum properties."""
self.databases = []
async def get(self, key, default=None):
"""Get data object for a given key.
        Gets the value for the given key from the configured database(s).
Args:
key (str): Key to retrieve data.
Returns:
A data object for the given key, otherwise `None`.
"""
_LOGGER.debug(_("Getting %s from memory."), key)
result = await self._get_from_database(key)
return result or default
async def put(self, key, data):
"""Put a data object to a given key.
        Stores the key and value in the configured database(s).
Args:
key (str): Key for the data to store.
data (obj): Data object to store.
"""
_LOGGER.debug(_("Putting %s to memory."), key)
await self._put_to_database(key, data)
async def delete(self, key):
"""Delete data object for a given key.
        Deletes the value for the given key from the configured database(s).
Args:
key (str): Key to delete data.
"""
_LOGGER.debug(_("Deleting %s from memory."), key)
await self._delete_from_database(key)
async def _get_from_database(self, key):
"""Get updates from databases for a given key.
Gets the first key value found from the database(s).
Args:
key (str): Key to retrieve data from a database.
Returns:
The first key value (data object) found from the database(s).
Or `None` when no database is defined or no value is found.
Todo:
* Handle multiple databases
"""
if not self.databases:
return None # pragma: nocover
results = []
for database in self.databases:
results.append(await database.get(key))
return results[0]
async def _put_to_database(self, key, data):
"""Put updates into databases for a given key.
Stores the key and value on each database defined.
Args:
key (str): Key for the data to store.
data (obj): Data object to store.
"""
if self.databases:
for database in self.databases:
await database.put(key, data)
async def _delete_from_database(self, key):
"""Delete data from databases for a given key.
Deletes the key and value on each database defined.
Args:
key (str): Key for the data to delete.
"""
if self.databases:
for database in self.databases:
await database.delete(key)
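# Illustrative usage sketch (not part of opsdroid): Memory simply fans each
# get/put/delete out to whatever database connectors are registered in
# `databases`. FakeDatabase below is a hypothetical stand-in for a real opsdroid
# Database class, and the gettext install is an assumption -- the `_()` calls in
# this module expect the host application to have installed a translation function.
if __name__ == "__main__":
    import asyncio
    import gettext

    gettext.NullTranslations().install()  # provides the builtins `_` used above

    class FakeDatabase:
        """Minimal in-process stand-in for a database connector."""

        def __init__(self):
            self._data = {}

        async def get(self, key):
            return self._data.get(key)

        async def put(self, key, data):
            self._data[key] = data

        async def delete(self, key):
            self._data.pop(key, None)

    async def demo():
        memory = Memory()
        memory.databases.append(FakeDatabase())
        await memory.put("greeting", "hello")
        print(await memory.get("greeting"))                  # -> hello
        await memory.delete("greeting")
        print(await memory.get("greeting", default="gone"))  # -> gone

    asyncio.run(demo())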
| 26.93913
| 77
| 0.585862
|
7952d9c8f97cebb4f884183a34924c457f717577
| 1,931
|
py
|
Python
|
pipenv/resolver.py
|
ksonj/pipenv
|
72bbd6206ae80fcdbcb25a11a7a289a12ad5c1d6
|
[
"MIT"
] | null | null | null |
pipenv/resolver.py
|
ksonj/pipenv
|
72bbd6206ae80fcdbcb25a11a7a289a12ad5c1d6
|
[
"MIT"
] | null | null | null |
pipenv/resolver.py
|
ksonj/pipenv
|
72bbd6206ae80fcdbcb25a11a7a289a12ad5c1d6
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import logging
os.environ['PIP_PYTHON_PATH'] = sys.executable
for _dir in ('vendor', 'patched', '..'):
dirpath = os.path.sep.join([os.path.dirname(__file__), _dir])
sys.path.insert(0, dirpath)
def which(*args, **kwargs):
return sys.executable
def main():
is_verbose = '--verbose' in ' '.join(sys.argv)
do_pre = '--pre' in ' '.join(sys.argv)
do_clear = '--clear' in ' '.join(sys.argv)
is_debug = '--debug' in ' '.join(sys.argv)
new_sys_argv = []
for v in sys.argv:
if v.startswith('--'):
continue
else:
new_sys_argv.append(v)
sys.argv = new_sys_argv
import pipenv.core
if is_verbose:
logging.getLogger('pip9').setLevel(logging.INFO)
logging.getLogger('notpip').setLevel(logging.INFO)
if is_debug:
# Shit's getting real at this point.
logging.getLogger('pip9').setLevel(logging.DEBUG)
logging.getLogger('notpip').setLevel(logging.DEBUG)
if 'PIPENV_PACKAGES' in os.environ:
packages = os.environ['PIPENV_PACKAGES'].strip().split('\n')
else:
packages = sys.argv[1:]
    # Drop flag-like entries; deleting from the list while enumerating it would
    # skip the element that follows each removal.
    packages = [package for package in packages if not package.startswith('--')]
project = pipenv.core.project
def resolve(packages, pre, sources, verbose, clear):
import pipenv.utils
return pipenv.utils.resolve_deps(
packages,
which,
project=project,
pre=pre,
sources=sources,
clear=clear,
verbose=verbose,
)
results = resolve(
packages,
pre=do_pre,
sources=project.sources,
verbose=is_verbose,
clear=do_clear,
)
print('RESULTS:')
if results:
print(json.dumps(results))
else:
print(json.dumps([]))
if __name__ == '__main__':
main()
| 25.407895
| 68
| 0.5826
|
7952dc01bb9276dbc38704715c2103054e876005
| 3,306
|
py
|
Python
|
CRUD/settings.py
|
Anik-Bardhan/Employee-List
|
70f9361ce2a67c0a4966fb3075a53adc7cf62607
|
[
"MIT"
] | null | null | null |
CRUD/settings.py
|
Anik-Bardhan/Employee-List
|
70f9361ce2a67c0a4966fb3075a53adc7cf62607
|
[
"MIT"
] | null | null | null |
CRUD/settings.py
|
Anik-Bardhan/Employee-List
|
70f9361ce2a67c0a4966fb3075a53adc7cf62607
|
[
"MIT"
] | null | null | null |
"""
Django settings for CRUD project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-$&p($2#g@-yll5-1tlw-jq(871(fza__hmx=%6$)rp@oe%*!qh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'register',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CRUD.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CRUD.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.430769
| 91
| 0.698427
|
7952dc47df56e7abe32825640599d818abb6b3e1
| 506
|
py
|
Python
|
Interview_Preparation_Kit/balanced-brackets.py
|
csixteen/HackerRank
|
3ef6fa48599341f481b9e266c69df2d449a7b313
|
[
"MIT"
] | 4
|
2018-04-19T20:32:54.000Z
|
2020-04-21T12:28:00.000Z
|
Interview_Preparation_Kit/balanced-brackets.py
|
csixteen/HackerRank
|
3ef6fa48599341f481b9e266c69df2d449a7b313
|
[
"MIT"
] | null | null | null |
Interview_Preparation_Kit/balanced-brackets.py
|
csixteen/HackerRank
|
3ef6fa48599341f481b9e266c69df2d449a7b313
|
[
"MIT"
] | null | null | null |
from collections import deque
def isBalanced(s):
brackets_close = {')': '(', '}': '{', ']': '['}
stack = deque(s[0])
for c in s[1:]:
if c in brackets_close:
if len(stack) > 0 and brackets_close[c] == stack[-1]:
stack.pop()
else:
return 'NO'
else:
stack.append(c)
return 'NO' if len(stack) > 0 else 'YES'
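# Illustrative self-check (added for this write-up, not part of the original
# solution); the cases mirror the classic sample inputs and the function's
# YES/NO string contract. It is defined but only runs if called explicitly.
def _check_examples():
    assert isBalanced('{[()]}') == 'YES'
    assert isBalanced('{[(])}') == 'NO'
    assert isBalanced('{{[[(())]]}}') == 'YES'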
if __name__ == '__main__':
for _ in range(int(input())):
print(isBalanced(input()))
| 22
| 65
| 0.488142
|
7952dc61a2a90601e50cb1193be023c9d0e4eca0
| 6,074
|
py
|
Python
|
tests/django_init.py
|
MicrohexHQ/peeringdb
|
179377ba778c594a1af3cf55a77f3b11e57f4b06
|
[
"BSD-2-Clause"
] | null | null | null |
tests/django_init.py
|
MicrohexHQ/peeringdb
|
179377ba778c594a1af3cf55a77f3b11e57f4b06
|
[
"BSD-2-Clause"
] | null | null | null |
tests/django_init.py
|
MicrohexHQ/peeringdb
|
179377ba778c594a1af3cf55a77f3b11e57f4b06
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
# lazy init for translations
_ = lambda s: s
#from django.utils.translation import ugettext_lazy as _
settings.configure(
PACKAGE_VERSION="dev",
RELEASE_ENV="dev",
MIGRATION_MODULES={"django_peeringdb":None},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.sites',
'django_inet',
'django_peeringdb',
'django_namespace_perms',
'django_countries',
'oauth2_provider',
'peeringdb_server',
'allauth',
'allauth.account',
'reversion',
'rest_framework',
'dal',
'dal_select2',
'corsheaders',
'captcha',
],
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "django_cache"
}
},
TEMPLATES=[{
"BACKEND": 'django.template.backends.django.DjangoTemplates',
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
#"loaders" : TEMPLATE_LOADERS
}
}],
LANGUAGE_CODE='en-us',
LANGUAGES=[
('en', _('English')),
('pt', _('Portuguese')),
],
USE_L10N=True,
USE_I18N=True,
MIDDLEWARE_CLASSES=(
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'peeringdb_server.maintenance.Middleware',
),
SOUTH_TESTS_MIGRATE=False,
SOUTH_SKIP_TESTS=True,
AUTH_USER_MODEL='peeringdb_server.User',
TABLE_PREFIX='peeringdb_',
PEERINGDB_ABSTRACT_ONLY=True,
COUNTRIES_OVERRIDE={'XK': _('Kosovo')},
CLIENT_COMPAT={
"client":{"min": (0,6), "max":(0,6,5)},
"backends":{
"django_peeringdb":{"min":(0,6), "max":(0,6,5)}
}
},
DATABASE_ENGINE='django.db.backends.sqlite3',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
        #XXX - this is supposed to work to mimic replication
        # during tests, but doesn't. So we use the
        # peeringdb_server.db_router.TestRouter class instead,
        # which always uses the default db for reads and writes
#'read' : {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': ':memory:',
# 'TEST' : { 'MIRROR' : 'default' }
#}
},
    #XXX - change to peeringdb_server.db_router.DatabaseRouter
    #if replication mimicking (see above) gets fixed
DATABASE_ROUTERS=["peeringdb_server.db_router.TestRouter"],
DEBUG=False,
GUEST_GROUP_ID=1,
USER_GROUP_ID=2,
TEMPLATE_DEBUG=False,
    BASE_URL="localhost",
PASSWORD_RESET_URL="localhost",
API_CACHE_ROOT="tests/api-cache",
API_CACHE_ENABLED=False,
SUGGEST_ENTITY_ORG=1234,
API_URL="localhost",
REST_FRAMEWORK={
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication'),
'DEFAULT_MODEL_SERIALIZER_CLASS': 'rest_framework.serializers.HyperlinkedModelSerializer',
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
'django_namespace_perms.rest.BasePermission',
],
'DEFAULT_RENDERER_CLASSES': (
'peeringdb_server.renderers.MetaJSONRenderer', )
},
NSP_MODE="crud",
NSP_GUEST_GROUP="guest",
DEBUG_EMAIL=True,
TIME_ZONE="UTC",
USE_TZ=True,
AUTHENTICATION_BACKENDS=(
"django_namespace_perms.auth.backends.NSPBackend", ),
ROOT_URLCONF="peeringdb_com.urls",
LOGGING={
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'stderr': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['stderr'],
'level': 'DEBUG',
'propagate': False
},
},
},
OAUTH_ENABLED=False,
RECAPTCHA_PUBLIC_KEY="",
EMAIL_SUBJECT_PREFIX="[test]",
CORS_ORIGIN_WHITELIST=[],
CORS_ALLOW_METHODS=["GET", "OPTIONS"],
CORS_ALLOW_CREDENTIALS=False,
DATA_QUALITY_MAX_PREFIX_V4_LIMIT=500000,
DATA_QUALITY_MAX_PREFIX_V6_LIMIT=500000,
DATA_QUALITY_MIN_PREFIXLEN_V4 = 18,
DATA_QUALITY_MAX_PREFIXLEN_V4 = 28,
DATA_QUALITY_MIN_PREFIXLEN_V6 = 64,
DATA_QUALITY_MAX_PREFIXLEN_V6 = 116,
TUTORIAL_MODE=False,
CAPTCHA_TEST_MODE=True,
SITE_ID=1,
IXF_POSTMORTEM_LIMIT=250,
ABSTRACT_ONLY=True,
GOOGLE_GEOLOC_API_KEY="AIzatest",
RATELIMITS={
"view_affiliate_to_org_POST": "100/m",
"resend_confirmation_mail": "2/m",
"view_request_ownership_GET": "3/m",
"view_username_retrieve_initiate": "2/m",
"view_request_ownership_POST": "3/m",
"request_login_POST": "10/m",
"view_verify_POST": "2/m",
"request_translation": "10/m",
"view_import_ixlan_ixf_preview": "1/m",
"view_import_net_ixf_postmortem": "1/m"
})
| 33.558011
| 98
| 0.613928
|
7952dc8a75e875e334299f135e89fe68078f97df
| 1,205
|
py
|
Python
|
quick_sort.py
|
sazlin/data-structures
|
a7ec654dfcb46fdb1e3ccbf16d02c62e36ff2f36
|
[
"MIT"
] | 1
|
2015-02-17T06:23:53.000Z
|
2015-02-17T06:23:53.000Z
|
quick_sort.py
|
sazlin/data-structures
|
a7ec654dfcb46fdb1e3ccbf16d02c62e36ff2f36
|
[
"MIT"
] | null | null | null |
quick_sort.py
|
sazlin/data-structures
|
a7ec654dfcb46fdb1e3ccbf16d02c62e36ff2f36
|
[
"MIT"
] | null | null | null |
def quick_sort(values):
"""simple quick sort implementation"""
if len(values) == 0:
return []
elif len(values) == 1:
return values
elif len(values) == 2:
if values[0] > values[1]:
return values[::-1]
else:
return values
pivot = values[0]
less_list = [x for x in values if x < pivot]
more_list = [x for x in values if x > pivot]
same_list = [x for x in values if x == pivot] # keep track of dupes
less_list = less_list + same_list
if len(more_list) == 0:
more_list.append(less_list.pop())
return quick_sort(less_list) + quick_sort(more_list)
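# Small self-check added for illustration (not in the original file). Duplicates
# of the pivot are carried along in less_list, so the output keeps every element;
# the helper only runs if called explicitly and works under Python 2 and 3.
def _quick_sort_examples():
    assert quick_sort([]) == []
    assert quick_sort([3, 1, 2, 2]) == [1, 2, 2, 3]
    assert quick_sort([5, 5, 5]) == [5, 5, 5]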
if __name__ == '__main__':
import timeit
print "Quick Sort | Worst Case |",\
timeit.timeit(
setup="""
from quick_sort import quick_sort
worst_case_values = [i for i in xrange(100,1,-1)]
""",
stmt="quick_sort(worst_case_values)",
number=100)
print "Quick Sort | Best Case |",\
timeit.timeit(
setup="""
from quick_sort import quick_sort
best_case_values = [i for i in xrange(1,100,1)]
""",
stmt="quick_sort(best_case_values)",
number=100)
| 30.125
| 72
| 0.580083
|
7952dccce0cf433f8f4ce4f89ca135aca74690aa
| 2,033
|
py
|
Python
|
tests/conftest.py
|
jackdesert/anony-mouse
|
61e111ef6a38ea9e440fc76610676555dce1795c
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
jackdesert/anony-mouse
|
61e111ef6a38ea9e440fc76610676555dce1795c
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
jackdesert/anony-mouse
|
61e111ef6a38ea9e440fc76610676555dce1795c
|
[
"MIT"
] | null | null | null |
import os
from pyramid.paster import get_appsettings
from pyramid.scripting import prepare
from pyramid.testing import DummyRequest, testConfig
import pytest
import webtest
from amouse import main
def pytest_addoption(parser):
parser.addoption('--ini', action='store', metavar='INI_FILE')
@pytest.fixture(scope='session')
def ini_file(request):
# potentially grab this path from a pytest option
return os.path.abspath(request.config.option.ini or 'testing.ini')
@pytest.fixture(scope='session')
def app_settings(ini_file):
return get_appsettings(ini_file)
@pytest.fixture(scope='session')
def app(app_settings):
return main({}, **app_settings)
@pytest.fixture
def testapp(app):
testapp = webtest.TestApp(
app,
extra_environ={
'HTTP_HOST': 'example.com',
},
)
return testapp
@pytest.fixture
def app_request(app):
"""
A real request.
This request is almost identical to a real request but it has some
drawbacks in tests as it's harder to mock data and is heavier.
"""
with prepare(registry=app.registry) as env:
request = env['request']
request.host = 'example.com'
yield request
@pytest.fixture
def dummy_request():
"""
A lightweight dummy request.
This request is ultra-lightweight and should be used only when the request
itself is not a large focus in the call-stack. It is much easier to mock
and control side-effects using this object, however:
- It does not have request extensions applied.
- Threadlocals are not properly pushed.
"""
request = DummyRequest()
request.host = 'example.com'
return request
@pytest.fixture
def dummy_config(dummy_request):
"""
A dummy :class:`pyramid.config.Configurator` object. This allows for
mock configuration, including configuration for ``dummy_request``, as well
as pushing the appropriate threadlocals.
"""
with testConfig(request=dummy_request) as config:
yield config
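# Illustrative only (added here, not part of the original conftest): a test module
# in the same suite consumes these fixtures by requesting them as arguments. The
# hypothetical example below would normally live in a tests/test_*.py file, since
# pytest does not collect tests from conftest.py itself.
def test_dummy_request_host_example(dummy_request):
    """Hypothetical example: the dummy_request fixture pins the host."""
    assert dummy_request.host == 'example.com'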
| 23.367816
| 78
| 0.699951
|
7952dd8663c79f11a2df45c8e1155213164a9bef
| 109
|
py
|
Python
|
Sessions/S04/module2.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
Sessions/S04/module2.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
Sessions/S04/module2.py
|
paulbordea/session3
|
a2edf697cbe9b9aa98383562416d4fdf1e19b482
|
[
"MIT"
] | null | null | null |
def f():
print('f executed from module 2')
if __name__ == '__main__':
print('We are in module 2')
| 13.625
| 37
| 0.605505
|
7952dd9397307dafc607ed562a985e589e5af5a6
| 695
|
py
|
Python
|
validation_tool/iout/ancillary_data.py
|
TUW-GEO/web-validation-tool
|
e73faeeda0a5abe4366f1dd39c77d2e63d8bae93
|
[
"MIT"
] | null | null | null |
validation_tool/iout/ancillary_data.py
|
TUW-GEO/web-validation-tool
|
e73faeeda0a5abe4366f1dd39c77d2e63d8bae93
|
[
"MIT"
] | null | null | null |
validation_tool/iout/ancillary_data.py
|
TUW-GEO/web-validation-tool
|
e73faeeda0a5abe4366f1dd39c77d2e63d8bae93
|
[
"MIT"
] | null | null | null |
'''
Created on Jun 26, 2013
@author: pydev
'''
import scipy.io as sc_io
import numpy as np
import general.grid.dgg.warp_grid_constants as dgg_const
import psycopg2 as psql
# ind_ld holds the land grid-point indices iterated over below; leaving these two
# lines commented out would make the script fail with a NameError on ind_ld.
ind_ld = dgg_const.ind_ld()
porosity = sc_io.readsav('/media/sf_D/porosity_ind_ld.sav')['porosity']
porosity_top=porosity['porosity_top']
conn = psql.connect("host=dbs1.ipf.tuwien.ac.at dbname=grid user=cpa password=pass4cpa")
# Open a cursor to perform database operations
cur = conn.cursor()
for i,gpi in enumerate(ind_ld.tolist()):
command = "UPDATE warp_grid SET porosity_top = '%.6f' WHERE id = '%d'"%(porosity_top[i],gpi)
cur.execute(command)
conn.commit()
cur.close()
conn.close()
| 20.441176
| 96
| 0.717986
|
7952ddc643f193680f513dcde6ca7fba6cbaa322
| 5,539
|
py
|
Python
|
girder/api/docs.py
|
adsorensen/girder
|
ae461d1198e6173f36168a71d4f7a9a5f66e6b70
|
[
"Apache-2.0"
] | null | null | null |
girder/api/docs.py
|
adsorensen/girder
|
ae461d1198e6173f36168a71d4f7a9a5f66e6b70
|
[
"Apache-2.0"
] | null | null | null |
girder/api/docs.py
|
adsorensen/girder
|
ae461d1198e6173f36168a71d4f7a9a5f66e6b70
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import collections
import functools
import six
from girder import logprint
models = collections.defaultdict(dict)
# routes is dict of dicts of dicts
# e.g. routes[resource][path][method]
routes = collections.defaultdict(
functools.partial(collections.defaultdict, dict))
def _toRoutePath(resource, route):
"""
Convert a base resource type and list of route components into a
Swagger-compatible route path.
"""
# Convert wildcard tokens from :foo form to {foo} form
convRoute = [
'{%s}' % token[1:] if token[0] == ':' else token
for token in route
]
path = '/'.join(['', resource] + convRoute)
return path
def _toOperation(info, resource, handler):
"""
Augment route info, returning a Swagger-compatible operation description.
"""
operation = dict(info)
operation['tags'] = [resource]
# Operation Object spec:
# Unique string used to identify the operation. The id MUST be unique among
# all operations described in the API.
if 'operationId' not in operation:
operation['operationId'] = resource + '_' + handler.__name__
return operation
def addRouteDocs(resource, route, method, info, handler):
"""
This is called for route handlers that have a description attr on them. It
gathers the necessary information to build the swagger documentation, which
is consumed by the docs.Describe endpoint.
:param resource: The name of the resource, e.g. "item"
:type resource: str
:param route: The route to describe.
:type route: tuple[str]
:param method: The HTTP method for this route, e.g. "POST"
:type method: str
:param info: The information representing the API documentation, typically
from ``girder.api.describe.Description.asDict``.
:type info: dict
:param handler: The actual handler method for this route.
:type handler: function
"""
path = _toRoutePath(resource, route)
operation = _toOperation(info, resource, handler)
# Add the operation to the given route
if method not in routes[resource][path]:
routes[resource][path][method] = operation
def removeRouteDocs(resource, route, method, info, handler):
"""
Remove documentation for a route handler.
:param resource: The name of the resource, e.g. "item"
:type resource: str
:param route: The route to describe.
:type route: tuple[str]
:param method: The HTTP method for this route, e.g. "POST"
:type method: str
:param info: The information representing the API documentation.
:type info: dict
:param handler: The actual handler method for this route.
:type handler: function
"""
if resource not in routes:
return
path = _toRoutePath(resource, route)
if path not in routes[resource]:
return
if method in routes[resource][path]:
del routes[resource][path][method]
# Clean up any empty route paths
if not routes[resource][path]:
del routes[resource][path]
if not routes[resource]:
del routes[resource]
def addModel(name, model, resources=None, silent=False):
"""
Add a model to the Swagger documentation.
:param resources: The type(s) of resource(s) to add the model to. New
resource types may be implicitly defined, with the expectation that
routes will be added for them at some point. If no resources are
passed, the model will be exposed for every resource type
:param resources: str or tuple/list[str]
:param name: The name of the model.
:type name: str
:param model: The model to add.
:type model: dict
:param silent: Set this to True to suppress warnings.
:type silent: bool
.. warning:: This is a low-level API which does not validate the format of
``model``. See the `Swagger Model documentation`_ for a complete
specification of the correct format for ``model``.
    .. versionchanged:: The syntax and behavior of this function were modified
after v1.3.2. The previous implementation did not include a resources
parameter.
.. _Swagger Model documentation: https://github.com/OAI/
OpenAPI-Specification/blob/0122c22e7fb93b571740dd3c6e141c65563a18be/
versions/2.0.md#definitionsObject
"""
if resources:
if isinstance(resources, six.string_types):
resources = (resources,)
for resource in resources:
models[resource][name] = model
else:
if not silent:
logprint.warning(
'WARNING: adding swagger models without specifying resources '
'to bind to is discouraged (%s).' % name)
models[None][name] = model
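# Hedged illustration (not from the Girder codebase): how plugin-style code might
# feed this registry. The 'Widget' model body, resource name and handler are
# invented for the example; only addModel/addRouteDocs come from this module, and
# the helper is defined without being called.
def _example_registration():
    def _list_widgets(params):
        """Stub handler; a real handler would be a decorated resource method."""
        return []

    addModel('Widget', {
        'id': 'Widget',
        'properties': {
            'name': {'type': 'string'}
        }
    }, resources='widget')
    addRouteDocs(
        resource='widget',
        route=(),
        method='GET',
        info={'summary': 'List widgets.', 'responses': {}},
        handler=_list_widgets)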
| 34.836478
| 79
| 0.657339
|
7952dec39289e9e4e164e9af1642a14cd5f7c5ce
| 12,850
|
py
|
Python
|
lib/python/treadmill/tests/presence_test.py
|
krcooke/treadmill
|
613008fee88a150f983ab12d8ef2e118fb77bb51
|
[
"Apache-2.0"
] | 133
|
2016-09-15T13:36:12.000Z
|
2021-01-18T06:29:13.000Z
|
lib/python/treadmill/tests/presence_test.py
|
krcooke/treadmill
|
613008fee88a150f983ab12d8ef2e118fb77bb51
|
[
"Apache-2.0"
] | 108
|
2016-12-28T23:41:27.000Z
|
2020-03-05T21:20:37.000Z
|
lib/python/treadmill/tests/presence_test.py
|
krcooke/treadmill
|
613008fee88a150f983ab12d8ef2e118fb77bb51
|
[
"Apache-2.0"
] | 69
|
2016-09-23T20:38:58.000Z
|
2020-11-11T02:31:21.000Z
|
"""Unit test for Treadmill linux runtime presence module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import shutil
import tempfile
import time
import unittest
import kazoo
import kazoo.client
import mock
import treadmill
from treadmill import exc
from treadmill import presence
from treadmill.tests.testutils import mockzk
PROCCGROUPS = """#subsys_name hierarchy num_cgroups enabled
cpuset 6 1 1
cpu 7 1 1
cpuacct 7 1 1
memory 4 1 1
devices 3 20 1
freezer 8 1 1
net_cls 2 1 1
blkio 10 1 1
perf_event 11 1 1
hugetlb 9 1 1
pids 5 1 1
net_prio 2 1 1"""
# pylint: disable=C0301
PROCMOUNTS = """rootfs / rootfs rw 0 0
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=239696k,nr_inodes=59924,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_prio,net_cls 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
vagrant /vagrant vboxsf rw,nodev,relatime 0 0
home_centos_treadmill /home/centos/treadmill vboxsf rw,nodev,relatime 0 0
home_centos_treadmill-pid1 /home/centos/treadmill-pid1 vboxsf rw,nodev,relatime 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0""" # noqa: E501
_ORIGINAL_OPEN = open
def _open_side_effect(path, *args):
if path == '/proc/mounts':
return io.StringIO(PROCMOUNTS)
elif path == '/proc/cgroups':
return io.StringIO(PROCCGROUPS)
else:
return _ORIGINAL_OPEN(path, *args)
class PresenceTest(mockzk.MockZookeeperTestCase):
"""Mock test for treadmill.presence."""
def setUp(self):
self.root = tempfile.mkdtemp()
self.events_dir = os.path.join(self.root, 'appevents')
os.mkdir(self.events_dir)
self.zkclient = treadmill.zkutils.ZkClient()
super(PresenceTest, self).setUp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
    @mock.patch('time.sleep', mock.Mock())
def test_registration(self):
"""Verifies presence registration."""
treadmill.sysinfo.hostname.return_value = 'myhostname'
manifest = {
'task': 't-0001',
'name': 'foo.test1',
'uniqueid': 'AAAAAA',
'proid': 'andreik',
'services': [
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'web_server',
'restart': {
'interval': 60,
'limit': 3
}
},
{
'command': '/usr/bin/python -m SimpleHTTPServer',
'name': 'another_server'
},
{
'command': 'sshd -D -f /etc/ssh/sshd_config',
'name': 'sshd',
'proid': None
}
],
'endpoints': [
{
'port': 22,
'name': 'ssh',
'real_port': 5001,
},
{
'port': 8000,
'name': 'http',
'real_port': 5000,
}
]
}
app_presence = presence.EndpointPresence(self.zkclient, manifest)
app_presence.register_endpoints()
kazoo.client.KazooClient.create.assert_has_calls(
[
mock.call(
'/endpoints/foo/test1:tcp:ssh',
value=b'myhostname:5001',
acl=mock.ANY,
ephemeral=True, makepath=True, sequence=False
),
mock.call(
'/endpoints/foo/test1:tcp:http',
value=b'myhostname:5000',
acl=mock.ANY,
ephemeral=True, makepath=True, sequence=False
),
]
)
retry_happened = []
def node_exists(*_args, **_kwargs):
"""Simulate existence of ephemeral node."""
if retry_happened:
return
else:
retry_happened.append(1)
raise kazoo.client.NodeExistsError()
        kazoo.client.KazooClient.create.reset_mock()
kazoo.client.KazooClient.create.side_effect = node_exists
kazoo.client.KazooClient.get.return_value = (b'{}', {})
app_presence.register_endpoints()
self.assertTrue(retry_happened)
self.assertTrue(time.sleep.called)
kazoo.client.KazooClient.create.assert_has_calls(
[
mock.call(
'/endpoints/foo/test1:tcp:ssh',
value=b'myhostname:5001',
acl=mock.ANY,
ephemeral=True, makepath=True, sequence=False
),
mock.call(
'/endpoints/foo/test1:tcp:http',
value=b'myhostname:5000',
acl=mock.ANY,
ephemeral=True, makepath=True, sequence=False
),
]
)
        kazoo.client.KazooClient.create.reset_mock()
kazoo.client.KazooClient.create.side_effect = (
kazoo.client.NodeExistsError
)
self.assertRaises(exc.ContainerSetupError,
app_presence.register_endpoints)
@mock.patch('kazoo.client.KazooClient.get', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
def test_kill(self):
"""Checks removal of the endpoints."""
zk_content = {
'running': {
'myproid.aaa': b'xxx.xx.com',
'myproid.bbb': b'yyy.xx.com'
},
'endpoints': {
'myproid': {
'aaa:tcp:http': b'xxx.xx.com:1234',
'bbb:tcp:http': b'yyy.xx.com:1234',
},
},
'servers': {
'xxx.xx.com': {},
},
'server.presence': {
'xxx.xx.com': {},
},
'placement': {
'xxx.xx.com': {
'myproid.aaa': {},
'myproid.bbb': {},
}
},
'scheduled': {
'myproid.aaa': {
'endpoints': [{'name': 'http', 'port': 8888}],
},
'myproid.bbb': {
'endpoints': [{'name': 'http', 'port': 8888}],
},
}
}
self.make_mock_zk(zk_content)
presence.kill_node(self.zkclient, 'xxx.xx.com')
# aaa running node is removed.
self.assertNotIn('myproid.aaa', zk_content['running'])
# bbb is not removed, as 'running' node has different hostname.
self.assertIn('myproid.bbb', zk_content['running'])
# Same for endpoints - aaa is removed, bbb is not.
self.assertNotIn('aaa:tcp:http', zk_content['endpoints']['myproid'])
self.assertIn('bbb:tcp:http', zk_content['endpoints']['myproid'])
self.assertNotIn('xxx.xx.com', zk_content['server.presence'])
def test_server_node(self):
"""Test returning server.presence node for hostname and presence_id."""
self.assertEqual(
presence.server_node('xxx.xx.com', '-1'),
'xxx.xx.com'
)
self.assertEqual(
presence.server_node('yyy.yy.com', '12345'),
'yyy.yy.com#12345'
)
def test_parse_server(self):
"""Test returning hostname and presence_id for server.presence node."""
self.assertEqual(
presence.parse_server('xxx.xx.com'),
('xxx.xx.com', '-1')
)
self.assertEqual(
presence.parse_server('yyy.yy.com#12345'),
('yyy.yy.com', '12345')
)
def test_server_hostname(self):
"""Test returning hostname for given server.presence node."""
self.assertEqual(presence.server_hostname('xxx.xx.com'), 'xxx.xx.com')
self.assertEqual(
presence.server_hostname('yyy.yy.com#12345'),
'yyy.yy.com'
)
def test_find_server(self):
"""Test finding server."""
zkclient_mock = mock.Mock()
zkclient_mock.get_children.return_value = [
'yyy.yy.com#12345', 'zzz.zz.com'
]
self.assertEqual(
presence.find_server(zkclient_mock, 'xxx.xx.com'),
None
)
self.assertEqual(
presence.find_server(zkclient_mock, 'yyy.yy.com'),
'/server.presence/yyy.yy.com#12345'
)
self.assertEqual(
presence.find_server(zkclient_mock, 'zzz.zz.com'),
'/server.presence/zzz.zz.com'
)
def test_register_server(self):
"""Test registering server."""
zkclient_mock = mock.Mock()
zkclient_mock.get.return_value = (b"{parent: 'rack:test123'}\n", None)
zkclient_mock.create.return_value = '/server.presence/xxx.xx.com#12345'
server_presence_path = presence.register_server(
zkclient_mock, 'xxx.xx.com', {'up_since': '123.45'}
)
self.assertEqual(
server_presence_path,
'/server.presence/xxx.xx.com#12345'
)
zkclient_mock.set.assert_called_once_with(
'/servers/xxx.xx.com',
b'{"parent": "rack:test123", "up_since": "123.45"}'
)
zkclient_mock.create.assert_called_once_with(
'/server.presence/xxx.xx.com#',
b'{"seen": false}',
acl=mock.ANY, ephemeral=True, makepath=True, sequence=True
)
def test_unregister_server(self):
"""Test unregistering server."""
zkclient_mock = mock.Mock()
zkclient_mock.get_children.side_effect = lambda path: {
'/server.presence': ['yyy.yy.com#12345', 'zzz.zz.com']
}.get(path, [])
presence.unregister_server(zkclient_mock, 'xxx.xx.com')
zkclient_mock.delete.assert_not_called()
presence.unregister_server(zkclient_mock, 'yyy.yy.com')
zkclient_mock.delete.assert_called_with(
'/server.presence/yyy.yy.com#12345'
)
presence.unregister_server(zkclient_mock, 'zzz.zz.com')
zkclient_mock.delete.assert_called_with('/server.presence/zzz.zz.com')
if __name__ == '__main__':
unittest.main()
| 35.793872
| 144
| 0.588794
|
7952ded1ad228e7ffae3f9667d8caefdf29a86c8
| 16,846
|
py
|
Python
|
apps/sad/models.py
|
upeu-jul-20161-epis-ads2/americas
|
a9632bb7d6fb4f6479f41e5cd61efcdd3d435095
|
[
"BSD-3-Clause"
] | null | null | null |
apps/sad/models.py
|
upeu-jul-20161-epis-ads2/americas
|
a9632bb7d6fb4f6479f41e5cd61efcdd3d435095
|
[
"BSD-3-Clause"
] | null | null | null |
apps/sad/models.py
|
upeu-jul-20161-epis-ads2/americas
|
a9632bb7d6fb4f6479f41e5cd61efcdd3d435095
|
[
"BSD-3-Clause"
] | null | null | null |
# _*_ coding: utf-8 _*_
"""
@copyright Copyright (c) 2014 Submit Consulting
@author Angel Sullon (@asullom)
@package auth
@Descripcion Definition of the models
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst, get_text_list
from django.dispatch import receiver
from django.db.models import signals
from unicodedata import normalize
from django.core.exceptions import ValidationError
from django.core.exceptions import NON_FIELD_ERRORS
import datetime
# models
from django.contrib.auth.models import AbstractUser # managers
from .managers import UserManager
from apps.params.models import Person
from django.contrib.auth.models import Group, Permission
from apps.space.models import Solution, Association, Enterprise, Headquar
# others
# TODO Done for now
ON = 'ON'
OFF = 'OFF'
USER_STATUS_CHOICES = (
(ON, _('Activate')),
(OFF, _('Deactivate')),
)
class User(AbstractUser):
"""
    Table for users
    """
    class Meta:
        # swappable = 'AUTH_USER_MODEL'  # see django-angular-seed
verbose_name = capfirst(_('user'))
verbose_name_plural = capfirst(_('users'))
permissions = (
('user', 'Can ALL user'),
)
db_table = 'auth_user'
    # comment out from here
last_headquar_id = models.CharField(max_length=50, null=True, blank=True)
last_module_id = models.CharField(max_length=50, null=True, blank=True)
person = models.OneToOneField(
Person, verbose_name=_('Person'), null=True, blank=True,
        # unique=True  # OneToOneField is already unique
# related_name='user'
)
# hgroups = models.ManyToManyField(Group, verbose_name=_(u'groups'),
# through='UserHeadquar',
# related_name='users_as_group', null=True, blank=True)
objects = UserManager() # override the default manager
def __str__(self):
return self.username
'''
def validate_unique(self, exclude=None):
raise ValidationError(
{
NON_FIELD_ERRORS:
('Person with same ... already exists.',)
}
)
super(User, self).validate_unique(exclude=exclude)
'''
'''
def clean(self):
#raise ValidationError('foo must not be empty')
raise ValidationError(
{
'identity_num':
('Person with same ... already exists.',)
}
)
'''
'''
def save(self, *args, **kwargs):
        # TODO Raise a plain Exception here, not a ValidationError
if Person.objects.exclude(id=self.person.id).filter(identity_type=self.person.identity_type, identity_num=self.person.identity_num).count() > 0:
raise ValidationError({
'identity_num':
(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Person'),
'field_label': get_text_list( (capfirst(_('number')), capfirst(_('Type')) ),_('and') ),
},),
})
return super(User, self).save(*args, **kwargs)
'''
'''
def validate_unique(self, exclude=None):
# if self.person:
if Person.objects.exclude(id=self.person.id).filter(identity_type=self.person.identity_type, identity_num=self.person.identity_num).count() > 0:
raise ValidationError({
'identity_num':
(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': _('Person'),
'field_label': get_text_list( (capfirst(_('number')), capfirst(_('Type')) ),_('and') ),
},),
})
return super(User, self).validate_unique(exclude=exclude)
'''
'''
def save(self, *args, **kwargs):
pk = self.pk
if self.person:
self.first_name = self.person.first_name
self.last_name = self.person.last_name
super(User, self).save(*args, **kwargs)
        if not pk:  # only right after creating a new user
UserStatus.objects.create(description='alta', user=self)
'''
def user_pre_save(sender, instance, raw, **kwargs):
instance.last_login = datetime.datetime.now()
if instance.person:
instance.first_name = instance.person.first_name
instance.last_name = instance.person.last_name
@receiver(signals.post_save, sender=User)
def user_post_save(sender, instance, created, raw, **kwargs):
    if created:  # only right after a new user is created
UserStatus.objects.create(description='Alta', user=instance)
signals.pre_save.connect(user_pre_save, sender=User)
# signals.post_save.connect(user_post_save, sender=User)
class UserStatus(models.Model):
"""
    Table for the history of user status changes
"""
status = models.CharField(
_('Status'), max_length=50, choices=USER_STATUS_CHOICES, default=ON
)
description = models.TextField(_('Description'), null=True, blank=True)
# related_name=userstatus_set
user = models.ForeignKey(User, verbose_name=capfirst(_('user')))
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
class Meta:
verbose_name = _('User status')
verbose_name_plural = _('User statuses')
db_table = 'sad_user_status'
def __str__(self):
return '%s %s' % (self.user.username, self.status)
INPUT = "INPUT"
OUTPUT = "OUTPUT"
ACCESS_TYPE_CHOICES = (
(INPUT, "Input"),
(OUTPUT, "Output"),
)
class Access(models.Model):
"""
    Table that records user accesses to the system
"""
access_type = models.CharField(
_('Access type'),
max_length=50, choices=ACCESS_TYPE_CHOICES, default=INPUT)
ip = models.CharField(_('IP'), max_length=50, null=True, blank=True)
session_key = models.TextField(_('Session key'), null=True, blank=True)
user = models.ForeignKey(User, verbose_name=capfirst(_('user')))
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
class Meta:
verbose_name = _('Access')
verbose_name_plural = _('Accesses')
permissions = (
("access", "Can ALL access"),
)
def __str__(self):
return "%s %s" % (self.user.username, self.access_type)
PRO = 'PRO'
WEB = 'WEB'
CONTABILIDAD = 'CONTABILIDAD'
COBRANZAS = 'COBRANZAS'
SOCIOS = 'SOCIOS'
REUNIONES = 'REUNIONES'
BACKEND = 'BACKEND'
MODULE_CHOICES = (
(PRO, 'PROFESIONAL'),
(WEB, 'WEB INFORMATIVA'),
(CONTABILIDAD, 'CONTABILIDAD'),
(COBRANZAS, 'COBRANZAS'),
(SOCIOS, 'SOCIOS'),
(REUNIONES, 'REUNIONES'),
(BACKEND, 'BACKEND'),
)
class Module(models.Model):
"""
    System modules
"""
module = models.CharField(
_('Module'), max_length=50, choices=MODULE_CHOICES, default=BACKEND)
name = models.CharField(capfirst(_('name')), max_length=50)
is_active = models.BooleanField(capfirst(_('active')), default=True)
icon = models.CharField(_('Icon'), max_length=50, null=True, blank=True)
description = models.TextField(_('Description'), null=True, blank=True)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
solutions = models.ManyToManyField(
Solution, verbose_name=_('Solutions'), null=True, blank=True) # , through='ModuleSolution'
groups = models.ManyToManyField(
Group, related_name='module_set', verbose_name=capfirst(_('groups')),
null=True, blank=True) # , through='ModuleGroup'
    # related_name changes module_set to initial_groups_module_set
initial_groups = models.ManyToManyField(
Group, related_name='initial_groups_module_set',
verbose_name=_('Initial groups'), null=True, blank=True) # , through='ModuleInitialGroup'
class Meta:
ordering = ['-id', ]
verbose_name = _('Module')
verbose_name_plural = _('Modules')
permissions = (
('module', 'Can ALL module'),
)
unique_together = ('module', 'name',)
def __str__(self):
return '%s (%s)' % (self.name, dict((x, y)
for x, y in MODULE_CHOICES)[self.module])
def validate_unique(self, exclude=None):
if normalize('NFKD', self.name).encode('ascii', 'ignore').lower() in list(
normalize('NFKD', c['name']).encode('ascii', 'ignore').lower()
for c in self.__class__.objects.values('name').exclude(pk=self.pk).filter(module=self.module)
):
raise ValidationError({
'name':
(_(u'%(model_name)s with this %(field_label)s already exists.') % {
'model_name': '%s "%s"' % (capfirst(_('Module')) + '', dict(MODULE_CHOICES).get(self.module)),
'field_label': capfirst(_('name')),
}, ),
})
super(Module, self).validate_unique(exclude=exclude)
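# Illustrative helper (not in the original app) isolating the comparison used by
# Module.validate_unique above: names are compared accent- and case-insensitively,
# so 'Contabilidad', 'CONTABILIDAD' and a variant with accented characters all
# normalise to the same key and are rejected as duplicates within one module type.
def _normalized_name(name):
    """Return the accent-stripped, lower-cased bytes used for uniqueness checks."""
    return normalize('NFKD', name).encode('ascii', 'ignore').lower()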
class Menu(models.Model):
"""
    System menus
"""
module = models.CharField(
_('Module'), max_length=50, choices=MODULE_CHOICES, default=BACKEND)
title = models.CharField(capfirst(_('title')), max_length=50)
url = models.CharField(max_length=150, default='#')
pos = models.IntegerField(_('Position'), default=1)
icon = models.CharField(
_('Icon'), max_length=50, null=True, blank=True, default='')
is_active = models.BooleanField(capfirst(_('active')), default=True)
description = models.TextField(_('Description'), null=True, blank=True)
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
permission = models.ForeignKey(
Permission, verbose_name=_('permission'), null=True, blank=True)
# related_name='parent',
parent = models.ForeignKey(
'self', verbose_name=_('Parent'), null=True, blank=True)
class Meta:
verbose_name = _('Menu')
verbose_name_plural = _('Menus')
permissions = (
('menu', 'Can ALL menu'),
)
def __str__(self):
return '%s (%s)' % (self.title, dict((x, y)
for x, y in MODULE_CHOICES)[self.module])
class UserEnterprise(models.Model):
"""
    Enterprise-level permissions
"""
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
user = models.ForeignKey(User, verbose_name=_('user'))
group = models.ForeignKey(Group, verbose_name=_('group'))
enterprise = models.ForeignKey(Enterprise, verbose_name=_('Enterprise'))
class Meta:
verbose_name = _('User enterprise')
verbose_name_plural = _('User enterprises')
db_table = 'sad_user_enterprise'
def __str__(self):
return '%s %s - %s' % (self.user.username, self.enterprise.name,
self.group.name)
class UserHeadquar(models.Model):
"""
    Headquarters (headquar) level permissions
"""
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
user = models.ForeignKey(User, verbose_name=_('user'))
group = models.ForeignKey(Group, verbose_name=_('group'))
headquar = models.ForeignKey(Headquar, verbose_name=_('Headquar'))
class Meta:
verbose_name = _('User headquar')
verbose_name_plural = _('User headquars')
db_table = 'sad_user_headquar'
def __str__(self):
return '%s %s %s - %s' % (self.user.username, self.headquar.name,
self.headquar.enterprise.name, self.group.name)
class UserAssociation(models.Model):
"""
    Association-level permissions
"""
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
updated_at = models.DateTimeField(_('Updated at'), auto_now=True)
user = models.ForeignKey(User, verbose_name=_('user'))
group = models.ForeignKey(Group, verbose_name=_('group'))
association = models.ForeignKey(Association, verbose_name=_('Association'))
class Meta:
verbose_name = _('User association')
        verbose_name_plural = _('User associations')
db_table = 'sad_user_association'
def __str__(self):
return '%s %s - %s' % (self.user.username, self.association.name,
self.group.name)
class Ticket(models.Model):
"""
    Table for ticket printouts
"""
text = models.CharField(_('Text'), max_length=150, null=True, blank=True)
row = models.IntegerField(_('Row'), default=1)
user = models.ForeignKey(
User, verbose_name=_('user'), null=True, blank=True)
class Meta:
verbose_name = _('Ticket')
verbose_name_plural = _('Tickets')
permissions = (
('ticket', 'Can ALL ticket'),
)
def __str__(self):
return '%s %s' % (self.user.username, self.text)
class Backup(models.Model):
"""
    Table for recording database backup copies
"""
file_name = models.CharField(_('File name'), max_length=50)
description = models.TextField(_('Description'), null=True, blank=True)
size = models.CharField(_('Size'), max_length=50, null=True, blank=True)
user = models.ForeignKey(User, verbose_name=_('user'))
created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
class Meta:
verbose_name = _('Backup')
verbose_name_plural = _('Backups')
permissions = (
('backup', 'Can ALL backup'),
)
def __str__(self):
return self.file_name
'''
Keep this deactivated until going to production.
Models used only to change the verbose_name of the relations.
Deactivate before running a migration on sad or before a backup:
$python manage.py makemigrations sad
$python manage.py migrate sad
$python manage.py dumpdata > fixtures/backup_datayyyymmdd.json
class GroupPermission(models.Model):
""" """
permission = models.ForeignKey(
Permission, verbose_name=capfirst(_('permission')), null=True, blank=True)
group = models.ForeignKey(
Group, verbose_name=capfirst(_('group')), null=True, blank=True)
class Meta:
verbose_name = _('Group-permission')
verbose_name_plural = _('Group-permissions')
db_table = 'auth_group_permissions'
def __str__(self):
return '%s-%s' % (self.group, self.permission.codename)
class UserGroup(models.Model):
""" """
user = models.ForeignKey(
User, verbose_name=capfirst(_('user')), null=True, blank=True)
group = models.ForeignKey(
Group, verbose_name=capfirst(_('group')), null=True, blank=True)
class Meta:
verbose_name = _('User-group')
verbose_name_plural = _('User-groups')
db_table = 'auth_user_groups'
def __str__(self):
return '%s-%s' % (self.user, self.group)
class ModuleSolution(models.Model):
"""
    Only to change the message from
    Module-group relationship: "Module_initial_groups object" to
    Module-group: "Backend-MASTER"
"""
module = models.ForeignKey(
Module, verbose_name=_('Module'), null=True, blank=True)
solution = models.ForeignKey(
Solution, verbose_name=_('Solution'), null=True, blank=True)
class Meta:
verbose_name = _('Module-solution')
verbose_name_plural = _('Module-solutions')
db_table = 'sad_module_solutions'
def __str__(self):
return '%s-%s' % (self.module, self.solution)
class ModuleGroup(models.Model):
""" """
module = models.ForeignKey(
Module, verbose_name=_('Module'), null=True, blank=True)
group = models.ForeignKey(
Group, verbose_name=capfirst(_('group')), null=True, blank=True)
class Meta:
verbose_name = _('Module-group')
verbose_name_plural = _('Module-groups')
db_table = 'sad_module_groups'
def __str__(self):
return '%s-%s' % (self.module, self.group)
class ModuleInitialGroup(models.Model):
""" """
module = models.ForeignKey(
Module, verbose_name=_('Module'), null=True, blank=True)
group = models.ForeignKey(
Group, verbose_name=capfirst(_('group')), null=True, blank=True)
class Meta:
verbose_name = _('Module-initial group')
verbose_name_plural = _('Module-initial groups')
db_table = 'sad_module_initial_groups'
def __str__(self):
return '%s-%s' % (self.module, self.group)
'''
| 31.784906
| 152
| 0.631248
|
7952df80495044abc65ee30af9623cc1e23e09ba
| 7,628
|
py
|
Python
|
base/base_trainer.py
|
Lo1s/superresolution
|
18052465694bfc2543b9af71d8012d854a516d1a
|
[
"MIT",
"Unlicense"
] | 1
|
2021-06-19T15:03:58.000Z
|
2021-06-19T15:03:58.000Z
|
base/base_trainer.py
|
Lo1s/superresolution
|
18052465694bfc2543b9af71d8012d854a516d1a
|
[
"MIT",
"Unlicense"
] | null | null | null |
base/base_trainer.py
|
Lo1s/superresolution
|
18052465694bfc2543b9af71d8012d854a516d1a
|
[
"MIT",
"Unlicense"
] | null | null | null |
import os
import torch
import torchvision.transforms as transforms
import torchvision.utils as vutils
from PIL import Image
from abc import abstractmethod
from numpy import inf
from logger import TensorboardWriter
from model.esrgan.utils.utils import MODEL_KEY, GENERATOR_KEY, DISCRIMINATOR_KEY
from test import save_predictions_as_imgs
# Load base low-resolution image.
fixed_lr = transforms.ToTensor()(Image.open(os.path.join("data/inputs/Set5", "butterfly.png"))).unsqueeze(0)
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, models, criterion, metric_ftns, optimizers, config, device, monitor_cfg_key='monitor',
epochs_cfg_key='epochs'):
self.device = device
self.fixed_lr = fixed_lr.to(self.device)
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
self.models = models
self.criterion = criterion
self.metric_ftns = metric_ftns
self.optimizers = optimizers
cfg_trainer = config['trainer']
self.epochs = cfg_trainer[epochs_cfg_key]
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get(monitor_cfg_key, 'off')
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.plot_epoch_result = cfg_trainer.get('plot_epoch_result', inf)
if self.early_stop <= 0:
self.early_stop = inf
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
if config.resume is not None:
self._resume_checkpoint(config.resume)
@abstractmethod
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
for i, model in enumerate(self.models):
optimizer = self.optimizers[i]
arch = type(model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = str(self.checkpoint_dir / 'checkpoint-{}_epoch_{}.pth'.format(arch, epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = str(self.checkpoint_dir / f'model_{arch}_best.pth')
torch.save(state, best_path)
self.logger.info(f'Saving current best: model_{arch}_best.pth ...')
        # Create one SR image from the fixed low-resolution input each epoch.
arch = type(self.models[MODEL_KEY]).__name__
with torch.no_grad():
sr = self.models[MODEL_KEY](self.fixed_lr)
vutils.save_image(
sr.detach(),
os.path.join(self.checkpoint_dir, f'checkpoint-{arch}_epoch_{epoch}.png'),
normalize=True
)
def _resume_checkpoint(self, resume_paths):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
for i, path in enumerate(resume_paths):
self.logger.info("Loading checkpoint: {} ...".format(path))
checkpoint = torch.load(path)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
if 'Generator' in checkpoint['arch']:
key = GENERATOR_KEY
arch_param = 'arch_esrgan_gen'
elif 'Discriminator' in checkpoint['arch']:
key = DISCRIMINATOR_KEY
arch_param = 'arch_esrgan_disc'
else:
key = MODEL_KEY
arch_param = 'arch_single'
# load architecture params from checkpoint.
if checkpoint['config'][arch_param] != self.config[arch_param]:
self.logger.warning(
"Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.models[key].load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning(
"Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizers[key].load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| 39.937173
| 112
| 0.583901
|
7952e11bfc31e447441b435b7e5ed005bbd41a60
| 53,020
|
py
|
Python
|
src/datera/datera_api22.py
|
JochenFriedrich/cinder-driver
|
bab97011d30b8927e13908dc6651d45257d6522c
|
[
"Apache-2.0"
] | null | null | null |
src/datera/datera_api22.py
|
JochenFriedrich/cinder-driver
|
bab97011d30b8927e13908dc6651d45257d6522c
|
[
"Apache-2.0"
] | null | null | null |
src/datera/datera_api22.py
|
JochenFriedrich/cinder-driver
|
bab97011d30b8927e13908dc6651d45257d6522c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import math
import random
import re
import time
import uuid
import eventlet
import ipaddress
import six
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.volume import utils as volutils
from cinder import utils
from cinder.volume import volume_types
from os_brick import exception as brick_exception
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
API_VERSION = "2.2"
class DateraApi(object):
# =================
# = Create Volume =
# =================
def _create_volume_2_2(self, volume):
tenant = self._create_tenant_2_2(volume)
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = policies['default_storage_name']
volume_name = policies['default_volume_name']
template = policies['template']
placement = policies['placement_mode']
if template:
app_params = (
{
'create_mode': "openstack",
# 'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'app_template': '/app_templates/{}'.format(template)
})
else:
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'access_control_mode': 'deny_all',
'storage_instances': [
{
'name': storage_name,
'volumes': [
{
'name': volume_name,
'size': volume['size'],
'placement_mode': placement,
'replica_count': num_replicas,
'snapshot_policies': [
]
}
]
}
]
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
'post',
body=app_params,
api_version=API_VERSION,
tenant=tenant)
self._update_qos_2_2(volume, policies, tenant)
# =================
# = Extend Volume =
# =================
def _extend_volume_2_2(self, volume, new_size):
if volume['size'] >= new_size:
LOG.warning("Volume size not extended due to original size being "
"greater or equal to new size. Originial: "
"%(original)s, New: %(new)s", {
'original': volume['size'],
'new': new_size})
return
tenant = self._create_tenant_2_2(volume)
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s",
{'volume': volume, 'template': template})
return
with self._offline_flip_2_2(volume, tenant):
# Change Volume Size
app_inst = datc._get_name(volume['id'])
data = {
'size': new_size
}
store_name, vol_name = self._scrape_template(policies)
self._issue_api_request(
datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(app_inst),
method='put',
body=data,
api_version=API_VERSION,
tenant=tenant)
# =================
# = Cloned Volume =
# =================
def _create_cloned_volume_2_2(self, volume, src_vref):
policies = self._get_policies_for_resource(volume)
tenant = self._create_tenant_2_2(volume)
store_name, vol_name = self._scrape_template(policies)
src = "/" + datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(datc._get_name(src_vref['id']))
data = {
'create_mode': 'openstack',
'name': datc._get_name(volume['id']),
'uuid': str(volume['id']),
'clone_volume_src': {'path': src},
}
self._issue_api_request(
datc.URL_TEMPLATES['ai'](), 'post', body=data,
api_version=API_VERSION, tenant=tenant)
if volume['size'] > src_vref['size']:
self._extend_volume_2_2(volume, volume['size'])
# =================
# = Delete Volume =
# =================
def _delete_volume_2_2(self, volume):
self._detach_volume_2_2(None, volume)
tenant = self._create_tenant_2_2(volume)
app_inst = datc._get_name(volume['id'])
try:
self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst),
method='delete',
api_version=API_VERSION,
tenant=tenant)
except exception.NotFound:
msg = ("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
# =================
# = Ensure Export =
# =================
def _ensure_export_2_2(self, context, volume, connector=None):
self.create_export(context, volume, connector)
# =========================
# = Initialize Connection =
# =========================
def _initialize_connection_2_2(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
tenant = self._create_tenant_2_2(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(
url, method='put', body=data, api_version=API_VERSION,
tenant=tenant)['data']
storage_instances = app_inst["storage_instances"]
si = storage_instances[0]
# randomize portal chosen
choice = 0
policies = self._get_policies_for_resource(volume)
if policies["round_robin"]:
choice = random.randint(0, 1)
portal = si['access']['ips'][choice] + ':3260'
iqn = si['access']['iqn']
if multipath:
portals = [p + ':3260' for p in si['access']['ips']]
iqns = [iqn for _ in si['access']['ips']]
lunids = [self._get_lunid() for _ in si['access']['ips']]
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_iqns': iqns,
'target_portal': portal,
'target_portals': portals,
'target_lun': self._get_lunid(),
'target_luns': lunids,
'volume_id': volume['id'],
'discard': False}}
else:
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': self._get_lunid(),
'volume_id': volume['id'],
'discard': False}}
if self.use_chap_auth:
result['data'].update(
auth_method="CHAP",
auth_username=self.chap_username,
auth_password=self.chap_password)
return result
# =================
# = Create Export =
# =================
def _create_export_2_2(self, context, volume, connector):
tenant = self._create_tenant_2_2(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
}
self._issue_api_request(
url, method='put', body=data, api_version=API_VERSION,
tenant=tenant)
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
if connector and connector.get('ip'):
# Case where volume_type has non default IP Pool info
if policies['ip_pool'] != 'default':
initiator_ip_pool_path = self._issue_api_request(
"access_network_ip_pools/{}".format(
policies['ip_pool']),
api_version=API_VERSION,
tenant=tenant)['path']
# Fallback to trying reasonable IP based guess
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_2(
connector['ip'], tenant)
ip_pool_url = datc.URL_TEMPLATES['si_inst'](
store_name).format(datc._get_name(volume['id']))
ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
self._issue_api_request(ip_pool_url,
method="put",
body=ip_pool_data,
api_version=API_VERSION,
tenant=tenant)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
self._issue_api_request(
url, method='put', body=data, api_version=API_VERSION,
tenant=tenant)
# Check if we've already setup everything for this volume
url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
storage_instances = self._issue_api_request(
url, api_version=API_VERSION, tenant=tenant)
# Handle adding initiator to product if necessary
# Then add initiator to ACL
if (connector and
connector.get('initiator') and
not policies['acl_allow_all']):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
initiator_group = datc.INITIATOR_GROUP_PREFIX + str(uuid.uuid4())
# TODO(_alastor_): actually check for existing initiator
found = False
initiator = connector['initiator']
if not found:
# TODO(_alastor_): Take out the 'force' flag when we fix
# DAT-15931
data = {'id': initiator, 'name': initiator_name, 'force': True}
# Try and create the initiator
# If we get a conflict, ignore it
self._issue_api_request("initiators",
method="post",
body=data,
conflict_ok=True,
api_version=API_VERSION,
tenant=tenant)
# Create initiator group with initiator in it
initiator_path = "/initiators/{}".format(initiator)
initiator_group_path = "/initiator_groups/{}".format(
initiator_group)
ig_data = {'name': initiator_group,
'members': [{'path': initiator_path}]}
self._issue_api_request("initiator_groups",
method="post",
body=ig_data,
conflict_ok=True,
api_version=API_VERSION,
tenant=tenant)
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si in storage_instances['data']:
acl_url = (datc.URL_TEMPLATES['si']() +
"/{}/acl_policy").format(
datc._get_name(volume['id']), si['name'])
existing_acl = self._issue_api_request(
acl_url, method="get", api_version=API_VERSION,
tenant=tenant)['data']
data = {}
# Grabbing only the 'path' key from each existing initiator
# within the existing acl. eacli --> existing acl initiator
eacli = []
for acl in existing_acl['initiators']:
nacl = {}
nacl['path'] = acl['path']
eacli.append(nacl)
data['initiators'] = eacli
# Grabbing only the 'path' key from each existing initiator
# group within the existing acl. eaclig --> existing
# acl initiator group
eaclig = []
for acl in existing_acl['initiator_groups']:
nacl = {}
nacl['path'] = acl['path']
eaclig.append(nacl)
data['initiator_groups'] = eaclig
data['initiator_groups'].append({"path": initiator_group_path})
self._issue_api_request(acl_url,
method="put",
body=data,
api_version=API_VERSION,
tenant=tenant)
if self.use_chap_auth:
auth_url = (datc.URL_TEMPLATES['si']() + "/{}/auth").format(
datc._get_name(volume['id']), si['name'])
data = {'type': 'chap',
'target_user_name': self.chap_username,
'target_pswd': self.chap_password}
self._issue_api_request(
auth_url, method="put", api_version=API_VERSION, tenant=tenant,
body=data, sensitive=True)
# Check to ensure we're ready for go-time
self._si_poll_2_2(volume, policies, tenant)
# =================
# = Detach Volume =
# =================
def _detach_volume_2_2(self, context, volume, attachment=None):
tenant = self._create_tenant_2_2(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(
url, method='put', body=data, api_version=API_VERSION,
tenant=tenant)
except exception.NotFound:
msg = ("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
self._clean_acl_2_2(volume, tenant)
def _check_for_acl_2_2(self, initiator_path, tenant):
"""Returns True if an acl is found for initiator_path """
# TODO(_alastor_) when we get a /initiators/:initiator/acl_policies
# endpoint use that instead of this monstrosity
initiator_groups = self._issue_api_request(
"initiator_groups", api_version=API_VERSION, tenant=tenant)
for ig, igdata in initiator_groups.items():
if initiator_path in igdata['members']:
LOG.debug("Found initiator_group: %s for initiator: %s",
ig, initiator_path)
return True
LOG.debug("No initiator_group found for initiator: %s", initiator_path)
return False
def _clean_acl_2_2(self, volume, tenant):
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
acl_url = (datc.URL_TEMPLATES["si_inst"](
store_name) + "/acl_policy").format(datc._get_name(volume['id']))
try:
initiator_group = self._issue_api_request(
acl_url, api_version=API_VERSION, tenant=tenant)['data'][
'initiator_groups'][0]['path']
# TODO(_alastor_): Re-enable this when we get a force-delete
# option on the /initiators endpoint
# initiator_iqn_path = self._issue_api_request(
# initiator_group.lstrip("/"), api_version=API_VERSION,
# tenant=tenant)[
# "data"]["members"][0]["path"]
# Clear out ACL and delete initiator group
self._issue_api_request(acl_url,
method="put",
body={'initiator_groups': []},
api_version=API_VERSION,
tenant=tenant)
self._issue_api_request(initiator_group.lstrip("/"),
method="delete",
api_version=API_VERSION,
tenant=tenant)
# TODO(_alastor_): Re-enable this when we get a force-delete
# option on the /initiators endpoint
# if not self._check_for_acl_2_2(initiator_iqn_path):
# self._issue_api_request(initiator_iqn_path.lstrip("/"),
# method="delete",
# api_version=API_VERSION,
# tenant=tenant)
except (IndexError, exception.NotFound):
LOG.debug("Did not find any initiator groups for volume: %s",
volume)
# ===================
# = Create Snapshot =
# ===================
def _create_snapshot_2_2(self, snapshot):
tenant = self._create_tenant_2_2(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
url_template = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
url = url_template.format(datc._get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
}
snap = self._issue_api_request(
url, method='post', body=snap_params, api_version=API_VERSION,
tenant=tenant)
snapu = "/".join((url, snap['data']['timestamp']))
self._snap_poll_2_2(snapu, tenant)
# ===================
# = Delete Snapshot =
# ===================
def _delete_snapshot_2_2(self, snapshot):
tenant = self._create_tenant_2_2(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = []
try:
snapshots = self._issue_api_request(snapu,
method='get',
api_version=API_VERSION,
tenant=tenant)
except exception.NotFound:
msg = ("Tried to delete snapshot %s, but parent volume %s was "
"not found in Datera cluster. Continuing with delete.")
LOG.info(msg,
datc._get_name(snapshot['id']),
datc._get_name(snapshot['volume_id']))
return
try:
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(snap['timestamp'])
self._issue_api_request(
url,
method='delete',
api_version=API_VERSION,
tenant=tenant)
break
else:
raise exception.NotFound
except exception.NotFound:
msg = ("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
# ========================
# = Volume From Snapshot =
# ========================
def _create_volume_from_snapshot_2_2(self, volume, snapshot):
tenant = self._create_tenant_2_2(volume)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(
snapu, method='get', api_version=API_VERSION, tenant=tenant)
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
found_ts = snap['utc_ts']
break
else:
raise exception.NotFound
snap_url = (snap_temp + '/{}').format(
datc._get_name(snapshot['volume_id']), found_ts)
self._snap_poll_2_2(snap_url, tenant)
src = "/" + snap_url
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'clone_snapshot_src': {'path': src},
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
method='post',
body=app_params,
api_version=API_VERSION,
tenant=tenant)
if (volume['size'] > snapshot['volume_size']):
self._extend_volume_2_2(volume, volume['size'])
# ==========
# = Retype =
# ==========
def _retype_2_2(self, ctxt, volume, new_type, diff, host):
LOG.debug("Retype called\n"
"Volume: %(volume)s\n"
"NewType: %(new_type)s\n"
"Diff: %(diff)s\n"
"Host: %(host)s\n", {'volume': volume, 'new_type': new_type,
'diff': diff, 'host': host})
def _put(vol_params, tenant):
url = datc.URL_TEMPLATES['vol_inst'](
old_pol['default_storage_name'],
old_pol['default_volume_name']).format(
datc._get_name(volume['id']))
self._issue_api_request(
url, method='put', body=vol_params,
api_version=API_VERSION, tenant=tenant)
# We'll take the fast route only if the types share the same backend
# And that backend matches this driver
old_pol = self._get_policies_for_resource(volume)
new_pol = self._get_policies_for_volume_type(new_type)
if (host['capabilities']['vendor_name'].lower() ==
self.backend_name.lower()):
LOG.debug("Starting fast volume retype")
if old_pol.get('template') or new_pol.get('template'):
LOG.warning(
"Fast retyping between template-backed volume-types "
"unsupported. Type1: %s, Type2: %s",
volume['volume_type_id'], new_type)
if old_pol.get('acl_allow_all') != new_pol.get('acl_allow_all'):
LOG.warning(
"Changing acl_allow_all unsupported for fast retyping"
"Type1: %s, Type2: %s", volume['volume_type_id'], new_type)
tenant = self._create_tenant_2_2(volume)
self._update_qos_2_2(volume, new_pol, tenant, clear_old=True)
            # Only replica_count and ip_pool changes require offlining the app_instance
if (new_pol['replica_count'] != old_pol['replica_count'] or
new_pol['ip_pool'] != old_pol['ip_pool']):
with self._offline_flip_2_2(volume, tenant):
vol_params = (
{
'placement_mode': new_pol['placement_mode'],
'replica_count': new_pol['replica_count'],
})
_put(vol_params, tenant)
elif new_pol['placement_mode'] != old_pol['placement_mode']:
vol_params = (
{
'placement_mode': new_pol['placement_mode'],
})
_put(vol_params, tenant)
return True
else:
LOG.debug("Couldn't fast-retype volume between specified types")
return False
# ==========
# = Manage =
# ==========
def _manage_existing_2_2(self, volume, existing_ref):
# Only volumes created under the requesting tenant can be managed in
# the v2.1+ API. Eg. If tenant A is the tenant for the volume to be
# managed, it must also be tenant A that makes this request.
# This will be fixed in a later API update
tenant = self._create_tenant_2_2(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name = existing_ref.split(":")[0]
try:
(tenant, app_inst_name, storage_inst_name,
vol_name) = existing_ref.split(":")
        except ValueError:
app_inst_name, storage_inst_name, vol_name = existing_ref.split(
":")
tenant = None
LOG.debug("Managing existing Datera volume %s "
"Changing name to %s",
datc._get_name(volume['id']), existing_ref)
data = {'name': datc._get_name(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
app_inst_name), method='put', body=data,
api_version=API_VERSION, tenant=tenant)
# ===================
# = Manage Get Size =
# ===================
def _manage_existing_get_size_2_2(self, volume, existing_ref):
tenant = self._create_tenant_2_2(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name, si_name, vol_name = existing_ref.split(":")
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
api_version=API_VERSION, tenant=tenant)
return self._get_size_2_2(
volume, tenant, app_inst, si_name, vol_name)
def _get_size_2_2(self, volume, tenant=None, app_inst=None, si_name=None,
vol_name=None):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
policies = self._get_policies_for_resource(volume)
si_name = si_name if si_name else policies['default_storage_name']
vol_name = vol_name if vol_name else policies['default_volume_name']
if not app_inst:
vol_url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
app_inst = self._issue_api_request(
vol_url, api_version=API_VERSION, tenant=tenant)['data']
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
found_si = None
for si in sis:
if si['name'] == si_name:
found_si = si
break
found_vol = None
for vol in found_si['volumes']:
if vol['name'] == vol_name:
found_vol = vol
size = found_vol['size']
return size
# =========================
# = Get Manageable Volume =
# =========================
def _get_manageable_volumes_2_2(self, cinder_volumes, marker, limit,
offset, sort_keys, sort_dirs):
# Use the first volume to determine the tenant we're working under
if cinder_volumes:
tenant = self._create_tenant_2_2(cinder_volumes[0])
else:
tenant = None
LOG.debug("Listing manageable Datera volumes")
app_instances = self._issue_api_request(
datc.URL_TEMPLATES['ai'](), api_version=API_VERSION,
tenant=tenant)['data']
results = []
cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
for ai in app_instances:
ai_name = ai['name']
reference = None
size = None
safe_to_manage = False
reason_not_safe = ""
cinder_id = None
extra_info = None
if re.match(datc.UUID4_RE, ai_name):
cinder_id = ai_name.lstrip(datc.OS_PREFIX)
if (not cinder_id and
ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids):
safe_to_manage, reason_not_safe = self._is_manageable_2_2(ai)
if safe_to_manage:
si = list(ai['storage_instances'].values())[0]
si_name = si['name']
vol = list(si['volumes'].values())[0]
vol_name = vol['name']
size = vol['size']
reference = {"source-name": "{}:{}:{}".format(
ai_name, si_name, vol_name)}
results.append({
'reference': reference,
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info})
page_results = volutils.paginate_entries_list(
results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
def _is_manageable_2_2(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return (True, "")
return (False,
"App Instance has more than one storage instance or volume")
# ============
# = Unmanage =
# ============
def _unmanage_2_2(self, volume):
tenant = self._create_tenant_2_2(volume)
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], datc._get_unmanaged(volume['id']))
data = {'name': datc._get_unmanaged(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
method='put',
body=data,
api_version=API_VERSION,
tenant=tenant)
# ====================
# = Fast Image Clone =
# ====================
def _clone_image_2_2(self, context, volume, image_location, image_meta,
image_service):
# We're not going to fast image clone if the feature is not enabled
# and/or we can't reach the image being requested
if (not self.image_cache or
not self._image_accessible(context, volume, image_meta)):
return None, False
# Check to make sure we're working with a valid volume type
try:
found = volume_types.get_volume_type(context, self.image_type)
except (exception.VolumeTypeNotFound, exception.InvalidVolumeType):
found = None
if not found:
msg = _("Invalid volume type: %s")
LOG.error(msg, self.image_type)
raise ValueError("Option datera_image_cache_volume_type_id must be"
" set to a valid volume_type id")
LOG.debug("Starting fast image clone")
# TODO(_alastor_): determine if Datera is already an image backend
# for this request and direct clone instead of caching
# Dummy volume, untracked by Cinder
src_vol = {'id': image_meta['id'],
'volume_type_id': self.image_type,
'size': volume['size']}
# Determine if we have a cached version of the image
cached = self._vol_exists_2_2(src_vol)
if cached:
metadata = self._get_metadata_2_2(src_vol)
# Check to see if the master image has changed since we created
# The cached version
ts = self._get_vol_timestamp_2_2(src_vol)
mts = time.mktime(image_meta['updated_at'].timetuple())
LOG.debug("Original image timestamp: %s, cache timestamp %s",
mts, ts)
# If the image is created by Glance, we'll trust that even if the
# timestamps don't match up, the data is ok to clone as it's not
# managed by this driver
if metadata.get('type') == 'image':
LOG.debug("Found Glance volume-backed image for %s",
src_vol['id'])
# If the master image time is greater than the volume creation
# time, we invalidate the cache and delete the volume. The
# exception is if the cached volume was created by Glance. We
# NEVER want to delete this volume. It's annotated with
# 'type': 'image' in the metadata, so we'll check for that
elif mts > ts and metadata.get('type') != 'image':
LOG.debug("Cache is older than original image, deleting cache")
cached = False
self._delete_volume_2_2(src_vol)
# If we don't have the image, we'll cache it
if not cached:
LOG.debug("No image cache found for: %s, caching image",
image_meta['id'])
self._cache_vol(context, src_vol, image_meta, image_service)
# Now perform the clone of the found image or newly cached image
self._create_cloned_volume_2_2(volume, src_vol)
# Force volume resize
vol_size = volume['size']
volume['size'] = 0
self._extend_volume_2_2(volume, vol_size)
volume['size'] = vol_size
# Determine if we need to retype the newly created volume
vtype_id = volume.get('volume_type_id')
if vtype_id and self.image_type and vtype_id != self.image_type:
vtype = volume_types.get_volume_type(context, vtype_id)
LOG.debug("Retyping newly cloned volume from type: %s to type: %s",
self.image_type, vtype_id)
diff, discard = volume_types.volume_types_diff(
context, self.image_type, vtype_id)
host = {'capabilities': {'vendor_name': self.backend_name}}
self._retype_2_2(context, volume, vtype, diff, host)
return None, True
def _cache_vol(self, context, vol, image_meta, image_service):
image_id = image_meta['id']
# Pull down image and determine if valid
with image_utils.TemporaryImages.fetch(image_service,
context,
image_id) as tmp_image:
data = image_utils.qemu_img_info(tmp_image)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
vsize = int(
math.ceil(float(data.virtual_size) / units.Gi))
vol['size'] = vsize
vtype = vol['volume_type_id']
LOG.info("Creating cached image with volume type: %(vtype)s and "
"size %(size)s", {'vtype': vtype, 'size': vsize})
self._create_volume_2_2(vol)
with self._connect_vol(context, vol) as device:
LOG.debug("Moving image %s to volume %s",
image_meta['id'], datc._get_name(vol['id']))
image_utils.convert_image(tmp_image,
device,
'raw',
run_as_root=True)
LOG.debug("Finished moving image %s to volume %s",
image_meta['id'], datc._get_name(vol['id']))
data = image_utils.qemu_img_info(device, run_as_root=True)
if data.file_format != 'raw':
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_(
"Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {
'vol_format': 'raw',
'file_format': data.file_format})
# TODO(_alastor_): Remove this snapshot creation when we fix
# "created_at" attribute in the frontend
# We don't actually care about the snapshot uuid, we just want
# a single snapshot
snapshot = {'id': str(uuid.uuid4()),
'volume_id': vol['id']}
self._create_snapshot_2_2(snapshot)
self._update_metadata_2_2(vol, {'type': 'cached_image'})
# Cloning offline AI is ~4 seconds faster than cloning online AI
self._detach_volume_2_2(None, vol)
def _get_vol_timestamp_2_2(self, volume):
tenant = self._create_tenant_2_2()
policies = self._get_policies_for_resource(volume)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(volume['id']))
snapshots = self._issue_api_request(snapu,
method='get',
api_version=API_VERSION,
tenant=tenant)
if len(snapshots['data']) == 1:
return float(snapshots['data'][0]['utc_ts'])
else:
# We'll return 0 if we find no snapshots (or the incorrect number)
# to ensure the timestamp comparison with the master copy fails
# since the master copy will always have a timestamp > 0.
LOG.debug("Number of snapshots found: %s", len(snapshots['data']))
return 0
def _vol_exists_2_2(self, volume):
LOG.debug("Checking if volume %s exists", volume['id'])
tenant = self._create_tenant_2_2(volume)
try:
            app_inst = self._issue_api_request(
                datc.URL_TEMPLATES['ai_inst']().format(
                    datc._get_name(volume['id'])),
                api_version=API_VERSION, tenant=tenant)
            LOG.debug("Volume %s exists", volume['id'])
            return app_inst
except exception.NotFound:
LOG.debug("Volume %s not found", volume['id'])
return {}
@contextlib.contextmanager
def _connect_vol(self, context, vol):
connector = None
try:
# Start connection, get the connector object and create the
# export (ACL, IP-Pools, etc)
conn = self._initialize_connection_2_2(
vol, {'multipath': False})
connector = utils.brick_get_connector(
conn['driver_volume_type'],
use_multipath=False,
device_scan_attempts=10,
conn=conn)
connector_info = {'initiator': connector.get_initiator()}
self._create_export_2_2(None, vol, connector_info)
retries = 10
attach_info = conn['data']
while True:
try:
attach_info.update(
connector.connect_volume(conn['data']))
break
except brick_exception.FailedISCSITargetPortalLogin:
retries -= 1
if not retries:
LOG.error(_("Could not log into portal before end of "
"polling period"))
raise
LOG.debug("Failed to login to portal, retrying")
eventlet.sleep(2)
device_path = attach_info['path']
yield device_path
finally:
# Close target connection
if connector:
# Best effort disconnection
try:
connector.disconnect_volume(attach_info, attach_info)
except Exception:
pass
# ===========
# = Tenancy =
# ===========
def _create_tenant_2_2(self, volume=None):
# Create the Datera tenant if specified in the config
# Otherwise use the tenant provided
if self.tenant_id is None:
tenant = None
elif self.tenant_id.lower() == "map" and volume:
# Convert dashless uuid to uuid with dashes
# Eg: 0e33e95a9b154d348c675a1d8ea5b651 -->
# 0e33e95a-9b15-4d34-8c67-5a1d8ea5b651
tenant = datc._get_name(str(uuid.UUID(volume["project_id"])))
elif self.tenant_id.lower() == "map" and not volume:
tenant = None
else:
tenant = self.tenant_id
if tenant:
params = {'name': tenant}
self._issue_api_request(
'tenants', method='post', body=params, conflict_ok=True,
api_version=API_VERSION)
return tenant
# =========
# = Login =
# =========
def _login_2_2(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
if self.ldap:
body['remote_server'] = self.ldap
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.datera_api_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request(
'login', 'put', body=body, sensitive=True,
api_version=API_VERSION, tenant=None)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.')
# ===========
# = Polling =
# ===========
def _snap_poll_2_2(self, url, tenant):
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 20
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
snap = self._issue_api_request(url,
api_version=API_VERSION,
tenant=tenant)['data']
if snap['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
def _si_poll_2_2(self, volume, policies, tenant):
# Initial 4 second sleep required for some Datera versions
eventlet.sleep(datc.DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
check_url = datc.URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(
datc._get_name(volume['id']))
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = self._issue_api_request(check_url,
api_version=API_VERSION,
tenant=tenant)['data']
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
# ================
# = Volume Stats =
# ================
def _get_volume_stats_2_2(self, refresh=False):
if refresh or not self.cluster_stats:
try:
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request(
'system', api_version=API_VERSION)['data']
if 'uuid' not in results:
LOG.error(
'Failed to get updated stats from Datera Cluster.')
stats = {
'volume_backend_name': self.backend_name,
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': (
int(results['total_capacity']) / units.Gi),
'free_capacity_gb': (
int(results['available_capacity']) / units.Gi),
'reserved_percentage': 0,
'QoS_support': True,
}
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error('Failed to get updated stats from Datera cluster.')
return self.cluster_stats
# =======
# = QoS =
# =======
def _update_qos_2_2(self, resource, policies, tenant, clear_old=False):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
iops_per_gb = int(policies.get('iops_per_gb', 0))
bandwidth_per_gb = int(policies.get('bandwidth_per_gb', 0))
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
            # Calculate and set iops/GB and bandwidth/GB, capping each at
            # total_iops_max / total_bandwidth_max when those are set, since
            # the explicit maxima take priority
if iops_per_gb:
ipg = iops_per_gb * resource['size']
# Not using zero, because zero means unlimited
im = fpolicies.get('total_iops_max', 1)
r = ipg
if ipg > im:
r = im
fpolicies['total_iops_max'] = r
if bandwidth_per_gb:
bpg = bandwidth_per_gb * resource['size']
# Not using zero, because zero means unlimited
                bm = fpolicies.get('total_bandwidth_max', 1)
r = bpg
if bpg > bm:
r = bm
fpolicies['total_bandwidth_max'] = r
if fpolicies or clear_old:
try:
self._issue_api_request(
url, 'delete', api_version=API_VERSION,
tenant=tenant)
except exception.NotFound:
LOG.debug("No existing performance policy found")
if fpolicies:
self._issue_api_request(
url, 'post', body=fpolicies, api_version=API_VERSION,
tenant=tenant)
# ============
# = IP Pools =
# ============
def _get_ip_pool_for_string_ip_2_2(self, ip, tenant):
"""Takes a string ipaddress and return the ip_pool API object dict """
pool = 'default'
ip_obj = ipaddress.ip_address(six.text_type(ip))
ip_pools = self._issue_api_request('access_network_ip_pools',
api_version=API_VERSION,
tenant=tenant)
for ipdata in ip_pools['data']:
for adata in ipdata['network_paths']:
if not adata.get('start_ip'):
continue
pool_if = ipaddress.ip_interface(
"/".join((adata['start_ip'], str(adata['netmask']))))
if ip_obj in pool_if.network:
pool = ipdata['name']
return self._issue_api_request(
"access_network_ip_pools/{}".format(pool),
api_version=API_VERSION, tenant=tenant)['data']['path']
# ============
# = Metadata =
# ============
def _get_metadata_2_2(self, volume):
tenant = self._create_tenant_2_2(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])) + "/metadata"
return self._issue_api_request(url, api_version=API_VERSION,
tenant=tenant)['data']
def _update_metadata_2_2(self, volume, keys):
tenant = self._create_tenant_2_2(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])) + "/metadata"
self._issue_api_request(
url, method='put', body=keys, api_version=API_VERSION,
tenant=tenant)
@contextlib.contextmanager
def _detach_flip_2_2(self, volume, tenant):
# Offline App Instance, if necessary
reonline = False
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
api_version=API_VERSION, tenant=tenant)
if app_inst['data']['admin_state'] == 'online':
reonline = True
self._detach_volume_2_2(None, volume)
yield
# Online Volume, if it was online before
if reonline:
self._create_export_2_2(None, volume, None)
@contextlib.contextmanager
def _offline_flip_2_2(self, volume, tenant):
reonline = False
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
api_version=API_VERSION, tenant=tenant)
if app_inst['data']['admin_state'] == 'online':
reonline = True
data = {'admin_state': 'offline'}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])), method='put', body=data,
api_version=API_VERSION, tenant=tenant)
yield
if reonline:
data = {'admin_state': 'online'}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])), method='put', body=data,
api_version=API_VERSION, tenant=tenant)
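# Standalone sketch of the offline/online "flip" pattern used by
# _offline_flip_2_2 above, reduced to a plain dict instead of a Datera
# app_instance and API calls (assumed shape: {'admin_state': ...}).
@contextlib.contextmanager
def _offline_flip_sketch(resource):
    reonline = resource['admin_state'] == 'online'
    if reonline:
        resource['admin_state'] = 'offline'
    try:
        yield resource
    finally:
        if reonline:
            resource['admin_state'] = 'online'
# Usage (e.g.):
#     res = {'admin_state': 'online', 'size': 10}
#     with _offline_flip_sketch(res) as r:
#         r['size'] = 20          # mutate while offline
#     # res == {'admin_state': 'online', 'size': 20}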
| 41.78093
| 79
| 0.530441
|
7952e21d54a6679133845f09ca3eb01188569397
| 590
|
py
|
Python
|
exercises/exc_01_11.py
|
attwaben/spacy-course
|
aa684aef31cfe8c41838017b9f6bf77fb89eac0f
|
[
"MIT"
] | null | null | null |
exercises/exc_01_11.py
|
attwaben/spacy-course
|
aa684aef31cfe8c41838017b9f6bf77fb89eac0f
|
[
"MIT"
] | null | null | null |
exercises/exc_01_11.py
|
attwaben/spacy-course
|
aa684aef31cfe8c41838017b9f6bf77fb89eac0f
|
[
"MIT"
] | null | null | null |
import spacy
# Import the Matcher
from spacy.matcher import Matcher
nlp = spacy.load("en_core_web_sm")
doc = nlp("New iPhone X release date leaked as Apple reveals pre-orders by mistake")
# Initialize the Matcher with the shared vocabulary
matcher = Matcher(nlp.vocab)
# Create a pattern matching two tokens: "iPhone" and "X"
pattern = [{"TEXT" : "iPhone" }, {"TEXT" : "X"}]
# Add the pattern to the matcher
matcher.add("IPHONE_X_PATTERN", None, pattern)
# Use the matcher on the doc
matches = matcher(doc)
print("Matches:", [doc[start:end].text for match_id, start, end in matches])
| 28.095238
| 84
| 0.728814
|
7952e44a92a97ea590ea4378f2d36072e403617b
| 3,825
|
py
|
Python
|
pipeline/component_framework/constant.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 1
|
2020-09-24T07:39:16.000Z
|
2020-09-24T07:39:16.000Z
|
pipeline/component_framework/constant.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:46:54.000Z
|
2021-06-10T22:54:45.000Z
|
pipeline/component_framework/constant.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making ่้ฒธๆบไบPaaSๅนณๅฐ็คพๅบ็ (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import copy
from pipeline.exceptions import ConstantNotExistException, ConstantReferenceException
from pipeline.core.data.expression import ConstantTemplate, deformat_constant_key
from pipeline.utils.graph import Graph
class ConstantPool(object):
def __init__(self, pool, lazy=False):
self.raw_pool = pool
self.pool = None
if not lazy:
self.resolve()
def resolve(self):
if self.pool:
return
refs = self.get_reference_info()
nodes = refs.keys()
flows = []
for node in nodes:
for ref in refs[node]:
if ref in nodes:
flows.append([node, ref])
graph = Graph(nodes, flows)
        # circular reference check
        trace = graph.get_cycle()
        if trace:
            raise ConstantReferenceException('Circular reference exists between constants: %s' % '->'.join(trace))
# resolve the constants reference
pool = {}
temp_pool = copy.deepcopy(self.raw_pool)
        # get those constants which are only referenced (and do not refer to other constants)
referenced_only = ConstantPool._get_referenced_only(temp_pool)
while temp_pool:
for ref in referenced_only:
value = temp_pool[ref]['value']
# resolve those constants which reference the 'ref'
for key, info in temp_pool.items():
maps = {deformat_constant_key(ref): value}
temp_pool[key]['value'] = ConstantTemplate(info['value']).resolve_data(maps)
pool[ref] = temp_pool[ref]
temp_pool.pop(ref)
referenced_only = ConstantPool._get_referenced_only(temp_pool)
self.pool = pool
@staticmethod
def _get_referenced_only(pool):
referenced_only = []
for key, info in pool.items():
reference = ConstantTemplate(info['value']).get_reference()
formatted_reference = ['${%s}' % ref for ref in reference]
reference = [c for c in formatted_reference if c in pool]
if not reference:
referenced_only.append(key)
return referenced_only
def get_reference_info(self, strict=True):
refs = {}
for key, info in self.raw_pool.items():
reference = ConstantTemplate(info['value']).get_reference()
formatted_reference = ['${%s}' % ref for ref in reference]
ref = [c for c in formatted_reference if not strict or c in self.raw_pool]
refs[key] = ref
return refs
def resolve_constant(self, constant):
if not self.pool:
self.resolve()
if constant not in self.pool:
raise ConstantNotExistException('constant %s not exist.' % constant)
return self.pool[constant]['value']
def resolve_value(self, val):
if not self.pool:
self.resolve()
maps = {deformat_constant_key(key): self.pool[key]['value'] for key in self.pool}
return ConstantTemplate(val).resolve_data(maps)
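# Standalone sketch of the resolution order implemented above: repeatedly peel
# off "referenced only" keys and substitute them into the remaining values.
# Plain str.replace stands in for ConstantTemplate; the keys/values are made up.
if __name__ == '__main__':
    raw = {'${a}': '1', '${b}': 'prefix-${a}', '${c}': '${b}-suffix'}
    resolved, pending = {}, dict(raw)
    while pending:
        ready = [k for k, v in pending.items()
                 if not any(other in v for other in pending if other != k)]
        if not ready:  # ConstantPool.resolve() raises ConstantReferenceException here
            break
        for k in ready:
            resolved[k] = pending.pop(k)
            for other in pending:
                pending[other] = pending[other].replace(k, resolved[k])
    print(resolved)  # {'${a}': '1', '${b}': 'prefix-1', '${c}': 'prefix-1-suffix'}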
| 37.135922
| 115
| 0.641569
|
7952e4b01196c04380ea23918dc575f02235b708
| 5,961
|
py
|
Python
|
src/mbed_cloud/_backends/billing/models/quota_usage_report.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 12
|
2017-12-28T11:18:43.000Z
|
2020-10-04T12:11:15.000Z
|
src/mbed_cloud/_backends/billing/models/quota_usage_report.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 50
|
2017-12-21T12:50:41.000Z
|
2020-01-13T16:07:08.000Z
|
src/mbed_cloud/_backends/billing/models/quota_usage_report.py
|
GQMai/mbed-cloud-sdk-python
|
76ef009903415f37f69dcc5778be8f5fb14c08fe
|
[
"Apache-2.0"
] | 8
|
2018-04-25T17:47:29.000Z
|
2019-08-29T06:38:27.000Z
|
# coding: utf-8
"""
Billing API
Billing API allows users to retrieve billing reports and service package details.
OpenAPI spec version: 1.4.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class QuotaUsageReport(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'amount': 'int',
'campaign_name': 'str',
'time': 'datetime',
'type': 'str'
}
attribute_map = {
'amount': 'amount',
'campaign_name': 'campaign_name',
'time': 'time',
'type': 'type'
}
def __init__(self, amount=None, campaign_name=None, time=None, type=None):
"""
QuotaUsageReport - a model defined in Swagger
"""
self._amount = amount
self._campaign_name = campaign_name
self._time = time
self._type = type
self.discriminator = None
@property
def amount(self):
"""
Gets the amount of this QuotaUsageReport.
Amount of quota usage entry. Negative if it is quota consumption.
:return: The amount of this QuotaUsageReport.
:rtype: int
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this QuotaUsageReport.
Amount of quota usage entry. Negative if it is quota consumption.
:param amount: The amount of this QuotaUsageReport.
:type: int
"""
if amount is None:
raise ValueError("Invalid value for `amount`, must not be `None`")
self._amount = amount
@property
def campaign_name(self):
"""
Gets the campaign_name of this QuotaUsageReport.
Campaign name of quota usage entry. Null if quota usage entry type is not reservation or reservation release.
:return: The campaign_name of this QuotaUsageReport.
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
"""
Sets the campaign_name of this QuotaUsageReport.
Campaign name of quota usage entry. Null if quota usage entry type is not reservation or reservation release.
:param campaign_name: The campaign_name of this QuotaUsageReport.
:type: str
"""
self._campaign_name = campaign_name
@property
def time(self):
"""
Gets the time of this QuotaUsageReport.
Added time of quota usage entry in RFC3339 date-time with millisecond accuracy and UTC time zone.
:return: The time of this QuotaUsageReport.
:rtype: datetime
"""
return self._time
@time.setter
def time(self, time):
"""
Sets the time of this QuotaUsageReport.
Added time of quota usage entry in RFC3339 date-time with millisecond accuracy and UTC time zone.
:param time: The time of this QuotaUsageReport.
:type: datetime
"""
if time is None:
raise ValueError("Invalid value for `time`, must not be `None`")
self._time = time
@property
def type(self):
"""
Gets the type of this QuotaUsageReport.
Type of quota usage entry.
:return: The type of this QuotaUsageReport.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this QuotaUsageReport.
Type of quota usage entry.
:param type: The type of this QuotaUsageReport.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
allowed_values = ["reservation", "reservation_release", "reservation_termination", "package_renewal", "package_creation", "package_termination"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, QuotaUsageReport):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
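# Minimal usage sketch (the values are made up; assumes this generated model
# class is importable in a package context).
if __name__ == '__main__':
    report = QuotaUsageReport(amount=-5, campaign_name='spring-rollout')
    report.type = 'reservation'  # setter validates against allowed_values
    print(report.to_dict())
    # {'amount': -5, 'campaign_name': 'spring-rollout', 'time': None, 'type': 'reservation'}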
| 27.85514
| 152
| 0.571716
|
7952e4f4ec31da946e6d02032a7f8c3804c54a34
| 3,077
|
py
|
Python
|
p053_more_on_functions.py
|
bayramcicek/py-repo
|
e99d8881dd3eb5296ec5dcfba4de2c3418044897
|
[
"Unlicense"
] | null | null | null |
p053_more_on_functions.py
|
bayramcicek/py-repo
|
e99d8881dd3eb5296ec5dcfba4de2c3418044897
|
[
"Unlicense"
] | null | null | null |
p053_more_on_functions.py
|
bayramcicek/py-repo
|
e99d8881dd3eb5296ec5dcfba4de2c3418044897
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3.6
# created by cicek on 13.09.2018 22:19
'''
Python allows you to define functions with a varying number of arguments. Using *args as
a function parameter enables you to pass an arbitrary number of arguments to
that function. The arguments are then accessible as the tuple args in the body
of the function.
'''
def function(named_arg, *args):
# The parameter *args must come after the named parameters to a function.
# The name args is just a convention; you can choose to use another.
print(named_arg) # 1
print(args) # (2, 3, 4, 5)
function(1, 2, 3, 4, 5)
def my_func(x, *args):
print(x) # 8
print(args) # ()
my_func(8)
my_func(8, 9, 12)
# 8
# (9, 12)
my_func(8, 9)
# 8
# (9,)
print("--------------------------------")
def function(x, y, food="pizza"):
print(food)
print(x)
print(y)
function(1, 2)
function(3, 4, "tacos")
# output:
# pizza
# 1
# 2
# tacos
# 3
# 4
print("---------------------------------")
def f(x, *y):
print(x + sum(y)) # 10010
f(1, 2, 3, 4, 10000)
print("---------------------------------")
# you can use *args alone without a named argument
def function(*args):
print(args) # (1,2,3,4,5)
function(1,2,3,4,5)
# "The parameter *args must come after the named parameters to a function[if any]."
print("---------------------------------")
def function(pokemon_motto, *animals):
print(pokemon_motto)
print(animals)
function('Gotta Catch Them All', 'Bulbasaur', 'Ivysaur', 'Venusaur', 'Charmander', 'Charmeleon', 'Charizard')
# output
# Gotta Catch Them All
# ('Bulbasaur', 'Ivysaur', 'Venusaur', 'Charmander', 'Charmeleon', 'Charizard')
print("---------------------------------")
'''
The * means "anyhting else". With *args you could pass any amount of arguments
to that function. In the example, the first argument is named_argument = 1.
This is because it's the first argument passed and in the function it's named.
the rest of the arguments are 2, 3, 4,5. Since they are not named, they fall in
the *args category. Therefore, it they are converted to a tuple for use inside
the function.'''
print("---------------------------------")
def func(*args):
print(args)
func("hi", 1, "hello") # ('hi', 1, 'hello')
print("---------------------------------")
# You can actually use named parameters after *args but they become keyword-only arguments.
def spam(*eggs, egg):
print(egg)
spam(1, 2, egg = 3) #3
# spam(1, 2, 3) #TypeError spam() needs keyword-only argument egg
print("---------------------------------")
def function(named_arg, b, *a):
print(named_arg)
print(b)
print(a)
function(44, "g", 2,6,7)
# 44
# g
# (2, 6, 7) --> tuple
print("---------------------------------")
def function(named_arg, *args):
print(named_arg, type(named_arg)) # (1, 2, 3) <class 'tuple'>
print(args, type(args)) # (4, 5, 6, 7, 8) <class 'tuple'>
function((1,2,3),4, 5, 6, 7, 8)
print("---------------------------------")
def multiply(first,*another):
for i in another:
print(i*first)
multiply(5,2,3,4,5,6)
# Result:
# 10
# 15
# 20
# 25
# 30
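print("---------------------------------")
# Related sketch (beyond the original exercise): **kwargs collects extra keyword
# arguments into a dict, complementing *args.
def describe(*args, **kwargs):
    print(args) # positional extras as a tuple
    print(kwargs) # keyword extras as a dict
describe(1, 2, name="ada", year=1815)
# (1, 2)
# {'name': 'ada', 'year': 1815}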
| 21.669014
| 109
| 0.579786
|
7952e4fb88eb85def3df4c7041294dd733063c80
| 3,629
|
py
|
Python
|
robinhood/quote.py
|
Marxvim/Robinhood-1
|
c3c83391dcb3eb1b1584bf83a0d965d82c4679ed
|
[
"MIT"
] | 17
|
2020-03-21T14:19:11.000Z
|
2021-12-29T07:16:01.000Z
|
robinhood/quote.py
|
Marxvim/Robinhood-1
|
c3c83391dcb3eb1b1584bf83a0d965d82c4679ed
|
[
"MIT"
] | null | null | null |
robinhood/quote.py
|
Marxvim/Robinhood-1
|
c3c83391dcb3eb1b1584bf83a0d965d82c4679ed
|
[
"MIT"
] | 5
|
2020-06-11T20:51:44.000Z
|
2022-01-10T06:35:25.000Z
|
from .detail.const_dict import ConstDict
from .detail.common import timestamp_now, _to_float
from datetime import datetime
import pandas as pd
class QuoteBase(ConstDict):
def __init__(self, quote, time=None):
quote['time'] = time if time else timestamp_now()
ConstDict.__init__(self, quote)
def _get(self, key):
return self._dict[key]
def _get_float(self, key):
return _to_float(self._get(key))
@property
def symbol(self) -> str:
return self._dict['symbol']
@property
def time(self) -> pd.Timestamp:
return self._dict['time']
class Quote(QuoteBase):
"""
Example json quote
{
"ask_price":"253.810000",
"ask_size":144,
"bid_price":"253.510000",
"bid_size":100,
"last_trade_price":"254.290000",
"last_extended_hours_trade_price":"253.500000",
"previous_close":"254.810000",
"adjusted_previous_close":"254.810000",
"previous_close_date":"2020-03-30",
"symbol":"AAPL",
"trading_halted":"False",
"has_traded":"True",
"last_trade_price_source":"consolidated",
"updated_at":"2020-03-31T21:27:45Z",
"instrument":"https://api.robinhood.com/instruments/450dfc6d-5510-4d40-abfb-f633b7d9be3e/"
}
"""
@property
def ask(self) -> float:
return self._get_float('ask_price')
def __init__(self, quote):
QuoteBase.__init__(self, quote)
@property
def bid(self) -> float:
return self._get_float('bid_price')
@property
def mark(self) -> float:
return self._get_float('last_trade_price')
@property
def previous_close(self) -> float:
        return self._get_float('previous_close')
    @property
    def adjusted_previous_close(self) -> float:
        return self._get_float('adjusted_previous_close')
@property
def ask_size(self) -> int:
return self._dict['ask_size']
@property
def bid_size(self) -> int:
return self._dict['bid_size']
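# Example usage (a minimal sketch; the dict below is a trimmed-down version of
# the JSON sample in the Quote docstring):
#   quote = Quote({"symbol": "AAPL", "ask_price": "253.810000",
#                  "bid_price": "253.510000", "last_trade_price": "254.290000"})
#   print(quote.symbol, quote.ask, quote.bid, quote.mark)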
class CryptoQuote(QuoteBase):
"""
Example json quote
{
"ask_price":"6457.583965",
"bid_price":"6449.317366",
"mark_price":"6453.450665",
"high_price":"6539.245000",
"low_price":"6319.569798",
"open_price":"6441.625000",
"symbol":"BTCUSD",
"id":"3d961844-d360-45fc-989b-f6fca761d511",
"volume":"0.000000" ##Note Rb currently always returns volume being 0
}
"""
def __init__(self, quote):
QuoteBase.__init__(self, quote)
@property
def ask(self) -> float:
return self._get_float('ask_price')
@property
def bid(self) -> float:
return self._get_float('bid_price')
@property
def mark(self) -> float:
return self._get_float('mark_price')
@property
def high(self) -> float:
return self._get_float('high_price')
@property
def low(self) -> float:
return self._get_float('low_price')
@property
def open(self) -> float:
return self._get_float('open_price')
class HistoricalQuote(QuoteBase):
float_columns = ['open_price', 'close_price', 'high_price', 'low_price', 'volume']
"""
Example json historical quote:
{
'begins_at': '2020-04-28T13:00:00Z',
'open_price': '285.150000',
'close_price': '285.130000',
'high_price': '285.300100',
'low_price': '285.130000',
'volume': 3006,
'session': 'pre',
'interpolated': False}
Note: historical quotes are the same for crypto/regular quotes
"""
def __init__(self, quote: dict):
QuoteBase.__init__(self, quote, pd.Timestamp(quote['begins_at']))
@property
def low(self):
return self._get_float('low_price')
@property
def high(self):
return self._get_float('high_price')
@property
def open(self):
return self._get_float('open_price')
@property
def close(self):
return self._get_float('close_price')
@property
def volume(self):
return self._get_float('volume')
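# Example usage (a minimal sketch; the dict mirrors the JSON sample in the
# HistoricalQuote docstring):
#   bar = HistoricalQuote({'begins_at': '2020-04-28T13:00:00Z',
#                          'open_price': '285.150000', 'close_price': '285.130000',
#                          'high_price': '285.300100', 'low_price': '285.130000',
#                          'volume': 3006})
#   print(bar.time, bar.open, bar.close)  # bar.time is a pandas Timestamp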
| 22.128049
| 93
| 0.695233
|
7952e546db55a99523e0d050f27e09da0723baf3
| 49,045
|
py
|
Python
|
third_party/python/Lib/test/test_sys.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_sys.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/test_sys.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import cosmo
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
import locale
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import _thread
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf("tiny" in cosmo.MODE, "")
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises((RecursionError, MemoryError), f)
self.assertRaises((RecursionError, MemoryError), f)
finally:
sys.setrecursionlimit(oldlimit)
@unittest.skip
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
@unittest.skipIf("tiny" in cosmo.MODE, "")
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: ",
err)
@unittest.skip
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
@unittest.skip # why is this even allowed
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable.replace("//", "/"))
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.replace("//", "/").encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ['', repr(sys.executable.replace("//", "/").encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = "C"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # has no effect
out = self.c_locale_get_error_handler(encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(False and hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
if (os.environ.get('PYTHONMALLOC', None)
and not sys.flags.ignore_environment):
self.skipTest("cannot test if PYTHONMALLOC env var is set")
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
@unittest.skip("alignment of C struct?")
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('4P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
        # Ellipsis
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2n15Pl4Pn9Pn11PIP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
| 38.346364
| 116
| 0.582852
|
7952e5c7ae85dbca7d6c640913b3074b35e01434
| 602
|
py
|
Python
|
src/main/easy/search_insert.py
|
eu-snehagupta/Leetcode
|
7cfce8a385e0b0290fe054ecfd48e9a7a9a5ba83
|
[
"MIT"
] | null | null | null |
src/main/easy/search_insert.py
|
eu-snehagupta/Leetcode
|
7cfce8a385e0b0290fe054ecfd48e9a7a9a5ba83
|
[
"MIT"
] | null | null | null |
src/main/easy/search_insert.py
|
eu-snehagupta/Leetcode
|
7cfce8a385e0b0290fe054ecfd48e9a7a9a5ba83
|
[
"MIT"
] | null | null | null |
# Given a sorted array of distinct integers and
# a target value, return the index if the target is found.
# If not, return the index where it would be if it were inserted in order.
#
# Example 1:
# Input: nums = [1,3,5,6], target = 5
# Output: 2
# Constraints:
# 1 <= nums.length <= 10**4
# -10**4 <= nums[i] <= 10**4
# nums contains distinct values sorted in ascending order.
# -10**4 <= target <= 10**4
def search_insert(nums, target):
nums.append(target)
nums.sort()
return nums.index(target)
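# An alternative sketch using binary search from the standard-library bisect
# module: bisect_left returns the leftmost index at which target could be
# inserted to keep nums sorted, which is exactly the required answer and runs
# in O(log n) (the append/sort/index version above is O(n log n) and also
# mutates nums).
from bisect import bisect_left
def search_insert_bisect(nums, target):
    return bisect_left(nums, target)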
if __name__ == '__main__':
input_array = [1]
print(search_insert(input_array, 0))
| 26.173913
| 74
| 0.664452
|
7952e5ca7de493242a32782badc6302611bb2a91
| 14,175
|
py
|
Python
|
homeassistant/components/onewire/sensor.py
|
galihmelon/core
|
0c852b5f816c9b21f244b7acebfcc952ff29b937
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/onewire/sensor.py
|
galihmelon/core
|
0c852b5f816c9b21f244b7acebfcc952ff29b937
|
[
"Apache-2.0"
] | 33
|
2020-12-18T07:14:41.000Z
|
2022-03-31T06:03:54.000Z
|
homeassistant/components/onewire/sensor.py
|
galihmelon/core
|
0c852b5f816c9b21f244b7acebfcc952ff29b937
|
[
"Apache-2.0"
] | null | null | null |
"""Support for 1-Wire environment sensors."""
from glob import glob
import logging
import os
from pi1wire import InvalidCRCException, Pi1Wire, UnsupportResponseException
from pyownet import protocol
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_TYPE,
ELECTRICAL_CURRENT_AMPERE,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
TEMP_CELSIUS,
VOLT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .const import (
CONF_MOUNT_DIR,
CONF_NAMES,
CONF_TYPE_OWFS,
CONF_TYPE_OWSERVER,
CONF_TYPE_SYSBUS,
DEFAULT_OWSERVER_PORT,
DEFAULT_SYSBUS_MOUNT_DIR,
DOMAIN,
PRESSURE_CBAR,
)
_LOGGER = logging.getLogger(__name__)
DEVICE_SENSORS = {
# Family : { SensorType: owfs path }
"10": {"temperature": "temperature"},
"12": {"temperature": "TAI8570/temperature", "pressure": "TAI8570/pressure"},
"22": {"temperature": "temperature"},
"26": {
"temperature": "temperature",
"humidity": "humidity",
"humidity_hih3600": "HIH3600/humidity",
"humidity_hih4000": "HIH4000/humidity",
"humidity_hih5030": "HIH5030/humidity",
"humidity_htm1735": "HTM1735/humidity",
"pressure": "B1-R1-A/pressure",
"illuminance": "S3-R1-A/illuminance",
"voltage_VAD": "VAD",
"voltage_VDD": "VDD",
"current": "IAD",
},
"28": {"temperature": "temperature"},
"3B": {"temperature": "temperature"},
"42": {"temperature": "temperature"},
"1D": {"counter_a": "counter.A", "counter_b": "counter.B"},
"EF": {"HobbyBoard": "special"},
}
DEVICE_SUPPORT_SYSBUS = ["10", "22", "28", "3B", "42"]
# EF sensors are usually hobbyboards specialized sensors.
# These can only be read by OWFS. Currently this driver only supports them
# via owserver (network protocol)
HOBBYBOARD_EF = {
"HobbyBoards_EF": {
"humidity": "humidity/humidity_corrected",
"humidity_raw": "humidity/humidity_raw",
"temperature": "humidity/temperature",
},
"HB_MOISTURE_METER": {
"moisture_0": "moisture/sensor.0",
"moisture_1": "moisture/sensor.1",
"moisture_2": "moisture/sensor.2",
"moisture_3": "moisture/sensor.3",
},
}
SENSOR_TYPES = {
# SensorType: [ Measured unit, Unit ]
"temperature": ["temperature", TEMP_CELSIUS],
"humidity": ["humidity", PERCENTAGE],
"humidity_hih3600": ["humidity", PERCENTAGE],
"humidity_hih4000": ["humidity", PERCENTAGE],
"humidity_hih5030": ["humidity", PERCENTAGE],
"humidity_htm1735": ["humidity", PERCENTAGE],
"humidity_raw": ["humidity", PERCENTAGE],
"pressure": ["pressure", PRESSURE_MBAR],
"illuminance": ["illuminance", LIGHT_LUX],
"wetness_0": ["wetness", PERCENTAGE],
"wetness_1": ["wetness", PERCENTAGE],
"wetness_2": ["wetness", PERCENTAGE],
"wetness_3": ["wetness", PERCENTAGE],
"moisture_0": ["moisture", PRESSURE_CBAR],
"moisture_1": ["moisture", PRESSURE_CBAR],
"moisture_2": ["moisture", PRESSURE_CBAR],
"moisture_3": ["moisture", PRESSURE_CBAR],
"counter_a": ["counter", "count"],
"counter_b": ["counter", "count"],
"HobbyBoard": ["none", "none"],
"voltage": ["voltage", VOLT],
"voltage_VAD": ["voltage", VOLT],
"voltage_VDD": ["voltage", VOLT],
"current": ["current", ELECTRICAL_CURRENT_AMPERE],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAMES): {cv.string: cv.string},
vol.Optional(CONF_MOUNT_DIR, default=DEFAULT_SYSBUS_MOUNT_DIR): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_OWSERVER_PORT): cv.port,
}
)
def hb_info_from_type(dev_type="std"):
"""Return the proper info array for the device type."""
if "std" in dev_type:
return DEVICE_SENSORS
if "HobbyBoard" in dev_type:
return HOBBYBOARD_EF
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up 1-Wire platform."""
if config.get(CONF_HOST):
config[CONF_TYPE] = CONF_TYPE_OWSERVER
elif config[CONF_MOUNT_DIR] == DEFAULT_SYSBUS_MOUNT_DIR:
config[CONF_TYPE] = CONF_TYPE_SYSBUS
else: # pragma: no cover
        # This part of the implementation does not conform to policy regarding 3rd-party libraries, and will no longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
config[CONF_TYPE] = CONF_TYPE_OWFS
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up 1-Wire platform."""
entities = await hass.async_add_executor_job(get_entities, config_entry.data)
async_add_entities(entities, True)
def get_entities(config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
    # We have an owserver on a remote (or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
owhost = config[CONF_HOST]
owport = config[CONF_PORT]
_LOGGER.debug("Initializing using %s:%s", owhost, owport)
try:
owproxy = protocol.proxy(host=owhost, port=owport)
devices = owproxy.dir()
except protocol.Error as exc:
_LOGGER.error(
"Cannot connect to owserver on %s:%d, got: %s", owhost, owport, exc
)
return entities
for device in devices:
_LOGGER.debug("Found device: %s", device)
family = owproxy.read(f"{device}family").decode()
dev_type = "std"
if "EF" in family:
dev_type = "HobbyBoard"
family = owproxy.read(f"{device}type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device,
)
continue
for sensor_key, sensor_value in hb_info_from_type(dev_type)[family].items():
if "moisture" in sensor_key:
s_id = sensor_key.split("_")[1]
is_leaf = int(
owproxy.read(f"{device}moisture/is_leaf.{s_id}").decode()
)
if is_leaf:
sensor_key = f"wetness_{s_id}"
sensor_id = os.path.split(os.path.split(device)[0])[1]
device_file = os.path.join(os.path.split(device)[0], sensor_value)
entities.append(
OneWireProxy(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in Pi1Wire(base_dir).find_all_sensors():
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirect(
device_names.get(sensor_id, sensor_id),
device_file,
"temperature",
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
        # This part of the implementation does not conform to policy regarding 3rd-party libraries, and will no longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFS(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
class OneWire(Entity):
"""Implementation of a 1-Wire sensor."""
def __init__(self, name, device_file, sensor_type):
"""Initialize the sensor."""
self._name = f"{name} {sensor_type.capitalize()}"
self._device_file = device_file
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._state = None
self._value_raw = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if "count" in self._unit_of_measurement:
return int(self._state)
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {"device_file": self._device_file, "raw_value": self._value_raw}
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._device_file
class OneWireProxy(OneWire):
"""Implementation of a 1-Wire sensor through owserver."""
def __init__(self, name, device_file, sensor_type, owproxy):
"""Initialize the sensor."""
super().__init__(name, device_file, sensor_type)
self._owproxy = owproxy
def _read_value_ownet(self):
"""Read a value from the owserver."""
return self._owproxy.read(self._device_file).decode().lstrip()
def update(self):
"""Get the latest data from the device."""
value = None
value_read = False
try:
value_read = self._read_value_ownet()
except protocol.Error as exc:
_LOGGER.error("Owserver failure in read(), got: %s", exc)
if value_read:
value = round(float(value_read), 1)
self._value_raw = float(value_read)
self._state = value
class OneWireDirect(OneWire):
"""Implementation of a 1-Wire sensor directly connected to RPI GPIO."""
def __init__(self, name, device_file, sensor_type, owsensor):
"""Initialize the sensor."""
super().__init__(name, device_file, sensor_type)
self._owsensor = owsensor
def update(self):
"""Get the latest data from the device."""
value = None
try:
self._value_raw = self._owsensor.get_temperature()
value = round(float(self._value_raw), 1)
except (
FileNotFoundError,
InvalidCRCException,
UnsupportResponseException,
) as ex:
_LOGGER.warning("Cannot read from sensor %s: %s", self._device_file, ex)
self._state = value
class OneWireOWFS(OneWire): # pragma: no cover
"""Implementation of a 1-Wire sensor through owfs.
    This part of the implementation does not conform to policy regarding 3rd-party libraries, and will no longer be updated.
https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
"""
def _read_value_raw(self):
"""Read the value as it is returned by the sensor."""
with open(self._device_file) as ds_device_file:
lines = ds_device_file.readlines()
return lines
def update(self):
"""Get the latest data from the device."""
value = None
try:
value_read = self._read_value_raw()
if len(value_read) == 1:
value = round(float(value_read[0]), 1)
self._value_raw = float(value_read[0])
except ValueError:
_LOGGER.warning("Invalid value read from %s", self._device_file)
except FileNotFoundError:
_LOGGER.warning("Cannot read from sensor: %s", self._device_file)
self._state = value
| 35.977157
| 131
| 0.605996
|
7952e61159ec31a4be5394b50f30cbc20f9b414e
| 22,649
|
py
|
Python
|
tools/aws_benchmarking/server/cluster_master.py
|
missyliu/Paddle
|
3fd33d4df9aa44f482bca8214cca8e6d90f06da2
|
[
"Apache-2.0"
] | null | null | null |
tools/aws_benchmarking/server/cluster_master.py
|
missyliu/Paddle
|
3fd33d4df9aa44f482bca8214cca8e6d90f06da2
|
[
"Apache-2.0"
] | null | null | null |
tools/aws_benchmarking/server/cluster_master.py
|
missyliu/Paddle
|
3fd33d4df9aa44f482bca8214cca8e6d90f06da2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import json
import math
import time
import threading
import logging
import copy
import netaddr
import boto3
import namesgenerator
import paramiko
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# You must have aws_access_key_id, aws_secret_access_key, region set in
# ~/.aws/credentials and ~/.aws/config
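# For example (placeholder values):
#
#   # ~/.aws/credentials
#   [default]
#   aws_access_key_id = YOUR_ACCESS_KEY_ID
#   aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
#
#   # ~/.aws/config
#   [default]
#   region = us-east-2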
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--key_name', type=str, default="", help="required, key pair name")
parser.add_argument(
'--security_group_id',
type=str,
default="",
help="required, the security group id associated with your VPC")
parser.add_argument(
'--vpc_id',
type=str,
default="",
help="The VPC in which you wish to run test")
parser.add_argument(
'--subnet_id',
type=str,
default="",
help="The Subnet_id in which you wish to run test")
parser.add_argument(
'--pserver_instance_type',
type=str,
default="c5.2xlarge",
help="your pserver instance type, c5.2xlarge by default")
parser.add_argument(
'--trainer_instance_type',
type=str,
default="p2.8xlarge",
help="your trainer instance type, p2.8xlarge by default")
parser.add_argument(
'--task_name',
type=str,
default="",
help="the name you want to identify your job")
parser.add_argument(
'--pserver_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-east-2"
)
parser.add_argument(
'--trainer_image_id',
type=str,
default="ami-da2c1cbf",
help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-west-2"
)
parser.add_argument(
'--availability_zone',
type=str,
default="us-east-2a",
help="aws zone id to place ec2 instances")
parser.add_argument(
'--trainer_count', type=int, default=1, help="Trainer count")
parser.add_argument(
'--pserver_count', type=int, default=1, help="Pserver count")
parser.add_argument(
'--pserver_bash_file',
type=str,
default=os.path.join(os.path.dirname(__file__), "pserver.sh.template"),
help="pserver bash file path")
parser.add_argument(
'--pserver_command', type=str, default="", help="pserver start command")
parser.add_argument(
'--trainer_bash_file',
type=str,
default=os.path.join(os.path.dirname(__file__), "trainer.sh.template"),
help="trainer bash file path")
parser.add_argument(
'--trainer_command', type=str, default="", help="trainer start command")
parser.add_argument(
'--action', type=str, default="serve", help="create|cleanup|serve")
parser.add_argument('--pem_path', type=str, help="private key file")
parser.add_argument(
'--pserver_port', type=str, default="5436", help="pserver port")
parser.add_argument(
'--docker_image', type=str, default="busybox", help="training docker image")
parser.add_argument(
'--master_server_port', type=int, default=5436, help="master server port")
parser.add_argument(
'--master_server_ip', type=str, default="", help="master server private ip")
parser.add_argument(
'--no_clean_up',
type=str2bool,
default=False,
help="whether to clean up after training")
args = parser.parse_args()
ec2client = boto3.client('ec2')
args.log_path = os.path.join(os.path.dirname(__file__), "logs/")
logging.basicConfig(
filename=args.log_path + 'master.log',
level=logging.INFO,
format='%(asctime)s %(message)s')
log_files = ["master.log"]
def create_subnet():
# if no vpc id provided, list vpcs
logging.info("start creating subnet")
    vpc_cidrBlock = None
    if not args.vpc_id:
logging.info("no vpc provided, trying to find the default one")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "isDefault",
"Values": ["true", ]
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No default VPC')
args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"]
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("default vpc fount with id %s and CidrBlock %s" %
(args.vpc_id, vpc_cidrBlock))
if not vpc_cidrBlock:
logging.info("trying to find cidrblock for vpc")
vpcs_desc = ec2client.describe_vpcs(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
if len(vpcs_desc["Vpcs"]) == 0:
raise ValueError('No VPC found')
vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"]
logging.info("cidrblock for vpc is %s" % vpc_cidrBlock)
# list subnets in vpc in order to create a new one
logging.info("trying to find ip blocks for new subnet")
subnets_desc = ec2client.describe_subnets(
Filters=[{
"Name": "vpc-id",
"Values": [args.vpc_id, ],
}], )
ips_taken = []
for subnet_dec in subnets_desc["Subnets"]:
ips_taken.append(subnet_dec["CidrBlock"])
ip_blocks_avaliable = netaddr.IPSet(
[vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken)
# adding 10 addresses as buffer
cidr_prefix = 32 - math.ceil(
math.log(args.pserver_count + args.trainer_count + 10, 2))
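    # e.g. with the default 1 pserver + 1 trainer, 12 addresses are needed, so
    # ceil(log2(12)) = 4 host bits and the subnet prefix becomes /28 (16 addresses).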
if cidr_prefix <= 16:
raise ValueError('Too many nodes to fit in current VPC')
    subnet_cidr = None
    for ipnetwork in ip_blocks_avaliable.iter_cidrs():
try:
subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next()
logging.info("subnet ip block found %s" % (subnet_cidr))
break
except Exception:
pass
if not subnet_cidr:
raise ValueError(
            'No available subnet to fit required nodes in current VPC')
logging.info("trying to create subnet")
subnet_desc = ec2client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=args.vpc_id,
AvailabilityZone=args.availability_zone)
subnet_id = subnet_desc["Subnet"]["SubnetId"]
subnet_waiter = ec2client.get_waiter('subnet_available')
# sleep for 1s before checking its state
time.sleep(1)
subnet_waiter.wait(SubnetIds=[subnet_id, ])
logging.info("subnet created")
logging.info("adding tags to newly created subnet")
ec2client.create_tags(
Resources=[subnet_id, ],
Tags=[{
"Key": "Task_name",
'Value': args.task_name
}])
return subnet_id
def generate_task_name():
return namesgenerator.get_random_name()
def script_to_str(file_path):
if not file_path:
return "echo $PSERVER_HOSTS"
file = open(file_path, 'r')
text = file.read().strip()
file.close()
return text
def run_instances(image_id, instance_type, count, role, cmd=""):
if count == 0:
return []
response = ec2client.run_instances(
ImageId=image_id,
InstanceType=instance_type,
MaxCount=count,
MinCount=count,
UserData=cmd,
DryRun=False,
InstanceInitiatedShutdownBehavior="stop",
KeyName=args.key_name,
Placement={'AvailabilityZone': args.availability_zone},
NetworkInterfaces=[{
'DeviceIndex': 0,
'SubnetId': args.subnet_id,
"AssociatePublicIpAddress": True,
'Groups': args.security_group_ids
}],
TagSpecifications=[{
'ResourceType': "instance",
'Tags': [{
"Key": 'Task_name',
"Value": args.task_name
}, {
"Key": 'Role',
"Value": role
}]
}])
instance_ids = []
for instance in response["Instances"]:
instance_ids.append(instance["InstanceId"])
if len(instance_ids) > 0:
logging.info(str(len(instance_ids)) + " instance(s) created")
else:
logging.info("no instance created")
#create waiter to make sure it's running
logging.info("waiting for instance to become accessible")
waiter = ec2client.get_waiter('instance_status_ok')
waiter.wait(
Filters=[{
"Name": "instance-status.status",
"Values": ["ok"]
}, {
"Name": "instance-status.reachability",
"Values": ["passed"]
}, {
"Name": "instance-state-name",
"Values": ["running"]
}],
InstanceIds=instance_ids)
instances_response = ec2client.describe_instances(InstanceIds=instance_ids)
return instances_response["Reservations"][0]["Instances"]
def create_pservers():
try:
return run_instances(
image_id=args.pserver_image_id,
instance_type=args.pserver_instance_type,
count=args.pserver_count,
role="PSERVER", )
except Exception:
logging.exception("error while trying to create pservers")
cleanup(args.task_name)
def log_to_file(source, filename):
if not filename in log_files:
log_files.append(filename)
with open(args.log_path + filename, "a") as log_file:
for line in iter(source.readline, ""):
log_file.write(line)
def parse_command(command_raw, defaults={}):
if not command_raw:
command_raw = ""
commands_processed = []
parameter_map = copy.copy(defaults)
for seg in command_raw.split(","):
if ":" in seg:
parameters = seg.split(":")
parameter_map[parameters[0]] = parameters[1]
else:
commands_processed.append(seg)
for key, val in parameter_map.iteritems():
commands_processed.append("--" + key + " " + str(val))
return " ".join(commands_processed)
def create_trainers(kickoff_cmd, pserver_endpoints_str):
def create_and_start_trainer(trainer_index):
logging.info("trainer " + str(trainer_index) + " is starting")
instance_response = run_instances(
image_id=args.trainer_image_id,
instance_type=args.trainer_instance_type,
count=1,
role="TRAINER", )[0]
trainer_ip = instance_response["PrivateIpAddress"]
logging.info("trainer " + str(trainer_index) + " started")
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=trainer_ip, username="ubuntu", pkey=ssh_key)
logging.info("trainer " + str(trainer_index) +
" terminal connected via ssh")
cmd = kickoff_cmd.format(
PSERVER_HOSTS=pserver_endpoints_str,
DOCKER_IMAGE=args.docker_image,
TRAINER_INDEX=str(trainer_index),
TASK_NAME=args.task_name,
TRAINER_COUNT=args.trainer_count,
COMMAND=parse_command(args.trainer_command, {"device": "GPU"}),
MASTER_ENDPOINT=args.master_server_ip + ":" +
str(args.master_server_port))
logging.info(cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=cmd)
# read and save output log
logging.info("trainer " + str(trainer_index) +
" command executed, keep fetching log")
stdout_thread = threading.Thread(
target=log_to_file,
args=(
stdout,
"trainer_" + str(trainer_index) + ".log", ))
stderr_thread = threading.Thread(
target=log_to_file,
args=(
stderr,
"trainer_" + str(trainer_index) + "_err.log", ))
stdout_thread.start()
stderr_thread.start()
stdout_thread.join()
stderr_thread.join()
return_code = stdout.channel.recv_exit_status()
if return_code != 0:
trainer_create_results[trainer_index] = {'has_error': True}
raise ValueError("trainer didn't finish with exit code 0")
ssh_client.close()
# multi thread starting trainer instance and run kickoff command
trainer_threads = []
trainer_create_results = {}
try:
for i in xrange(args.trainer_count):
logging.info("starting tread for trainer " + str(i))
trainer_thread = threading.Thread(
target=create_and_start_trainer, args=(i, ))
trainer_thread.start()
trainer_threads.append(trainer_thread)
for trainer_thread in trainer_threads:
trainer_thread.join()
        for result in trainer_create_results.values():
            if result["has_error"]:
                logging.error(
                    "error during trainer starting or training, destroying the whole cluster "
)
cleanup(args.task_name)
break
logging.info("all trainers stopped")
except Exception, e:
logging.info(
"Training exception, clean up resources, please check log for more info"
)
finally:
cleanup(args.task_name)
def cleanup(task_name):
if args.no_clean_up:
logging.info("no clean up option set, going to leave the setup running")
return
#shutdown all ec2 instances
print("going to clean up " + task_name + " instances")
instances_response = ec2client.describe_instances(Filters=[{
"Name": "tag:Task_name",
"Values": [task_name]
}])
instance_ids = []
if len(instances_response["Reservations"]) > 0:
for reservation in instances_response["Reservations"]:
for instance in reservation["Instances"]:
instance_ids.append(instance["InstanceId"])
ec2client.terminate_instances(InstanceIds=instance_ids)
instance_termination_waiter = ec2client.get_waiter(
'instance_terminated')
instance_termination_waiter.wait(InstanceIds=instance_ids)
#delete the subnet created
subnet = ec2client.describe_subnets(Filters=[{
"Name": "tag:Task_name",
"Values": [task_name]
}])
if len(subnet["Subnets"]) > 0:
ec2client.delete_subnet(SubnetId=subnet["Subnets"][0]["SubnetId"])
# no subnet delete waiter, just leave it.
logging.info("Clearnup done")
return
def kickoff_pserver(host, pserver_endpoints_str):
    ssh_client = None
    try:
ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=host, username="ubuntu", pkey=ssh_key)
cmd = (script_to_str(args.pserver_bash_file)).format(
PSERVER_HOSTS=pserver_endpoints_str,
DOCKER_IMAGE=args.docker_image,
PSERVER_PORT=args.pserver_port,
TASK_NAME=args.task_name,
COMMAND=parse_command(args.pserver_command, {"device": "CPU"}),
TRAINER_COUNT=args.trainer_count,
TRAINER_INDEX=0,
# there is no way to use 0.0.0.0:port to start pserver
# has to docker --network="host" with host ip to make this work
SERVER_ENDPOINT=host + ":" + str(args.pserver_port),
MASTER_ENDPOINT=args.master_server_ip + ":" +
str(args.master_server_port))
logging.info(cmd)
stdin, stdout, stderr = ssh_client.exec_command(command=cmd)
stdout_thread = threading.Thread(
target=log_to_file, args=(
stdout,
"pserver_" + host + ".log", ))
stderr_thread = threading.Thread(
target=log_to_file, args=(
stderr,
"pserver_" + host + "_err.log", ))
stdout_thread.start()
stderr_thread.start()
stdout_thread.join()
stderr_thread.join()
return_code = stdout.channel.recv_exit_status()
logging.info(return_code)
if return_code != 0:
raise Exception("Error while kicking off pserver training process")
except Exception:
logging.exception("Error while kicking off pserver training process")
cleanup(args.task_name)
    finally:
        if ssh_client is not None:
            ssh_client.close()
def init_args():
if not args.task_name:
args.task_name = generate_task_name()
logging.info("task name generated %s" % (args.task_name))
if not args.pem_path:
args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem"
if args.security_group_id:
args.security_group_ids = (args.security_group_id, )
args.trainers_job_done_count = 0
def create_cluster():
if not args.subnet_id:
logging.info("creating subnet for this task")
args.subnet_id = create_subnet()
logging.info("subnet %s created" % (args.subnet_id))
logging.info("creating pservers")
pserver_create_response = create_pservers()
logging.info("pserver created, collecting pserver ips")
pserver_endpoints = []
for pserver in pserver_create_response:
pserver_endpoints.append(pserver["NetworkInterfaces"][0][
"PrivateIpAddress"] + ":" + args.pserver_port)
pserver_endpoints_str = ",".join(pserver_endpoints)
logging.info("kicking off pserver training process")
pserver_threads = []
for pserver in pserver_create_response:
pserver_thread = threading.Thread(
target=kickoff_pserver,
args=(pserver["PrivateIpAddress"], pserver_endpoints_str))
pserver_thread.start()
pserver_threads.append(pserver_thread)
logging.info("all pserver training process started")
logging.info("creating trainers and kicking off trainer training process")
create_trainers(
kickoff_cmd=script_to_str(args.trainer_bash_file),
pserver_endpoints_str=pserver_endpoints_str)
for pserver_thread in pserver_threads:
pserver_thread.join()
logging.info("all process ended")
def start_server(args):
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/text')
self.end_headers()
def do_HEAD(self):
self._set_headers()
def do_404(self):
self.send_response(404)
self.send_header('Content-type', 'text/text')
self.end_headers()
logging.info("Received invalid GET request" + self.path)
self.wfile.write("NO ACTION FOUND")
def do_GET(self):
request_path = self.path
if request_path == "/status" or request_path == "/master_logs":
self._set_headers()
logging.info("Received request to return status")
with open(args.log_path + "master.log", "r") as logfile:
self.wfile.write(logfile.read().strip())
elif request_path == "/list_logs" or request_path == "/logs":
self._set_headers()
self.wfile.write("\n".join(log_files))
elif "/log/" in request_path:
self._set_headers()
log_file_path = request_path.replace("/log/", "")
logging.info("requesting log file path is" + args.log_path +
log_file_path)
with open(args.log_path + log_file_path, "r") as logfile:
self.wfile.write(logfile.read().strip())
else:
self.do_404()
def do_POST(self):
request_path = self.path
if request_path == "/save_data":
self._set_headers()
logging.info("Received request to save data")
self.wfile.write("DATA SAVED!")
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
if args.task_name:
with open(args.task_name + ".txt", "a") as text_file:
text_file.write(post_data + "\n")
elif request_path == "/cleanup":
self._set_headers()
logging.info("Received request to cleanup cluster")
cleanup(args.task_name)
self.wfile.write("cleanup in progress")
else:
self.do_404()
server_address = ('', args.master_server_port)
httpd = HTTPServer(server_address, S)
logging.info("HTTP server is starting")
httpd.serve_forever()
def print_arguments():
logging.info('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).iteritems()):
logging.info('%s: %s' % (arg, value))
logging.info('------------------------------------------------')
if __name__ == "__main__":
print_arguments()
if args.action == "create":
logging.info("going to create cluster")
if not args.key_name or not args.security_group_id:
raise ValueError("key_name and security_group_id are required")
init_args()
create_cluster()
elif args.action == "cleanup":
logging.info("going to cleanup cluster")
if not args.task_name:
raise ValueError("task_name is required")
cleanup(args.task_name)
elif args.action == "serve":
# serve mode
if not args.master_server_ip:
raise ValueError(
"No master server ip set, please run with --action create")
logging.info("going to start serve and create cluster")
init_args()
logging.info("starting server in another thread")
server_thread = threading.Thread(target=start_server, args=(args, ))
server_thread.start()
create_cluster()
server_thread.join()
elif args.action == "test":
start_server(args)
| 32.68254
| 103
| 0.622721
|
7952e748d0d63226000aab83d0f9108214accaf9
| 970
|
py
|
Python
|
mcpipy/mobius.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 338
|
2015-01-20T15:07:48.000Z
|
2022-02-25T17:31:06.000Z
|
mcpipy/mobius.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 58
|
2015-03-26T12:21:41.000Z
|
2022-02-20T21:01:33.000Z
|
mcpipy/mobius.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 112
|
2015-08-10T19:20:44.000Z
|
2022-02-23T08:58:52.000Z
|
#
# Code by Alexander Pruss and under the MIT license
#
from mine import *
import sys
from math import cos, sin, pi  # explicit import in case `mine` does not re-export math names
def draw_surface(xf,yf,zf,a0,a1,asteps,b0,b1,bsteps,ox,oy,oz,scalex,scaley,scalez,mcblock,mcmeta):
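    # Samples the parametric surface (xf(a,b), yf(a,b), zf(a,b)) on an asteps x bsteps
    # grid over [a0,a1] x [b0,b1], then places one block of type (mcblock, mcmeta) at
    # each sampled point, scaled by (scalex, scaley, scalez) and offset by (ox, oy, oz).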
for i in range(asteps):
a = (a0 * (asteps-1-i) + a1 * i) / asteps
for j in range(bsteps):
b = (b0 * (bsteps-1-j) + b1 * j) / bsteps
x = xf(a,b)
y = yf(a,b)
z = zf(a,b)
# print a,b,ox+x * scalex, oy+y * scaley, oz+z * scalez
mc.setBlock(ox+x * scalex, oy+y * scaley, oz+z * scalez, mcblock, mcmeta)
mc = Minecraft()
playerPos = mc.player.getPos()
xformula = lambda a,b: (3 + a * cos(b/2)) * cos(b)
yformula = lambda a,b: a * sin(b/2)
zformula = lambda a,b: (3 + a * cos(b/2)) * sin(b)
scale = 15
b = block.STONE
m = 0
draw_surface(xformula,yformula,zformula,-1.,1.,10*scale,0,2*pi,30*scale,playerPos.x,playerPos.y+scale,playerPos.z,scale,scale,scale,b, m)
mc.postToChat("Formula done")
| 28.529412
| 138
| 0.593814
|
7952e824ec1f34f2bd465f3e6cfe635a7991d294
| 21,266
|
py
|
Python
|
tests/hikari/impl/test_rate_limits.py
|
sabidib/hikari
|
e1e112a1b2938890e4abd38eb07b559fda7eedbb
|
[
"MIT"
] | 520
|
2020-10-12T22:53:55.000Z
|
2022-03-30T17:59:53.000Z
|
tests/hikari/impl/test_rate_limits.py
|
sabidib/hikari
|
e1e112a1b2938890e4abd38eb07b559fda7eedbb
|
[
"MIT"
] | 319
|
2020-10-11T19:04:03.000Z
|
2022-03-31T16:55:28.000Z
|
tests/hikari/impl/test_rate_limits.py
|
sabidib/hikari
|
e1e112a1b2938890e4abd38eb07b559fda7eedbb
|
[
"MIT"
] | 85
|
2020-10-17T20:25:47.000Z
|
2022-03-31T15:19:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import contextlib
import logging
import math
import statistics
import sys
import threading
import time
import mock
import pytest
from hikari.impl import rate_limits
from tests.hikari import hikari_test_helpers
class MockFuture(mock.Mock):
def __await__(self):
if False:
yield # Turns this into a generator.
return None
class TestBaseRateLimiter:
def test_context_management(self):
class MockedBaseRateLimiter(rate_limits.BaseRateLimiter):
close = mock.Mock()
acquire = NotImplemented
with MockedBaseRateLimiter() as m:
pass
m.close.assert_called_once()
class TestBurstRateLimiter:
@pytest.fixture()
def mock_burst_limiter(self):
class Impl(rate_limits.BurstRateLimiter):
async def acquire(self, *args, **kwargs) -> None:
raise NotImplementedError
return Impl(__name__)
@pytest.mark.parametrize(("queue", "is_empty"), [(["foo", "bar", "baz"], False), ([], True)])
def test_is_empty(self, queue, is_empty, mock_burst_limiter):
mock_burst_limiter.queue = queue
assert mock_burst_limiter.is_empty is is_empty
def test_close_removes_all_futures_from_queue(self, event_loop, mock_burst_limiter):
mock_burst_limiter.throttle_task = None
futures = [event_loop.create_future() for _ in range(10)]
mock_burst_limiter.queue = list(futures)
mock_burst_limiter.close()
assert len(mock_burst_limiter.queue) == 0
def test_close_cancels_all_futures_pending_when_futures_pending(self, event_loop, mock_burst_limiter):
mock_burst_limiter.throttle_task = None
futures = [event_loop.create_future() for _ in range(10)]
mock_burst_limiter.queue = list(futures)
mock_burst_limiter.close()
for i, future in enumerate(futures):
assert future.cancelled(), f"future {i} was not cancelled"
def test_close_is_silent_when_no_futures_pending(self, mock_burst_limiter):
mock_burst_limiter.throttle_task = None
mock_burst_limiter.queue = []
mock_burst_limiter.close()
assert True, "passed successfully"
def test_close_cancels_throttle_task_if_running(self, event_loop, mock_burst_limiter):
task = event_loop.create_future()
mock_burst_limiter.throttle_task = task
mock_burst_limiter.close()
assert mock_burst_limiter.throttle_task is None, "task was not overwritten with None"
assert task.cancelled(), "throttle_task is not cancelled"
def test_close_when_closed(self, mock_burst_limiter):
# Double-running shouldn't do anything adverse.
mock_burst_limiter.close()
mock_burst_limiter.close()
class TestManualRateLimiter:
@pytest.mark.asyncio()
async def test_acquire_returns_completed_future_if_throttle_task_is_None(self, event_loop):
with rate_limits.ManualRateLimiter() as limiter:
limiter.throttle_task = None
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await limiter.acquire()
future.set_result.assert_called_once_with(None)
@pytest.mark.asyncio()
async def test_acquire_returns_incomplete_future_if_throttle_task_is_not_None(self, event_loop):
with rate_limits.ManualRateLimiter() as limiter:
limiter.throttle_task = event_loop.create_future()
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await limiter.acquire()
future.set_result.assert_not_called()
@pytest.mark.asyncio()
async def test_acquire_places_future_on_queue_if_throttle_task_is_not_None(self, event_loop):
with rate_limits.ManualRateLimiter() as limiter:
limiter.throttle_task = event_loop.create_future()
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
assert len(limiter.queue) == 0
await limiter.acquire()
assert len(limiter.queue) == 1
assert future in limiter.queue
future.set_result.assert_not_called()
@pytest.mark.asyncio()
async def test_throttle_cancels_existing_task(self):
with rate_limits.ManualRateLimiter() as limiter:
limiter.throttle_task = asyncio.get_running_loop().create_future()
old_task = limiter.throttle_task
limiter.throttle(0)
assert old_task.cancelled()
assert old_task is not limiter.throttle_task
@pytest.mark.asyncio()
async def test_throttle_schedules_throttle(self):
with hikari_test_helpers.mock_class_namespace(rate_limits.ManualRateLimiter, slots_=False)() as limiter:
limiter.unlock_later = mock.AsyncMock()
limiter.throttle(0)
await limiter.throttle_task
limiter.unlock_later.assert_called_once_with(0)
@pytest.mark.asyncio()
async def test_throttle_chews_queue_completing_futures(self, event_loop):
with rate_limits.ManualRateLimiter() as limiter:
futures = [event_loop.create_future() for _ in range(10)]
limiter.queue = list(futures)
await limiter.unlock_later(0.01)
for i, future in enumerate(futures):
assert future.done(), f"future {i} was not done"
@pytest.mark.asyncio()
async def test_throttle_sleeps_before_popping_queue(self, event_loop):
# GIVEN
slept_at = float("nan")
popped_at = []
async def mock_sleep(_):
nonlocal slept_at
slept_at = time.perf_counter()
class MockList(list):
def pop(self, _=-1):
popped_at.append(time.perf_counter())
return event_loop.create_future()
with hikari_test_helpers.mock_class_namespace(rate_limits.ManualRateLimiter, slots_=False)() as limiter:
with mock.patch("asyncio.sleep", wraps=mock_sleep):
limiter.queue = MockList()
# WHEN
await limiter.unlock_later(5)
# THEN
for i, pop_time in enumerate(popped_at):
assert slept_at < pop_time, f"future {i} popped before initial sleep"
@pytest.mark.asyncio()
async def test_throttle_clears_throttle_task(self, event_loop):
with rate_limits.ManualRateLimiter() as limiter:
limiter.throttle_task = event_loop.create_future()
await limiter.unlock_later(0)
assert limiter.throttle_task is None
class TestWindowedBurstRateLimiter:
@pytest.fixture()
def ratelimiter(self):
inst = hikari_test_helpers.mock_class_namespace(rate_limits.WindowedBurstRateLimiter, slots_=False)(
__name__, 3, 3
)
yield inst
with contextlib.suppress(Exception):
inst.close()
@pytest.mark.asyncio()
async def test_drip_if_not_throttled_and_not_ratelimited(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = None
ratelimiter.is_rate_limited = mock.Mock(return_value=False)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
ratelimiter.drip.assert_called_once_with()
future.set_result.assert_called_once_with(None)
@pytest.mark.asyncio()
async def test_no_drip_if_throttle_task_is_not_None(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = asyncio.get_running_loop().create_future()
ratelimiter.is_rate_limited = mock.Mock(return_value=False)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
ratelimiter.drip.assert_not_called()
@pytest.mark.asyncio()
async def test_no_drip_if_rate_limited(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = False
ratelimiter.is_rate_limited = mock.Mock(return_value=True)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
ratelimiter.drip.assert_not_called()
@pytest.mark.asyncio()
async def test_task_scheduled_if_rate_limited_and_throttle_task_is_None(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = None
ratelimiter.throttle = mock.AsyncMock()
ratelimiter.is_rate_limited = mock.Mock(return_value=True)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
assert ratelimiter.throttle_task is not None
ratelimiter.throttle.assert_called()
@pytest.mark.asyncio()
async def test_task_not_scheduled_if_rate_limited_and_throttle_task_not_None(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = event_loop.create_future()
old_task = ratelimiter.throttle_task
ratelimiter.is_rate_limited = mock.Mock(return_value=True)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
assert old_task is ratelimiter.throttle_task, "task was rescheduled, that shouldn't happen :("
@pytest.mark.asyncio()
async def test_future_is_added_to_queue_if_throttle_task_is_not_None(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = asyncio.get_running_loop().create_future()
ratelimiter.is_rate_limited = mock.Mock(return_value=False)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
await ratelimiter.acquire()
# use slice to prevent aborting test with index error rather than assertion error if this fails.
assert ratelimiter.queue[-1:] == [future]
@pytest.mark.asyncio()
async def test_future_is_added_to_queue_if_rate_limited(self, ratelimiter, event_loop):
ratelimiter.drip = mock.Mock()
ratelimiter.throttle_task = None
ratelimiter.is_rate_limited = mock.Mock(return_value=True)
future = MockFuture()
event_loop.create_future = mock.Mock(return_value=future)
try:
await ratelimiter.acquire()
# use slice to prevent aborting test with index error rather than assertion error if this fails.
assert ratelimiter.queue[-1:] == [future]
finally:
ratelimiter.throttle_task.cancel()
@pytest.mark.asyncio()
async def test_throttle_consumes_queue(self, event_loop):
with rate_limits.WindowedBurstRateLimiter(__name__, 0.01, 1) as rl:
rl.queue = [event_loop.create_future() for _ in range(15)]
old_queue = list(rl.queue)
await rl.throttle()
assert len(rl.queue) == 0
for i, future in enumerate(old_queue):
assert future.done(), f"future {i} was incomplete!"
@pytest.mark.asyncio()
@hikari_test_helpers.timeout(20)
@hikari_test_helpers.retry(5)
async def test_throttle_when_limited_sleeps_then_bursts_repeatedly(self, event_loop):
# Schedule concurrently but do not break our timeout.
# We should retry a few times, as CI runners may run too slowly if
# under load. As much as we try, this will always be time-relative,
# which is not a good test, but too much mocking is needed to obfuscate
# out that detail.
# TODO: find a better way of doing this?
await event_loop.run_in_executor(None, self._run_test_throttle_logic)
def _run_test_throttle_logic(self):
threads = [
threading.Thread(
target=self._run_test_throttle_logic_on_this_thread,
)
for _ in range(20)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def _run_test_throttle_logic_on_this_thread(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
event_loop.run_until_complete(self._run_test_throttle_logic_on_loop(event_loop))
finally:
event_loop.close()
@staticmethod
async def _run_test_throttle_logic_on_loop(event_loop):
limit = 2
period = 1.5
total_requests = int(period * limit * 2)
max_distance_within_window = 0.05
completion_times = []
logger = logging.getLogger(__name__)
def create_task(i):
logger.info("making task %s", i)
future = event_loop.create_future()
future.add_done_callback(lambda _: completion_times.append(time.perf_counter()))
return future
with rate_limits.WindowedBurstRateLimiter(__name__, period, limit) as rl:
futures = [create_task(i) for i in range(total_requests)]
rl.queue = list(futures)
rl.reset_at = time.perf_counter()
logger.info("throttling back")
await rl.throttle()
# die if we take too long...
logger.info("waiting for stuff to finish")
await asyncio.wait(futures, timeout=period * limit + period)
assert (
len(completion_times) == total_requests
), f"expected {total_requests} completions but got {len(completion_times)}"
# E203 - Whitespace before ":". Black reformats it
windows = [completion_times[i : i + limit] for i in range(0, total_requests, limit)] # noqa: E203
for i, window in enumerate(windows):
logger.info("window %s %s", i, window)
mode = statistics.mode(window)
for j, element in enumerate(window):
assert math.isclose(element, mode, abs_tol=max_distance_within_window), (
f"not close! windows[{i}][{j}], future {i * len(window) + j}, "
f"val {element}, mode {mode}, max diff {max_distance_within_window}"
)
assert len(windows) >= 3, "not enough windows to sample correctly"
assert len(windows[0]) > 1, "not enough datapoints per window to sample correctly"
for i in range(1, len(windows)):
previous_last = windows[i - 1][-1]
next_first = windows[i][0]
logger.info("intra-window index=%s value=%s versus index=%s value=%s", i - 1, previous_last, i, next_first)
assert math.isclose(next_first - previous_last, period, abs_tol=max_distance_within_window), (
f"distance between windows is not acceptable! {i - 1}={previous_last} {i}={next_first}, "
f"max diff = {max_distance_within_window}"
)
@pytest.mark.asyncio()
async def test_throttle_resets_throttle_task(self, event_loop):
with rate_limits.WindowedBurstRateLimiter(__name__, 0.01, 1) as rl:
rl.queue = [event_loop.create_future() for _ in range(15)]
rl.throttle_task = None
await rl.throttle()
assert rl.throttle_task is None
def test_get_time_until_reset_if_not_rate_limited(self):
with hikari_test_helpers.mock_class_namespace(rate_limits.WindowedBurstRateLimiter, slots_=False)(
__name__, 0.01, 1
) as rl:
rl.is_rate_limited = mock.Mock(return_value=False)
assert rl.get_time_until_reset(420) == 0.0
def test_get_time_until_reset_if_rate_limited(self):
with hikari_test_helpers.mock_class_namespace(rate_limits.WindowedBurstRateLimiter, slots_=False)(
__name__, 0.01, 1
) as rl:
rl.is_rate_limited = mock.Mock(return_value=True)
rl.reset_at = 420.4
assert rl.get_time_until_reset(69.8) == 420.4 - 69.8
def test_is_rate_limited_when_rate_limit_expired_resets_self(self):
with rate_limits.WindowedBurstRateLimiter(__name__, 403, 27) as rl:
now = 180
rl.reset_at = 80
rl.remaining = 4
assert not rl.is_rate_limited(now)
assert rl.reset_at == now + 403
assert rl.remaining == 27
@pytest.mark.parametrize("remaining", [-1, 0, 1])
def test_is_rate_limited_when_rate_limit_not_expired_only_returns_False(self, remaining):
with rate_limits.WindowedBurstRateLimiter(__name__, 403, 27) as rl:
now = 420
rl.reset_at = now + 69
rl.remaining = remaining
assert rl.is_rate_limited(now) is (remaining <= 0)
class TestExponentialBackOff:
def test___init___raises_on_too_large_int_base(self):
base = int(sys.float_info.max) + int(sys.float_info.max * 1 / 100)
with pytest.raises(ValueError, match="int too large to be represented as a float"):
rate_limits.ExponentialBackOff(base=base)
def test___init___raises_on_too_large_int_maximum(self):
maximum = int(sys.float_info.max) + int(sys.float_info.max * 1 / 200)
with pytest.raises(ValueError, match="int too large to be represented as a float"):
rate_limits.ExponentialBackOff(maximum=maximum)
def test___init___raises_on_too_large_int_jitter_multiplier(self):
jitter_multiplier = int(sys.float_info.max) + int(sys.float_info.max * 1 / 300)
with pytest.raises(ValueError, match="int too large to be represented as a float"):
rate_limits.ExponentialBackOff(jitter_multiplier=jitter_multiplier)
def test___init___raises_on_not_finite_base(self):
with pytest.raises(ValueError, match="base must be a finite number"):
rate_limits.ExponentialBackOff(base=float("inf"))
def test___init___raises_on_not_finite_maximum(self):
with pytest.raises(ValueError, match="maximum must be a finite number"):
rate_limits.ExponentialBackOff(maximum=float("nan"))
def test___init___raises_on_not_finite_jitter_multiplier(self):
with pytest.raises(ValueError, match="jitter_multiplier must be a finite number"):
rate_limits.ExponentialBackOff(jitter_multiplier=float("inf"))
def test_reset(self):
eb = rate_limits.ExponentialBackOff()
eb.increment = 10
eb.reset()
assert eb.increment == 0
@pytest.mark.parametrize(("iteration", "backoff"), enumerate((1, 2, 4, 8, 16, 32)))
def test_increment_linear(self, iteration, backoff):
eb = rate_limits.ExponentialBackOff(2, 64, 0)
for _ in range(iteration):
next(eb)
assert next(eb) == backoff
def test_increment_raises_on_numerical_limitation(self):
power = math.log(sys.float_info.max, 5) + 0.5
eb = rate_limits.ExponentialBackOff(
base=5, maximum=sys.float_info.max, jitter_multiplier=0.0, initial_increment=power
)
assert next(eb) == sys.float_info.max
def test_increment_maximum(self):
max_bound = 64
eb = rate_limits.ExponentialBackOff(2, max_bound, 0)
iterations = math.ceil(math.log2(max_bound))
for _ in range(iterations):
next(eb)
assert next(eb) == max_bound
def test_increment_does_not_increment_when_on_maximum(self):
eb = rate_limits.ExponentialBackOff(2, 32, initial_increment=5, jitter_multiplier=0)
assert eb.increment == 5
assert next(eb) == 32
assert eb.increment == 5
@pytest.mark.parametrize(("iteration", "backoff"), enumerate((1, 2, 4, 8, 16, 32)))
def test_increment_jitter(self, iteration, backoff):
abs_tol = 1
eb = rate_limits.ExponentialBackOff(2, 64, abs_tol)
for _ in range(iteration):
next(eb)
assert math.isclose(next(eb), backoff, abs_tol=abs_tol)
def test_iter_returns_self(self):
eb = rate_limits.ExponentialBackOff(2, 64, 123)
assert iter(eb) is eb
| 40.429658
| 119
| 0.67615
|
7952e8f694241643e0e7988d971554f6b4d638d0
| 42
|
py
|
Python
|
src/pyecom/__init__.py
|
xavibj/pyecom
|
f66aa8db9ec0edb77d3f707a72ac0b7594002c7b
|
[
"MIT"
] | null | null | null |
src/pyecom/__init__.py
|
xavibj/pyecom
|
f66aa8db9ec0edb77d3f707a72ac0b7594002c7b
|
[
"MIT"
] | null | null | null |
src/pyecom/__init__.py
|
xavibj/pyecom
|
f66aa8db9ec0edb77d3f707a72ac0b7594002c7b
|
[
"MIT"
] | null | null | null |
__version__ = "0.1.1"
from .main import *
| 14
| 21
| 0.666667
|
7952e9370ca246690c07443dd0649685a5fcbfd0
| 18,399
|
py
|
Python
|
datasets/spoken_word_dataset.py
|
lwang114/InformationQuantizer
|
45419140708e612495fd324a9e5724306d4d4129
|
[
"MIT"
] | null | null | null |
datasets/spoken_word_dataset.py
|
lwang114/InformationQuantizer
|
45419140708e612495fd324a9e5724306d4d4129
|
[
"MIT"
] | null | null | null |
datasets/spoken_word_dataset.py
|
lwang114/InformationQuantizer
|
45419140708e612495fd324a9e5724306d4d4129
|
[
"MIT"
] | null | null | null |
import torch
import torchaudio
import torchvision
from torchvision import transforms
import nltk
from nltk.stem import WordNetLemmatizer
from collections import defaultdict
# from allennlp.predictors.predictor import Predictor
# import allennlp_models.structured_prediction
import numpy as np
import re
import os
import json
from tqdm import tqdm
from itertools import combinations
from copy import deepcopy
from PIL import Image
from scipy import signal
from kaldiio import ReadHelper
# dep_parser = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/biaffine-dependency-parser-ptb-2020.04.06.tar.gz")
# dep_parser._model = dep_parser._model.cuda()
# lemmatizer = WordNetLemmatizer()
UNK = "###UNK###"
NULL = "###NULL###"
BLANK = "###BLANK###"
SIL = "SIL"
IGNORED_TOKENS = ["GARBAGE", "+BREATH+", "+LAUGH+", "+NOISE+"]
lemmatizer = WordNetLemmatizer()
def log_normalize(x):
x.add_(1e-6).log_()
mean = x.mean()
std = x.std()
return x.sub_(mean).div_(std + 1e-6)
def fix_embedding_length(emb, L, padding=0):
size = emb.size()[1:]
if emb.size(0) < L:
if padding == 0:
pad = torch.zeros((L-emb.size(0),)+size, dtype=emb.dtype)
else:
pad = padding*torch.ones((L-emb.size(0),)+size, dtype=emb.dtype)
emb = torch.cat([emb, pad], dim=0)
else:
emb = emb[:L]
return emb
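# e.g. fix_embedding_length(torch.zeros(3, 5), 6) zero-pads to shape (6, 5), while an
# input longer than L is truncated to its first L rows.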
def collate_fn_spoken_word(batch):
audios = [t[0] for t in batch]
phone_labels = [t[1] for t in batch]
labels = [t[2] for t in batch]
input_masks = [t[3] for t in batch]
phone_masks = [t[4] for t in batch]
word_masks = [t[5] for t in batch]
indices = [t[6] for t in batch]
if isinstance(audios[0], list):
audios = [
torch.nn.utils.rnn.pad_sequence(audio)\
for audio in audios
]
input_masks = [
torch.nn.utils.rnn.pad_sequence(input_mask)
for input_mask in input_masks
]
audios = torch.nn.utils.rnn.pad_sequence(audios, batch_first=True) # (bsz, n_seg, n_pos, d)
input_masks = torch.nn.utils.rnn.pad_sequence(input_masks, batch_first=True) # (bsz, n_seg, n_pos)
audios = audios.permute(0, 2, 1, 3)
input_masks = input_masks.permute(0, 2, 1)
else:
audios = torch.nn.utils.rnn.pad_sequence(audios, batch_first=True)
input_masks = torch.nn.utils.rnn.pad_sequence(input_masks, batch_first=True)
phone_labels = torch.nn.utils.rnn.pad_sequence(phone_labels, batch_first=True)
labels = torch.stack(labels)
phone_masks = torch.nn.utils.rnn.pad_sequence(phone_masks, batch_first=True)
return audios, phone_labels, labels, input_masks, phone_masks, indices
def embed(feat, method='average'):
if method == 'average':
return feat.mean(0)
elif method == 'resample':
new_feat = signal.resample(feat.detach().numpy(), 4)
return torch.FloatTensor(new_feat.flatten())
class SpokenWordDataset(torch.utils.data.Dataset):
def __init__(
self, data_path,
preprocessor, split,
splits = {
"train": ["train-clean-100", "train-clean-360"],
"validation": ["dev-clean"],
"test": ["dev-clean"],
},
augment=False,
use_segment=False,
audio_feature="cpc",
phone_label="predicted",
ds_method="average",
sample_rate=16000,
min_class_size=50,
n_overlap=0,
debug=False
):
self.preprocessor = preprocessor
if debug:
splits['train'] = [splits['train'][0]]
self.splits = splits[split]
self.data_path = data_path
self.use_segment = use_segment
self.ds_method = ds_method
self.sample_rate = sample_rate
self.n_overlap = n_overlap
self.debug = debug
data = []
for sp in self.splits:
# Load data paths to audio and visual features
examples = load_data_split(preprocessor.dataset_name,
data_path, sp,
min_class_size=min_class_size,
audio_feature=audio_feature,
phone_label=phone_label,
debug=debug)
data.extend(examples)
print("Number of {} audio files = {}".format(split, len(examples)))
# Set up transforms
self.audio_transforms = [
torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate, win_length=sample_rate * 25 // 1000,
n_mels=preprocessor.num_features,
hop_length=sample_rate * 10 // 1000,
),
torchvision.transforms.Lambda(log_normalize),
]
if augment:
augmentation = [
torchaudio.transforms.FrequencyMasking(27, iid_masks=True),
torchaudio.transforms.FrequencyMasking(27, iid_masks=True),
torchaudio.transforms.TimeMasking(100, iid_masks=True),
torchaudio.transforms.TimeMasking(100, iid_masks=True),
]
self.audio_transforms.extend(augmentation)
self.audio_transforms = torchvision.transforms.Compose(self.audio_transforms)
audio = [example["audio"] for example in data]
text = [example["text"] for example in data]
phonemes = [example["phonemes"] for example in data]
true_phonemes = [example["true_phonemes"] for example in data]
self.dataset = [list(item) for item in zip(audio, text, phonemes, true_phonemes)]
self.class_to_indices = defaultdict(list)
for idx, item in enumerate(self.dataset):
self.class_to_indices[item[1]].append(idx)
self.audio_feature_type = audio_feature
def load_audio(self, audio_file):
if self.audio_feature_type in ["cpc", "cpc_big"]:
if audio_file.split('.')[-1] == "txt":
audio = np.loadtxt(audio_file)
else:
with ReadHelper(f"ark: gunzip -c {audio_file} |") as ark_f:
for k, audio in ark_f:
continue
inputs = torch.FloatTensor(audio)
elif self.audio_feature_type in ["bnf", "bnf+cpc"]:
if audio_file.split('.')[-1] == "txt":
audio = np.loadtxt(audio_file)
else:
with ReadHelper(f"ark: gunzip -c {audio_file} |") as ark_f:
for k, audio in ark_f:
continue
if self.audio_feature_type == "bnf+cpc":
cpc_feat = np.loadtxt(audio_file.replace("bnf", "cpc"))
feat_len = min(audio.shape[0], cpc_feat.shape[0])
audio = np.concatenate([audio[:feat_len], cpc_feat[:feat_len]], axis=-1)
inputs = torch.FloatTensor(audio)
elif self.audio_feature_type in ['vq-wav2vec', 'wav2vec', 'wav2vec2']:
audio, _ = torchaudio.load(audio_file)
inputs = audio.squeeze(0)
else: Exception(f"Audio feature type {self.audio_feature_type} not supported")
input_mask = torch.ones(inputs.size(0))
return inputs, input_mask
def segment(self, feat, segments,
method="average"):
"""
Args:
feat : (num. of frames, feature dim.)
segments : a list of dicts of phoneme boundaries
Returns:
sfeat : (max num. of segments, feature dim.)
mask : (max num. of segments,)
"""
sfeats = []
word_begin = segments[0]["begin"]
dur = segments[-1]["end"] - segments[0]["begin"]
for i, segment in enumerate(segments):
if segment["text"] == SIL:
continue
phn = segment["text"]
begin = int(round((segment["begin"]-word_begin)*100, 3))
end = int(round((segment["end"]-word_begin)*100, 3))
if self.n_overlap > 0:
begin = max(begin - self.n_overlap, 0)
                end = min(end + self.n_overlap, feat.size(0))
dur = max(end - begin, 1)
if begin >= feat.size(0):
print(f'Warning: ({phn}, {begin}, {end}) begin idx {begin} >= feature size {feat.size(0)}')
segment_feat = feat[-1]
elif begin != end:
segment_feat = embed(feat[begin:end], method=method)
else:
segment_feat = embed(feat[begin:end+1], method=method)
if torch.any(torch.isnan(segment_feat)):
print(f'Bad segment feature for feature of size {feat.size()}, begin {begin}, end {end}')
sfeats.append(segment_feat)
sfeat = torch.stack(sfeats)
if method == "no-op":
mask = torch.zeros(len(feat), len(sfeats))
else:
mask = torch.ones(len(sfeats))
return sfeat, mask
def unsegment(self, sfeat, segments):
"""
Args:
sfeat : (num. of segments, feature dim.)
segments : a list of dicts of phoneme boundaries
Returns:
feat : (num. of frames, feature dim.)
"""
if sfeat.ndim == 1:
sfeat = sfeat.unsqueeze(-1)
word_begin = segments[0]['begin']
dur = segments[-1]["end"] - segments[0]["begin"]
nframes = int(round(dur * 100, 3))
feat = torch.zeros((nframes, *sfeat.size()[1:]))
for i, segment in enumerate(segments):
if segment["text"] == SIL:
continue
begin = int(round((segment["begin"]-word_begin)*100, 3))
end = int(round((segment["end"]-word_begin)*100, 3))
if i >= sfeat.size(0):
break
if begin != end:
feat[begin:end] = sfeat[i]
else:
feat[begin:end+1] = sfeat[i]
return feat.squeeze(-1)
def __getitem__(self, idx):
audio_file, label, phoneme_dicts, _ = self.dataset[idx]
audio_inputs, input_mask = self.load_audio(audio_file)
if self.use_segment:
audio_inputs, input_mask = self.segment(audio_inputs,
phoneme_dicts,
method=self.ds_method)
phonemes = [phn_dict["text"] for phn_dict in phoneme_dicts]
word_labels = self.preprocessor.to_word_index([label])
phone_labels = self.preprocessor.to_index(phonemes)
if self.use_segment:
word_mask = torch.zeros(1, len(phoneme_dicts), len(phoneme_dicts))
else:
word_mask = torch.zeros(1, len(audio_inputs), len(audio_inputs))
for t in range(len(phoneme_dicts)):
word_mask[0, t, t] = 1.
phone_mask = torch.ones(len(phonemes))
return audio_inputs,\
phone_labels,\
word_labels,\
input_mask,\
phone_mask,\
word_mask,\
idx
def __len__(self):
return len(self.dataset)
class SpokenWordPreprocessor:
def __init__(
self,
dataset_name,
data_path,
num_features,
splits = {
"train": ["train-clean-100", "train-clean-360"],
"validation": ["dev-clean"],
"test": ["dev-clean"]
},
tokens_path=None,
lexicon_path=None,
use_words=False,
prepend_wordsep=False,
audio_feature="mfcc",
phone_label="predicted",
sample_rate=16000,
min_class_size=50,
ignore_index=-100,
use_blank=True,
debug=False,
):
self.dataset_name = dataset_name
self.data_path = data_path
self.num_features = num_features
self.ignore_index = ignore_index
self.min_class_size = min_class_size
self.use_blank = use_blank
self.wordsep = " "
self._prepend_wordsep = prepend_wordsep
if debug:
splits['train'] = [splits['train'][0]]
metadata_file = os.path.join(data_path, f"{dataset_name}.json")
data = []
for split_type, spl in splits.items():
if split_type == 'test_oos':
continue
for sp in spl:
data.extend(load_data_split(dataset_name,
data_path, sp,
audio_feature=audio_feature,
phone_label=phone_label,
min_class_size=self.min_class_size,
debug=debug))
visual_words = set()
tokens = set()
for ex in data:
visual_words.add(ex["text"])
for phn in ex["phonemes"]:
if phone_label == "groundtruth" and not "phoneme" in phn["text"]:
phn["text"] = re.sub(r"[0-9]", "", phn["text"])
tokens.add(phn["text"])
self.tokens = sorted(tokens)
self.visual_words = sorted(visual_words)
if self.use_blank:
self.tokens = [BLANK]+self.tokens
self.visual_words = [BLANK]+self.visual_words
self.tokens_to_index = {t:i for i, t in enumerate(self.tokens)}
self.words_to_index = {t:i for i, t in enumerate(self.visual_words)}
print(f"Preprocessor: number of phone classes: {self.num_tokens}")
print(f"Preprocessor: number of visual word classes: {self.num_visual_words}")
@property
def num_tokens(self):
return len(self.tokens)
@property
def num_visual_words(self):
return len(self.visual_words)
def to_index(self, sent):
tok_to_idx = self.tokens_to_index
return torch.LongTensor([tok_to_idx.get(t, 0) for t in sent])
def to_word_index(self, sent):
tok_to_idx = self.words_to_index
return torch.LongTensor([tok_to_idx.get(t, 0) for t in sent])
def to_text(self, indices):
text = []
for t, i in enumerate(indices):
if (i == 0) and (t != 0):
prev_token = text[t-1]
text.append(prev_token)
else:
text.append(self.tokens[i])
return text
def to_word_text(self, indices):
return [self.visual_words[i] for i in indices]
def tokens_to_word_text(self, indices):
T = len(indices)
path = [self.visual_words[i] for i in indices]
sent = []
for i in range(T):
if path[i] == BLANK:
continue
elif (i != 0) and (path[i] == path[i-1]):
continue
else:
sent.append(path[i])
return sent
def tokens_to_text(self, indices):
T = len(indices)
path = self.to_text(indices)
sent = []
for i in range(T):
if path[i] == BLANK:
continue
elif (i != 0) and (path[i] == path[i-1]):
continue
else:
sent.append(path[i])
return sent
def load_data_split(dataset_name,
data_path, split,
audio_feature="mfcc",
phone_label="predicted",
min_class_size=50,
max_keep_size=1000,
debug=False):
"""
Returns:
      examples : a list of mappings of
          { "audio" : path to the audio or feature file,
            "text" : the lemmatized class label,
            "phonemes" : the phoneme segments selected by `phone_label`,
            "true_phonemes" : the ground-truth phoneme segments
          }
"""
examples = []
if os.path.exists(os.path.join(data_path, f'{split}.json')):
word_files = [f'{split}.json']
else:
word_files = [word_file for word_file in os.listdir(data_path) if word_file.split('.')[-1] == 'json']
for word_file in word_files:
word_f = open(os.path.join(data_path, word_file), "r")
label_counts = dict()
for line in word_f:
if debug and len(examples) >= 100:
break
word = json.loads(line.rstrip("\n"))
label = lemmatizer.lemmatize(word["label"].lower())
if not label in label_counts:
label_counts[label] = 1
else:
label_counts[label] += 1
if label_counts[label] > max_keep_size:
continue
audio_path = None
audio_id = word["audio_id"]
word_id = word['word_id']
if audio_feature in ["mfcc", "vq-wav2vec", "wav2vec2", "wav2vec"]:
audio_path = os.path.join(data_path, split, f"{audio_id}_{word_id}.wav")
if not os.path.exists(audio_path):
word_id = int(word_id)
audio_file = f"{audio_id}_{word_id:04d}.wav"
audio_path = os.path.join(data_path, split, audio_file)
elif audio_feature in ["cpc", "cpc_big"]:
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}_txt/{audio_id}_{word_id}.txt")
if not os.path.exists(audio_path):
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}/{audio_id}_{word_id}.ark.gz")
if not os.path.exists(audio_path):
word_id = int(word_id)
audio_file = f"{audio_id}_{word_id:04d}.txt"
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}_txt", audio_file)
elif audio_feature in ["bnf", "bnf+cpc"]:
audio_file = f"{audio_id}_{word_id}.txt"
audio_path = os.path.join(data_path, f"../{dataset_name}_bnf_txt", audio_file)
else: Exception(f"Audio feature type {audio_feature} not supported")
true_phonemes = word["phonemes"]
if "children" in true_phonemes:
true_phonemes = [phn for phn in true_phonemes["children"] if phn["text"] != SIL]
if len(true_phonemes) == 0:
continue
for phn_idx in range(len(true_phonemes)): # In Mboshi, each phoneme is written as ``phoneme{index}''
if not "phoneme" in true_phonemes[phn_idx]["text"]:
true_phonemes[phn_idx]["text"] = re.sub(r"[0-9]", "", true_phonemes[phn_idx]["text"])
noisy = False
for phn in true_phonemes:
if phn["text"] in IGNORED_TOKENS or (phn["text"][0] == "+"):
noisy = True
break
if noisy:
continue
dur = round(true_phonemes[-1]['end'] - true_phonemes[0]['begin'], 3)
phonemes = None
if phone_label == "groundtruth":
phonemes = deepcopy(true_phonemes)
elif phone_label == "multilingual":
phonemes = [phn for phn in word["predicted_segments_multilingual"] if phn["text"] != SIL]
elif phone_label == "multilingual_phones":
phonemes = deepcopy(word["multilingual_phones"])
elif phone_label == "predicted":
phonemes = [phn for phn in word["predicted_segments"] if phn["text"] != SIL]
elif phone_label == "predicted_wav2vec2":
if not "predicted_segments_wav2vec2" in word:
continue
phonemes = [phn for phn in word["predicted_segments_wav2vec2"] if phn["text"] != SIL]
else:
raise ValueError(f"Invalid phone label type: {phone_label}")
phonemes = [phn for phn in phonemes if round(phn['end'] - phonemes[0]['begin'], 3) <= dur]
if not len(phonemes):
print(f'Skip example without segments: {phonemes}')
continue
if phonemes[-1]['end'] - phonemes[0]['begin'] != dur:
phonemes[-1]['end'] = phonemes[0]['begin'] + dur
example = {"audio": audio_path,
"text": label,
"phonemes": phonemes,
"true_phonemes": true_phonemes}
examples.append(example)
word_f.close()
return examples
if __name__ == "__main__":
    # NOTE: the original call omitted the required dataset_name argument;
    # "flickr30k" below is inferred from the data path and may need adjusting.
    preproc = SpokenWordPreprocessor(dataset_name="flickr30k",
                                     data_path="/ws/ifp-53_2/hasegawa/lwang114/data/flickr30k/",
                                     num_features=80)
| 35.179732
| 140
| 0.613946
|
7952eaa0e9016cfbd529661ea6ed10b4951e3805
| 778
|
py
|
Python
|
examples/python/kitti_pipeline.py
|
PRBonn/voxblox_pybind
|
69337cff825260434a908df13aae2bc04ea3f8da
|
[
"MIT"
] | 7
|
2022-02-09T13:46:20.000Z
|
2022-02-10T02:29:38.000Z
|
examples/python/kitti_pipeline.py
|
PRBonn/voxblox_pybind
|
69337cff825260434a908df13aae2bc04ea3f8da
|
[
"MIT"
] | null | null | null |
examples/python/kitti_pipeline.py
|
PRBonn/voxblox_pybind
|
69337cff825260434a908df13aae2bc04ea3f8da
|
[
"MIT"
] | 2
|
2022-02-09T13:07:10.000Z
|
2022-02-09T16:12:56.000Z
|
#!/usr/bin/env python3
# @file kitti_pipeline.py
# @author Ignacio Vizzo [ivizzo@uni-bonn.de]
#
# Copyright (c) 2021 Ignacio Vizzo, all rights reserved
import argh
from kitti_dataset import KITTIOdometryDataset as Dataset
from tsdf_pipeline import TSDFPipeline
def main(
kitti_root_dir: str,
sequence: int = 0,
config: str = "config/kitti.yaml",
n_scans: int = -1,
jump: int = 0,
visualize: bool = False,
):
"""Help here!"""
map_name = f"kitti_{sequence}_scans_{str(n_scans)}"
dataset = Dataset(kitti_root_dir, sequence, config)
pipeline = TSDFPipeline(dataset, config, jump, n_scans, map_name)
pipeline.run()
pipeline.draw_mesh() if visualize else None
if __name__ == "__main__":
argh.dispatch_command(main)
| 25.933333
| 69
| 0.690231
|
7952eab8feb8e3fa120ac79428ec0286d3a9abc8
| 1,601
|
py
|
Python
|
tests/test_linter.py
|
noma4i/SublimeLinter3_backup
|
479feb8acb3847b71c24f88835f091a023948b17
|
[
"MIT"
] | null | null | null |
tests/test_linter.py
|
noma4i/SublimeLinter3_backup
|
479feb8acb3847b71c24f88835f091a023948b17
|
[
"MIT"
] | 1
|
2021-09-08T11:18:17.000Z
|
2021-09-08T11:18:17.000Z
|
tests/test_linter.py
|
noma4i/SublimeLinter3_backup
|
479feb8acb3847b71c24f88835f091a023948b17
|
[
"MIT"
] | 1
|
2021-09-08T09:43:47.000Z
|
2021-09-08T09:43:47.000Z
|
# coding=utf8
#
# test_linter.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Joshua Hagins
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module tests functions in the lint.util module."""
from mock import MagicMock
class TestLinter:
""" Class for testing Linter class """
def mock_view_window(self):
mwindow = MagicMock('window')
mview = MagicMock('view')
mview.window = MagicMock(return_value=mwindow)
mview.project_file_name = MagicMock(return_value='ppp')
return mview, mwindow
def test_replace_settings_tokens__no_replace(self):
""" Testing if can leave settings without changes if no tokens match """
from lint import linter
mview, mwindow = self.mock_view_window()
m = linter.Linter(mview, mwindow)
settings = {'ignore_match': {'rb': ['.*unexpected.*end.*', 'some error']}}
m.replace_settings_tokens(settings)
assert settings == {'ignore_match': {'rb': ['.*unexpected.*end.*', 'some error']}}
def test_replace_settings_tokens__replace(self):
""" Testing if can leave settings without changes if token matches """
from lint import linter
mview, mwindow = self.mock_view_window()
m = linter.Linter(mview, mwindow)
settings = {'ignore_match': {'rb': ['.*unexpected.*end.*', '${sublime} error']}}
m.replace_settings_tokens(settings)
assert settings == {'ignore_match': {'rb': ['.*unexpected.*end.*', 'mocked_sublime_packages_path error']}}
| 33.354167
| 114
| 0.662711
|
7952eb7c6fc70c7fa57bceb81769e6bc2c6286fa
| 1,313
|
py
|
Python
|
ieeextreme/x10_memory_management.py
|
ieeeunswsb/cpworkshop
|
8da33deb295f4ca04ec025adc510d67a65d3d56c
|
[
"MIT"
] | 1
|
2019-07-09T14:40:18.000Z
|
2019-07-09T14:40:18.000Z
|
ieeextreme/x10_memory_management.py
|
ieeeunswsb/cpworkshop
|
8da33deb295f4ca04ec025adc510d67a65d3d56c
|
[
"MIT"
] | null | null | null |
ieeextreme/x10_memory_management.py
|
ieeeunswsb/cpworkshop
|
8da33deb295f4ca04ec025adc510d67a65d3d56c
|
[
"MIT"
] | null | null | null |
# from math import floor
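# Reads one line with the number of test cases; each case gives
# "pages page_size memory_accesses" followed by one accessed address per line.
# For every case the loop below simulates FIFO and LRU page replacement, counts
# the swaps each policy performs, and prints "yes fifo lru" when LRU needs
# strictly fewer swaps than FIFO, otherwise "no fifo lru".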
answers = []
test_cases = int(input())
for i in range(test_cases):
pages, page_size, memory_accesses = map(int, input().split(" "))
fifo_swaps = 0
lru_swaps = 0
lru_mem = []
fifo_mem = []
for j in range(memory_accesses):
address_accessed = int(input())
page_num = address_accessed // page_size
# first deal with fifo
if page_num not in fifo_mem:
if len(fifo_mem) < pages:
fifo_mem.append(page_num)
else:
# print(f"Swap page {fifo_mem[0]} with page {page_num}")
del fifo_mem[0]
fifo_mem.append(page_num)
fifo_swaps += 1
# print(fifo_mem)
# now deal with lru
if page_num not in lru_mem:
if len(lru_mem) < pages:
lru_mem = [page_num] + lru_mem
else:
del lru_mem[-1]
lru_mem = [page_num] + lru_mem
lru_swaps += 1
else:
del lru_mem[lru_mem.index(page_num)]
lru_mem = [page_num] + lru_mem
if lru_swaps < fifo_swaps:
answers.append(f"yes {fifo_swaps} {lru_swaps}")
else:
answers.append(f"no {fifo_swaps} {lru_swaps}")
for ans in answers:
print(ans)
| 26.26
| 72
| 0.538462
|
7952ecd98f17ddf979b725287530c9581f1bbf37
| 33,956
|
py
|
Python
|
dataset.py
|
etraiger/PCWG
|
e66441df98dc712d3b9915aaf2c6f0d7feb7e52d
|
[
"MIT"
] | 1
|
2019-03-18T17:05:27.000Z
|
2019-03-18T17:05:27.000Z
|
dataset.py
|
etraiger/PCWG
|
e66441df98dc712d3b9915aaf2c6f0d7feb7e52d
|
[
"MIT"
] | null | null | null |
dataset.py
|
etraiger/PCWG
|
e66441df98dc712d3b9915aaf2c6f0d7feb7e52d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import datetime
import math
import configuration
import rews
import binning
import warnings
warnings.simplefilter('ignore', np.RankWarning)
def getSeparatorValue(separator):
try:
return {"TAB":"\t",
"SPACE":" ",
"COMMA": ",",
"SEMI-COLON":";"}[separator.upper()]
except:
raise Exception("Unkown separator: '%s'" % separator)
def getDecimalValue(decimal):
try:
return {"FULL STOP":".",
"COMMA":","}[decimal.upper()]
except:
raise Exception("Unkown decimal: '%s'" % decimal)
class DeviationMatrix(object):
def __init__(self,deviationMatrix,countMatrix):
self.matrix = deviationMatrix
self.count = countMatrix
class CalibrationBase:
def __init__(self, x, y):
self.x = x
self.y = y
self.requiredColumns = [self.x, self.y]
def variance(self, df, col):
return ((df[col].mean() - df[col]) ** 2.0).sum()
def covariance(self, df, colA, colB):
return df[[colA,colB]].cov()[colA][colB] # assumes unbiased estimator (normalises with N-1)
def sigA(self,df,slope, intercept, count):
sumPredYfromX = sum((df[self.y] - (intercept + df[self.x]*slope ))**2)
sumX = (df[self.x]).sum()
sumXX = (df[self.x]**2).sum()
return ((sumPredYfromX/(count-2))*(sumXX/(count*sumXX - sumX**2)))**0.5
def sigB(self,df,slope, intercept, count):
sumPredYfromX = sum((df[self.y] - (intercept + df[self.x]*slope ))**2)
sumX = (df[self.x]).sum()
sumXX = (df[self.x]**2).sum()
return ((sumPredYfromX/(count-2))/(count*sumXX - sumX**2))**0.5
def mean(self, df, col):
return df[col].mean()
def intercept(self, df, slope):
return self.mean(df, self.y) - slope * self.mean(df, self.x)
class York(CalibrationBase):
def covariance(self, df, colA, colB):
return ((df[colA].mean() - df[colA]) * (df[colB].mean() - df[colB])).sum()
def __init__(self, x, y, timeStepInSeconds, df):
movingAverageWindow = self.calculateMovingAverageWindow(timeStepInSeconds)
self.xRolling = "xRolling"
self.yRolling = "yRolling"
self.xDiffSq = "xDiffSq"
self.yDiffSq = "yDiffSq"
df[self.xRolling] = pd.rolling_mean(df[x], window = movingAverageWindow, min_periods = 1)
df[self.yRolling] = pd.rolling_mean(df[y], window = movingAverageWindow, min_periods = 1)
df[self.xDiffSq] = ((df[x] - df[self.xRolling])** 2.0)
df[self.yDiffSq] = ((df[y] - df[self.yRolling])** 2.0) # this needed in uncertainty?
CalibrationBase.__init__(self, x, y)
self.requiredColumns += [self.xDiffSq, self.yDiffSq]
def calculateMovingAverageWindow(self, timeStepInSeconds):
movingAverageMultiplier = 3
        minimumMovingAverageWindowInSeconds = movingAverageMultiplier * 60 * 60
        movingAverageWindowInSeconds = max([minimumMovingAverageWindowInSeconds, movingAverageMultiplier * timeStepInSeconds])
        if movingAverageWindowInSeconds % timeStepInSeconds != 0:
            raise Exception("Cannot calculate moving average window. Moving average window (%ds) is not integer multiple of timestep (%ds)" % (movingAverageWindowInSeconds, timeStepInSeconds))
        movingAverageWindow = movingAverageWindowInSeconds / timeStepInSeconds
return movingAverageWindow
def slope(self, df):
alpha = self.calculateAlpha(df)
varianceX = self.variance(df, self.x)
varianceY = self.variance(df, self.y)
covarianceXY = self.covariance(df, self.x, self.y)
gradientNumerator = math.sin(alpha) * varianceY + math.cos(alpha) * covarianceXY
gradientDenominator = math.sin(alpha) * covarianceXY + math.cos(alpha) * varianceX
return (gradientNumerator / gradientDenominator)
def calculateAlpha(self, df):
xYorkVariance = df[self.xDiffSq].dropna().sum()
yYorkVariance = df[self.yDiffSq].dropna().sum()
covarianceXY = self.covariance(df, self.x, self.y)
varianceX = self.variance(df, self.x)
print covarianceXY,varianceX,xYorkVariance
return math.atan2(covarianceXY ** 2.0 / varianceX ** 2.0 * xYorkVariance, yYorkVariance)
class RatioOfMeans(CalibrationBase):
def slope(self, df):
return self.mean(df, self.y) / self.mean(df, self.x)
class LeastSquares(CalibrationBase):
def _slope(self, df):
varianceX = self.variance(df, self.x)
covarianceXY = self.covariance(df, self.x, self.y)
return covarianceXY ** 2.0 / varianceX ** 2.0
def slope(self, df):
A =np.vstack([df[self.x].as_matrix(), np.ones(len(df))]).T
slope, residual, rank, s = np.linalg.lstsq(A, df[self.y])
return slope[0]
class SiteCalibrationCalculator:
def __init__(self, directionBinColumn, valueColumn, calibrationSectorDataframe, actives = None):
self.calibrationSectorDataframe = calibrationSectorDataframe
self.valueColumn = valueColumn
self.directionBinColumn = directionBinColumn
if actives != None:
activeSectors = []
for direction in actives:
if actives[direction]:
activeSectors.append(int(direction))
self.calibrationSectorDataframe = self.calibrationSectorDataframe.loc[activeSectors,:]
self.calibrationSectorDataframe['SpeedUpAt10'] = (10*self.calibrationSectorDataframe['Slope'] + self.calibrationSectorDataframe['Offset'])/10.0
self.IECLimitCalculator()
def turbineValue(self, row):
directionBin = row[self.directionBinColumn]
if np.isnan(directionBin): return np.nan
if not directionBin in self.calibrationSectorDataframe.index: return np.nan
value = row[self.valueColumn]
if np.isnan(value): return np.nan
return self.calibrate(directionBin, value)
def calibrate(self, directionBin, value):
return self.calibrationSectorDataframe['Offset'][directionBin] + self.calibrationSectorDataframe['Slope'][directionBin] * value
def IECLimitCalculator(self):
if len(self.calibrationSectorDataframe.index) == 36 and 'vRatio' in self.calibrationSectorDataframe.columns:
self.calibrationSectorDataframe['pctSpeedUp'] = (self.calibrationSectorDataframe['vRatio']-1)*100
self.calibrationSectorDataframe['LowerLimit'] = pd.Series(data=np.roll(((self.calibrationSectorDataframe['vRatio']-1)*100)-2.0,1),index=self.calibrationSectorDataframe.index)
self.calibrationSectorDataframe['UpperLimit'] = pd.Series(data=np.roll(((self.calibrationSectorDataframe['vRatio']-1)*100)+2.0,1),index=self.calibrationSectorDataframe.index)
self.calibrationSectorDataframe['IECValid'] = np.logical_and(self.calibrationSectorDataframe['pctSpeedUp'] > self.calibrationSectorDataframe['LowerLimit'], self.calibrationSectorDataframe['pctSpeedUp'] < self.calibrationSectorDataframe['UpperLimit'])
print self.calibrationSectorDataframe[['pctSpeedUp','LowerLimit','UpperLimit','IECValid']]
return True
def getSectorValidity(self, key, timeStep):
ba = self.calibrationSectorDataframe.loc[key,'belowAbove']
return ba[0]*(timeStep/3600.0) > 6.0 and ba[1]*(timeStep/3600.0) > 6.0
class ShearExponentCalculator:
def __init__(self, shearMeasurements):
self.shearMeasurements = shearMeasurements
def calculateMultiPointShear(self, row):
# 3 point measurement: return shear= 1/ (numpy.polyfit(x, y, deg, rcond=None, full=False) )
windspeeds = [np.log(row[col]) for col in self.shearMeasurements.values()]
heights = [np.log(height) for height in self.shearMeasurements.keys()]
deg = 1 # linear
if len([ws for ws in windspeeds if not np.isnan(ws)]) < 1:
return np.nan
polyfitResult = np.polyfit(windspeeds, heights, deg, rcond=None, full=False)
shearThreePT = 1/ polyfitResult[0]
return shearThreePT
def calculateTwoPointShear(self,row):
# superseded by self.calculateMultiPointShear
return math.log(row[self.upperColumn] / row[self.lowerColumn]) * self.overOneLogHeightRatio
def shearExponent(self, row):
return self.calculateMultiPointShear(row)
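# Added worked example (illustrative numbers only): for a two-point measurement of
# 5.0 m/s at 40 m and 6.0 m/s at 80 m, the power-law shear exponent is
# ln(6.0/5.0) / ln(80/40) ~= 0.26; calculateMultiPointShear recovers the same value
# as 1/slope of its log(height)-vs-log(speed) fit.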
class Dataset:
def __init__(self, config, rotorGeometry, analysisConfig):
self.relativePath = configuration.RelativePath(config.path)
self.nameColumn = "Dataset Name"
self.name = config.name
self.timeStepInSeconds = config.timeStepInSeconds
self.timeStamp = config.timeStamp
self.actualPower = "Actual Power"
self.hasAllPowers = None not in (config.powerMin,config.powerMax,config.powerSD)
self.powerMin = "Power Min"
self.powerMax = "Power Max"
self.powerSD = "Power SD"
self.hubWindSpeed = "Hub Wind Speed"
self.hubTurbulence = "Hub Turbulence"
self.hubDensity = "Hub Density"
self.shearExponent = "Shear Exponent"
self.referenceShearExponent = "Reference Shear Exponent"
self.turbineShearExponent = "Turbine Shear Exponent"
self.windDirection = "Wind Direction"
self.profileRotorWindSpeed = "Profile Rotor Wind Speed"
self.profileHubWindSpeed = "Profile Hub Wind Speed"
self.profileHubToRotorRatio = "Hub to Rotor Ratio"
self.profileHubToRotorDeviation = "Hub to Rotor Deviation"
self.residualWindSpeed = "Residual Wind Speed"
self.hasShear = len(config.shearMeasurements) > 1
self.hasDirection = config.referenceWindDirection not in (None,'')
self.shearCalibration = "TurbineLocation" in config.shearMeasurements.keys() and "ReferenceLocation" in config.shearMeasurements.keys()
self.hubWindSpeedForTurbulence = self.hubWindSpeed if config.turbulenceWSsource != 'Reference' else config.referenceWindSpeed
self.turbRenormActive = analysisConfig.turbRenormActive
self.turbulencePower = 'Turbulence Power'
self.rewsDefined = config.rewsDefined
self.sensitivityDataColumns = config.sensitivityDataColumns
dateConverter = lambda x: datetime.datetime.strptime(x, config.dateFormat)
dataFrame = pd.read_csv(self.relativePath.convertToAbsolutePath(config.inputTimeSeriesPath), index_col=config.timeStamp, \
parse_dates = True, date_parser = dateConverter, sep = getSeparatorValue(config.separator), \
skiprows = config.headerRows, decimal = getDecimalValue(config.decimal)).replace(config.badData, np.nan)
if config.startDate != None and config.endDate != None:
dataFrame = dataFrame[config.startDate : config.endDate]
elif config.startDate != None:
dataFrame = dataFrame[config.startDate : ]
elif config.endDate != None:
dataFrame = dataFrame[ : config.endDate]
dataFrame[self.nameColumn] = config.name
dataFrame[self.timeStamp] = dataFrame.index
if self.hasDirection:
dataFrame[self.windDirection] = dataFrame[config.referenceWindDirection]
if self.hasShear:
if not self.shearCalibration:
dataFrame[self.shearExponent] = dataFrame.apply(ShearExponentCalculator(config.shearMeasurements).shearExponent, axis=1)
else:
dataFrame[self.turbineShearExponent] = dataFrame.apply(ShearExponentCalculator(config.shearMeasurements["TurbineLocation"]).shearExponent, axis=1)
dataFrame[self.referenceShearExponent] = dataFrame.apply(ShearExponentCalculator(config.shearMeasurements["ReferenceLocation"]).shearExponent, axis=1)
dataFrame[self.shearExponent] = dataFrame[self.referenceShearExponent]
dataFrame[self.residualWindSpeed] = 0.0
if config.calculateHubWindSpeed:
if dataFrame[config.referenceWindSpeed].count() < 1:
raise Exception("Reference wind speed column is empty: cannot apply calibration")
if dataFrame[config.referenceWindDirection].count() < 1:
raise Exception("Reference wind direction column is empty: cannot apply calibration")
self.calibrationCalculator = self.createCalibration(dataFrame, config, config.timeStepInSeconds)
dataFrame[self.hubWindSpeed] = dataFrame.apply(self.calibrationCalculator.turbineValue, axis=1)
if dataFrame[self.hubWindSpeed].count() < 1:
raise Exception("Hub wind speed column is empty after application of calibration")
if (config.hubTurbulence != ''):
dataFrame[self.hubTurbulence] = dataFrame[config.hubTurbulence]
else:
dataFrame[self.hubTurbulence] = dataFrame[config.referenceWindSpeedStdDev] / dataFrame[self.hubWindSpeedForTurbulence]
if config.calibrationMethod != "Specified":
dataFrame[self.residualWindSpeed] = (dataFrame[self.hubWindSpeed] - dataFrame[config.turbineLocationWindSpeed]) / dataFrame[self.hubWindSpeed]
windSpeedBin = "Wind Speed Bin"
turbulenceBin = "Turbulence Bin"
windSpeedBins = binning.Bins(analysisConfig.powerCurveFirstBin, analysisConfig.powerCurveBinSize, analysisConfig.powerCurveLastBin)
turbulenceBins = binning.Bins(0.01, 0.01/windSpeedBins.numberOfBins, 0.02)
aggregations = binning.Aggregations(analysisConfig.powerCurveMinimumCount)
dataFrame[windSpeedBin] = dataFrame[self.hubWindSpeed].map(windSpeedBins.binCenter)
dataFrame[turbulenceBin] = dataFrame[self.hubTurbulence].map(turbulenceBins.binCenter)
self.residualWindSpeedMatrix = DeviationMatrix( dataFrame[self.residualWindSpeed].groupby([dataFrame[windSpeedBin], dataFrame[turbulenceBin]]).aggregate(aggregations.average),
dataFrame[self.residualWindSpeed].groupby([dataFrame[windSpeedBin], dataFrame[turbulenceBin]]).count())
else:
self.residualWindSpeedMatrix = None
else:
dataFrame[self.hubWindSpeed] = dataFrame[config.hubWindSpeed]
if (config.hubTurbulence != ''):
dataFrame[self.hubTurbulence] = dataFrame[config.hubTurbulence]
else:
dataFrame[self.hubTurbulence] = dataFrame[config.referenceWindSpeedStdDev] / dataFrame[self.hubWindSpeedForTurbulence]
self.residualWindSpeedMatrix = None
if self.shearCalibration and config.shearCalibrationMethod != "Reference":
self.shearCalibrationCalculator = self.createShearCalibration(dataFrame,config, config.timeStepInSeconds)
dataFrame[self.shearExponent] = dataFrame.apply(self.shearCalibrationCalculator.turbineValue, axis=1)
if config.calculateDensity:
dataFrame[self.hubDensity] = 100.0 * dataFrame[config.pressure] / (273.15 + dataFrame[config.temperature]) / 287.058
self.hasDensity = True
else:
if config.density != None:
dataFrame[self.hubDensity] = dataFrame[config.density]
self.hasDensity = True
else:
self.hasDensity = False
if config.power != None:
dataFrame[self.actualPower] = dataFrame[config.power]
self.hasActualPower = True
else:
self.hasActualPower = False
if self.hasAllPowers:
dataFrame[self.powerMin] = dataFrame[config.powerMin]
dataFrame[self.powerMax] = dataFrame[config.powerMax]
dataFrame[self.powerSD] = dataFrame[config.powerSD]
dataFrame = self.filterDataFrame(dataFrame, config.filters)
dataFrame = self.excludeData(dataFrame, config)
if self.rewsDefined:
dataFrame = self.defineREWS(dataFrame, config, rotorGeometry)
self.fullDataFrame = dataFrame.copy()
self.dataFrame = self.extractColumns(dataFrame).dropna()
if self.windDirection in self.dataFrame.columns:
self.fullDataFrame[self.windDirection] = self.fullDataFrame[self.windDirection].astype(float)
self.analysedDirections = (round(self.fullDataFrame[self.windDirection].min() + config.referenceWindDirectionOffset), round(self.fullDataFrame[self.windDirection].max()+config.referenceWindDirectionOffset))
def createShearCalibration(self, dataFrame, config, timeStepInSeconds):
df = dataFrame.copy()
if config.shearCalibrationMethod == "Specified":
raise NotImplementedError
else:
calibration = self.getCalibrationMethod(config.shearCalibrationMethod, self.referenceShearExponent, self.turbineShearExponent, timeStepInSeconds, dataFrame)
if hasattr(self,"filteredCalibrationDataframe"):
dataFrame = self.filteredCalibrationDataframe
else:
dataFrame = self.filterDataFrame(dataFrame, config.calibrationFilters)
self.filteredCalibrationDataframe = dataFrame.copy()
if config.calibrationStartDate != None and config.calibrationEndDate != None:
dataFrame = dataFrame[config.calibrationStartDate : config.calibrationEndDate]
dataFrame = dataFrame[calibration.requiredColumns + [self.referenceDirectionBin, config.referenceWindDirection]].dropna()
if len(dataFrame) < 1:
raise Exception("No data are available to carry out calibration.")
siteCalibCalc = self.createSiteCalibrationCalculator(dataFrame, self.referenceShearExponent, calibration)
dataFrame = df
return siteCalibCalc
def createCalibration(self, dataFrame, config, timeStepInSeconds):
self.referenceDirectionBin = "Reference Direction Bin Centre"
dataFrame[config.referenceWindDirection] = (dataFrame[config.referenceWindDirection] + config.referenceWindDirectionOffset) % 360
siteCalibrationBinWidth = 360.0 / config.siteCalibrationNumberOfSectors
dataFrame[self.referenceDirectionBin] = (dataFrame[config.referenceWindDirection] - config.siteCalibrationCenterOfFirstSector) / siteCalibrationBinWidth
dataFrame[self.referenceDirectionBin] = np.round(dataFrame[self.referenceDirectionBin], 0) * siteCalibrationBinWidth + config.siteCalibrationCenterOfFirstSector
dataFrame[self.referenceDirectionBin] = (dataFrame[self.referenceDirectionBin] + 360) % 360
#dataFrame[self.referenceDirectionBin] -= float(config.siteCalibrationCenterOfFirstSector)
if config.calibrationMethod == "Specified":
if all([dir in config.calibrationSlopes.keys() for dir in config.calibrationActives.keys()]):
print "Applying Specified calibration"
print "Direction\tSlope\tOffset\tApplicable Datapoints"
for direction in config.calibrationSlopes:
if config.calibrationActives[direction]:
mask = (dataFrame[self.referenceDirectionBin] == direction)
dataCount = dataFrame[mask][self.referenceDirectionBin].count()
print "%0.2f\t%0.2f\t%0.2f\t%d" % (direction, config.calibrationSlopes[direction], config.calibrationOffsets[direction], dataCount)
df = pd.DataFrame([config.calibrationSlopes, config.calibrationOffsets], index=['Slope','Offset']).T
return SiteCalibrationCalculator( self.referenceDirectionBin, config.referenceWindSpeed,df, actives = config.calibrationActives)
else:
raise Exception("The specified slopes have different bin centres to that specified by siteCalibrationCenterOfFirstSector which is: {0}".format(config.siteCalibrationCenterOfFirstSector))
else:
df = dataFrame.copy()
calibration = self.getCalibrationMethod(config.calibrationMethod,config.referenceWindSpeed, config.turbineLocationWindSpeed, timeStepInSeconds, dataFrame)
if config.calibrationStartDate != None and config.calibrationEndDate != None:
dataFrame = dataFrame[config.calibrationStartDate : config.calibrationEndDate]
dataFrame = self.filterDataFrame(dataFrame, config.calibrationFilters)
self.filteredCalibrationDataframe = dataFrame.copy()
dataFrame = dataFrame[calibration.requiredColumns + [self.referenceDirectionBin, config.referenceWindDirection]].dropna()
if len(dataFrame) < 1:
raise Exception("No data are available to carry out calibration.")
siteCalibCalc = self.createSiteCalibrationCalculator(dataFrame,config.referenceWindSpeed, calibration)
dataFrame = df
return siteCalibCalc
def getCalibrationMethod(self,calibrationMethod,referenceColumn, turbineLocationColumn, timeStepInSeconds, dataFrame):
if calibrationMethod == "RatioOfMeans":
calibration = RatioOfMeans(referenceColumn, turbineLocationColumn)
elif calibrationMethod == "LeastSquares":
calibration = LeastSquares(referenceColumn, turbineLocationColumn)
elif calibrationMethod == "York":
calibration = York(referenceColumn, turbineLocationColumn, timeStepInSeconds, dataFrame)
else:
raise Exception("Calibration method not recognised: %s" % calibrationMethod)
return calibration
def createSiteCalibrationCalculator(self,dataFrame, valueColumn, calibration ):
groups = dataFrame[calibration.requiredColumns].groupby(dataFrame[self.referenceDirectionBin])
slopes = {}
intercepts = {}
counts = {}
belowAbove = {}
sigA = {}
sigB = {}
cov = {}
corr = {}
vRatio= {}
for group in groups:
directionBinCenter = group[0]
sectorDataFrame = group[1].dropna()
if len(sectorDataFrame.index)>1:
slopes[directionBinCenter] = calibration.slope(sectorDataFrame)
intercepts[directionBinCenter] = calibration.intercept(sectorDataFrame, slopes[directionBinCenter])
counts[directionBinCenter] = sectorDataFrame[valueColumn].count()
try:
sigA[directionBinCenter] = calibration.sigA(sectorDataFrame,slopes[directionBinCenter], intercepts[directionBinCenter], counts[directionBinCenter]) # 'ErrInGradient'
sigB[directionBinCenter] = calibration.sigB(sectorDataFrame,slopes[directionBinCenter], intercepts[directionBinCenter], counts[directionBinCenter]) # 'ErrInIntercept'
#cov[directionBinCenter] = calibration.covariance(sectorDataFrame, calibration.x,calibration.y )
cov[directionBinCenter] = sigA[directionBinCenter]*sigB[directionBinCenter]*(-1.0 * sectorDataFrame[calibration.x].sum())/((counts[directionBinCenter] * (sectorDataFrame[calibration.x]**2).sum())**0.5)
corr[directionBinCenter] =sectorDataFrame[[calibration.x, calibration.y]].corr()[calibration.x][calibration.y]
vRatio[directionBinCenter] = (sectorDataFrame[calibration.y]/sectorDataFrame[calibration.x]).mean()# T_A1/R_A1 - this is currently mean of all data
except:
pass
if valueColumn == self.hubWindSpeedForTurbulence:
belowAbove[directionBinCenter] = (sectorDataFrame[sectorDataFrame[valueColumn] <= 8.0][valueColumn].count(),sectorDataFrame[sectorDataFrame[valueColumn] > 8.0][valueColumn].count())
calibrationSectorDataframe = pd.DataFrame([slopes,intercepts,counts, sigA, sigB, cov, corr, vRatio], ["Slope","Offset","Count","SigA","SigB","Cov","Corr","vRatio"] ).T
if len(belowAbove.keys()):
calibrationSectorDataframe['belowAbove'] = belowAbove.values()
print calibrationSectorDataframe
return SiteCalibrationCalculator(self.referenceDirectionBin, valueColumn, calibrationSectorDataframe)
def isValidText(self, text):
if text == None: return False
return len(text) > 0
def excludeData(self, dataFrame, config):
mask = pd.Series([True]*len(dataFrame),index=dataFrame.index)
print "Data set length prior to exclusions: {0}".format(len(mask[mask]))
for exclusion in config.exclusions:
startDate = exclusion[0]
endDate = exclusion[1]
active = exclusion[2]
if active:
subMask = (dataFrame[self.timeStamp] >= startDate) & (dataFrame[self.timeStamp] <= endDate)
mask = mask & ~subMask
print "Applied exclusion: {0} to {1}\n\t- data set length: {2}".format(exclusion[0].strftime("%Y-%m-%d %H:%M"),exclusion[1].strftime("%Y-%m-%d %H:%M"),len(mask[mask]))
print "Data set length after exclusions: {0}".format(len(mask[mask]))
return dataFrame[mask]
def extractColumns(self, dataFrame):
requiredCols = []
requiredCols.append(self.nameColumn)
requiredCols.append(self.timeStamp)
requiredCols.append(self.hubWindSpeed)
requiredCols.append(self.hubTurbulence)
if self.hasDensity:
requiredCols.append(self.hubDensity)
if self.hasShear:
requiredCols.append(self.shearExponent)
if self.hasDirection:
requiredCols.append(self.windDirection)
if self.rewsDefined:
requiredCols.append(self.profileRotorWindSpeed)
requiredCols.append(self.profileHubWindSpeed)
requiredCols.append(self.profileHubToRotorRatio)
requiredCols.append(self.profileHubToRotorDeviation)
if self.hasAllPowers:
requiredCols.append(self.powerMin)
requiredCols.append(self.powerMax)
requiredCols.append(self.powerSD)
if self.hasActualPower:
requiredCols.append(self.actualPower)
for col in self.sensitivityDataColumns:
if col not in requiredCols:
requiredCols.append(col)
if len(dataFrame[requiredCols].dropna()[requiredCols[0]]) > 0:
return dataFrame[requiredCols]
else:
print "Number of null columns:"
print dataFrame[requiredCols].isnull().sum()
text = "One of the required columns is empty.\n"
for col in requiredCols:
text += "- %s: %d\n" % (col, dataFrame[col].dropna().count())
raise Exception(text)
def createDerivedColumn(self,df,cols):
d = df.copy()
d['Derived'] = 1
for col in cols:
d['Derived'] *= ((df[col[0]]*float(col[1]))+float(col[2]))**float(col[3])
return d['Derived']
def applyToDFilter(self,mask,componentFilter,dataFrame,printMsg=True):
startTime = (dataFrame.index - datetime.timedelta(seconds=self.timeStepInSeconds))
endTime = dataFrame.index # explicit assumption is that we're using end format data.
dayMask = dataFrame[self.timeStamp].apply(lambda x,d : True if x.isoweekday() in d else False, args=[componentFilter.daysOfTheWeek] )
todMask = np.logical_and( startTime.time >= componentFilter.startTime.time(),
endTime.time <= componentFilter.endTime.time() )
if len(componentFilter.months) > 0:
monthMask = dataFrame[self.timeStamp].apply(lambda x,d : True if x.month in d else False, args=[componentFilter.months] )
dayMask = dayMask & monthMask
totalMask = dayMask & todMask
mask = mask | totalMask
if printMsg: print "Applied filter:", str(componentFilter)
return mask.copy()
def applySimpleFilter(self,mask,componentFilter,dataFrame,printMsg=True):
filterColumn = componentFilter.column
filterType = componentFilter.filterType
filterInclusive = componentFilter.inclusive
if not componentFilter.derived:
filterValue = componentFilter.value
else:
filterValue = self.createDerivedColumn(dataFrame,componentFilter.value)
#print (filterColumn, filterType, filterInclusive, filterValue)
if filterType.lower() == "below":
mask = self.addFilterBelow(dataFrame, mask, filterColumn, filterValue, filterInclusive)
elif filterType.lower() == "above":
mask = self.addFilterAbove(dataFrame, mask, filterColumn, filterValue, filterInclusive)
elif filterType.lower() == "aboveorbelow" or filterType.lower() == "notequal":
mask = self.addFilterBelow(dataFrame, mask, filterColumn, filterValue, filterInclusive)
mask = self.addFilterAbove(dataFrame, mask, filterColumn, filterValue, filterInclusive)
else:
raise Exception("Filter type not recognised: %s" % filterType)
if printMsg:
print "Applied Filter:{col}-{typ}-{val}\n\tData set length:{leng}".format(
col=filterColumn,typ=filterType,val="Derived Column" if type(filterValue) == pd.Series else filterValue,leng=len(mask[~mask]))
return mask.copy()
def applyRelationshipFilter(self, mask, componentFilter, dataFrame):
filterConjunction = componentFilter.conjunction
if filterConjunction not in ("AND","OR"):
raise NotImplementedError("Filter conjunction not implemented, please use AND or OR...")
filterConjuction = np.logical_or if filterConjunction == "OR" else np.logical_and
masks = []
newMask = pd.Series([False]*len(mask),index=mask.index)
if len(componentFilter.clauses) < 2:
raise Exception("Number of clauses in a relationship must be > 1")
for filter in componentFilter.clauses:
filterMask = self.applySimpleFilter(newMask,filter,dataFrame,printMsg=False)
masks.append(filterMask)
baseMask = masks[0]
for filterMask in masks[1:]:
baseMask = filterConjuction(baseMask,filterMask) # only if commutative (e.g. AND / OR)
mask = np.logical_or(mask,baseMask)
print "Applied Relationship (AND/OR) Filter:\n\tData set length:{leng}".format(leng=len(mask[~mask]))
return mask.copy()
def filterDataFrame(self, dataFrame, filters):
if len(filters) < 1: return dataFrame
print ""
print "Filter Details"
print "Derived\tColumn\tFilterType\tInclusive\tValue"
for componentFilter in filters:
if componentFilter.active:
componentFilter.printSummary()
print ""
mask = pd.Series([False]*len(dataFrame),index=dataFrame.index)
print "Data set length prior to filtering: {0}".format(len(mask[~mask]))
print ""
for componentFilter in filters:
if componentFilter.active:
if not componentFilter.applied:
try:
if hasattr(componentFilter,"startTime"):
mask = self.applyToDFilter(mask,componentFilter,dataFrame)
elif hasattr(componentFilter, "clauses"):
mask = self.applyRelationshipFilter(mask, componentFilter, dataFrame)
else:
mask = self.applySimpleFilter(mask,componentFilter,dataFrame)
print dataFrame[~mask][self.timeStamp].min() , " to " , dataFrame[~mask][self.timeStamp].max()
componentFilter.applied = True
except:
componentFilter.applied = False
print ""
return dataFrame[~mask]
def addFilterBelow(self, dataFrame, mask, filterColumn, filterValue, filterInclusive):
if filterInclusive:
return mask | (dataFrame[filterColumn] <= filterValue)
else:
return mask | (dataFrame[filterColumn] < filterValue)
def addFilterAbove(self, dataFrame, mask, filterColumn, filterValue, filterInclusive):
if filterInclusive:
return mask | (dataFrame[filterColumn] >= filterValue)
else:
return mask | (dataFrame[filterColumn] > filterValue)
def defineREWS(self, dataFrame, config, rotorGeometry):
profileLevels = rews.ProfileLevels(rotorGeometry, config.windSpeedLevels)
if config.rotorMode == "EvenlySpacedLevels":
self.rotor = rews.EvenlySpacedRotor(rotorGeometry, config.numberOfRotorLevels)
elif config.rotorMode == "ProfileLevels":
self.rotor = rews.ProfileLevelsRotor(rotorGeometry, profileLevels)
else:
raise Exception("Unknown rotor mode: % s" % config.rotorMode)
rotorEquivalentWindSpeedCalculator = rews.RotorEquivalentWindSpeed(profileLevels, self.rotor)
if config.hubMode == "Interpolated":
profileHubWindSpeedCalculator = rews.InterpolatedHubWindSpeed(profileLevels, rotorGeometry)
elif config.hubMode == "PiecewiseExponent":
profileHubWindSpeedCalculator = rews.PiecewiseExponentHubWindSpeed(profileLevels, rotorGeometry)
else:
raise Exception("Unknown hub mode: % s" % config.hubMode)
dataFrame[self.profileHubWindSpeed] = dataFrame.apply(profileHubWindSpeedCalculator.hubWindSpeed, axis=1)
dataFrame[self.profileRotorWindSpeed] = dataFrame.apply(rotorEquivalentWindSpeedCalculator.rotorWindSpeed, axis=1)
dataFrame[self.profileHubToRotorRatio] = dataFrame[self.profileRotorWindSpeed] / dataFrame[self.profileHubWindSpeed]
dataFrame[self.profileHubToRotorDeviation] = dataFrame[self.profileHubToRotorRatio] - 1.0
return dataFrame
| 46.13587
| 264
| 0.668012
|
7952ed9bfc2d2444ea096f1a732945fb800bae3c
| 424
|
py
|
Python
|
examples/textrank-docker/textrank.py
|
peter-xbs/newsroom_chinese
|
7fcae68b2ea5584d08d0c48faee34a0734237e6b
|
[
"Apache-2.0"
] | 82
|
2018-05-01T16:32:38.000Z
|
2019-05-18T01:43:15.000Z
|
examples/textrank-docker/textrank.py
|
peter-xbs/newsroom_chinese
|
7fcae68b2ea5584d08d0c48faee34a0734237e6b
|
[
"Apache-2.0"
] | 20
|
2018-05-01T19:32:48.000Z
|
2019-04-12T07:57:48.000Z
|
examples/textrank-docker/textrank.py
|
peter-xbs/newsroom_chinese
|
7fcae68b2ea5584d08d0c48faee34a0734237e6b
|
[
"Apache-2.0"
] | 15
|
2018-05-01T17:34:11.000Z
|
2019-05-07T09:28:21.000Z
|
import json, sys
from gensim.summarization.summarizer import summarize
WORD_COUNT = 50
for line in sys.stdin:
article = json.loads(line)
try:
summary = summarize(
article["text"],
word_count = WORD_COUNT)
except ValueError:
# Handles "input must have more than one sentence"
summary = article["text"]
print(summary.replace("\n", " "), flush = True)
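# Added usage sketch (hedged; "articles.jsonl" is a hypothetical file name): the
# script is a line-delimited JSON filter, so it can be driven from a shell roughly as
#   cat articles.jsonl | python textrank.py > summaries.txt
# Each input line must carry a "text" field; gensim's summarize() trims it to about
# WORD_COUNT words and the except branch falls back to the full text for inputs
# with fewer than two sentences.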
| 16.96
| 58
| 0.617925
|
7952ee1a3eefcd6338a4c035fbaa51d6e720aae8
| 3,399
|
py
|
Python
|
asposehtmlcloud/models/object_exist.py
|
aspose-html-cloud/aspose-html-cloud-python
|
b5abc7aba8ef0df355207480c2cccb282c09a970
|
[
"MIT"
] | 2
|
2020-08-27T15:45:12.000Z
|
2022-03-07T03:52:16.000Z
|
asposehtmlcloud/models/object_exist.py
|
aspose-html-cloud/aspose-html-cloud-python
|
b5abc7aba8ef0df355207480c2cccb282c09a970
|
[
"MIT"
] | null | null | null |
asposehtmlcloud/models/object_exist.py
|
aspose-html-cloud/aspose-html-cloud-python
|
b5abc7aba8ef0df355207480c2cccb282c09a970
|
[
"MIT"
] | 4
|
2018-05-19T16:03:03.000Z
|
2021-11-19T10:58:33.000Z
|
# coding: utf-8
"""
--------------------------------------------------------------------------------------------------------------------
<copyright company="Aspose" file="object_exist.py">
Copyright (c) 2020 Aspose.HTML for Cloud
</copyright>
<summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
</summary>
--------------------------------------------------------------------------------------------------------------------
"""
from asposehtmlcloud.models import BaseModel
class ObjectExist(BaseModel):
"""
Attributes:
model_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
model_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'exists',
'is_folder': 'isFolder'
}
def __init__(self, exists=None, is_folder=None):
self._exists = None
self._is_folder = None
self.exists = exists
self.is_folder = is_folder
@property
def exists(self):
"""Gets the exists of this ObjectExist.
Indicates that the file or folder exists.
:return: The exists of this ObjectExist.
:rtype: bool
"""
return self._exists
@exists.setter
def exists(self, exists):
"""Sets the exists of this ObjectExist.
Indicates that the file or folder exists.
:param exists: The exists of this ObjectExist.
:type: bool
"""
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`")
self._exists = exists
@property
def is_folder(self):
"""Gets the is_folder of this ObjectExist.
True if it is a folder, false if it is a file.
:return: The is_folder of this ObjectExist.
:rtype: bool
"""
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder):
"""Sets the is_folder of this ObjectExist.
True if it is a folder, false if it is a file.
:param is_folder: The is_folder of this ObjectExist.
:type: bool
"""
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`")
self._is_folder = is_folder
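# Minimal usage sketch (added for illustration; not part of the generated model).
# Both setters reject None, so the two flags must be supplied explicitly.
if __name__ == "__main__":
    info = ObjectExist(exists=True, is_folder=False)
    assert info.exists is True
    assert info.is_folder is False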
| 31.472222
| 116
| 0.614593
|
7952ef68d33755b7f8de00981d79347d5622988d
| 791
|
py
|
Python
|
tests/kubernetes/checks/test_ImageDigest.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/kubernetes/checks/test_ImageDigest.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/kubernetes/checks/test_ImageDigest.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import os
import unittest
from checkov.kubernetes.checks.ImageDigest import check
from checkov.kubernetes.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestImageDigest(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/example_ImageDigest"
report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 3)
self.assertEqual(summary['failed'], 2)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| 29.296296
| 101
| 0.71555
|
7952ef9311339b7f142769365f4abb26e89c3009
| 669
|
py
|
Python
|
source_code_final/entries/urls.py
|
momentum-pt-team-1/django-diary-example
|
89bb758a64ede14123b28504c7eea1c8df582a64
|
[
"MIT"
] | null | null | null |
source_code_final/entries/urls.py
|
momentum-pt-team-1/django-diary-example
|
89bb758a64ede14123b28504c7eea1c8df582a64
|
[
"MIT"
] | null | null | null |
source_code_final/entries/urls.py
|
momentum-pt-team-1/django-diary-example
|
89bb758a64ede14123b28504c7eea1c8df582a64
|
[
"MIT"
] | 3
|
2021-07-06T14:07:44.000Z
|
2021-07-06T20:14:08.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.EntryListView.as_view(), name="entry-list"),
path(
"entry/<int:pk>", views.EntryDetailView.as_view(), name="entry-detail"
),
path("create", views.EntryCreateView.as_view(), name="entry-create"),
path(
"entry/<int:pk>/update",
views.EntryUpdateView.as_view(),
name="entry-update",
),
path(
"entry/<int:pk>/delete",
views.EntryDeleteView.as_view(),
name="entry-delete",
),
path("ajax", views.view_returns_json, name="ajax"),
path('ajax/create', views.ajax_create_entry, name="ajax-create"),
]
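# Added note (a sketch, not part of the original URLconf): with these route names
# registered, views and templates can build links via django.urls.reverse(), e.g.
# reverse("entry-detail", args=[1]) -> "/entry/1", assuming this module is included
# at the project root.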
| 27.875
| 78
| 0.615845
|
7952efb0e9e83b2728995926109d06ff5683ae2c
| 18,674
|
py
|
Python
|
playwright/_impl/_assertions.py
|
px-tech/playwright-python
|
ab954acef18fba57bb1c114fe2399d3d02a9ecb9
|
[
"Apache-2.0"
] | null | null | null |
playwright/_impl/_assertions.py
|
px-tech/playwright-python
|
ab954acef18fba57bb1c114fe2399d3d02a9ecb9
|
[
"Apache-2.0"
] | null | null | null |
playwright/_impl/_assertions.py
|
px-tech/playwright-python
|
ab954acef18fba57bb1c114fe2399d3d02a9ecb9
|
[
"Apache-2.0"
] | 1
|
2022-01-29T10:35:58.000Z
|
2022-01-29T10:35:58.000Z
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Pattern, Union
from urllib.parse import urljoin
from playwright._impl._api_structures import ExpectedTextValue, FrameExpectOptions
from playwright._impl._fetch import APIResponse
from playwright._impl._locator import Locator
from playwright._impl._page import Page
from playwright._impl._str_utils import escape_regex_flags
class AssertionsBase:
def __init__(self, locator: Locator, is_not: bool = False) -> None:
self._actual_locator = locator
self._loop = locator._loop
self._dispatcher_fiber = locator._dispatcher_fiber
self._is_not = is_not
async def _expect_impl(
self,
expression: str,
expect_options: FrameExpectOptions,
expected: Any,
message: str,
) -> None:
__tracebackhide__ = True
expect_options["isNot"] = self._is_not
if expect_options.get("timeout") is None:
expect_options["timeout"] = 5_000
if expect_options["isNot"]:
message = message.replace("expected to", "expected not to")
if "useInnerText" in expect_options and expect_options["useInnerText"] is None:
del expect_options["useInnerText"]
result = await self._actual_locator._expect(expression, expect_options)
if result["matches"] == self._is_not:
log = "\n".join(result.get("log", "")).strip()
if log:
log = "\nCall log:\n" + log
if expected is not None:
raise AssertionError(f"{message} '{expected}' {log}")
raise AssertionError(f"{message} {log}")
class PageAssertions(AssertionsBase):
def __init__(self, page: Page, is_not: bool = False) -> None:
super().__init__(page.locator(":root"), is_not)
self._actual_page = page
@property
def _not(self) -> "PageAssertions":
return PageAssertions(self._actual_page, not self._is_not)
async def to_have_title(
self, title_or_reg_exp: Union[Pattern, str], timeout: float = None
) -> None:
expected_values = to_expected_text_values(
[title_or_reg_exp], normalize_white_space=True
)
__tracebackhide__ = True
await self._expect_impl(
"to.have.title",
FrameExpectOptions(expectedText=expected_values, timeout=timeout),
title_or_reg_exp,
"Page title expected to be",
)
async def not_to_have_title(
self, title_or_reg_exp: Union[Pattern, str], timeout: float = None
) -> None:
__tracebackhide__ = True
await self._not.to_have_title(title_or_reg_exp, timeout)
async def to_have_url(
self, url_or_reg_exp: Union[str, Pattern], timeout: float = None
) -> None:
__tracebackhide__ = True
base_url = self._actual_page.context._options.get("baseURL")
if isinstance(url_or_reg_exp, str) and base_url:
url_or_reg_exp = urljoin(base_url, url_or_reg_exp)
expected_text = to_expected_text_values([url_or_reg_exp])
await self._expect_impl(
"to.have.url",
FrameExpectOptions(expectedText=expected_text, timeout=timeout),
url_or_reg_exp,
"Page URL expected to be",
)
async def not_to_have_url(
self, url_or_reg_exp: Union[Pattern, str], timeout: float = None
) -> None:
__tracebackhide__ = True
await self._not.to_have_url(url_or_reg_exp, timeout)
class LocatorAssertions(AssertionsBase):
def __init__(self, locator: Locator, is_not: bool = False) -> None:
super().__init__(locator, is_not)
self._actual_locator = locator
@property
def _not(self) -> "LocatorAssertions":
return LocatorAssertions(self._actual_locator, not self._is_not)
async def to_contain_text(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
use_inner_text: bool = None,
timeout: float = None,
) -> None:
__tracebackhide__ = True
if isinstance(expected, list):
expected_text = to_expected_text_values(
expected, match_substring=True, normalize_white_space=True
)
await self._expect_impl(
"to.contain.text.array",
FrameExpectOptions(
expectedText=expected_text,
useInnerText=use_inner_text,
timeout=timeout,
),
expected,
"Locator expected to contain text",
)
else:
expected_text = to_expected_text_values(
[expected], match_substring=True, normalize_white_space=True
)
await self._expect_impl(
"to.have.text",
FrameExpectOptions(
expectedText=expected_text,
useInnerText=use_inner_text,
timeout=timeout,
),
expected,
"Locator expected to contain text",
)
async def not_to_contain_text(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
use_inner_text: bool = None,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_contain_text(expected, use_inner_text, timeout)
async def to_have_attribute(
self,
name: str,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
expected_text = to_expected_text_values([value])
await self._expect_impl(
"to.have.attribute",
FrameExpectOptions(
expressionArg=name, expectedText=expected_text, timeout=timeout
),
value,
"Locator expected to have attribute",
)
async def not_to_have_attribute(
self,
name: str,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_attribute(name, value, timeout)
async def to_have_class(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
timeout: float = None,
) -> None:
__tracebackhide__ = True
if isinstance(expected, list):
expected_text = to_expected_text_values(expected)
await self._expect_impl(
"to.have.class.array",
FrameExpectOptions(expectedText=expected_text, timeout=timeout),
expected,
"Locator expected to have class",
)
else:
expected_text = to_expected_text_values([expected])
await self._expect_impl(
"to.have.class",
FrameExpectOptions(expectedText=expected_text, timeout=timeout),
expected,
"Locator expected to have class",
)
async def not_to_have_class(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_class(expected, timeout)
async def to_have_count(
self,
count: int,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.have.count",
FrameExpectOptions(expectedNumber=count, timeout=timeout),
count,
"Locator expected to have count",
)
async def not_to_have_count(
self,
count: int,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_count(count, timeout)
async def to_have_css(
self,
name: str,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
expected_text = to_expected_text_values([value])
await self._expect_impl(
"to.have.css",
FrameExpectOptions(
expressionArg=name, expectedText=expected_text, timeout=timeout
),
value,
"Locator expected to have CSS",
)
async def not_to_have_css(
self,
name: str,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_css(name, value, timeout)
async def to_have_id(
self,
id: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
expected_text = to_expected_text_values([id])
await self._expect_impl(
"to.have.id",
FrameExpectOptions(expectedText=expected_text, timeout=timeout),
id,
"Locator expected to have ID",
)
async def not_to_have_id(
self,
id: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_id(id, timeout)
async def to_have_js_property(
self,
name: str,
value: Any,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.have.property",
FrameExpectOptions(
expressionArg=name, expectedValue=value, timeout=timeout
),
value,
"Locator expected to have JS Property",
)
async def not_to_have_js_property(
self,
name: str,
value: Any,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_js_property(name, value, timeout)
async def to_have_value(
self,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
expected_text = to_expected_text_values([value])
await self._expect_impl(
"to.have.value",
FrameExpectOptions(expectedText=expected_text, timeout=timeout),
value,
"Locator expected to have Value",
)
async def not_to_have_value(
self,
value: Union[str, Pattern],
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_value(value, timeout)
async def to_have_text(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
use_inner_text: bool = None,
timeout: float = None,
) -> None:
__tracebackhide__ = True
if isinstance(expected, list):
expected_text = to_expected_text_values(
expected, normalize_white_space=True
)
await self._expect_impl(
"to.have.text.array",
FrameExpectOptions(
expectedText=expected_text,
useInnerText=use_inner_text,
timeout=timeout,
),
expected,
"Locator expected to have text",
)
else:
expected_text = to_expected_text_values(
[expected], normalize_white_space=True
)
await self._expect_impl(
"to.have.text",
FrameExpectOptions(
expectedText=expected_text,
useInnerText=use_inner_text,
timeout=timeout,
),
expected,
"Locator expected to have text",
)
async def not_to_have_text(
self,
expected: Union[List[Union[Pattern, str]], Pattern, str],
use_inner_text: bool = None,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_have_text(expected, use_inner_text, timeout)
async def to_be_checked(
self,
timeout: float = None,
checked: bool = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.checked"
if checked is None or checked is True
else "to.be.unchecked",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be checked",
)
async def not_to_be_checked(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_checked(timeout)
async def to_be_disabled(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.disabled",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be disabled",
)
async def not_to_be_disabled(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_disabled(timeout)
async def to_be_editable(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.editable",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be editable",
)
async def not_to_be_editable(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_editable(timeout)
async def to_be_empty(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.empty",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be empty",
)
async def not_to_be_empty(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_empty(timeout)
async def to_be_enabled(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.enabled",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be enabled",
)
async def not_to_be_enabled(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_enabled(timeout)
async def to_be_hidden(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.hidden",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be hidden",
)
async def not_to_be_hidden(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_hidden(timeout)
async def to_be_visible(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.visible",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be visible",
)
async def not_to_be_visible(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_visible(timeout)
async def to_be_focused(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._expect_impl(
"to.be.focused",
FrameExpectOptions(timeout=timeout),
None,
"Locator expected to be focused",
)
async def not_to_be_focused(
self,
timeout: float = None,
) -> None:
__tracebackhide__ = True
await self._not.to_be_focused(timeout)
class APIResponseAssertions:
def __init__(self, response: APIResponse, is_not: bool = False) -> None:
self._loop = response._loop
self._dispatcher_fiber = response._dispatcher_fiber
self._is_not = is_not
self._actual = response
@property
def _not(self) -> "APIResponseAssertions":
return APIResponseAssertions(self._actual, not self._is_not)
async def to_be_ok(
self,
) -> None:
__tracebackhide__ = True
if self._is_not is not self._actual.ok:
return
message = f"Response status expected to be within [200..299] range, was '{self._actual.status}'"
if self._is_not:
message = message.replace("expected to", "expected not to")
log_list = await self._actual._fetch_log()
log = "\n".join(log_list).strip()
if log:
message += f"\n Call log:\n{log}"
raise AssertionError(message)
async def not_to_be_ok(self) -> None:
__tracebackhide__ = True
await self._not.to_be_ok()
def expected_regex(
pattern: Pattern, match_substring: bool, normalize_white_space: bool
) -> ExpectedTextValue:
expected = ExpectedTextValue(
regexSource=pattern.pattern,
regexFlags=escape_regex_flags(pattern),
matchSubstring=match_substring,
normalizeWhiteSpace=normalize_white_space,
)
return expected
def to_expected_text_values(
items: Union[List[Pattern], List[str], List[Union[str, Pattern]]],
match_substring: bool = False,
normalize_white_space: bool = False,
) -> List[ExpectedTextValue]:
out: List[ExpectedTextValue] = []
assert isinstance(items, list)
for item in items:
if isinstance(item, str):
out.append(
ExpectedTextValue(
string=item,
matchSubstring=match_substring,
normalizeWhiteSpace=normalize_white_space,
)
)
elif isinstance(item, Pattern):
out.append(expected_regex(item, match_substring, normalize_white_space))
return out
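# Added usage sketch (not part of the upstream module): to_expected_text_values()
# normalises plain strings and compiled patterns into the expectation payload, e.g.
#   import re
#   to_expected_text_values(["exact text", re.compile("part", re.IGNORECASE)],
#                           match_substring=True)
# returns one ExpectedTextValue with `string` set and one with `regexSource`/
# `regexFlags` set, both carrying matchSubstring=True.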
| 31.227425
| 104
| 0.584717
|
7952f0281ff39a40555f5d12a1bb289ddbcf363f
| 1,995
|
py
|
Python
|
gauss_carrier_vs_space.py
|
kai-neuhaus/mr-oct-simulation
|
a99576377da916418b937eb553b62af6ab44b8fd
|
[
"MIT"
] | 4
|
2020-07-30T00:38:34.000Z
|
2021-11-09T10:53:39.000Z
|
gauss_carrier_vs_space.py
|
kai-neuhaus/mr-oct-simulation
|
a99576377da916418b937eb553b62af6ab44b8fd
|
[
"MIT"
] | null | null | null |
gauss_carrier_vs_space.py
|
kai-neuhaus/mr-oct-simulation
|
a99576377da916418b937eb553b62af6ab44b8fd
|
[
"MIT"
] | null | null | null |
from scipy import *
from scipy.signal import hilbert
from matplotlib.pyplot import *
import my_format_lib as mylib
mylib.format_plot()
# manuscript:lst:gausscarrspace
N = 20000 # buffer size
SR = 20e6 # sample rate [s]
tau_scan = linspace(0, N / SR, N) # time range [s]
wavelength = 1330e-9 # [m]
wavelengthBW = 60e-9 # [m]
L_FWHM = 2*log(2)/pi * wavelength**2 / wavelengthBW
L_sigma = L_FWHM/2/sqrt(2*log(2))
D_k = pi/sqrt(log(2))*wavelengthBW/wavelength**2
v_M = 0.05 # [m/s]
D_L = tau_scan * v_M # spatial range [m]
D_L_um = D_L*1e6
f_D = 2 * v_M / wavelength #[1/s]
L_0 = 23e-6 #[m]
K = 2*pi / wavelength
I_t = exp(-1j * 2 * K * D_L) * exp(-(D_L-L_0)**2 * (D_k**2))
plot(D_L_um,I_t)
# manuscript:lst:gausscarrspace
plot(D_L_um,abs(hilbert(real(I_t))))
arrow_x = D_L_um[where(abs(hilbert(real(I_t))) >= 0.5+0j)[0]]
print(arrow_x)
gca().annotate('', # leave empty as otherwise arrow is scaled to text
textcoords='data',
xycoords='data',
xy=(arrow_x[0],0.5),
xytext=(arrow_x[0]-10,0.5),
arrowprops=dict(arrowstyle='->'))
gca().annotate('', # leave empty as otherwise arrow is scaled to text
textcoords='data',
xycoords='data',
xy=(arrow_x[-1],0.5),
xytext=(arrow_x[-1]+10,0.5),
arrowprops=dict(arrowstyle='->'))
gca().annotate('FWHM',
xycoords='data',
textcoords='data',
xy=(arrow_x[-1]+1,0.55)
)
# grid(True)
print('L_FWHM',L_FWHM,'m')
print('L_sigma',L_sigma,'m')
print('scan distance',max(D_L),'m')
print('f_D',f_D,'Hz')
s0 = D_L[(array(where(abs(hilbert(real(I_t))) > 0.5)).min())]
s1 = D_L[(array(where(abs(hilbert(real(I_t))) > 0.5)).max())]
FWHM = abs(s0 - s1)
print('measured FHWM', FWHM, 'm')
xlabel(r'Space ($\mathrm{\mu m}$)') # Dk
# xlabel('$\Delta l$ ($\mu$m)') # spr
ylabel('Amplitude (arb.)')
# grid(True)
tight_layout()
savefig('gauss_carrier_vs_space.pdf')
show()
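# Added arithmetic cross-check (not in the original script): with wavelength
# 1330 nm and wavelengthBW 60 nm, L_FWHM = 2*ln(2)/pi * wavelength**2 / wavelengthBW
# evaluates to roughly 1.30e-5 m (~13 um), which the printed L_FWHM and the measured
# FWHM of the Hilbert envelope should both reproduce.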
| 32.177419
| 69
| 0.594486
|
7952f09d5b4ba247e95a7aef094f51906cbf8652
| 3,702
|
py
|
Python
|
n3jet/general/single/general_init_model_rerun.py
|
JosephPB/n3jet
|
f097c5e829b5f86fc0ce9007c5fa76ea229dfc56
|
[
"MIT"
] | 3
|
2020-06-03T13:50:59.000Z
|
2021-12-01T08:21:34.000Z
|
n3jet/general/single/general_init_model_rerun.py
|
JosephPB/n3jet
|
f097c5e829b5f86fc0ce9007c5fa76ea229dfc56
|
[
"MIT"
] | 2
|
2021-08-25T16:15:38.000Z
|
2022-02-10T03:36:12.000Z
|
n3jet/general/single/general_init_model_rerun.py
|
JosephPB/n3jet
|
f097c5e829b5f86fc0ce9007c5fa76ea229dfc56
|
[
"MIT"
] | 1
|
2020-06-12T15:00:56.000Z
|
2020-06-12T15:00:56.000Z
|
import argparse
from n3jet.general import SingleModelRun
from n3jet.utils.general_utils import bool_convert
def parse():
"""
Parse arguments
"""
parser = argparse.ArgumentParser(description=
"""
Training multiple models on the same dataset for error analysis.
Here we assume that the momenta and njet files already exist and
will be passed to the script by the user
"""
)
parser.add_argument(
'--yaml_file',
dest='yaml_file',
help='YAML file with config parameters',
type=str,
default = "False"
)
parser.add_argument(
'--mom_file',
dest='mom_file',
help='destination of momenta file',
type=str,
)
parser.add_argument(
'--nj_file',
dest='nj_file',
help='NJet file',
type=str,
)
parser.add_argument(
'--delta_cut',
dest='delta_cut',
help='proximity of jets according to JADE algorithm',
type=float,
default=0.01,
)
parser.add_argument(
'--model_base_dir',
dest='model_base_dir',
help='model base directory in which folders will be created',
type=str,
)
parser.add_argument(
'--model_dir',
dest='model_dir',
help='model directory which will be created on top of model_base_dir',
type=str,
)
parser.add_argument(
'--training_reruns',
dest='training_reruns',
help='number of training reruns for testing, default: 1',
type=int,
default=1,
)
parser.add_argument(
'--all_legs',
dest='all_legs',
help='train on data from all legs, not just all jets, default: False',
type=str,
default='False',
)
parser.add_argument(
'--all_pairs',
dest='all_pairs',
help='train on data from all pairs (except for initial state particles), not just all jets, default: False',
type=str,
default='False',
)
parser.add_argument(
'--lr',
dest='lr',
help='learning rate',
type=float,
default=0.01,
)
parser.add_argument(
'--hp',
dest='hp',
help='use float64 precision',
type=str,
default="False",
)
parser.add_argument(
'--md',
dest='md',
help='train using model_dataset flag',
type=str,
default="False",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse()
yaml_file = args.yaml_file
mom_file = args.mom_file
nj_file = args.nj_file
delta_cut = args.delta_cut
model_base_dir = args.model_base_dir
model_dir = args.model_dir
training_reruns = args.training_reruns
all_legs = bool_convert(args.all_legs)
all_pairs = bool_convert(args.all_pairs)
lr = args.lr
hp = bool_convert(args.hp)
md = bool_convert(args.md)
if yaml_file != "False":
singlemodel = SingleModelRun.from_yaml(yaml_file)
else:
singlemodel = SingleModelRun(
mom_file = mom_file,
nj_file = nj_file,
delta_cut = delta_cut,
model_base_dir = model_base_dir,
model_dir = model_dir,
training_reruns = training_reruns,
all_legs = all_legs,
all_pairs = all_pairs,
lr = lr,
high_precision = hp,
model_dataset = md
)
singlemodel.train()
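# Added invocation sketch (file and directory names below are placeholders, not from
# the original script): the run can be configured either from a YAML file or from
# explicit flags, e.g.
#   python general_init_model_rerun.py --yaml_file single_run.yaml
#   python general_init_model_rerun.py --mom_file momenta.npy --nj_file njet.npy \
#       --model_base_dir ./models/ --model_dir run_ --training_reruns 3
# Note that whenever --yaml_file is anything other than the literal string "False",
# the remaining flags are ignored in favour of the YAML configuration.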
| 24.038961
| 116
| 0.548622
|
7952f0ae105c9158cae0204ff0d3102e8bd96c83
| 2,192
|
py
|
Python
|
scripts/patches/sagemaker.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/patches/sagemaker.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/patches/sagemaker.py
|
compose-x/troposphere
|
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
|
[
"BSD-2-Clause"
] | null | null | null |
patches = [
# Rename AWS::SageMaker::Device.Device to AWS::SageMaker::Device.DeviceProperty
{
"op": "move",
"from": "/PropertyTypes/AWS::SageMaker::Device.Device",
"path": "/PropertyTypes/AWS::SageMaker::Device.DeviceProperty",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::SageMaker::Device/Properties/Device/Type",
"value": "DeviceProperty",
},
# Rename AWS::SageMaker::ModelBiasJobDefinition.EndpointInput to AWS::SageMaker::ModelBiasJobDefinition.ModelBiasEndpointInput
{
"op": "move",
"from": "/PropertyTypes/AWS::SageMaker::ModelBiasJobDefinition.EndpointInput",
"path": "/PropertyTypes/AWS::SageMaker::ModelBiasJobDefinition.ModelBiasEndpointInput",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::SageMaker::ModelBiasJobDefinition.ModelBiasJobInput/Properties/EndpointInput/Type",
"value": "ModelBiasEndpointInput",
},
# Rename AWS::SageMaker::ModelExplainabilityJobDefinition.EndpointInput to AWS::SageMaker::ModelExplainabilityJobDefinition.ModelExplainabilityEndpointInput
{
"op": "move",
"from": "/PropertyTypes/AWS::SageMaker::ModelExplainabilityJobDefinition.EndpointInput",
"path": "/PropertyTypes/AWS::SageMaker::ModelExplainabilityJobDefinition.ModelExplainabilityEndpointInput",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::SageMaker::ModelExplainabilityJobDefinition.ModelExplainabilityJobInput/Properties/EndpointInput/Type",
"value": "ModelExplainabilityEndpointInput",
},
# Rename AWS::SageMaker::ModelQualityJobDefinition.EndpointInput to AWS::SageMaker::ModelQualityJobDefinition.ModelQualityEndpointInput
{
"op": "move",
"from": "/PropertyTypes/AWS::SageMaker::ModelQualityJobDefinition.EndpointInput",
"path": "/PropertyTypes/AWS::SageMaker::ModelQualityJobDefinition.ModelQualityEndpointInput",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::SageMaker::ModelQualityJobDefinition.ModelQualityJobInput/Properties/EndpointInput/Type",
"value": "ModelQualityEndpointInput",
},
]
| 46.638298
| 160
| 0.694343
|
7952f0eed3a20f9c04bf3c7a1b79a112291b37d7
| 220
|
py
|
Python
|
exercises/en/exc_01_28b.py
|
betatim/MCL-DSCI-011-programming-in-python
|
b51f43a6bb1bedf0db028613d48d6566309ec44a
|
[
"MIT"
] | 1
|
2020-06-26T20:15:44.000Z
|
2020-06-26T20:15:44.000Z
|
exercises/en/exc_01_28b.py
|
betatim/MCL-DSCI-011-programming-in-python
|
b51f43a6bb1bedf0db028613d48d6566309ec44a
|
[
"MIT"
] | 20
|
2020-06-15T23:05:20.000Z
|
2020-09-01T22:07:45.000Z
|
exercises/en/exc_01_28b.py
|
betatim/MCL-DSCI-011-programming-in-python
|
b51f43a6bb1bedf0db028613d48d6566309ec44a
|
[
"MIT"
] | 1
|
2020-06-25T20:53:13.000Z
|
2020-06-25T20:53:13.000Z
|
import pandas as pd
# The data
hockey_players = pd.read_csv('data/canucks.csv')
# Find the total salary of the team
# Save it in an object called player_cost
____ = hockey_players[[____]].____()
# Display it
____
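# For reference only, one possible completion is sketched below; the 'Salary'
# column name is an assumption, since the canucks.csv schema is not shown here.
# player_cost = hockey_players[['Salary']].sum()
# player_cost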
| 14.666667
| 48
| 0.727273
|
7952f13c6642c0e09f4344de98e4f32f9c029029
| 2,801
|
py
|
Python
|
asreview/webapp/start_flask.py
|
J535D165/asreview
|
eda3c52a595d739093c3cd6cd37d41eeed6dd15c
|
[
"Apache-2.0"
] | null | null | null |
asreview/webapp/start_flask.py
|
J535D165/asreview
|
eda3c52a595d739093c3cd6cd37d41eeed6dd15c
|
[
"Apache-2.0"
] | null | null | null |
asreview/webapp/start_flask.py
|
J535D165/asreview
|
eda3c52a595d739093c3cd6cd37d41eeed6dd15c
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import webbrowser
from threading import Timer
from flask import Flask
from flask import send_from_directory
from flask.json import jsonify
from flask.templating import render_template
from flask_cors import CORS
from asreview import __version__ as asreview_version
from asreview.entry_points.lab import _lab_parser
from asreview.webapp import api
# set logging level
if os.environ.get('FLASK_ENV', "") == "development":
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
def _url(host, port):
return "http://{host}:{port}/".format(host=host, port=port)
def _open_browser(host, port):
webbrowser.open_new(_url(host, port))
def create_app(**kwargs):
app = Flask(
__name__,
instance_relative_config=True,
static_folder="build/static",
template_folder="build"
)
# Get the ASReview arguments.
kwargs.pop("dataset", None)
app.config['asr_kwargs'] = kwargs
# Ensure the instance folder exists.
try:
os.makedirs(app.instance_path)
except OSError:
pass
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.register_blueprint(api.bp)
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
@app.route('/favicon.ico')
def send_favicon():
return send_from_directory(
'build',
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
@app.route('/boot', methods=["GET"])
def api_boot():
"""Get the boot info"""
if os.environ.get("FLASK_ENV", None) == "development":
status = "development"
else:
status = "asreview"
try:
import asreviewcontrib.covid19 # noqa
status = "asreview-covid19"
except ImportError:
logging.debug("covid19 plugin not found")
# get the asreview version
response = jsonify({
"status": status,
"version": asreview_version,
})
response.headers.add('Access-Control-Allow-Origin', '*')
return response
return app
def main(argv):
parser = _lab_parser(prog="lab")
kwargs = vars(parser.parse_args(argv))
host = kwargs.pop("ip")
port = kwargs.pop("port")
def _internal_open_webbrowser():
_open_browser(host, port)
# open webbrowser if not in flask development mode
if os.environ.get('FLASK_ENV', "") != "development":
Timer(1, _internal_open_webbrowser).start()
print(
"\n\n\n\nIf your browser doesn't open. "
"Please navigate to '{url}'\n\n\n\n".format(url=_url(host, port)))
app = create_app(**kwargs)
app.run(host=host, port=port)
| 24.146552
| 74
| 0.625134
|
7952f2635431c35255bc154f6b7373b0943fcb7a
| 2,156
|
py
|
Python
|
deprecated/examples_robust/affect/mosi_early_fusion_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 148
|
2021-03-06T06:54:13.000Z
|
2022-03-29T19:27:21.000Z
|
deprecated/examples_robust/affect/mosi_early_fusion_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 10
|
2021-07-19T22:57:49.000Z
|
2022-02-04T03:12:29.000Z
|
deprecated/examples_robust/affect/mosi_early_fusion_robust.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 18
|
2021-07-22T07:17:27.000Z
|
2022-03-27T16:11:40.000Z
|
from unimodals.common_models import GRU, MLP
from robustness.all_in_one import general_train, general_test
from get_data_robust import get_dataloader
from fusions.common_fusions import ConcatEarly
from training_structures.Simple_Early_Fusion import train, test
import torch
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
sys.path.append('/home/pliang/multibench/MultiBench/datasets/affect')
# Support mosi/mosi_unaligned/mosei/mosei_unaligned
traindata, validdata, robust_text, robust_vision, robust_audio, robust_timeseries = get_dataloader(
'../../../affect/processed/mosi_data.pkl', '../../../affect/mosi', 'mosi')
# mosi
# encoders=GRU(325,512,dropout=True,has_padding=True).cuda()
# head=MLP(512,256, 1).cuda()
# mosei
encoders = GRU(409, 800, dropout=True, has_padding=True).cuda()
head = MLP(800, 400, 1).cuda()
# encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
# GRU(74,150,dropout=True,has_padding=True).cuda(),\
# GRU(300,600,dropout=True,has_padding=True).cuda()]
# head=MLP(820,400,1).cuda()
# iemocap
'''
encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
GRU(74,150,dropout=True,has_padding=True).cuda(),\
GRU(300,600,dropout=True,has_padding=True).cuda()]
head=MLP(820,400,4).cuda()
'''
fusion = ConcatEarly().cuda()
# Support simple early_fusion and early_fusion with removing bias
# mosi/mosei
def trainprocess(filename):
train(encoders, fusion, head, traindata, validdata, 1000, True, True, task="regression", optimtype=torch.optim.AdamW,
lr=1e-5, save=filename, weight_decay=0.01, criterion=torch.nn.L1Loss(), regularization=False)
filename = general_train(trainprocess, 'mosi_early_fusion')
# iemocap
'''
train(encoders,fusion,head,traindata,validdata,1000,True,True, \
optimtype=torch.optim.AdamW,lr=1e-4,save='best.pt', \
weight_decay=0.01,regularization=False)
'''
def testprocess(model, robustdata):
return test(model, robustdata, True, torch.nn.L1Loss(), "regression")
general_test(testprocess, filename, [
robust_text, robust_vision, robust_audio, robust_timeseries])
# test(model,testdata,True,)
| 34.222222
| 121
| 0.743043
|
7952f2bd3b02fc360aa21f0d361a6d348cd09ff7
| 9,372
|
py
|
Python
|
ansible/modules/cloud/digital_ocean/digital_ocean_floating_ip.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
ansible/modules/cloud/digital_ocean/digital_ocean_floating_ip.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/cloud/digital_ocean/digital_ocean_floating_ip.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_floating_ip
short_description: Manage DigitalOcean Floating IPs
description:
- Create/delete/assign a floating IP.
version_added: "2.4"
author: "Patrick Marques (@patrickfmarques)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
ip:
description:
- Public IP address of the Floating IP. Used to remove an IP
required: false
default: None
region:
description:
- The region that the Floating IP is reserved to.
required: false
default: None
droplet_id:
description:
- The Droplet that the Floating IP has been assigned to.
required: false
default: None
oauth_token:
description:
- DigitalOcean OAuth token.
required: true
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: "Create a Floating IP in region lon1"
digital_ocean_floating_ip:
state: present
region: lon1
- name: "Create a Floating IP assigned to Droplet ID 123456"
digital_ocean_floating_ip:
state: present
droplet_id: 123456
- name: "Delete a Floating IP with ip 1.2.3.4"
digital_ocean_floating_ip:
state: absent
ip: "1.2.3.4"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
data:
description: a DigitalOcean Floating IP resource
returned: success and no resource constraint
type: dict
sample: {
"action": {
"id": 68212728,
"status": "in-progress",
"type": "assign_ip",
"started_at": "2015-10-15T17:45:44Z",
"completed_at": null,
"resource_id": 758603823,
"resource_type": "floating_ip",
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
"512mb",
"1gb",
"2gb",
"4gb",
"8gb",
"16gb",
"32gb",
"48gb",
"64gb"
],
"features": [
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": true
},
"region_slug": "nyc3"
}
}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers):
self.module = module
self.headers = headers
self.baseurl = 'https://api.digitalocean.com/v2'
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
timeout = self.module.params['timeout']
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=timeout)
        # Exceptions in fetch_url may result in a status of -1; this ensures the module fails cleanly in that case
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
def wait_action(module, rest, ip, action_id, timeout=10):
    end_time = time.time() + timeout
while time.time() < end_time:
response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
status_code = response.status_code
status = response.json['action']['status']
# TODO: check status_code == 200?
if status == 'completed':
return True
elif status == 'errored':
module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
                ip, action_id), data=response.json)
module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
        ip, action_id), data=response.json)
def core(module):
api_token = module.params['oauth_token']
state = module.params['state']
ip = module.params['ip']
droplet_id = module.params['droplet_id']
rest = Rest(module, {'Authorization': 'Bearer {0}'.format(api_token),
'Content-type': 'application/json'})
    if state == 'present':
if droplet_id is not None and module.params['ip'] is not None:
# Lets try to associate the ip to the specified droplet
associate_floating_ips(module, rest)
else:
create_floating_ips(module, rest)
    elif state == 'absent':
response = rest.delete("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 204:
module.exit_json(changed=True)
elif status_code == 404:
module.exit_json(changed=False)
else:
module.exit_json(changed=False, data=json_data)
def get_floating_ip_details(module, rest):
ip = module.params['ip']
response = rest.get("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 200:
return json_data['floating_ip']
else:
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def assign_floating_id_to_droplet(module, rest):
ip = module.params['ip']
payload = {
"type": "assign",
"droplet_id": module.params['droplet_id'],
}
response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 201:
wait_action(module, rest, ip, json_data['action']['id'])
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def associate_floating_ips(module, rest):
floating_ip = get_floating_ip_details(module, rest)
droplet = floating_ip['droplet']
    # TODO: if already assigned to a droplet, verify whether it is one of the specified valid droplets
if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
module.exit_json(changed=False)
else:
assign_floating_id_to_droplet(module, rest)
def create_floating_ips(module, rest):
payload = {
}
if module.params['region'] is not None:
payload["region"] = module.params['region']
if module.params['droplet_id'] is not None:
payload["droplet_id"] = module.params['droplet_id']
response = rest.post("floating_ips", data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 202:
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
ip=dict(aliases=['id'], required=False),
region=dict(required=False),
droplet_id=dict(required=False),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
required=True,
),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int', default=30),
),
required_if=([
            ('state', 'absent', ['ip'])
]),
mutually_exclusive=(
['region', 'droplet_id']
),
)
core(module)
if __name__ == '__main__':
main()
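# An additional playbook example (illustrative sketch; it exercises the
# associate path in core() where both 'ip' and 'droplet_id' are supplied,
# reusing the placeholder values from the EXAMPLES block above):
#
# - name: "Assign Floating IP 1.2.3.4 to Droplet ID 123456"
#   digital_ocean_floating_ip:
#     state: present
#     ip: "1.2.3.4"
#     droplet_id: 123456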
| 28.925926
| 113
| 0.606701
|
7952f36b73aea65f0b8715440b4ec099ba021ea4
| 896
|
py
|
Python
|
main.py
|
jphandrigan/exposedsurface
|
796c2af01247750e9957173ee9b4938e537af467
|
[
"Unlicense"
] | null | null | null |
main.py
|
jphandrigan/exposedsurface
|
796c2af01247750e9957173ee9b4938e537af467
|
[
"Unlicense"
] | null | null | null |
main.py
|
jphandrigan/exposedsurface
|
796c2af01247750e9957173ee9b4938e537af467
|
[
"Unlicense"
] | null | null | null |
#import libraries
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import numpy as np
#set the tank/compartment volume (in Litres)
volume = 12124
x = np.array([20820, 43910, 5254, 6272, 4343, 2380, 3372, 5678, 1575, 2978, 3675, 4155, 7948, 10510, 3186, 9464, 7949, 3785, 5678, 7949, 34000, 16670, 3747, 2300, 4500,11356,3785,7570,13249, 36000, 22976, 34000, 34000, 31797.46, 31000, 36000, 16655, 4921,14763, 18927.05, 31797.4]) #L
y = np.array([54, 88.17, 18, 20, 16, 13, 13,16.3, 6.6, 13, 7.9, 15, 23, 24.8, 12, 24, 17.8, 8.83, 11.9, 19.8, 83.94, 41.2, 12.4, 7.6, 8.7,26.85,7.62,15.4,30.94, 84.6, 55.57, 85.60, 85.00,75.5302, 63.7, 84.1,38.46, 9.94, 31.31, 56.11, 79.61]) #m2
f = interp1d(x,y,fill_value="extrapolate")
type(f)
f(volume)
print ('A tank/compartment with a volume of {} L has an estimated exposed surface area of: {} m2.'.format(volume,f(volume)))
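# Illustrative extension (a minimal sketch, not part of the original script):
# interp1d also accepts arrays, so several tank volumes can be estimated in one
# call; the volumes below are arbitrary example values in Litres.
example_volumes = np.array([5000, 20000, 45000])
print('Estimated exposed surface areas (m2): {}'.format(f(example_volumes)))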
| 49.777778
| 284
| 0.668527
|
7952f3af071807311bee5afeb1e9ec3bf7e1fa4f
| 288
|
py
|
Python
|
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex104.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex104.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex104.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
def leiaint(msg):
while True:
n = input(msg)
if n.isnumeric():
valor = int(n)
break
else:
print("Digite um nรบmero vรกlido")
return valor
num = leiaint("enter an integer: ")
print(f"You entered the number {num}")
| 20.571429
| 44
| 0.538194
|
7952f3b68dabd65d8015eb5dcae6347e711f3ea0
| 2,101
|
py
|
Python
|
frappe/desk/form/run_method.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 5
|
2017-09-12T15:56:31.000Z
|
2022-03-09T13:50:21.000Z
|
frappe/desk/form/run_method.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 212
|
2017-08-16T13:03:18.000Z
|
2020-10-06T12:26:21.000Z
|
frappe/desk/form/run_method.py
|
chentaoz/frappe
|
ee3c4943bf6177ad3b410cdb0d802af486751a65
|
[
"MIT"
] | 14
|
2020-11-04T11:22:44.000Z
|
2022-02-01T20:59:37.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json, inspect
import frappe
from frappe import _
from frappe.utils import cint
from six import text_type, string_types
@frappe.whitelist()
def runserverobj(method, docs=None, dt=None, dn=None, arg=None, args=None):
"""run controller method - old style"""
if not args: args = arg or ""
if dt: # not called from a doctype (from a page)
if not dn: dn = dt # single
doc = frappe.get_doc(dt, dn)
else:
doc = frappe.get_doc(json.loads(docs))
doc._original_modified = doc.modified
doc.check_if_latest()
if not doc.has_permission("read"):
frappe.msgprint(_("Not permitted"), raise_exception = True)
if doc:
try:
args = json.loads(args)
except ValueError:
args = args
try:
fnargs, varargs, varkw, defaults = inspect.getargspec(getattr(doc, method))
except ValueError:
fnargs = inspect.getfullargspec(getattr(doc, method)).args
varargs = inspect.getfullargspec(getattr(doc, method)).varargs
varkw = inspect.getfullargspec(getattr(doc, method)).varkw
defaults = inspect.getfullargspec(getattr(doc, method)).defaults
if not fnargs or (len(fnargs)==1 and fnargs[0]=="self"):
r = doc.run_method(method)
elif "args" in fnargs or not isinstance(args, dict):
r = doc.run_method(method, args)
else:
r = doc.run_method(method, **args)
if r:
#build output as csv
if cint(frappe.form_dict.get('as_csv')):
make_csv_output(r, doc.doctype)
else:
frappe.response['message'] = r
frappe.response.docs.append(doc)
def make_csv_output(res, dt):
"""send method response as downloadable CSV file"""
import frappe
from six import StringIO
import csv
f = StringIO()
writer = csv.writer(f)
for r in res:
row = []
for v in r:
if isinstance(v, string_types):
v = v.encode("utf-8")
row.append(v)
writer.writerow(row)
f.seek(0)
frappe.response['result'] = text_type(f.read(), 'utf-8')
frappe.response['type'] = 'csv'
frappe.response['doctype'] = dt.replace(' ','')
| 25.621951
| 78
| 0.698239
|
7952f4768e11aeef8d12a312c4032a70c9ce1d05
| 8,080
|
py
|
Python
|
pysnmp/HH3C-MP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/HH3C-MP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/HH3C-MP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HH3C-MP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-MP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:15:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
hh3cRhw, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cRhw")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
NotificationType, Gauge32, TimeTicks, Counter64, ObjectIdentity, IpAddress, Integer32, Bits, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32, MibIdentifier, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "TimeTicks", "Counter64", "ObjectIdentity", "IpAddress", "Integer32", "Bits", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32", "MibIdentifier", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hh3cMultilinkPPP = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 8, 33))
if mibBuilder.loadTexts: hh3cMultilinkPPP.setLastUpdated('200405180000Z')
if mibBuilder.loadTexts: hh3cMultilinkPPP.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
hh3cMpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1))
hh3cMpMultilinkTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1), )
if mibBuilder.loadTexts: hh3cMpMultilinkTable.setStatus('current')
hh3cMpMultilinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hh3cMpMultilinkEntry.setStatus('current')
hh3cMpMultilinkDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMultilinkDescr.setStatus('current')
hh3cMpBundleName = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundleName.setStatus('current')
hh3cMpBundledSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundledSlot.setStatus('current')
hh3cMpBundledMemberCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundledMemberCnt.setStatus('current')
hh3cMpLostFragments = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpLostFragments.setStatus('current')
hh3cMpReorderedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpReorderedPkts.setStatus('current')
hh3cMpUnassignedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpUnassignedPkts.setStatus('current')
hh3cMpInterleavedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpInterleavedPkts.setStatus('current')
hh3cMpRcvdSequence = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpRcvdSequence.setStatus('current')
hh3cMpSentSequence = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpSentSequence.setStatus('current')
hh3cMpMemberlinkTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2), )
if mibBuilder.loadTexts: hh3cMpMemberlinkTable.setStatus('current')
hh3cMpMemberlinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-MP-MIB", "hh3cMpMemberlinkSeqNumber"))
if mibBuilder.loadTexts: hh3cMpMemberlinkEntry.setStatus('current')
hh3cMpMemberlinkSeqNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkSeqNumber.setStatus('current')
hh3cMpMemberlinkIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkIfIndex.setStatus('current')
hh3cMpMemberlinkDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkDescr.setStatus('current')
hh3cMpMemberlinkMpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkMpStatus.setStatus('current')
hh3cMpNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 2))
hh3cMpConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3))
hh3cMpCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 1))
hh3cMpCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 1, 1)).setObjects(("HH3C-MP-MIB", "hh3cMpMandatoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpCompliance = hh3cMpCompliance.setStatus('current')
hh3cMpGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2))
hh3cMpMandatoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2, 1)).setObjects(("HH3C-MP-MIB", "hh3cMpBundledMemberCnt"), ("HH3C-MP-MIB", "hh3cMpMemberlinkSeqNumber"), ("HH3C-MP-MIB", "hh3cMpMemberlinkIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpMandatoryGroup = hh3cMpMandatoryGroup.setStatus('current')
hh3cMpInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2, 2)).setObjects(("HH3C-MP-MIB", "hh3cMpMultilinkDescr"), ("HH3C-MP-MIB", "hh3cMpBundleName"), ("HH3C-MP-MIB", "hh3cMpBundledSlot"), ("HH3C-MP-MIB", "hh3cMpBundledMemberCnt"), ("HH3C-MP-MIB", "hh3cMpLostFragments"), ("HH3C-MP-MIB", "hh3cMpReorderedPkts"), ("HH3C-MP-MIB", "hh3cMpUnassignedPkts"), ("HH3C-MP-MIB", "hh3cMpInterleavedPkts"), ("HH3C-MP-MIB", "hh3cMpRcvdSequence"), ("HH3C-MP-MIB", "hh3cMpSentSequence"), ("HH3C-MP-MIB", "hh3cMpMemberlinkDescr"), ("HH3C-MP-MIB", "hh3cMpMemberlinkMpStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpInfoGroup = hh3cMpInfoGroup.setStatus('current')
mibBuilder.exportSymbols("HH3C-MP-MIB", hh3cMpMemberlinkTable=hh3cMpMemberlinkTable, hh3cMpNotifications=hh3cMpNotifications, hh3cMpBundleName=hh3cMpBundleName, hh3cMpMultilinkEntry=hh3cMpMultilinkEntry, hh3cMpInterleavedPkts=hh3cMpInterleavedPkts, hh3cMpCompliances=hh3cMpCompliances, hh3cMpMultilinkTable=hh3cMpMultilinkTable, hh3cMpMemberlinkDescr=hh3cMpMemberlinkDescr, hh3cMultilinkPPP=hh3cMultilinkPPP, hh3cMpBundledMemberCnt=hh3cMpBundledMemberCnt, hh3cMpObjects=hh3cMpObjects, hh3cMpUnassignedPkts=hh3cMpUnassignedPkts, hh3cMpMemberlinkSeqNumber=hh3cMpMemberlinkSeqNumber, hh3cMpMemberlinkEntry=hh3cMpMemberlinkEntry, hh3cMpCompliance=hh3cMpCompliance, hh3cMpSentSequence=hh3cMpSentSequence, hh3cMpMultilinkDescr=hh3cMpMultilinkDescr, hh3cMpLostFragments=hh3cMpLostFragments, hh3cMpMemberlinkIfIndex=hh3cMpMemberlinkIfIndex, hh3cMpConformance=hh3cMpConformance, hh3cMpRcvdSequence=hh3cMpRcvdSequence, hh3cMpMemberlinkMpStatus=hh3cMpMemberlinkMpStatus, PYSNMP_MODULE_ID=hh3cMultilinkPPP, hh3cMpGroups=hh3cMpGroups, hh3cMpInfoGroup=hh3cMpInfoGroup, hh3cMpMandatoryGroup=hh3cMpMandatoryGroup, hh3cMpBundledSlot=hh3cMpBundledSlot, hh3cMpReorderedPkts=hh3cMpReorderedPkts)
| 113.802817
| 1,175
| 0.757797
|
7952f4da8d0f59c070bdbc91205dac936d33fe41
| 489
|
py
|
Python
|
setup.py
|
shardul08/Code-Sleep-Python
|
b76e6eccca62f92d6064eaeeaadc385a6f2b39d8
|
[
"MIT"
] | 420
|
2017-05-13T13:43:46.000Z
|
2022-03-29T20:22:39.000Z
|
setup.py
|
shardul08/Code-Sleep-Python
|
b76e6eccca62f92d6064eaeeaadc385a6f2b39d8
|
[
"MIT"
] | 162
|
2017-10-04T15:48:10.000Z
|
2022-02-04T07:32:07.000Z
|
setup.py
|
shardul08/Code-Sleep-Python
|
b76e6eccca62f92d6064eaeeaadc385a6f2b39d8
|
[
"MIT"
] | 280
|
2017-10-04T16:20:04.000Z
|
2022-02-04T07:15:06.000Z
|
from setuptools import setup
setup(
name="code_sleep_python",
description="Launch some awesome python projects",
url="https://github.com/prateekiiest/Code-Sleep-Python",
author="Prateek Chanda",
author_email="prateekkol21@gmail.com",
license="MIT",
packages=['code_sleep_python'],
include_package_data=True,
version="1.0.0",
classifiers=[
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
ยฉ 2018 GitHub, Inc.
| 25.736842
| 60
| 0.660532
|
7952f50419b3ddda31f9908adb2efe1750212d2f
| 695
|
bzl
|
Python
|
source/bazel/deps/ubuntu_14_04_clang_10_sysroot/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 33
|
2019-05-30T07:43:32.000Z
|
2021-12-30T13:12:32.000Z
|
source/bazel/deps/ubuntu_14_04_clang_10_sysroot/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 371
|
2019-05-16T15:23:50.000Z
|
2021-09-04T15:45:27.000Z
|
source/bazel/deps/ubuntu_14_04_clang_10_sysroot/get.bzl
|
luxe/unilang
|
6c8a431bf61755f4f0534c6299bd13aaeba4b69e
|
[
"MIT"
] | 6
|
2019-08-22T17:37:36.000Z
|
2020-11-07T07:15:32.000Z
|
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def ubuntu1404Clang10Sysroot():
http_archive(
name="ubuntu_14_04_clang_10_sysroot" ,
build_file="//bazel/deps/ubuntu_14_04_clang_10_sysroot:build.BUILD" ,
sha256="6204f7998b543e7190ba55c7a0fe81d59afcaf8171e9dc34975fbf18bc9e4853" ,
strip_prefix="ubuntu_14_04_clang_10_sysroot-79690a1aefd7fd84e77e9bf785acb1dc82e55c4e" ,
urls = [
"https://github.com/Unilang/ubuntu_14_04_clang_10_sysroot/archive/79690a1aefd7fd84e77e9bf785acb1dc82e55c4e.tar.gz",
],
)
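# Example WORKSPACE usage (an illustrative sketch; it assumes this file is
# addressable at the package path referenced by build_file above):
#
#   load("//bazel/deps/ubuntu_14_04_clang_10_sysroot:get.bzl", "ubuntu1404Clang10Sysroot")
#   ubuntu1404Clang10Sysroot()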
| 40.882353
| 127
| 0.755396
|
7952f5cfcfcb30086bde61904447171d9e42d0cd
| 28,101
|
py
|
Python
|
northstar/averages.py
|
iosonofabio/semiknn
|
c7819a7ae850df1264f8d92f7bdc02f85afc21c2
|
[
"MIT"
] | null | null | null |
northstar/averages.py
|
iosonofabio/semiknn
|
c7819a7ae850df1264f8d92f7bdc02f85afc21c2
|
[
"MIT"
] | null | null | null |
northstar/averages.py
|
iosonofabio/semiknn
|
c7819a7ae850df1264f8d92f7bdc02f85afc21c2
|
[
"MIT"
] | null | null | null |
# vim: fdm=indent
# author: Fabio Zanini
# date: 17/06/19
# content: Atlas averages
__all__ = ['Averages']
import warnings
import numpy as np
import pandas as pd
from anndata import AnnData
import leidenalg
from .fetch_atlas import AtlasFetcher
from .cluster_with_annotations import ClusterWithAnnotations
class Averages(object):
'''Annotate new cell types using averages of an atlas'''
def __init__(
self,
atlas,
n_cells_per_type=None,
n_features_per_cell_type=30,
n_features_overdispersed=500,
features_additional=None,
n_pcs=20,
n_neighbors=10,
n_neighbors_out_of_atlas=5,
distance_metric='correlation',
threshold_neighborhood=0.8,
clustering_metric='cpm',
resolution_parameter=0.001,
normalize_counts=True,
join='keep_first',
):
'''Prepare the model for cell annotation
Args:
atlas (str, list of str, list of dict, dict, or AnnData): cell
atlas to use. Generally there are two kind of choices:
The first possibility selects the corresponding cell atlas or
atlases from northstar's online list. The names of currently
available dataset is here:
https://github.com/iosonofabio/atlas_averages/blob/master/table.tsv
(check the first column for atlas names). If a list of
str, multiple atlases will be fetched and combined. Only features
that are in all atlases will be kept. If you use this feature, be
careful to not mix atlases from different species. If a list of
dict, it merges atlases as above but you can specify what cell
types to fetch from each atlas. Each element of the list must be a
dict with two key-value pairs: 'atlas_name' is the atlas name, and
'cell_types' must be a list of cell types to retain. Example:
atlas=[{'atlas_name': 'Enge_2017', 'cell_types': ['alpha']}] would
load the atlas Enge_2017 and only retain alpha cells. You can also
use a dict to specify a single atlas and to retain only certain cell
types. The format is as above, e.g. to select only alpha cells
from Enge_2017 you can use:
atlas={'atlas_name': 'Enge_2017', 'cell_types': ['alpha']}.
The second possibility is to use a custom atlas (e.g. some
unpublished data). 'atlas' must be an AnnData object with cell
type averages ("cells") as rows and genes as columns and one cell
metadata column 'NumberOfCells' describing the number of cells
for each cell type. In other words:
adata.obs['NumberOfCells']
must exist, and:
adata.obs_names
must contain the known cell types.
n_cells_per_type (None or int): if None, use the number of cells
per type from the atlas. Else, fix it to this number for all types.
n_features_per_cell_type (int): number of features marking each
fixed column (atlas cell type). The argument 'features' takes
priority over this one.
n_features_overdispersed (int): number of unbiased, overdispersed
features to be picked from the new dataset. The argument
'features' takes priority over this one.
features_additional (list of str or None): additional features to
keep on top of automatic selection. The argument 'features' takes
priority over this one.
n_pcs (int): number of principal components to keep in the weighted
PCA.
n_neighbors (int): number of neighbors in the similarity graph.
n_neighbors_out_of_atlas (int): number of neighbors coming out of
the atlas nodes into the new dataset.
distance_metric (str): metric to use as distance. It should be a
metric accepted by scipy.spatial.distance.cdist.
threshold_neighborhood (float): do not consider distances larger than this as
neighbors
clustering_metric (str): 'cpm' (default, Cell Potts Model) or
'modularity'. Sets the type of partition used in the clustering
step.
resolution_parameter (float): number between 0 and 1 that sets
how easy it is for the clustering algorithm to make new clusters
normalize_counts (bool): whether to renormalize the counts at the
merging stage to make sure atlas and new data follow the same
normalization. Be careful if you turn this off.
join (str): must be 'keep_first', 'union', or 'intersection'. This
argument is used when sourcing multiple atlases and decides what
to do with features that are not present in all atlases.
'keep_first' keeps the features in the first atlas and pads the
other atlases with zeros, 'union' pads every atlas that is missing
a feature and 'intersection' only keep features that are in all
atlases.
'''
self.atlas = atlas
self.n_cells_per_type = n_cells_per_type
self.n_features_per_cell_type = n_features_per_cell_type
self.n_features_overdispersed = n_features_overdispersed
self.features_additional = features_additional
self.n_pcs = n_pcs
self.n_neighbors = n_neighbors
self.n_neighbors_out_of_atlas = n_neighbors_out_of_atlas
self.distance_metric = distance_metric
self.threshold_neighborhood = threshold_neighborhood
self.clustering_metric = clustering_metric
self.resolution_parameter = resolution_parameter
self.normalize_counts = normalize_counts
self.join = join
def fit(self, new_data):
'''Run with averages of the atlas
Args:
new_data (pandas.DataFrame or anndata.AnnData): the new data to be
clustered. If a dataframe, t must have features as rows and
cell names as columns (as in loom files). anndata uses the opposite
convention, so it must have cell names as rows (obs_names) and
features as columns (var_names) and this class will transpose it.
Returns:
None, but this instance of Averages acquired the property
`membership` containing the cluster memberships (cell types) of the
columns except the first n_fixed. The first n_fixed columns are
            assumed to have distinct memberships in the range [0, n_fixed - 1].
'''
self.new_data = new_data
self._check_init_arguments()
self.fetch_atlas_if_needed()
self.compute_feature_intersection()
self._check_feature_intersection()
self.prepare_feature_selection()
self.select_features()
self._check_feature_selection()
self.merge_atlas_newdata()
self.compute_pca()
self.compute_similarity_graph()
self.cluster_graph()
def fit_transform(self, new_data):
'''Run with averages of the atlas and return the cell types
Args:
new_data (pandas.DataFrame or anndata.AnnData): the new data to be
clustered. If a dataframe, t must have features as rows and
cell names as columns (as in loom files). anndata uses the opposite
convention, so it must have cell names as rows (obs_names) and
features as columns (var_names) and this class will transpose it.
Returns:
the cluster memberships (cell types) of the
columns except the first n_fixed. The first n_fixed columns are
            assumed to have distinct memberships in the range [0, n_fixed - 1].
'''
self.fit(new_data)
return self.membership
def _check_init_arguments(self):
# Check atlas
at = self.atlas
if isinstance(at, str):
pass
elif isinstance(at, list) or isinstance(at, tuple):
for elem in at:
if isinstance(elem, str):
pass
elif isinstance(elem, dict):
if 'atlas_name' not in elem:
raise ValueError('List of atlases: format incorrect')
if 'cell_types' not in elem:
raise ValueError('List of atlases: format incorrect')
else:
raise ValueError('List of atlases: format incorrect')
elif isinstance(at, dict) and ('atlas_name' in at) and \
('cell_types' in at):
pass
elif isinstance(at, AnnData):
if 'NumberOfCells' not in at.obs:
raise AttributeError(
'atlas must have a "NumberOfCells" obs column')
else:
raise ValueError('Atlas not formatted correctly')
# Convert new data to anndata if needed
nd = self.new_data
if isinstance(nd, AnnData):
pass
elif isinstance(nd, pd.DataFrame):
# AnnData uses features as columns, so transpose and convert
# (the assumption is that in the DataFrame convention, rows are
# features)
nd = AnnData(
X=nd.values.T,
obs={'CellID': nd.columns.values},
var={'GeneName': nd.index.values},
)
nd.obs_names = nd.obs['CellID']
nd.var_names = nd.var['GeneName']
self.new_data = nd
else:
raise ValueError(
'New data must be an AnnData object or pd.DataFrame',
)
# New data could be too small to do PCA
n_newcells, n_newgenes = self.new_data.shape
if n_newgenes < self.n_pcs:
warnings.warn(
                ('The number of features in the new data is less than ' +
'the number of PCs, so northstar might give inaccurate ' +
'results'))
if n_newcells < self.n_pcs:
warnings.warn(
                ('The number of cells in the new data is less than ' +
'the number of PCs, so northstar might give inaccurate ' +
'results'))
if min(n_newgenes, n_newcells) < self.n_pcs:
warnings.warn('Reducing the number of PCs to {:}'.format(
min(n_newgenes, n_newcells)))
self.n_pcs = min(n_newgenes, n_newcells)
# New data could be too small for knn
if n_newcells < self.n_neighbors + 1:
warnings.warn(
('The number of cells in the new data is less than the ' +
'number of neighbors requested for the knn: reducing the ' +
'number of graph neighbors to {:}'.format(
max(1, n_newcells - 1)),
))
self.n_neighbors = max(1, n_newcells - 1)
nf1 = self.n_features_per_cell_type
nf2 = self.n_features_overdispersed
nf3 = self.features_additional
if not isinstance(nf1, int):
raise ValueError('n_features_per_cell_type must be an int >= 0')
        if not isinstance(nf2, int):
raise ValueError('n_features_overdispersed must be an int >= 0')
        if (nf1 < 1) and (nf2 < 1) and (not nf3):
raise ValueError('No features selected')
def _check_feature_intersection(self):
L = len(self.features_ovl)
if L == 0:
raise ValueError(
('No overlapping features in atlas and new data, are gene ' +
'names correct for this species?'))
if L < 50:
warnings.warn(
('Only {:} overlapping features found in atlas and new ' +
                 'data').format(L))
def _check_feature_selection(self):
L = len(self.features)
if L == 0:
raise ValueError(
                ('No features survived selection, check northstar parameters'))
if L < self.n_pcs:
warnings.warn(
('Only {0} features selected, reducing PCA to {0} components'.format(L)))
self.n_pcs = L
def fetch_atlas_if_needed(self):
'''Fetch atlas(es) if needed'''
at = self.atlas
if isinstance(at, str):
self.atlas = AtlasFetcher().fetch_atlas(
at,
kind='average',
)
elif isinstance(self.atlas, list) or isinstance(self.atlas, tuple):
self.atlas = AtlasFetcher().fetch_multiple_atlases(
at,
kind='average',
join=self.join,
)
elif isinstance(at, dict) and ('atlas_name' in at) and \
('cell_types' in at):
self.atlas = AtlasFetcher().fetch_atlas(
at['atlas_name'],
kind='average',
cell_types=at['cell_types'],
)
def compute_feature_intersection(self):
'''Calculate the intersection of features between atlas and new data'''
# Intersect features
self.features_atlas = self.atlas.var_names.values
self.features_newdata = self.new_data.var_names.values
self.features_ovl = np.intersect1d(
self.features_atlas,
self.features_newdata,
)
def prepare_feature_selection(self):
# Cell names and types
self.cell_types_atlas = self.atlas.obs_names
self.cell_names_atlas = self.atlas.obs_names
self.cell_names_newdata = self.new_data.obs_names
ctypes_ext = []
cnames_ext = []
if self.n_cells_per_type is None:
ncells_per_ct = self.atlas.obs['NumberOfCells'].astype(np.int64)
else:
ncells_per_ct = [self.n_cells_per_type] * self.atlas.shape[0]
for i, ni in enumerate(ncells_per_ct):
for ii in range(ni):
ctypes_ext.append(self.cell_types_atlas[i])
cnames_ext.append(self.cell_types_atlas[i]+'_{:}'.format(ii+1))
self.cell_types_atlas_extended = ctypes_ext
self.cell_names_atlas_extended = cnames_ext
# Numbers
self.n_atlas = self.atlas.shape[0]
self.n_newdata = self.new_data.shape[0]
self.n_total = self.n_atlas + self.n_newdata
self.n_atlas_extended = len(self.cell_names_atlas_extended)
self.n_total_extended = self.n_atlas_extended + self.n_newdata
# Cell numbers
self.sizes = np.ones(self.n_total, np.float32)
if self.n_cells_per_type is not None:
self.sizes[:self.n_atlas] *= self.n_cells_per_type
else:
self.sizes[:self.n_atlas] = self.atlas.obs['NumberOfCells'].astype(
np.float32)
def select_features(self):
'''Select features among the overlap of atlas and new data
Returns:
ndarray of feature names.
'''
features_atlas = self.features_atlas
features_newdata = self.features_newdata
features_ovl = list(self.features_ovl)
features_add = self.features_additional
n_atlas = self.n_atlas
nf1 = self.n_features_per_cell_type
nf2 = self.n_features_overdispersed
features = set()
# Atlas markers
if (nf1 > 0) and (n_atlas > 1):
matrix = self.atlas.X
for icol in range(n_atlas):
ge1 = matrix[icol]
ge2 = (matrix.sum(axis=0) - ge1) / (n_atlas - 1)
fold_change = np.log2(ge1 + 0.1) - np.log2(ge2 + 0.1)
tmp = np.argsort(fold_change)[::-1]
ind_markers_atlas = []
for i in tmp:
if features_atlas[i] in features_ovl:
ind_markers_atlas.append(i)
if len(ind_markers_atlas) == nf1:
break
# Add atlas markers
features |= set(features_atlas[ind_markers_atlas])
# Overdispersed features from new data
if nf2 > 0:
if nf2 >= len(features_ovl):
features |= set(features_ovl)
else:
matrix = self.new_data.X
nd_mean = matrix.mean(axis=0)
nd_var = matrix.var(axis=0)
fano = (nd_var + 1e-10) / (nd_mean + 1e-10)
tmp = np.argsort(fano)[::-1]
ind_ovd_newdata = []
for i in tmp:
if features_newdata[i] in features_ovl:
ind_ovd_newdata.append(i)
if len(ind_ovd_newdata) == nf2:
break
# Add overdispersed features
features |= set(features_newdata[ind_ovd_newdata])
# Additional features
if features_add is not None:
features |= (set(features_add) & set(features_ovl))
self.features = np.array(list(features))
def merge_atlas_newdata(self):
'''Merge atlas data and the new data after feature selection
        NOTE: if self.normalize_counts is True, the merged count matrix is normalized
by 1 million total counts.
'''
features = self.features
L = len(features)
N1 = self.n_atlas
N = self.n_total
# This is the largest memory footprint of northstar
matrix = np.empty((N, L), dtype=np.float32)
# Find the feature indices for atlas
ind_features_atlas = pd.Series(
np.arange(len(self.features_atlas)),
index=self.features_atlas,
).loc[features].values
matrix[:N1] = self.atlas.X[:, ind_features_atlas]
# Find the feature indices for new data
ind_features_newdata = pd.Series(
np.arange(len(self.features_newdata)),
index=self.features_newdata,
).loc[features].values
matrix[N1:] = self.new_data.X[:, ind_features_newdata]
# The normalization function also sets pseudocounts
if self.normalize_counts:
matrix = 1e6 * (matrix.T / (matrix.sum(axis=1) + 0.1)).T
self.matrix = matrix
def compute_pca(self):
        '''Compute a weighted PCA of the merged atlas and new data
        Returns:
            None, but self.pca_data is set with the principal components of
            the merged data, their expansion to one row per atlas cell, and
            the cell types of the expanded rows.
The algorithm proceeds as follows:
0. take the log of the counts
1. subtract the mean along the observation axis (N) and divide by the
standard dev along the same axis
2. calculate the weighted covariance matrix
3. calculate normal PCA on that matrix
'''
matrix = self.matrix
sizes = self.sizes
n_atlas = self.n_atlas
n_pcs = self.n_pcs
# Test input arguments
N, L = matrix.shape
if len(sizes) != N:
raise ValueError('Matrix and sizes dimensions do not match')
if n_atlas >= N:
raise ValueError('n_fixed larger or equal matrix number of columns')
if n_pcs > min(L, N):
raise ValueError('n_pcs greater than smaller matrix dimension, those eigenvalues are zero')
# 0. take log
matrix = np.log10(matrix + 0.1)
# 1. standardize
weights = 1.0 * sizes / sizes.sum()
mean_w = weights @ matrix
var_w = weights @ ((matrix - mean_w)**2)
std_w = np.sqrt(var_w)
Xnorm = (matrix - mean_w) / std_w
# take care of non-varying components
Xnorm[np.isnan(Xnorm)] = 0
# 2. weighted covariance
# This matrix has size L x L. Typically L ~ 500 << N, so the covariance
# L x L is much smaller than N x N
cov_w = np.cov(Xnorm.T, fweights=sizes)
# 3. PCA
# rvects columns are the right singular vectors
evals, evects = np.linalg.eig(cov_w)
# sort by decreasing eigenvalue (explained variance) and truncate
ind = evals.argsort()[::-1][:n_pcs]
# NOTE: we do not actually need the eigenvalues anymore
lvects = np.real(evects.T[ind])
# calculate right singular vectors given the left singular vectors
# NOTE: this is true even if we truncated the PCA via n_pcs << L
# rvects columns are the right singular vectors
rvects = (lvects @ Xnorm.T).T
# 4. expand embedded vectors to account for sizes
# NOTE: this could be done by carefully tracking multiplicities
# in the neighborhood calculation, but it's not worth it: the
# amount of overhead memory used here is small because only a few
# principal components are used
Ne = int(np.sum(sizes))
rvectse = np.empty((Ne, n_pcs), np.float32)
cell_type_expanded = []
i = 0
n_fixed_expanded = 0
for isi, size in enumerate(sizes):
if isi < n_atlas:
cte = self.cell_types_atlas[isi]
n_fixed_expanded += int(size)
else:
cte = ''
cell_type_expanded.extend([cte] * int(size))
for j in range(int(size)):
rvectse[i] = rvects[isi]
i += 1
cell_type_expanded = np.array(cell_type_expanded)
self.pca_data = {
'pcs': rvects,
'pcs_expanded': rvectse,
'cell_type': cell_type_expanded,
'n_atlas': n_fixed_expanded,
}
def compute_similarity_graph(self):
'''Compute similarity graph from the extended PC space
1. calculate the distance matrix by expanding atlas columns
2. calculate neighborhoods
3. construct similarity graph from neighborhood lists
'''
from scipy.spatial.distance import cdist
import igraph as ig
sizes = self.sizes
n_atlas = self.n_atlas
k = self.n_neighbors
kout = self.n_neighbors_out_of_atlas
metric = self.distance_metric
threshold = self.threshold_neighborhood
rvects = self.pca_data['pcs']
rvectse = self.pca_data['pcs_expanded']
Ne = len(rvectse)
# 5. calculate distance matrix and neighbors
# we do it row by row, it costs a bit in terms of runtime but
# has huge savings in terms of memory since we don't need the square
# distance matrix
n_fixede = int(np.sum(sizes[:n_atlas]))
neighbors = []
# Treat things within and outside of the atlas differently
# Atlas neighbors
i = 0
for isi in range(n_atlas):
# Find the nearest neighbors in the new data
drow = cdist(rvects[[isi]], rvects[n_atlas:], metric=metric)[0]
ind = np.argpartition(-drow, -kout)[-kout:]
# Discard the ones beyond threshold
ind = ind[drow[ind] <= threshold]
# Indices are not sorted within ind, so we need to sort them
# in descending order by distance (more efficient in the next step)
ind = ind[np.argsort(drow[ind])]
for ii in range(int(sizes[isi])):
# Internal edges
neis = list(range(i, i+int(sizes[isi])))
# Remove self
neis.remove(i+ii)
# External edges
neis.extend(list(ind + n_fixede))
neighbors.append(neis)
i += int(sizes[isi])
# New data neighbors
for i in range(n_fixede, Ne):
drow = cdist(rvectse[[i]], rvectse, metric=metric)[0]
# set distance to self as a high number, to avoid self
drow[i] = drow.max() + 1
# Find largest k negative distances (k neighbors)
ind = np.argpartition(-drow, -k)[-k:]
# Discard the ones beyond threshold
ind = ind[drow[ind] <= threshold]
# Indices are not sorted within ind, so we need to sort them
# in descending order by distance (more efficient in the next step)
ind = ind[np.argsort(drow[ind])]
neighbors.append(list(ind))
self.neighbors = neighbors
# Construct graph from the lists of neighbors
edges_d = set()
for i, neis in enumerate(neighbors):
for n in neis:
edges_d.add(frozenset((i, n)))
edges = [tuple(e) for e in edges_d]
self.graph = ig.Graph(n=Ne, edges=edges, directed=False)
def cluster_graph(self):
'''Compute communities from a matrix with fixed nodes
Returns:
None, but Averages.membership is set as an array with
size N - n_fixed with the atlas cell types of all cells from the
new dataset.
'''
clu = ClusterWithAnnotations(
self.graph,
self.cell_types_atlas_extended,
resolution_parameter=self.resolution_parameter,
metric=self.clustering_metric,
)
self.membership = clu.fit_transform()
def estimate_closest_atlas_cell_type(self):
'''Estimate atlas cell type closest to each new cluster'''
from scipy.spatial.distance import cdist
# Use PC space
rvectse = self.pca_data['pcs']
n_atlas = self.pca_data['n_atlas']
cell_types = self.pca_data['cell_type'][:n_atlas]
L = rvectse.shape[1]
# Extract atlas averages in PC space
cell_types_atlas = np.unique(cell_types)
rvectse_atlas = rvectse[:n_atlas]
N = len(cell_types_atlas)
avg_atl = np.empty((L, N), np.float32)
for i, ct in enumerate(cell_types_atlas):
# They are already replicates, take the first copy
avg_atl[:, i] = rvectse_atlas[cell_types == ct][0]
# Calculate averages for the new clusters
cell_types_new = list(set(self.membership) - set(cell_types_atlas))
rvectse_new = rvectse[n_atlas:]
N = len(cell_types_new)
avg_new = np.empty((L, N), np.float32)
for i, ct in enumerate(cell_types_new):
avg_new[:, i] = rvectse_new[self.membership == ct].mean(axis=0)
# Calculate distance matrix between new and old in the high-dimensional
# feature-selected space
dmat = cdist(avg_new.T, avg_atl.T, metric='euclidean')
# Pick the closest
closest = np.argmin(dmat, axis=1)
# Give it actual names
closest = pd.Series(cell_types[closest], index=cell_types_new)
return closest
def embed(self, method='tsne', **kwargs):
X = self.pca_data['pcs_expanded']
index = list(self.cell_names_atlas_extended) + list(self.cell_names_newdata)
if method == 'pca':
emb = X[:, :2]
elif method == 'tsne':
from sklearn.manifold import TSNE
kwargs['perplexity'] = kwargs.get('perplexity', 30)
model = TSNE(
n_components=2,
**kwargs,
)
emb = model.fit_transform(X)
elif method == 'umap':
from umap import UMAP
model = UMAP(
n_components=2,
**kwargs,
)
emb = model.fit_transform(X)
res = pd.DataFrame(
emb,
index=index,
columns=['Dimension 1', 'Dimension 2'],
)
res['CellType'] = list(self.cell_types_atlas_extended) + list(self.membership)
res['Dataset'] = (['Atlas'] * self.n_atlas_extended) + (['New'] * self.n_newdata)
return res
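# Example usage (a minimal sketch based on the docstrings above; `new_dataset`
# is a hypothetical AnnData object with cells as obs_names and genes as
# var_names, and fetching the named atlas requires network access):
#
#   model = Averages(atlas='Enge_2017', n_features_overdispersed=500)
#   cell_types = model.fit_transform(new_dataset)
#   embedding = model.embed(method='umap')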
| 38.813536
| 103
| 0.585388
|
7952f6c31b36e611f26f0127b69a16a8c23ba42c
| 750
|
py
|
Python
|
benchmark_apps/elmerfem/umfpack/src/amd/deps.py
|
readex-eu/readex-apps
|
38493b11806c306f4e8f1b7b2d97764b45fac8e2
|
[
"BSD-3-Clause"
] | 2
|
2020-11-25T13:10:11.000Z
|
2021-03-15T20:26:35.000Z
|
elmerfem/umfpack/src/amd/deps.py
|
jcmcmurry/pipelining
|
8fface1a501b5050f58e7b902aacdcdde68e9648
|
[
"MIT"
] | null | null | null |
elmerfem/umfpack/src/amd/deps.py
|
jcmcmurry/pipelining
|
8fface1a501b5050f58e7b902aacdcdde68e9648
|
[
"MIT"
] | 2
|
2021-08-02T23:23:40.000Z
|
2022-02-26T12:39:30.000Z
|
#!/usr/bin/python
#
# create preprocessing instructions
# from sources.lst
#
import re
sources="amd_aat amd_1 amd_2 amd_dump amd_postorder amd_post_tree amd_defaults amd_order amd_control amd_info amd_valid amd_preprocess"
amd_sources=[]
for f in re.split(" ",sources):
nf=re.sub('amd_','amd_i_',f) + ".c"
amd_sources.append(nf)
print nf + ":\n\t" + "$(CPP) $(INCLUDES) -DINT " + f + ".c > " + nf
for f in re.split(" ",sources):
nf=re.sub('amd_','amd_l_',f) + ".c"
amd_sources.append(nf)
print nf + ":\n\t" + "$(CPP) $(INCLUDES) -DLONG " + f + ".c > " + nf
print
print "AMD_CPP_SOURCES = \\"
for f in amd_sources:
if f == amd_sources[len(amd_sources)-1]:
print "\t"+f
else:
print "\t"+f + " \\"
| 25.862069
| 135
| 0.612
|
7952f7ba9aac09e27d901d3b907dbba68d8f6bd6
| 1,665
|
py
|
Python
|
nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py
|
hamjam/NeMo
|
b3484d32e1317666151f931bfa39867d88ed8658
|
[
"Apache-2.0"
] | 4,145
|
2019-09-13T08:29:43.000Z
|
2022-03-31T18:31:44.000Z
|
nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py
|
hamjam/NeMo
|
b3484d32e1317666151f931bfa39867d88ed8658
|
[
"Apache-2.0"
] | 2,031
|
2019-09-17T16:51:39.000Z
|
2022-03-31T23:52:41.000Z
|
nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py
|
hamjam/NeMo
|
b3484d32e1317666151f931bfa39867d88ed8658
|
[
"Apache-2.0"
] | 1,041
|
2019-09-13T10:08:21.000Z
|
2022-03-30T06:37:38.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
try:
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic, e.g.
"ัะน ะฑะธ ัะพะฑะฐะบะฐ ัะฝ ะดะธ ัะพัะบะฐ ัั" -> electronic { username: "ab@nd.ru" }
Args:
tn_electronic: Text normalization Electronic graph
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, tn_electronic, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
graph = tn_electronic.final_graph
graph = graph.invert().optimize()
graph = pynutil.insert("username: \"") + graph + pynutil.insert("\"")
graph = self.add_tokens(graph)
self.fst = graph.optimize()
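A hedged usage sketch: the constructor above only reads the final_graph attribute of tn_electronic, so any object exposing a pynini FST under that name works. The stand-in below is hypothetical and is not the NeMo wiring that normally supplies this graph:
import pynini
from types import SimpleNamespace

# hypothetical stand-in for a Russian TN electronic graph (the spoken form is illustrative)
tn_stub = SimpleNamespace(final_graph=pynini.cross("ab@nd.ru", "эй би собака эн ди точка ру"))

itn_electronic = ElectronicFst(tn_electronic=tn_stub)
fst = itn_electronic.fst  # inverted graph wrapped as electronic { username: "..." }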
| 37
| 94
| 0.718318
|
7952f8e616853ea1228bb4e68a099563a531d6be
| 2,704
|
py
|
Python
|
dask/array/tests/test_svg.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 9,684
|
2016-02-12T16:09:21.000Z
|
2022-03-31T19:38:26.000Z
|
dask/array/tests/test_svg.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 7,059
|
2016-02-11T18:32:45.000Z
|
2022-03-31T22:12:40.000Z
|
dask/array/tests/test_svg.py
|
Juanlu001/dask
|
ba29ba377ae71e5a90fa5ef5198c7d317b45c06a
|
[
"BSD-3-Clause"
] | 1,794
|
2016-02-13T23:28:39.000Z
|
2022-03-30T14:33:19.000Z
|
import xml.etree.ElementTree
import pytest
import dask.array as da
from dask.array.svg import draw_sizes
def parses(text):
cleaned = text.replace("→", "") # xml doesn't like the rightarrow character
assert xml.etree.ElementTree.fromstring(cleaned) is not None # parses cleanly
def test_basic():
parses(da.ones(10).to_svg())
parses(da.ones((10, 10)).to_svg())
parses(da.ones((10, 10, 10)).to_svg())
parses(da.ones((10, 10, 10, 10)).to_svg())
parses(da.ones((10, 10, 10, 10, 10)).to_svg())
parses(da.ones((10, 10, 10, 10, 10, 10)).to_svg())
parses(da.ones((10, 10, 10, 10, 10, 10, 10)).to_svg())
def test_repr_html():
pytest.importorskip("jinja2")
assert da.ones([])._repr_html_()
assert da.ones(10)[:0]._repr_html_()
assert da.ones(10)._repr_html_()
assert da.ones((10, 10))._repr_html_()
assert da.ones((10, 10, 10))._repr_html_()
assert da.ones((10, 10, 10, 10))._repr_html_()
def test_errors():
# empty arrays
with pytest.raises(NotImplementedError) as excpt:
da.ones([]).to_svg()
assert "0 dimensions" in str(excpt.value)
# Scalars
with pytest.raises(NotImplementedError) as excpt:
da.asarray(1).to_svg()
assert "0 dimensions" in str(excpt.value)
# 0-length dims arrays
with pytest.raises(NotImplementedError) as excpt:
da.ones(10)[:0].to_svg()
assert "0-length dimensions" in str(excpt.value)
# unknown chunk sizes
with pytest.raises(NotImplementedError) as excpt:
x = da.ones(10)
x = x[x > 5]
x.to_svg()
assert "unknown chunk sizes" in str(excpt.value)
def test_repr_html_size_units():
pytest.importorskip("jinja2")
x = da.ones((10000, 5000))
x = da.ones((3000, 10000), chunks=(1000, 1000))
text = x._repr_html_()
assert "MB" in text or "MiB" in text
assert str(x.shape) in text
assert str(x.dtype) in text
parses(text)
x = da.ones((3000, 10000, 50), chunks=(1000, 1000, 10))
parses(x._repr_html_())
def test_draw_sizes():
assert draw_sizes((10, 10), size=100) == (100, 100) # respect symmetry
assert draw_sizes((10, 10), size=200) == (200, 200) # respect size keyword
assert draw_sizes((10, 5), size=100) == (100, 50) # respect small ratios
a, b, c = draw_sizes((1000, 100, 10))
assert a > b
assert b > c
assert a < b * 5
assert b < c * 5
def test_too_many_lines_fills_sides_darker():
data = da.ones((16000, 2400, 3600), chunks=(1, 2400, 3600))
text = data.to_svg()
assert "8B4903" in text
assert text.count("\n") < 300
def test_3d():
text = da.ones((10, 10, 10, 10, 10)).to_svg()
assert text.count("<svg") == 1
| 28.166667
| 82
| 0.629438
|
7952f9949f07290893265d144331fc1b932ae0fa
| 197
|
py
|
Python
|
membership/admin.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | null | null | null |
membership/admin.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | 8
|
2021-03-19T10:14:39.000Z
|
2022-03-12T00:24:41.000Z
|
membership/admin.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Membership, UserMembership, Subcription
admin.site.register(Membership)
admin.site.register(UserMembership)
admin.site.register(Subcription)
| 21.888889
| 59
| 0.837563
|
7952f99a1868e1fbb8a2550993cea9165a5f32aa
| 10,530
|
py
|
Python
|
src/Methods/utils/Crqa.py
|
syncpy/SyncPy
|
70f990971a4b4215549559134812c7469c87c88f
|
[
"CECILL-B"
] | 20
|
2015-11-27T10:08:57.000Z
|
2021-11-17T22:19:36.000Z
|
src/Methods/utils/Crqa.py
|
syncpy/SyncPy
|
70f990971a4b4215549559134812c7469c87c88f
|
[
"CECILL-B"
] | 3
|
2015-11-13T13:08:04.000Z
|
2022-02-03T15:42:40.000Z
|
src/Methods/utils/Crqa.py
|
syncpy/SyncPy
|
70f990971a4b4215549559134812c7469c87c88f
|
[
"CECILL-B"
] | 14
|
2015-12-17T09:25:43.000Z
|
2021-02-26T14:37:48.000Z
|
### This file is a part of the Syncpy library.
### Copyright 2015, ISIR / Universite Pierre et Marie Curie (UPMC)
### Main contributor(s): Giovanna Varni, Marie Avril,
### syncpy@isir.upmc.fr
###
### This software is a computer program whose for investigating
### synchrony in a fast and exhaustive way.
###
### This software is governed by the CeCILL-B license under French law
### and abiding by the rules of distribution of free software. You
### can use, modify and/ or redistribute the software under the terms
### of the CeCILL-B license as circulated by CEA, CNRS and INRIA at the
### following URL "http://www.cecill.info".
### As a counterpart to the access to the source code and rights to
### copy, modify and redistribute granted by the license, users are
### provided only with a limited warranty and the software's author,
### the holder of the economic rights, and the successive licensors
### have only limited liability.
###
### In this respect, the user's attention is drawn to the risks
### associated with loading, using, modifying and/or developing or
### reproducing the software by the user in light of its specific
### status of free software, that may mean that it is complicated to
### manipulate, and that also therefore means that it is reserved for
### developers and experienced professionals having in-depth computer
### knowledge. Users are therefore encouraged to load and test the
### software's suitability as regards their requirements in conditions
### enabling the security of their systems and/or data to be ensured
### and, more generally, to use and operate it in the same conditions
### as regards security.
###
### The fact that you are presently reading this means that you have
### had knowledge of the CeCILL-B license and that you accept its terms.
"""
.. moduleauthor:: Giovanna Varni
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
import CrossRecurrencePlot
def Crqa(x,y,m,t,e,distance,standardization,window,window_size,step,lmin,thw):
"""
It computes the following (cross)recurrence measures from the (cross)recurrence plot of two uni/multi-variate signals x and y
(in pandas DataFrame format): Recurrence Rate (RR), Determinism (DET), Average Diagonal Line Length (L), Maximum Diagonal Line Length (L_max),
Entropy (ENT).
:param x:
first input signal
:type x: pd.DataFrame
:param y:
second input signal
:type y: pd.DataFrame
:param m:
embedding dimension
:type m: int
:param t:
embedding delay
:type t: int
:param e:
threshold (radius) for recurrence, referred to as eps
:type e: float
:param distance:
It specifies which distance method is used. It can assume the following values:\n
1. 'euclidean';
2. 'maximum';
3. 'manhattan'
:type distance: str
:param standardization:
if True, data are normalized to zero mean and unit variance
:type standardization: bool
:param window:
if True, the measures are computed over sliding windows of the signals
:type window: bool
:param window_size:
length of each sliding window, in samples
:type window_size: int
:param step:
shift between two consecutive windows, in samples
:type step: int
:param lmin:
minimum length of the diagonal lines taken into account
:type lmin: int
:param thw:
Theiler window: recurrence points on the diagonal at offset thw (the line of identity for thw = 1) are excluded from the Recurrence Rate
:type thw: int
"""
' Raise error if parameters are not in the correct type '
if not(isinstance(x, pd.DataFrame)) : raise TypeError("Requires x to be a pd.DataFrame")
if not(isinstance(y, pd.DataFrame)) : raise TypeError("Requires y to be a pd.DataFrame")
if not(isinstance(m, int)) : raise TypeError("Requires m to be an integer")
if not(isinstance(t, int)) : raise TypeError("Requires t to be an integer")
if not(isinstance(e, float)): raise TypeError("requires eps to be a float")
if not(isinstance(distance, str)) : raise TypeError("Requires distance to be a string")
if not(isinstance(standardization, bool)) : raise TypeError("Requires standardization to be a bool")
if not(isinstance(window, bool)) : raise TypeError("Requires window to be an boolean")
if not(isinstance(window_size, int)) : raise TypeError("Requires window_size to be an integer")
if not(isinstance(step, int)) : raise TypeError("Requires step to be an integer")
if not(isinstance(lmin, int)) : raise TypeError("Requires lmin to be an integer")
if not(isinstance(thw, int)) : raise TypeError("Requires thw to be an integer")
' Raise error if parameters do not respect input rules '
if m <= 0 : raise ValueError("Requires m to be positive and greater than 0")
if t <= 0 : raise ValueError("Requires t to be positive and greater from 0")
if e <0: raise ValueError("Requires eps to be positive")
if distance != 'euclidean' and distance != 'maximum' and distance !='manhattan': raise ValueError("Requires a valid way to compute distance")
if window_size<= 0 or window_size>x.shape[0]: raise ValueError("Requires window_size to be positive and greater than 0 and lesser than the length of the input signals")
if step <= 0 or step > x.shape[0]/3.0: raise ValueError("Requires window to be positive and greater than 0 and lesser equal to one third of the length of the signals")
if lmin <=0 or lmin > x.shape[0]: raise ValueError("Requires lmin to be positive and greater than 0 and lesser than the length of the input signal")
if thw < 0 or thw > x.shape[0]: raise ValueError("Requires thw to be positive and greater than 0 and lesser than the length of the input signals")
'Error if x and y have not the same size'
if x.shape[0]!=y.shape[0] :
raise ValueError("The two signals have different length")
plot=False
RR_w=np.array([])
DET_w=np.array([])
L_w=np.array([])
L_w_max=np.array([])
Entr_w=np.array([])
pos = 0
result=dict()
if not window:
c=CrossRecurrencePlot.CrossRecurrencePlot(x,y,m,t,e,distance,standardization,plot)
crp_m=1-c['crp'].copy()
if (crp_m.shape[0]!=crp_m.shape[1]):
thw=0
hist_P=np.zeros([1,crp_m.shape[0]])[0]
RR_w=np.append(RR_w, RR(crp_m,thw))
DET_w=np.append(DET_w, DET(crp_m,hist_P,RR_w,lmin))
L_w=np.append(L_w, L(crp_m,hist_P,lmin))
L_w_max=np.append(L_w_max, L_max(crp_m,hist_P,lmin))
Entr_w=np.append(Entr_w,Entr(crp_m,hist_P,lmin))
result['RR']= RR_w
result['DET']= DET_w
result['L']= L_w
result['L_max']= L_w_max
result['ENT']=Entr_w
else:
if window_size < 5+(m-1)*t:
window_size=5+(m-1)*t
while((pos+window_size)<x.shape[0]):
end = pos+window_size-1
windowed_x=x[pos:end].reset_index(drop=True)
windowed_y=y[pos:end].reset_index(drop=True)
hist_P=np.zeros([1,window_size])[0]
c_wind=CrossRecurrencePlot.CrossRecurrencePlot(windowed_x,windowed_y,m,t,e,distance,standardization,plot)
crp_m_wind=1-c_wind['crp'].copy()
RR_w=np.append(RR_w, RR(crp_m_wind,thw))
DET_w=np.append(DET_w, DET(crp_m_wind,hist_P,RR_w,lmin))
L_w=np.append(L_w, L(crp_m_wind,hist_P,lmin))
L_w_max=np.append(L_w_max, L_max(crp_m_wind,hist_P,lmin))
Entr_w=np.append(Entr_w,Entr(crp_m_wind,hist_P,lmin))
result['RR']= RR_w
result['DET']= DET_w
result['L']= L_w
result['L_max']= L_w_max
result['ENT']=Entr_w
pos += step
return result
#crqa measures
def RR(crp_matrix,thw):
"""
It computes the Recurrence Rate (RR)
"""
if crp_matrix.shape[0] == 0 : raise ValueError("Error : crp_matrix signal 0 is empty")
if crp_matrix.shape[1] == 0 : raise ValueError("Error : crp_matrix signal 1 is empty")
if (thw==0) or (thw==1):
rr=(1.0/(crp_matrix.shape[0]*crp_matrix.shape[1]))*(np.count_nonzero(crp_matrix)-thw*np.trace(crp_matrix,offset=thw))
else:
rr=(1.0/(crp_matrix.shape[0]*crp_matrix.shape[1]))*(np.count_nonzero(crp_matrix)-2*np.trace(crp_matrix,offset=thw))
return rr
def DET(crp_matrix,hist_P,rr,lmin):
"""
It computes the Determinism (DET)
"""
if np.any(rr == 0) :
raise ValueError("DET cannot computed, a division for zero occurred")
for offs in range(-(crp_matrix.shape[0]-1),crp_matrix.shape[0],1):
diag_line=np.diagonal(crp_matrix,offset=offs)
length_diag_line=np.array(length_ones_seq(diag_line))
if (not length_diag_line.size) or (length_ones_seq(length_diag_line)<lmin).all():
continue
indices_diag_line=np.hstack(np.where(length_diag_line >=lmin))
for i in range(0,indices_diag_line.size):
hist_P[length_diag_line[indices_diag_line[i]]-1]=hist_P[length_diag_line[indices_diag_line[i]]-1]+1
det=1.0*(sum(np.arange(lmin,crp_matrix.shape[0])*hist_P[lmin:crp_matrix.shape[0]]))/(rr*(crp_matrix.shape[0]*crp_matrix.shape[1]))
if det>1:
det=1.0
return det
def L(crp_matrix,hist_P,lmin):
"""
It computes the Average Diagonal Line Length (L)
"""
if sum(hist_P[lmin-1:crp_matrix.shape[0]])==0 :
raise ValueError("L cannot computed, a division for zero occurred")
l_avg=1.0*(np.sum(np.arange(lmin,crp_matrix.shape[0]+1)*hist_P[lmin-1:crp_matrix.shape[0]]))/sum(hist_P[lmin-1:crp_matrix.shape[0]])
return l_avg
def L_max(crp_matrix,hist_P,lmin):
"""
It computes the Maximum Diagonal Line Length (L)
"""
l_max=np.max(np.where(hist_P!=0))+1
return l_max
def Entr(crp_matrix,hist_P,lmin):
"""
It computes the Entropy (ENTR)
"""
if np.sum(hist_P[lmin-1:crp_matrix.shape[0]])==0 :
raise ValueError("ENTR cannot computed, a division for zero occurred")
hist_P_norm=1.0*hist_P[lmin-1:crp_matrix.shape[0]]/np.sum(hist_P[lmin-1:crp_matrix.shape[0]])
hist_P_norm_def=hist_P_norm[np.nonzero(hist_P_norm)]
entr=-np.sum(hist_P_norm_def*np.log(hist_P_norm_def))
return entr
def length_ones_seq(diag_line):
"""
It computes the length of a sequence of ones
"""
return np.array([sum(g) for b, g in itertools.groupby(diag_line) if b])
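A hedged usage sketch, assuming the sibling CrossRecurrencePlot module from SyncPy is importable (the file uses a Python 2 style implicit relative import) and using illustrative parameter values only:
import numpy as np
import pandas as pd

t_axis = np.linspace(0, 4 * np.pi, 200)
x = pd.DataFrame(np.sin(t_axis))          # first signal
y = pd.DataFrame(np.cos(t_axis))          # second signal

res = Crqa(x, y, m=2, t=1, e=0.5, distance='euclidean', standardization=True,
           window=False, window_size=50, step=10, lmin=2, thw=0)
print(res['RR'], res['DET'], res['L'], res['L_max'], res['ENT'])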
| 36.310345
| 172
| 0.653086
|
7952f9fcef76496c3d8f5d171b6746ea451f785c
| 4,949
|
py
|
Python
|
gr_utilities/_paramtreecfg.py
|
ZeitgeberH/FISH-VIEWER
|
ce7e2e89d1f1895e8e7596da1d04afb324a0075d
|
[
"BSD-3-Clause"
] | 5
|
2022-01-25T17:35:15.000Z
|
2022-02-09T00:52:20.000Z
|
gr_utilities/_paramtreecfg.py
|
ZeitgeberH/FISH-VIEWER
|
ce7e2e89d1f1895e8e7596da1d04afb324a0075d
|
[
"BSD-3-Clause"
] | null | null | null |
gr_utilities/_paramtreecfg.py
|
ZeitgeberH/FISH-VIEWER
|
ce7e2e89d1f1895e8e7596da1d04afb324a0075d
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from pyqtgraph.parametertree.parameterTypes import QtEnumParameter as enum
from pyqtgraph.Qt import QtWidgets
dlg = QtWidgets.QFileDialog
cfg = {
'list': {
'limits': {
'type': 'checklist',
'limits': ['a', 'b', 'c']
}
},
'file': {
'acceptMode': {
'type': 'list',
'limits': list(enum(dlg.AcceptMode, dlg).enumMap)
},
'fileMode': {
'type': 'list',
'limits': list(enum(dlg.FileMode, dlg).enumMap)
},
'viewMode': {
'type': 'list',
'limits': list(enum(dlg.ViewMode, dlg).enumMap)
},
'dialogLabel': {
'type': 'list',
'limits': list(enum(dlg.DialogLabel, dlg).enumMap)
},
'relativeTo': {
'type': 'str',
'value': None
},
'directory': {
'type': 'str',
'value': None
},
'windowTitle': {
'type': 'str',
'value': None
},
'nameFilter': {
'type': 'str',
'value': None
}
},
'float': {
'Float Information': {
'type': 'str',
'readonly': True,
'value': 'Note that all options except "finite" also apply to "int" parameters',
},
'step': {
'type': 'float',
'limits': [0, None],
'value': 1,
},
'limits': {
'type': 'list',
'limits': {'[0, None]': [0, None], '[1, 5]': [1, 5]},
},
'suffix': {
'type': 'list',
'limits': ['Hz', 's', 'm'],
},
'siPrefix': {
'type': 'bool',
'value': True
},
'finite': {
'type': 'bool',
'value': True,
},
'dec': {
'type': 'bool',
'value': False,
},
'minStep': {
'type': 'float',
'value': 1.0e-12,
},
},
'checklist': {
'limits': {
'type': 'checklist',
'limits': ['one', 'two', 'three', 'four'],
},
'exclusive': {
'type': 'bool',
'value': False,
}
},
'pen': {
'Pen Information': {
'type': 'str',
'value': 'Click the button to see options',
'readonly': True,
},
},
'slider': {
'step': {
'type': 'float',
'limits': [0, None],
'value': 1, },
'format': {
'type': 'str',
'value': '{0:>3}',
},
'precision': {
'type': 'int',
'value': 2,
'limits': [1, None],
},
'span': {
'type': 'list',
'limits': {'linspace(-pi, pi)': np.linspace(-np.pi, np.pi), 'arange(10)**2': np.arange(10) ** 2},
},
'How to Set': {
'type': 'list',
'limits': ['Use span', 'Use step + limits'],
}
},
'calendar': {
'format': {
'type': 'str',
'value': 'MM DD',
}
},
'Applies to All Types': {
'Extra Information': {
'type': 'text',
'value': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
'default': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
'readonly': True,
},
'readonly': {
'type': 'bool',
'value': True,
},
'removable': {
'type': 'bool',
'tip': 'Adds a context menu option to remove this parameter',
'value': False,
},
'visible': {
'type': 'bool',
'value': True,
},
'disabled': {
'type': 'bool',
'value': False,
},
'title': {
'type': 'str',
'value': 'Meta Options',
},
'default': {
'tip': 'The default value that gets set when clicking the arrow in the right column',
'type': 'str',
},
'expanded': {
'type': 'bool',
'value': True,
},
},
'No Extra Options': {
'text': 'Unlike the other parameters shown, these don\'t have extra settable options.\n' \
+ 'Note: "int" *does* have the same options as float, mentioned above',
'int': 10,
'str': 'Hi, world!',
'color': '#fff',
'bool': False,
'colormap': None,
'progress': 50,
'action': None,
'font': 'Inter',
}
}
| 26.324468
| 119
| 0.372398
|
7952fb4aeaa03cc7c4b7284e40eb5e8011bbbaa9
| 4,904
|
py
|
Python
|
layers.py
|
zhangbo2008/GAT_network
|
c871a2aceceaa5d638c96c21d23d64ed07c07b4c
|
[
"MIT"
] | null | null | null |
layers.py
|
zhangbo2008/GAT_network
|
c871a2aceceaa5d638c96c21d23d64ed07c07b4c
|
[
"MIT"
] | null | null | null |
layers.py
|
zhangbo2008/GAT_network
|
c871a2aceceaa5d638c96c21d23d64ed07c07b4c
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# This one is used for the cities problem
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
# I don't fully understand this initialization. in_features is F in the paper, out_features is F' in the paper
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)  # project node features with W: (N, in_features) -> (N, out_features)
N = h.size()[0]  # number of nodes
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
# This one is used for the content problem.
class SpecialSpmmFunction(torch.autograd.Function):
"""Special function for only sparse region backpropataion layer."""
@staticmethod
def forward(ctx, indices, values, shape, b):
assert indices.requires_grad == False
a = torch.sparse_coo_tensor(indices, values, shape)
ctx.save_for_backward(a, b)
ctx.N = shape[0]
return torch.matmul(a, b)
@staticmethod
def backward(ctx, grad_output):
a, b = ctx.saved_tensors
grad_values = grad_b = None
if ctx.needs_input_grad[1]:
grad_a_dense = grad_output.matmul(b.t())
edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
grad_values = grad_a_dense.view(-1)[edge_idx]
if ctx.needs_input_grad[3]:
grad_b = a.t().matmul(grad_output)
return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
def forward(self, input, adj):
dv = 'cuda' if input.is_cuda else 'cpu'
N = input.size()[0]
edge = adj.nonzero().t()
h = torch.mm(input, self.W)
# h: N x out
assert not torch.isnan(h).any()
# Self-attention on the nodes - Shared attention mechanism
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
# edge: 2*D x E
edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
# edge_e: E
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))
# e_rowsum: N x 1
edge_e = self.dropout(edge_e)
# edge_e: E
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
if self.concat:
# if this layer is not last layer,
return F.elu(h_prime)
else:
# if this layer is last layer,
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
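A hedged usage sketch of the dense GraphAttentionLayer on a toy graph (sizes and adjacency are illustrative only):
import torch

x = torch.rand(4, 3)                                   # 4 nodes, 3 input features
adj = torch.tensor([[1., 1., 0., 0.],
                    [1., 1., 1., 0.],
                    [0., 1., 1., 1.],
                    [0., 0., 1., 1.]])                 # symmetric adjacency with self-loops

layer = GraphAttentionLayer(in_features=3, out_features=8, dropout=0.1, alpha=0.2, concat=True)
out = layer(x, adj)                                    # shape (4, 8), ELU-activated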
| 34.780142
| 119
| 0.611338
|
7952fbb0ee1a2207bcd4f39f82106a3ed13b8043
| 14,773
|
py
|
Python
|
pytype/blocks_test.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 3,882
|
2015-03-22T12:17:15.000Z
|
2022-03-31T17:13:20.000Z
|
pytype/blocks_test.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 638
|
2015-11-03T06:34:44.000Z
|
2022-03-31T23:41:48.000Z
|
pytype/blocks_test.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 301
|
2015-08-14T10:21:17.000Z
|
2022-03-08T11:03:40.000Z
|
"""Tests for blocks.py.
To create test cases, you can disassemble source code with the help of the dis
module. For example, in Python 3.7, this snippet:
import dis
import opcode
def f(): return None
bytecode = dis.Bytecode(f)
for x in bytecode.codeobj.co_code:
print(f'{x} ({opcode.opname[x]})')
prints:
100 (LOAD_CONST)
0 (<0>)
83 (RETURN_VALUE)
0 (<0>)
"""
from pytype import blocks
from pytype.pyc import opcodes
from pytype.pyc import pyc
from pytype.tests import test_utils
import unittest
class BaseBlocksTest(unittest.TestCase, test_utils.MakeCodeMixin):
"""A base class for implementing tests testing blocks.py."""
# These tests check disassembled bytecode, which varies from version to
# version, so we fix the test version.
python_version = (3, 7)
class OrderingTest(BaseBlocksTest):
"""Tests for order_code in blocks.py."""
def _order_code(self, code):
"""Helper function to disassemble and then order code."""
disassembled_code = pyc.visit(code, blocks.DisCodeVisitor())
return blocks.order_code(disassembled_code, self.python_version)
def test_trivial(self):
# Disassembled from:
# | return None
o = test_utils.Py37Opcodes
co = self.make_code([
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="trivial")
ordered_code = self._order_code(co)
b0, = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertCountEqual([], b0.incoming)
self.assertCountEqual([], b0.outgoing)
def test_has_opcode(self):
# Disassembled from:
# | return None
o = test_utils.Py37Opcodes
co = self.make_code([
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="trivial")
ordered_code = self._order_code(co)
self.assertTrue(ordered_code.has_opcode(opcodes.LOAD_CONST))
self.assertTrue(ordered_code.has_opcode(opcodes.RETURN_VALUE))
self.assertFalse(ordered_code.has_opcode(opcodes.POP_TOP))
def test_yield(self):
# Disassembled from:
# | yield 1
# | yield None
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_CONST, 0,
o.YIELD_VALUE, 0,
# b1:
o.POP_TOP, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="yield")
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.co_name, "yield")
b0, b1 = ordered_code.order
self.assertCountEqual(b0.outgoing, [b1])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b1.outgoing, [])
def test_triangle(self):
# Disassembled from:
# | x = y
# | if y > 1:
# | x -= 2
# | return x
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_GLOBAL, 0,
o.STORE_FAST, 0,
o.LOAD_GLOBAL, 0,
o.LOAD_CONST, 1,
o.COMPARE_OP, 4,
o.POP_JUMP_IF_FALSE, 20,
# b1:
o.LOAD_FAST, 0,
o.LOAD_CONST, 2,
o.INPLACE_SUBTRACT, 0,
o.STORE_FAST, 0,
# b2:
o.LOAD_FAST, 0,
o.RETURN_VALUE, 0,
], name="triangle")
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.co_name, "triangle")
b0, b1, b2 = ordered_code.order
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1, b2])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b1.outgoing, [b2])
self.assertCountEqual(b2.incoming, [b0, b1])
self.assertCountEqual(b2.outgoing, [])
def test_diamond(self):
# Disassembled from:
# | x = y
# | if y > 1:
# | x -= 2
# | else:
# | x += 2
# | return x
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_GLOBAL, 0,
o.STORE_FAST, 0,
o.LOAD_GLOBAL, 0,
o.LOAD_CONST, 1,
o.COMPARE_OP, 4,
o.POP_JUMP_IF_FALSE, 22,
# b1:
o.LOAD_FAST, 0,
o.LOAD_CONST, 2,
o.INPLACE_SUBTRACT, 0,
o.STORE_FAST, 0,
o.JUMP_FORWARD, 8,
# b2:
o.LOAD_FAST, 0,
o.LOAD_CONST, 2,
o.INPLACE_ADD, 0,
o.STORE_FAST, 0,
# b3:
o.LOAD_FAST, 0,
o.RETURN_VALUE, 0,
], name="diamond")
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.co_name, "diamond")
b0, b1, b2, b3 = ordered_code.order
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1, b2])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b1.outgoing, [b3])
self.assertCountEqual(b2.incoming, [b0])
self.assertCountEqual(b2.outgoing, [b3])
self.assertCountEqual(b3.incoming, [b1, b2])
self.assertCountEqual(b3.outgoing, [])
def test_raise(self):
# Disassembled from:
# | raise ValueError()
# | return 1
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_GLOBAL, 0,
o.CALL_FUNCTION, 0,
o.RAISE_VARARGS, 1,
o.LOAD_CONST, 1,
o.RETURN_VALUE, 0, # dead.
], name="raise")
ordered_code = self._order_code(co)
self.assertEqual(ordered_code.co_name, "raise")
b0, b1 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertCountEqual(b0.incoming, [])
self.assertCountEqual(b0.outgoing, [b1])
self.assertCountEqual(b1.incoming, [b0])
self.assertCountEqual(b1.outgoing, [])
def test_call(self):
# Disassembled from:
# | f()
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_GLOBAL, 0,
o.CALL_FUNCTION, 0,
# b1:
o.POP_TOP, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="call")
ordered_code = self._order_code(co)
b0, b1 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 3)
self.assertCountEqual(b0.outgoing, [b1])
def test_finally(self):
# Disassembled from:
# | try:
# | pass
# | finally:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.SETUP_FINALLY, 4,
o.POP_BLOCK, 0,
# b1:
o.LOAD_CONST, 0,
# b2:
o.END_FINALLY, 0,
# b3:
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="finally")
ordered_code = self._order_code(co)
b0, b1, b2, b3 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 1)
self.assertEqual(len(b2.code), 1)
self.assertEqual(len(b3.code), 2)
self.assertCountEqual(b0.outgoing, [b1, b2])
def test_except(self):
# Disassembled from:
# | try:
# | pass
# | except:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.SETUP_EXCEPT, 4,
o.POP_BLOCK, 0,
# b1:
o.JUMP_FORWARD, 12,
# b2:
o.POP_TOP, 0,
o.POP_TOP, 0,
o.POP_TOP, 0,
o.POP_EXCEPT, 0,
o.JUMP_FORWARD, 2,
# b3:
o.END_FINALLY, 0,
# b4:
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="except")
ordered_code = self._order_code(co)
b0, b1, b2, b3 = ordered_code.order
self.assertEqual(len(b0.code), 2)
self.assertEqual(len(b1.code), 1)
self.assertEqual(len(b2.code), 5)
self.assertEqual(len(b3.code), 2)
self.assertCountEqual([b1, b2], b0.outgoing)
self.assertCountEqual([b3], b1.outgoing)
self.assertCountEqual([b3], b2.outgoing)
def test_return(self):
# Disassembled from:
# | return None
# | return None
o = test_utils.Py37Opcodes
co = self.make_code([
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0, # dead.
o.LOAD_CONST, 1, # dead.
o.RETURN_VALUE, 0, # dead.
], name="return")
ordered_code = self._order_code(co)
b0, = ordered_code.order
self.assertEqual(len(b0.code), 2)
def test_with(self):
# Disassembled from:
# | with None:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
# b0:
o.LOAD_CONST, 0,
o.SETUP_WITH, 6,
o.POP_TOP, 0,
o.POP_BLOCK, 0,
# b1:
o.LOAD_CONST, 0,
# b2:
o.WITH_CLEANUP_START, 0,
# b3:
o.WITH_CLEANUP_FINISH, 0,
o.END_FINALLY, 0,
# b4:
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="with")
ordered_code = self._order_code(co)
b0, b1, b2, b3, b4 = ordered_code.order
self.assertEqual(len(b0.code), 4)
self.assertEqual(len(b1.code), 1)
self.assertEqual(len(b2.code), 1)
self.assertEqual(len(b3.code), 2)
self.assertEqual(len(b4.code), 2)
class BlockStackTest(BaseBlocksTest):
"""Test the add_pop_block_targets function."""
def test_finally(self):
# Disassembled from:
# | try:
# | pass
# | finally:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
o.SETUP_FINALLY, 4,
o.POP_BLOCK, 0,
o.LOAD_CONST, 0,
o.END_FINALLY, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="finally")
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# END_FINALLY == SETUP_FINALLY.target
self.assertEqual(bytecode[3], bytecode[0].target)
# END_FINALLY == POP_BLOCK.block_target
self.assertEqual(bytecode[3], bytecode[1].block_target)
def test_except(self):
# Disassembled from:
# | try:
# | pass
# | except:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
o.SETUP_EXCEPT, 4,
o.POP_BLOCK, 0,
o.JUMP_FORWARD, 12,
o.POP_TOP, 0,
o.POP_TOP, 0,
o.POP_TOP, 0,
o.JUMP_FORWARD, 2,
o.END_FINALLY, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="except")
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# POP_TOP == SETUP_EXCEPT.target
self.assertEqual(bytecode[3], bytecode[0].target)
# POP_TOP == POP_BLOCK.block_target
self.assertEqual(bytecode[3], bytecode[1].block_target)
def test_with(self):
# Disassembled from:
# | with None:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
o.LOAD_CONST, 0,
o.SETUP_WITH, 6,
o.POP_TOP, 0,
o.POP_BLOCK, 0,
o.LOAD_CONST, 0,
o.WITH_CLEANUP_START, 0,
o.WITH_CLEANUP_FINISH, 0,
o.END_FINALLY, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
], name="with")
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# WITH_CLEANUP_START == SETUP_WITH.target
self.assertEqual(bytecode[5], bytecode[1].target)
# WITH_CLEANUP_START == POP_BLOCK.block_target
self.assertEqual(bytecode[5], bytecode[3].block_target)
def test_loop(self):
# Disassembled from:
# | while []:
# | break
o = test_utils.Py37Opcodes
co = self.make_code([
o.SETUP_LOOP, 10,
o.BUILD_LIST, 0,
o.POP_JUMP_IF_FALSE, 10,
o.BREAK_LOOP, 0,
o.JUMP_ABSOLUTE, 2,
o.POP_BLOCK, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
])
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# LOAD_CONST == SETUP_LOOP.target
self.assertEqual(bytecode[6], bytecode[0].target)
# POP_BLOCK == POP_JUMP_IF_FALSE.target
self.assertEqual(bytecode[5], bytecode[2].target)
# BUILD_LIST == JUMP_ABSOLUTE.target
self.assertEqual(bytecode[1], bytecode[4].target)
# LOAD_CONST == POP_BLOCK.block_target
self.assertEqual(bytecode[6], bytecode[5].block_target)
def test_break(self):
# Disassembled from:
# | while True:
# | if []:
# | break
o = test_utils.Py37Opcodes
co = self.make_code([
o.SETUP_LOOP, 10,
o.BUILD_LIST, 0,
o.POP_JUMP_IF_FALSE, 2,
o.BREAK_LOOP, 0,
o.JUMP_ABSOLUTE, 2,
o.POP_BLOCK, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
])
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# LOAD_CONST == SETUP_LOOP.target
self.assertEqual(bytecode[6], bytecode[0].target)
# LOAD_CONST == BREAK_LOOP.block_target
self.assertEqual(bytecode[6], bytecode[3].block_target)
# BUILD_LIST == POP_JUMP_IF_FALSE.target
self.assertEqual(bytecode[1], bytecode[2].target)
# BUILD_LIST == JUMP_ABSOLUTE.target
self.assertEqual(bytecode[1], bytecode[4].target)
def test_continue(self):
# Disassembled from:
# | while True:
# | try:
# | continue
# | except:
# | pass
o = test_utils.Py37Opcodes
co = self.make_code([
o.SETUP_LOOP, 24,
o.SETUP_EXCEPT, 6,
o.CONTINUE_LOOP, 2,
o.POP_BLOCK, 0,
o.JUMP_ABSOLUTE, 2,
o.POP_TOP, 0,
o.POP_TOP, 0,
o.POP_TOP, 0,
o.POP_EXCEPT, 0,
o.JUMP_ABSOLUTE, 2,
o.END_FINALLY, 0,
o.JUMP_ABSOLUTE, 2,
o.POP_BLOCK, 0,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
])
bytecode = opcodes.dis(co.co_code, python_version=self.python_version)
blocks.add_pop_block_targets(bytecode, self.python_version)
# LOAD_CONST == SETUP_LOOP.target
self.assertEqual(bytecode[13], bytecode[0].target)
# POP_TOP == SETUP_EXCEPT.target
self.assertEqual(bytecode[5], bytecode[1].target)
# SETUP_EXCEPT == CONTINUE_LOOP.target
self.assertEqual(bytecode[1], bytecode[2].target)
# SETUP_EXCEPT == JUMP_ABSOLUTE.target
self.assertEqual(bytecode[1], bytecode[4].target)
# SETUP_EXCEPT == JUMP_ABSOLUTE.target
self.assertEqual(bytecode[1], bytecode[9].target)
# SETUP_EXCEPT == JUMP_ABSOLUTE.target
self.assertEqual(bytecode[1], bytecode[11].target)
def test_apply_typecomments(self):
# Disassembly + type comment map from
# a = 1; b = 2 # type: float
# The type comment should only apply to b.
o = test_utils.Py37Opcodes
co = self.make_code([
o.LOAD_CONST, 1,
o.STORE_FAST, 0,
o.LOAD_CONST, 2,
o.STORE_FAST, 1,
o.LOAD_CONST, 0,
o.RETURN_VALUE, 0,
])
ordered_code = blocks.merge_annotations(
blocks.process_code(co, self.python_version), {1: "float"}, [])
bytecode = ordered_code.order[0].code
self.assertIsNone(bytecode[1].annotation)
self.assertEqual(bytecode[3].annotation, "float")
if __name__ == "__main__":
unittest.main()
| 28.90998
| 78
| 0.613484
|
7952fc086c62542f78d17bc2a34db6fbd7bd1105
| 1,468
|
py
|
Python
|
test/test_remove_file_extensions.py
|
DanB288/youtube_title_parse
|
965b4a6a4d233ac20a6bbd974a26490d6306059c
|
[
"MIT"
] | 15
|
2018-10-28T09:30:18.000Z
|
2022-02-08T11:40:15.000Z
|
test/test_remove_file_extensions.py
|
DanB288/youtube_title_parse
|
965b4a6a4d233ac20a6bbd974a26490d6306059c
|
[
"MIT"
] | 12
|
2020-04-19T12:54:14.000Z
|
2021-05-19T20:34:40.000Z
|
test/test_remove_file_extensions.py
|
DanB288/youtube_title_parse
|
965b4a6a4d233ac20a6bbd974a26490d6306059c
|
[
"MIT"
] | 4
|
2020-05-24T17:18:07.000Z
|
2021-11-12T20:50:42.000Z
|
# -*- coding: utf-8 -*-
import unittest
from six import with_metaclass
from .meta import MetaTestSequence
tests = [
# https://youtu.be/A2RwHnfI2y8
{
"input": "Ga-In (๊ฐ์ธ) - Nostalgia (๋
ธ์คํ
์ง์) - Lyrics [Hangul+Translation] .mov",
"expected": ["Ga-In (๊ฐ์ธ)", "Nostalgia (๋
ธ์คํ
์ง์)"],
},
# https://www.youtube.com/watch?v=PYBuIwuD1DA
{"input": "show me - B-free.m4v", "expected": ["show me", "B-free"]},
# https://www.youtube.com/watch?v=5hINYNZslP0
{
"input": "์ฑ์๊ฒฝ Sung Si Kyung - ๋ด๊ฒ ์ค๋ ๊ธธ.mp4",
"expected": ["์ฑ์๊ฒฝ Sung Si Kyung", "๋ด๊ฒ ์ค๋ ๊ธธ"],
},
# Things that are NOT file extensions are not removed:
# https://www.youtube.com/watch?v=E2yLg9iW1_0
{"input": "์์ดํํฌ - Mr.chu", "expected": ["์์ดํํฌ", "Mr.chu"]},
# https://www.youtube.com/watch?v=P1Oya1PqKFc
{
"input": "Far East Movement - Live My Life (Feat. Justin Bieber) cover by J.Fla",
"expected": [
"Far East Movement",
"Live My Life (Feat. Justin Bieber) cover by J.Fla",
],
},
# https://www.youtube.com/watch?v=rnQBF2CIygg
# Thing that ends in a file extension without a preceding `.`:
{
"input": "Baka Oppai - A Piece Of Toast",
"expected": ["Baka Oppai", "A Piece Of Toast"],
},
]
class TestSequence(with_metaclass(MetaTestSequence, unittest.TestCase)):
test_cases = tests
test_type = __file__
if __name__ == "__main__":
unittest.main()
| 31.913043
| 89
| 0.595368
|
7952fc143114ff6681ef7955223d2174514d27f5
| 266
|
py
|
Python
|
xontrib/macro_lib/data.py
|
anki-code/xontrib-macro-lib
|
9ec22bb104399ad92a4b4f583c4a7d1de7fbf576
|
[
"MIT"
] | 5
|
2021-03-27T09:28:05.000Z
|
2022-03-23T08:14:38.000Z
|
xontrib/macro_lib/data.py
|
anki-code/xontrib-macro-lib
|
9ec22bb104399ad92a4b4f583c4a7d1de7fbf576
|
[
"MIT"
] | null | null | null |
xontrib/macro_lib/data.py
|
anki-code/xontrib-macro-lib
|
9ec22bb104399ad92a4b4f583c4a7d1de7fbf576
|
[
"MIT"
] | 1
|
2022-03-18T20:40:09.000Z
|
2022-03-18T20:40:09.000Z
|
import json
from xonsh.contexts import Block
class JsonBlock(Block):
__xonsh_block__ = str
def __enter__(self):
return json.loads(self.macro_block)
def __exit__(self, *exc):
del self.macro_block, self.macro_globals, self.macro_locals
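A hedged usage sketch, assuming it is run inside xonsh (macro context managers use the xonsh-specific with! syntax):
from xontrib.macro_lib.data import JsonBlock

with! JsonBlock() as data:
    {
        "hello": "world"
    }

print(data['hello'])   # -> world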
| 20.461538
| 67
| 0.714286
|
7952fd29bf76b6400d20eaed6774b5e72f8c970b
| 267
|
py
|
Python
|
125. Valid palindrome.py
|
bogdan824/LeetCode-Problems_02
|
b5ffd0b0bbb8f63fde2f89c672ca1976c2855b09
|
[
"MIT"
] | null | null | null |
125. Valid palindrome.py
|
bogdan824/LeetCode-Problems_02
|
b5ffd0b0bbb8f63fde2f89c672ca1976c2855b09
|
[
"MIT"
] | null | null | null |
125. Valid palindrome.py
|
bogdan824/LeetCode-Problems_02
|
b5ffd0b0bbb8f63fde2f89c672ca1976c2855b09
|
[
"MIT"
] | null | null | null |
def isPalindrome(s):
s = s.lower()
holdit = ""
for charac in s:
if charac.isalnum():  # LeetCode 125 treats digits as significant, so keep alphanumerics
holdit += charac
i=0
j=len(holdit)-1
while i<=j:
if holdit[i]!=holdit[j]:
return False
i+=1
j-=1
return True
s = "0P"
print(isPalindrome(s))
| 14.052632
| 27
| 0.561798
|