Dataset columns (name, dtype, value range or length; ⌀ marks a nullable column):

| column | dtype | range / lengths | nullable |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 4–1.02M | |
| ext | string | 8 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | length 4–209 | |
| max_stars_repo_name | string | length 5–121 | |
| max_stars_repo_head_hexsha | string | length 40 | |
| max_stars_repo_licenses | list | length 1–10 | |
| max_stars_count | int64 | 1–191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 4–209 | |
| max_issues_repo_name | string | length 5–121 | |
| max_issues_repo_head_hexsha | string | length 40 | |
| max_issues_repo_licenses | list | length 1–10 | |
| max_issues_count | int64 | 1–67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 4–209 | |
| max_forks_repo_name | string | length 5–121 | |
| max_forks_repo_head_hexsha | string | length 40 | |
| max_forks_repo_licenses | list | length 1–10 | |
| max_forks_count | int64 | 1–105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 4–1.02M | |
| avg_line_length | float64 | 1.07–66.1k | |
| max_line_length | int64 | 4–266k | |
| alphanum_fraction | float64 | 0.01–1 | |
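The last three columns are derived from `content`. As a rough illustration (the exact line splitting and rounding used to build the dataset are assumptions), they can be approximated as:

```python
# Rough sketch of how avg_line_length, max_line_length and alphanum_fraction
# appear to relate to the `content` column. The exact handling of trailing
# newlines and empty lines in the real pipeline is an assumption.
def derived_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

sample = "from app import manager\nif __name__ == '__main__':\n    manager.run()\n"
print(derived_stats(sample))
```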
96fc97b3e2e36b994710eb0688b02a2f2af90727
| 387
|
py
|
Python
|
setup.py
|
sansbacon/pangadfs-pydfsoptimizer
|
4a0ee4bcfe5e4d228136b5a32690d410c521880c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
sansbacon/pangadfs-pydfsoptimizer
|
4a0ee4bcfe5e4d228136b5a32690d410c521880c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
sansbacon/pangadfs-pydfsoptimizer
|
4a0ee4bcfe5e4d228136b5a32690d410c521880c
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
def run():
setup(
name='pangadfs-pydfs',
packages=find_packages(),
entry_points={
'pangadfs.pool': ['pool_pydfs = pydfs:PyDfsPool'],
'pangadfs.populate': ['populate_pydfs = pydfs:PyDfsPopulate'],
},
zip_safe=False,
)
if __name__ == '__main__':
run()
| 21.5
| 73
| 0.560724
|
1eba5e5190a1f7dcf15c864b00b303a817b7726a
| 1,241
|
py
|
Python
|
firmware_kmk/lib/kmk/extensions/__init__.py
|
telzo2000/NumAtreus_pico
|
3c8558e80869eca3ee753de21a02afc8108e5fcf
|
[
"MIT"
] | 1
|
2022-01-21T06:09:18.000Z
|
2022-01-21T06:09:18.000Z
|
firmware_kmk/lib/kmk/extensions/__init__.py
|
telzo2000/NumAtreus_pico
|
3c8558e80869eca3ee753de21a02afc8108e5fcf
|
[
"MIT"
] | 1
|
2021-09-07T21:42:32.000Z
|
2021-09-07T21:44:19.000Z
|
firmware_kmk/lib/kmk/extensions/__init__.py
|
telzo2000/NumAtreus_pico
|
3c8558e80869eca3ee753de21a02afc8108e5fcf
|
[
"MIT"
] | 1
|
2021-09-07T21:37:16.000Z
|
2021-09-07T21:37:16.000Z
|
class InvalidExtensionEnvironment(Exception):
pass
class Extension:
_enabled = True
def enable(self, keyboard):
self._enabled = True
self.on_runtime_enable(keyboard)
def disable(self, keyboard):
self._enabled = False
self.on_runtime_disable(keyboard)
# The below methods should be implemented by subclasses
def on_runtime_enable(self, keyboard):
raise NotImplementedError
def on_runtime_disable(self, keyboard):
raise NotImplementedError
def during_bootup(self, keyboard):
raise NotImplementedError
def before_matrix_scan(self, keyboard):
'''
Return value will be injected as an extra matrix update
'''
raise NotImplementedError
def after_matrix_scan(self, keyboard):
'''
        Return value will replace the matrix update if supplied
'''
raise NotImplementedError
def before_hid_send(self, keyboard):
raise NotImplementedError
def after_hid_send(self, keyboard):
raise NotImplementedError
def on_powersave_enable(self, keyboard):
raise NotImplementedError
def on_powersave_disable(self, keyboard):
raise NotImplementedError
| 23.865385
| 63
| 0.681708
|
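For orientation, below is a minimal sketch of a concrete subclass of the Extension base class shown above. The class name and no-op bodies are hypothetical; `keyboard` is whatever object KMK passes to these hooks at runtime.

```python
# Hypothetical no-op extension built on the Extension base class shown above.
class NoOpExtension(Extension):
    def on_runtime_enable(self, keyboard):
        pass

    def on_runtime_disable(self, keyboard):
        pass

    def during_bootup(self, keyboard):
        pass  # one-time setup while the keyboard boots

    def before_matrix_scan(self, keyboard):
        return None  # nothing extra injected into the matrix update

    def after_matrix_scan(self, keyboard):
        return None  # keep the original matrix update

    def before_hid_send(self, keyboard):
        pass

    def after_hid_send(self, keyboard):
        pass

    def on_powersave_enable(self, keyboard):
        pass

    def on_powersave_disable(self, keyboard):
        pass
```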
48aaaadb735b639bcae569dd64ce944fadbf97b1
| 240
|
py
|
Python
|
Course-2004/Lesson 2/hw3.py
|
kasztp/python-lessons
|
2a159ad5e1186c749b96c5d0ede45b7142c6bbb5
|
[
"MIT"
] | 35
|
2015-05-18T08:08:41.000Z
|
2022-03-07T09:38:02.000Z
|
Course-2004/Lesson 2/hw3.py
|
kasztp/python-lessons
|
2a159ad5e1186c749b96c5d0ede45b7142c6bbb5
|
[
"MIT"
] | 1
|
2021-09-29T02:08:26.000Z
|
2021-09-29T02:08:26.000Z
|
Course-2004/Lesson 2/hw3.py
|
kasztp/python-lessons
|
2a159ad5e1186c749b96c5d0ede45b7142c6bbb5
|
[
"MIT"
] | 40
|
2015-04-28T00:38:54.000Z
|
2022-02-13T14:18:34.000Z
|
num1 = float(eval(input('Please enter a number: ')))
if num1 < 0:
num1 *= 2
else:
num1 *= 3
num2 = float(eval(input('Please enter a number: ')))
if num2 < 0:
num2 *= 2
else:
num2 *= 3
print('The result is', num1 + num2)
| 17.142857
| 52
| 0.575
|
7b3645d969730ab46156d48b6a65b4151d9901ee
| 75
|
py
|
Python
|
run.py
|
monicabgs/via_hacka
|
67ef12878efa48bbe262a6533e7c50cc1ab7ebc7
|
[
"MIT"
] | null | null | null |
run.py
|
monicabgs/via_hacka
|
67ef12878efa48bbe262a6533e7c50cc1ab7ebc7
|
[
"MIT"
] | null | null | null |
run.py
|
monicabgs/via_hacka
|
67ef12878efa48bbe262a6533e7c50cc1ab7ebc7
|
[
"MIT"
] | null | null | null |
from app import manager
if __name__ == "__main__":
manager.run()
| 12.5
| 26
| 0.64
|
c6cb537a78cf501e7e26ceba7be8c4f74006276b
| 1,178
|
py
|
Python
|
prediction/train_model.py
|
CovResearchProject/prediction
|
1e176a73bedff09bb1a9989c767967b8eb24f10a
|
[
"MIT"
] | null | null | null |
prediction/train_model.py
|
CovResearchProject/prediction
|
1e176a73bedff09bb1a9989c767967b8eb24f10a
|
[
"MIT"
] | null | null | null |
prediction/train_model.py
|
CovResearchProject/prediction
|
1e176a73bedff09bb1a9989c767967b8eb24f10a
|
[
"MIT"
] | null | null | null |
from src import *
#get the data
df = get_data()
date_time = pd.to_datetime(df.pop('Date'), format='%d/%m/%Y')
timestamp_s = date_time.map(pd.Timestamp.timestamp)
#split
column_indices = {name: i for i, name in enumerate(df.columns)}
n = len(df)
train_df = df[0:int(n*0.8)]
val_df = df[int(n*0.8):int(n*0.9)]
test_df = df[int(n*0.90):]
num_features = df.shape[1]
#normalize
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
#create data window
OUT_STEPS = 21
multi_window = WindowGenerator(input_width=30,
label_width=OUT_STEPS,
shift=OUT_STEPS,
train_df = train_df,
val_df = val_df,
test_df = test_df)
#create model
feedback_model = FeedBack(units=2048, out_steps=OUT_STEPS)
prediction, state = feedback_model.warmup(multi_window.example[0])
#train
compile_and_fit(feedback_model, multi_window,MAX_EPOCHS = 500,patience=5)
| 25.608696
| 73
| 0.613752
|
1338e6aefa64b9d97d9b9b5b1edf75493c33d02f
| 5,202
|
py
|
Python
|
src/pyasn1/type/useful.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 184
|
2017-12-20T21:50:06.000Z
|
2022-03-19T13:24:58.000Z
|
src/pyasn1/type/useful.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
src/pyasn1/type/useful.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 136
|
2018-01-09T22:52:06.000Z
|
2022-02-24T13:26:18.000Z
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import datetime
from pyasn1.type import univ, char, tag
from pyasn1.compat import string, dateandtime
from pyasn1 import error
__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
NoValue = univ.NoValue
noValue = univ.noValue
class ObjectDescriptor(char.GraphicString):
__doc__ = char.GraphicString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.GraphicString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
)
# Optimization for faster codec lookup
typeId = char.GraphicString.getTypeId()
class TimeMixIn(object):
_yearsDigits = 4
_hasSubsecond = False
_optionalMinutes = False
_shortTZ = False
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
UTC = FixedOffset(0, 'UTC')
@property
def asDateTime(self):
"""Create :py:class:`datetime.datetime` object from a |ASN.1| object.
Returns
-------
:
new instance of :py:class:`datetime.datetime` object
"""
text = str(self)
if text.endswith('Z'):
tzinfo = TimeMixIn.UTC
text = text[:-1]
elif '-' in text or '+' in text:
if '+' in text:
text, plusminus, tz = string.partition(text, '+')
else:
text, plusminus, tz = string.partition(text, '-')
if self._shortTZ and len(tz) == 2:
tz += '00'
if len(tz) != 4:
raise error.PyAsn1Error('malformed time zone offset %s' % tz)
try:
minutes = int(tz[:2]) * 60 + int(tz[2:])
if plusminus == '-':
minutes *= -1
except ValueError:
raise error.PyAsn1Error('unknown time specification %s' % self)
tzinfo = TimeMixIn.FixedOffset(minutes, '?')
else:
tzinfo = None
if '.' in text or ',' in text:
if '.' in text:
text, _, ms = string.partition(text, '.')
else:
text, _, ms = string.partition(text, ',')
try:
ms = int(ms) * 10000
except ValueError:
raise error.PyAsn1Error('bad sub-second time specification %s' % self)
else:
ms = 0
if self._optionalMinutes and len(text) - self._yearsDigits == 6:
text += '0000'
elif len(text) - self._yearsDigits == 8:
text += '00'
try:
dt = dateandtime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
except ValueError:
raise error.PyAsn1Error('malformed datetime format %s' % self)
return dt.replace(microsecond=ms, tzinfo=tzinfo)
@classmethod
def fromDateTime(cls, dt):
"""Create |ASN.1| object from a :py:class:`datetime.datetime` object.
Parameters
----------
dt : :py:class:`datetime.datetime` object
The `datetime.datetime` object to initialize the |ASN.1| object from
Returns
-------
:
new instance of |ASN.1| value
"""
text = dt.strftime(cls._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
if cls._hasSubsecond:
text += '.%d' % (dt.microsecond // 10000)
if dt.utcoffset():
seconds = dt.utcoffset().seconds
if seconds < 0:
text += '-'
else:
text += '+'
text += '%.2d%.2d' % (seconds // 3600, seconds % 3600)
else:
text += 'Z'
return cls(text)
class GeneralizedTime(char.VisibleString, TimeMixIn):
__doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
)
# Optimization for faster codec lookup
typeId = char.VideotexString.getTypeId()
_yearsDigits = 4
_hasSubsecond = True
_optionalMinutes = True
_shortTZ = True
class UTCTime(char.VisibleString, TimeMixIn):
__doc__ = char.VisibleString.__doc__
#: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
tagSet = char.VisibleString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
)
# Optimization for faster codec lookup
typeId = char.VideotexString.getTypeId()
_yearsDigits = 2
_hasSubsecond = False
_optionalMinutes = False
_shortTZ = False
| 27.967742
| 104
| 0.566705
|
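Assuming a pyasn1 release matching the vendored file above, the GeneralizedTime helpers round-trip a timestamp as in this sketch (not part of the repo):

```python
import datetime
from pyasn1.type import useful

# Round-trip a timestamp through GeneralizedTime as defined above.
gt = useful.GeneralizedTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2))
print(gt)             # 20170711000102.0Z  (naive datetimes are rendered as UTC)
print(gt.asDateTime)  # datetime with tzinfo set to the fixed UTC offset
```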
17fb8a968c2c6f2c69d754a6500d41339ec0c793
| 592
|
py
|
Python
|
mne/stats/__init__.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | 2
|
2015-09-27T20:33:49.000Z
|
2020-04-22T19:10:56.000Z
|
mne/stats/__init__.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | null | null | null |
mne/stats/__init__.py
|
Anevar/mne-python
|
15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb
|
[
"BSD-3-Clause"
] | 1
|
2018-09-15T09:45:38.000Z
|
2018-09-15T09:45:38.000Z
|
"""Functions for statistical analysis"""
from .parametric import f_threshold_twoway_rm, f_twoway_rm
from .permutations import permutation_t_test
from .cluster_level import (permutation_cluster_test,
permutation_cluster_1samp_test,
spatio_temporal_cluster_1samp_test,
spatio_temporal_cluster_test,
_st_mask_from_s_inds,
ttest_1samp_no_p,
summarize_clusters_stc)
from .multi_comp import fdr_correction, bonferroni_correction
| 45.538462
| 63
| 0.633446
|
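A hedged usage sketch of the multiple-comparison helpers re-exported above; the p-values are made up, and the `(pvals, alpha)` signatures returning a reject mask plus corrected p-values are assumed from recent MNE versions:

```python
import numpy as np
from mne.stats import bonferroni_correction, fdr_correction

# Hypothetical p-values from four independent tests.
pvals = np.array([0.001, 0.02, 0.04, 0.3])
reject_fdr, p_fdr = fdr_correction(pvals, alpha=0.05)
reject_bonf, p_bonf = bonferroni_correction(pvals, alpha=0.05)
print(reject_fdr, p_fdr)
print(reject_bonf, p_bonf)
```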
28ac1eef8fc1d6bdd1715ee3928268e278e7376e
| 600
|
py
|
Python
|
photonsdi/constants.py
|
felixheld/photonSDI
|
674925db3275efff3a936bf610a68839ffdf81b2
|
[
"BSD-2-Clause"
] | 8
|
2018-02-05T05:35:06.000Z
|
2021-04-08T13:49:42.000Z
|
photonsdi/constants.py
|
felixheld/photon-sdi
|
674925db3275efff3a936bf610a68839ffdf81b2
|
[
"BSD-2-Clause"
] | 1
|
2018-08-01T07:29:21.000Z
|
2018-08-02T17:16:51.000Z
|
photonsdi/constants.py
|
felixheld/photon-sdi
|
674925db3275efff3a936bf610a68839ffdf81b2
|
[
"BSD-2-Clause"
] | 3
|
2017-12-17T12:03:45.000Z
|
2020-08-26T06:56:12.000Z
|
# common constants and defines for the SDI core
SDI_ELEMENTARY_STREAM_DATA_WIDTH = 10
# the 10 bit SDI data words are transmitted LSB first
SDI_LINE_LENGTH_WIDTH = 12 # 1080p resolution at maximum for now
SDI_LINE_NUMBER_WIDTH = 11
SDI_FIRST_PIXEL_NUMBER = 0 # pixel 0 is in the middle of a line(!)
SDI_FIRST_LINE_NUMBER = 1
SDI_BLANKING_YRGB_10BIT = 0x040
SDI_BLANKING_CrCb_10BIT = 0x200
SDI_CRC_TAPS = [0, 4, 5, 18]
SDI_CRC_LENGTH = max(SDI_CRC_TAPS)
SDI_SCRAMBLER_TAPS = [0, 4, 9]
SDI_SCRAMBLER_LENGTH = max(SDI_SCRAMBLER_TAPS)
SDI_NRZI_TAPS = [0, 1]
SDI_NRZI_LENGTH = max(SDI_NRZI_TAPS)
| 26.086957
| 67
| 0.785
|
ebd25c23413074ac23d55dfd9bb770f3aab1a3b8
| 3,129
|
py
|
Python
|
django_extensions/management/commands/runscript.py
|
google-code-export/django-command-extensions
|
581988d8027928fc0193e2dcf730c87ebdcfbc19
|
[
"MIT"
] | 3
|
2015-12-25T14:45:36.000Z
|
2016-11-28T09:58:03.000Z
|
django_extensions/management/commands/runscript.py
|
clones/django-command-extensions
|
cb18dbb5fc352b4fd1ac9696a15724466915367b
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/runscript.py
|
clones/django-command-extensions
|
cb18dbb5fc352b4fd1ac9696a15724466915367b
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from optparse import make_option
import sys
import os
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--fixtures', action='store_true', dest='infixtures', default=False,
help='Only look in app.fixtures subdir'),
make_option('--noscripts', action='store_true', dest='noscripts', default=False,
help='Look in app.scripts subdir'),
)
help = 'Runs a script in django context.'
args = "script [script ...]"
def handle(self, *scripts, **options):
from django.db.models import get_apps
subdirs = []
if not options.get('noscripts'):
subdirs.append('scripts')
if options.get('infixtures'):
subdirs.append('fixtures')
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', False)
if len(subdirs) < 1:
print "No subdirs to run left."
return
if len(scripts) < 1:
print "Script name required."
return
def run_script(name):
if verbosity > 1:
print "check for %s" % name
try:
t = __import__(name, [], [], [" "])
if verbosity > 0:
print "Found script %s ..." %name
if hasattr(t, "run"):
if verbosity > 1:
print "found run() in %s. executing..." % name
# TODO: add arguments to run
try:
t.run()
except Exception, e:
if verbosity > 0:
print "Exception while running run() in %s" %name
if show_traceback:
raise
else:
if verbosity > 1:
print "no run() function found."
except ImportError:
pass
for app in get_apps():
app_name = app.__name__.split(".")[:-1] # + ['fixtures']
for subdir in subdirs:
for script in scripts:
run_script(".".join(app_name + [subdir, script]))
# try app.DIR.script import
for script in scripts:
sa = script.split(".")
for subdir in subdirs:
nn = ".".join(sa[:-1] + [subdir, sa[-1]])
run_script(nn)
# try direct import
if script.find(".") != -1:
run_script(script)
# Backwards compatibility for Django r9110
if not [opt for opt in Command.option_list if opt.dest=='verbosity']:
Command.option_list += (
make_option('--verbosity', '-v', action="store", dest="verbosity",
default='1', type='choice', choices=['0', '1', '2'],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"),
)
| 32.59375
| 88
| 0.512304
|
5fcc1b5eb7f71ab02d8783fba695eb6dbbbfcd78
| 21
|
py
|
Python
|
megumi/version.py
|
dev-techmoe/megumi
|
67d5626b168dada5f42671a81eca46ea8b7a603c
|
[
"MIT"
] | 4
|
2020-10-28T08:37:49.000Z
|
2022-03-30T05:39:27.000Z
|
megumi/version.py
|
dev-techmoe/megumi
|
67d5626b168dada5f42671a81eca46ea8b7a603c
|
[
"MIT"
] | null | null | null |
megumi/version.py
|
dev-techmoe/megumi
|
67d5626b168dada5f42671a81eca46ea8b7a603c
|
[
"MIT"
] | null | null | null |
VERSION = '0.0.1-dev'
| 21
| 21
| 0.619048
|
9018b81faf932eaf4e7ccb632bba5c8736218337
| 3,168
|
py
|
Python
|
attribution/distributions.py
|
cmu-transparency/lib-attribution
|
248d10ac5518a220205081ff6a5954fa594d4d78
|
[
"MIT"
] | 5
|
2020-09-16T21:22:59.000Z
|
2021-08-12T01:03:31.000Z
|
attribution/distributions.py
|
cmu-transparency/lib-attribution
|
248d10ac5518a220205081ff6a5954fa594d4d78
|
[
"MIT"
] | 1
|
2021-06-25T10:35:19.000Z
|
2021-06-27T08:17:07.000Z
|
attribution/distributions.py
|
cmu-transparency/lib-attribution
|
248d10ac5518a220205081ff6a5954fa594d4d78
|
[
"MIT"
] | 1
|
2021-08-12T01:03:48.000Z
|
2021-08-12T01:03:48.000Z
|
'''
Docstring for the distributions module.
'''
import keras.backend as K
class Doi(object):
'''
Interface for a distribution of interest.
The *distribution of interest* lets us specify the set of instances over
which we want our explanations to be faithful.
'''
def __call__(self, z):
'''
Takes a tensor, z, which is the input to g (in model, f = g o h), and
returns a new tensor, z', which has an entry for each point in the
distribution of interest for each instance. The distribution of interest
is assumed be a uniform distribution over all points returned this way.
Parameters
----------
z : keras.backend.Tensor
            The tensor representing the output of the layer defining the slice.
Returns
-------
keras.backend.Tensor
A new tensor connected to `z`, which represents the distribution of
interest.
'''
raise NotImplementedError('This is an abstract method.')
@staticmethod
def linear_interp():
return LinearInterpDoi()
@staticmethod
def point():
return PointDoi()
class LinearInterpDoi(Doi):
'''
A distribution of interest, which, for point, z, is the uniform distribution
over the linear interpolation from a given baseline to z. This distribution
of interest yields the Aumann-Shapley value.
'''
def __call__(self, z):
# Make placeholders for the resolution and baseline.
r = K.variable(1)
baseline = K.variable(K.zeros(shape=K.int_shape(z)[1:]))
b = K.expand_dims(baseline, axis=0)
# Allocate the alpha term for the interpolation.
a = K.tile(
(1. + K.arange(r, dtype='float32')) / K.cast(r, 'float32'),
[K.shape(z)[0]])
for _ in range(K.ndim(z) - 1):
a = K.expand_dims(a, axis=-1)
# K.repeat_elements has inconsistent behavior across backends. For
# theano, it is fine to use a tensor for reps; for tensorflow, it is
# not, and repeat_elements needs a Python integer.
# The following hack for tensorflow is adapted from:
# https://github.com/keras-team/keras/issues/2656
if K.backend() == 'theano':
z_rep = K.repeat_elements(z, r, axis=0)
elif K.backend() == 'tensorflow':
multiples = K.variable(
[r]+[1 for i in range(K.ndim(z)-1)],
dtype='int32')
z_rep = K.tf.tile(z, multiples)
z_interp = b + a * (z_rep - b)
# Set keras metadata on the resulting tensor.
z_interp._uses_learning_phase = True
z_interp._keras_shape = K.int_shape(z)
# Set Doi metadata on the resulting tensor.
z_interp._doi_parent = z
z_interp._doi_params = {
'resolution': r,
'baseline': baseline}
return z_interp
class PointDoi(Doi):
'''
    A distribution of interest where all of the probability mass is on a single
point.
'''
def __call__(self, z):
z._doi_parent = z
z._doi_params = {}
return z
| 30.461538
| 80
| 0.598801
|
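As a sketch of how the distribution-of-interest objects above are meant to be applied, assuming the old Keras 2.x backend API the module was written against (the placeholder shape is arbitrary):

```python
import keras.backend as K

# Wrap a symbolic layer output z in the linear-interpolation DoI.
z = K.placeholder(shape=(None, 10))
doi = Doi.linear_interp()
z_interp = doi(z)  # one entry per interpolation point per instance

# PointDoi leaves the tensor unchanged apart from the DoI metadata.
z_point = Doi.point()(z)
```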
9de3d202b88f174c676ea874dbd04cccff69a2d7
| 2,924
|
py
|
Python
|
peering/api/nested_serializers.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | null | null | null |
peering/api/nested_serializers.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | 8
|
2022-03-02T20:47:24.000Z
|
2022-03-28T23:23:49.000Z
|
peering/api/nested_serializers.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | 1
|
2021-01-25T01:58:22.000Z
|
2021-01-25T01:58:22.000Z
|
from rest_framework import serializers
from peering.models import (
AutonomousSystem,
BGPGroup,
Community,
Configuration,
DirectPeeringSession,
Email,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
from utils.api import WritableNestedSerializer
class AutonomousSystemNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="peering-api:autonomoussystem-detail"
)
class Meta:
model = AutonomousSystem
fields = ["id", "url", "asn", "name", "ipv6_max_prefixes", "ipv4_max_prefixes"]
class BGPGroupNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="peering-api:bgpgroup-detail")
class Meta:
model = BGPGroup
fields = ["id", "url", "name", "slug"]
class CommunityNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="peering-api:community-detail")
class Meta:
model = Community
fields = ["id", "url", "name", "slug", "value", "type"]
class ConfigurationNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="peering-api:configuration-detail"
)
class Meta:
model = Configuration
fields = ["id", "url", "name"]
class DirectPeeringSessionNestedSerializer(WritableNestedSerializer):
class Meta:
model = DirectPeeringSession
fields = [
"id",
"local_asn",
"ip_address",
"enabled",
]
class EmailNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="peering-api:email-detail")
class Meta:
model = Configuration
fields = ["id", "url", "name"]
class InternetExchangeNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="peering-api:internetexchange-detail"
)
class Meta:
model = InternetExchange
fields = ["id", "url", "name", "slug"]
class InternetExchangePeeringSessionNestedSerializer(WritableNestedSerializer):
class Meta:
model = InternetExchangePeeringSession
fields = [
"id",
"ip_address",
"enabled",
"is_route_server",
]
class RouterNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(view_name="peering-api:router-detail")
class Meta:
model = Router
fields = ["id", "url", "name", "hostname", "platform"]
class RoutingPolicyNestedSerializer(WritableNestedSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="peering-api:routingpolicy-detail"
)
class Meta:
model = RoutingPolicy
fields = ["id", "url", "name", "slug", "type"]
| 26.581818
| 88
| 0.674419
|
750a1cefc589595ff95d5df6c9a6745a6f2a58d6
| 116
|
py
|
Python
|
fdap/app/contracts/jsonable.py
|
miniyus/AutomaticPosting-Python
|
0627066e60d5cd474ed858e1567bcf6b5b1d9336
|
[
"MIT"
] | null | null | null |
fdap/app/contracts/jsonable.py
|
miniyus/AutomaticPosting-Python
|
0627066e60d5cd474ed858e1567bcf6b5b1d9336
|
[
"MIT"
] | 1
|
2021-11-06T02:30:37.000Z
|
2021-11-06T02:30:37.000Z
|
fdap/app/contracts/jsonable.py
|
miniyus/AutomaticPosting-Python
|
0627066e60d5cd474ed858e1567bcf6b5b1d9336
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Jsonable(ABC):
@abstractmethod
def to_json(self):
pass
| 12.888889
| 35
| 0.672414
|
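A minimal concrete implementation of the Jsonable contract shown above; the Point class is hypothetical:

```python
import json

# Hypothetical value object satisfying the Jsonable interface shown above.
class Point(Jsonable):
    def __init__(self, x: float, y: float):
        self.x = x
        self.y = y

    def to_json(self) -> str:
        return json.dumps({"x": self.x, "y": self.y})

print(Point(1.0, 2.0).to_json())  # {"x": 1.0, "y": 2.0}
```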
51bf30ebd084b5c6b621ac2b1c94f155822f69b8
| 3,740
|
py
|
Python
|
script/setmgt_ip.py
|
shaitmm/NetDevOps
|
19478519f6c792353045fcfed7c56bc10428e680
|
[
"MIT"
] | 1
|
2019-04-30T06:24:33.000Z
|
2019-04-30T06:24:33.000Z
|
script/setmgt_ip.py
|
shaitmm/NetDevOps
|
19478519f6c792353045fcfed7c56bc10428e680
|
[
"MIT"
] | null | null | null |
script/setmgt_ip.py
|
shaitmm/NetDevOps
|
19478519f6c792353045fcfed7c56bc10428e680
|
[
"MIT"
] | null | null | null |
import threading
from telnet import Telnet
devices_info = [{'port': 33281, 'hostname': 'xrv1', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.101"},
{'port': 33282, 'hostname': 'xrv2', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.102"},
{'port': 33283, 'hostname': 'xrv3', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.103"},
{'port': 33284, 'hostname': 'xrv4', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.104"},
{'port': 33285, 'hostname': 'xrv5', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.105"},
{'port': 33286, 'hostname': 'xrv6', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.106"},
{'port': 33287, 'hostname': 'xrv7', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.107"},
{'port': 33288, 'hostname': 'xrv8', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.108"},
{'port': 33289, 'hostname': 'xrv9', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.109"},
{'port': 33290, 'hostname': 'xrv10', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.110"},
{'port': 33291, 'hostname': 'xrv11', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.111"},
{'port': 33292, 'hostname': 'xrv12', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.112"},
{'port': 33293, 'hostname': 'xrv13', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.113"},
{'port': 33294, 'hostname': 'xrv14', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.114"},
{'port': 33295, 'hostname': 'xrv15', 'username': 'admin', 'password': 'admin',"mgt_ip": "172.20.3.115"},
{'port': 33296, 'hostname': 'vIOS16', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.116"},
{'port': 33297, 'hostname': 'vIOS17', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.117"},
{'port': 33298, 'hostname': 'vIOS18', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.118"},
{'port': 33299, 'hostname': 'vIOS19', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.119"},
{'port': 33300, 'hostname': 'vIOS20', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.120"},
{'port': 33301, 'hostname': 'vIOS21', 'username': 'admin', 'password': 'admin', "mgt_ip": "172.20.3.121"},
]
def setmgt_ip(d):
device_type =""
if "xrv" in d.get("hostname"):
device_type = "iosxr"
tn = Telnet(host="172.20.0.1", port=d.get("port"), device_type=device_type, debug=True)
cfgs_list = []
cfgs_list.append("interface mgmtEth 0/0/CPU0/0")
cfgs_list.append("no shutdown")
cfgs_list.append("ipv4 address %s 255.255.0.0" %d.get("mgt_ip"))
tn.send_config_set(cfgs_list)
if "IOS" in d.get("hostname"):
device_type = "ios"
tn = Telnet(host="172.20.0.1", port=d.get("port"), device_type=device_type, debug=True)
cfgs_list = []
cfgs_list.append("interface gi0/3")
cfgs_list.append("no shutdown")
cfgs_list.append("ip address %s 255.255.0.0" %d.get("mgt_ip"))
tn.send_config_set(cfgs_list)
if __name__ == "__main__":
thread_tasks = []
for device in devices_info:
task = threading.Thread(target=setmgt_ip, args=(device,))
task.start()
thread_tasks.append(task)
for t in thread_tasks:
t.join()
| 62.333333
| 122
| 0.549198
|
0b910a08d3d51359e454c67aa645f16c9e6ecc0a
| 10,674
|
py
|
Python
|
testing/sdk_tasks.py
|
danielitit/dcos-commons
|
69ba6429bdcfc4a2ffd0240d163730034e31a3c5
|
[
"Apache-2.0"
] | null | null | null |
testing/sdk_tasks.py
|
danielitit/dcos-commons
|
69ba6429bdcfc4a2ffd0240d163730034e31a3c5
|
[
"Apache-2.0"
] | 1
|
2018-01-26T04:16:31.000Z
|
2018-01-26T04:16:31.000Z
|
testing/sdk_tasks.py
|
minyk/dcos-etcd
|
bcd7cd86d87c7ce164862dd28a5aee2a0ae53e15
|
[
"Apache-2.0"
] | null | null | null |
'''Utilities relating to running commands and HTTP requests
************************************************************************
FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
SHOULD ALSO BE APPLIED TO sdk_tasks IN ANY OTHER PARTNER REPOS
************************************************************************
'''
import logging
import retrying
import shakedown
import dcos.errors
import sdk_cmd
import sdk_package_registry
import sdk_plan
DEFAULT_TIMEOUT_SECONDS = 30 * 60
# From dcos-cli:
COMPLETED_TASK_STATES = set([
"TASK_FINISHED", "TASK_KILLED", "TASK_FAILED", "TASK_LOST", "TASK_ERROR",
"TASK_GONE", "TASK_GONE_BY_OPERATOR", "TASK_DROPPED", "TASK_UNREACHABLE",
"TASK_UNKNOWN"
])
log = logging.getLogger(__name__)
def check_running(service_name, expected_task_count, timeout_seconds=DEFAULT_TIMEOUT_SECONDS, allow_more=True):
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
try:
tasks = shakedown.get_service_tasks(service_name)
except dcos.errors.DCOSHTTPException:
log.info('Failed to get tasks for service {}'.format(service_name))
tasks = []
running_task_names = []
other_tasks = []
for t in tasks:
if t['state'] == 'TASK_RUNNING':
running_task_names.append(t['name'])
else:
other_tasks.append('{}={}'.format(t['name'], t['state']))
log.info('Waiting for {} running tasks, got {} running/{} total:\n- running: {}\n- other: {}'.format(
expected_task_count,
len(running_task_names), len(tasks),
sorted(running_task_names),
sorted(other_tasks)))
if allow_more:
return len(running_task_names) >= expected_task_count
else:
return len(running_task_names) == expected_task_count
fn()
def get_task_ids(service_name, task_prefix):
tasks = shakedown.get_service_tasks(service_name)
matching_tasks = [t for t in tasks if t['name'].startswith(task_prefix)]
return [t['id'] for t in matching_tasks]
class Task(object):
'''Entry value returned by get_summary()'''
@staticmethod
def parse(task_entry, agents):
agent_id = task_entry['slave_id']
matching_agent_hosts = [agent['hostname'] for agent in agents['slaves'] if agent['id'] == agent_id]
if len(matching_agent_hosts) != 1:
host = "UNKNOWN:" + agent_id
else:
host = matching_agent_hosts[0]
return Task(
task_entry['name'],
host,
task_entry['state'],
task_entry['id'],
task_entry['framework_id'],
agent_id)
def __init__(self, name, host, state, task_id, framework_id, agent):
self.name = name
self.host = host
self.state = state
self.id = task_id
self.framework_id = framework_id
self.agent = agent
def __repr__(self):
return 'Task[name="{}"\tstate={}\tid={}\thost={}\tframework_id={}\tagent={}]'.format(
self.name, self.state.split('_')[-1], self.id, self.host, self.framework_id, self.agent)
def get_status_history(task_name: str) -> list:
'''Returns a list of task status values (of the form 'TASK_STARTING', 'TASK_KILLED', etc) for a given task.
The returned values are ordered chronologically from first to last.
'''
cluster_tasks = sdk_cmd.cluster_request('GET', '/mesos/tasks').json()
statuses = []
for cluster_task in cluster_tasks['tasks']:
if cluster_task['name'] != task_name:
continue
statuses += cluster_task['statuses']
history = [entry['state'] for entry in sorted(statuses, key=lambda x: x['timestamp'])]
log.info('Status history for task {}: {}'.format(task_name, ', '.join(history)))
return history
def get_summary(with_completed=False):
'''Returns a summary of task information as returned by the DC/OS CLI.
This may be used instead of invoking 'dcos task [--all]' directly.
Returns a list of Task objects.
'''
cluster_tasks = sdk_cmd.cluster_request('GET', '/mesos/tasks').json()
cluster_agents = sdk_cmd.cluster_request('GET', '/mesos/slaves').json()
all_tasks = [Task.parse(entry, cluster_agents) for entry in cluster_tasks['tasks']]
if with_completed:
output = all_tasks
else:
output = list(filter(lambda t: t.state not in COMPLETED_TASK_STATES, all_tasks))
log.info('Task summary (with_completed={}):\n- {}'.format(
with_completed, '\n- '.join([str(e) for e in output])))
return output
def get_tasks_avoiding_scheduler(service_name, task_name_pattern):
'''Returns a list of tasks which are not located on the Scheduler's machine.
Avoid also killing the system that the scheduler is on. This is just to speed up testing.
In practice, the scheduler would eventually get relaunched on a different node by Marathon and
we'd be able to proceed with repairing the service from there. However, it takes 5-20 minutes
for Mesos to decide that the agent is dead. This is also why we perform a manual 'ls' check to
verify the host is down, rather than waiting for Mesos to tell us.
'''
skip_tasks = {sdk_package_registry.PACKAGE_REGISTRY_SERVICE_NAME}
server_tasks = [
task for task in get_summary() if
task.name not in skip_tasks and task_name_pattern.match(task.name)
]
scheduler_ip = shakedown.get_service_ips('marathon', service_name).pop()
log.info('Scheduler IP: {}'.format(scheduler_ip))
# Always avoid package registry (if present)
registry_ips = shakedown.get_service_ips(
'marathon',
sdk_package_registry.PACKAGE_REGISTRY_SERVICE_NAME
)
log.info('Package Registry [{}] IP(s): {}'.format(
sdk_package_registry.PACKAGE_REGISTRY_SERVICE_NAME, registry_ips
))
skip_ips = {scheduler_ip} | set(registry_ips)
avoid_tasks = [task for task in server_tasks if task.host not in skip_ips]
log.info('Found tasks avoiding scheduler and {} at {}: {}'.format(
sdk_package_registry.PACKAGE_REGISTRY_SERVICE_NAME,
skip_ips,
avoid_tasks
))
return avoid_tasks
def get_completed_task_id(task_name):
try:
tasks = [t['id'] for t in shakedown.get_tasks(completed=True) if t['name'] == task_name]
except dcos.errors.DCOSHTTPException:
tasks = []
return tasks[0] if tasks else None
def check_task_relaunched(task_name, old_task_id, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
try:
task_ids = set([t['id'] for t in shakedown.get_tasks(completed=True) if t['name'] == task_name])
except dcos.errors.DCOSHTTPException:
log.info('Failed to get task ids. task_name=%s', task_name)
task_ids = set([])
return len(task_ids) > 0 and (old_task_id not in task_ids or len(task_ids) > 1)
fn()
def check_task_not_relaunched(service_name, task_name, old_task_id, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
sdk_plan.wait_for_completed_deployment(service_name)
sdk_plan.wait_for_completed_recovery(service_name)
try:
task_ids = set([t['id'] for t in shakedown.get_tasks() if t['name'] == task_name])
except dcos.errors.DCOSHTTPException:
log.info('Failed to get task ids for service {}'.format(service_name))
task_ids = set([])
assert len(task_ids) == 1 and old_task_id in task_ids
def check_tasks_updated(service_name, prefix, old_task_ids, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
# TODO: strongly consider merging the use of checking that tasks have been replaced (this method)
# and checking that the deploy/upgrade/repair plan has completed. Each serves a part in the bigger
# atomic test, that the plan completed properly where properly includes that no old tasks remain.
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds*1000,
retry_on_result=lambda res: not res)
def fn():
try:
task_ids = get_task_ids(service_name, prefix)
except dcos.errors.DCOSHTTPException:
log.info('Failed to get task ids for service {}'.format(service_name))
task_ids = []
prefix_clause = ''
if prefix:
prefix_clause = ' starting with "{}"'.format(prefix)
old_set = set(old_task_ids)
new_set = set(task_ids)
newly_launched_set = new_set.difference(old_set)
old_remaining_set = old_set.intersection(new_set)
# the constraints of old and new task cardinality match should be covered by completion of
# deploy/recovery/whatever plan, not task cardinality, but some uses of this method are not
# using the plan, so not the definitive source, so will fail when the finished state of a
# plan yields more or less tasks per pod.
all_updated = len(newly_launched_set) == len(new_set) \
and len(old_remaining_set) == 0 \
and len(new_set) >= len(old_set)
if all_updated:
log.info('All of the tasks{} have updated\n- Old tasks: {}\n- New tasks: {}'.format(
prefix_clause,
old_set,
new_set))
return all_updated
# forgive the language a bit, but len('remained') == len('launched'),
# and similar for the rest of the label for task ids in the log line,
# so makes for easier reading
log.info('Waiting for tasks%s to have updated ids:\n'
'- Old tasks (remaining): %s\n'
'- New tasks (launched): %s',
prefix_clause,
old_remaining_set,
newly_launched_set)
fn()
def check_tasks_not_updated(service_name, prefix, old_task_ids):
sdk_plan.wait_for_completed_deployment(service_name)
sdk_plan.wait_for_completed_recovery(service_name)
task_ids = get_task_ids(service_name, prefix)
task_sets = "\n- Old tasks: {}\n- Current tasks: {}".format(sorted(old_task_ids), sorted(task_ids))
log.info('Checking tasks starting with "{}" have not been updated:{}'.format(prefix, task_sets))
assert set(old_task_ids).issubset(set(task_ids)), 'Tasks starting with "{}" were updated:{}'.format(prefix,
task_sets)
| 39.977528
| 114
| 0.647836
|
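The helpers above are typically combined along these lines in a test. This is only a sketch; it assumes an attached DC/OS cluster and an installed service named "hello-world" with a "hello" pod:

```python
import sdk_tasks  # the module shown above

# Capture task ids, perform some disruptive operation, then assert replacement.
old_ids = sdk_tasks.get_task_ids("hello-world", "hello")
# ... restart or replace the "hello" pod here ...
sdk_tasks.check_tasks_updated("hello-world", "hello", old_ids)
sdk_tasks.check_running("hello-world", expected_task_count=len(old_ids))
```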
f397fc2520e8500967c1ca9fccba6f1479f74b7f
| 4,984
|
py
|
Python
|
TheNounProjectAPI/collections.py
|
CubieDev/TheNounProjectAPI
|
6db6031800047421146bc72e74060b8af9a7b3be
|
[
"MIT"
] | 8
|
2019-10-18T21:07:33.000Z
|
2021-01-10T01:36:23.000Z
|
TheNounProjectAPI/collections.py
|
CubieDev/TheNounProjectAPI
|
6db6031800047421146bc72e74060b8af9a7b3be
|
[
"MIT"
] | 1
|
2020-07-20T14:00:21.000Z
|
2020-08-24T10:13:04.000Z
|
TheNounProjectAPI/collections.py
|
CubieDev/TheNounProjectAPI
|
6db6031800047421146bc72e74060b8af9a7b3be
|
[
"MIT"
] | 1
|
2021-07-22T01:52:04.000Z
|
2021-07-22T01:52:04.000Z
|
from typing import Union, List
from TheNounProjectAPI.core import Core
from TheNounProjectAPI.call import Call
from TheNounProjectAPI.models import CollectionModel
from TheNounProjectAPI.exceptions import IncorrectType
class Collections(Core):
@Call.collection
def get_collection_by_id(self, _id: int) -> CollectionModel:
"""
Fetches a single :ref:`collection-label` by id.
:param _id: Collection ID.
:type _id: int
:raise NonPositive: Raises exception when id is nonpositive.
:returns: CollectionModel object identified by the _id.
:rtype: CollectionModel
"""
self._type_assert(_id, "id", int)
self._id_assert(_id, "id")
return self._prepare_url(f"{self._base_url}/collection/{_id}")
@Call.collection
def get_collection_by_slug(self, slug: str) -> CollectionModel:
"""
Fetches a single :ref:`collection-label` by slug.
:param slug: Collection slug.
:type slug: str
:raise IllegalSlug: Raises exception when slug is an empty string, contains non ascii characters or contains multiple words.
:returns: CollectionModel object identified by the slug.
:rtype: CollectionModel
"""
self._type_assert(slug, "slug", str)
self._slug_assert(slug, "slug")
return self._prepare_url(f"{self._base_url}/collection/{slug}")
@Call.dispatch
def get_collection(self, identifier: Union[int, str]) -> CollectionModel:
"""
Fetches a single :ref:`collection-label`, either by id or by slug.
:param identifier: Collection identifier (id or slug).
:type identifier: Union[int, str]
:raise NonPositive: Raises exception when identifier is a nonpositive integer.
:raise IllegalSlug: Raises exception when identifier is a string that's empty, non-ascii or with multiple words.
:returns: CollectionModel object identified by the identifier.
:rtype: CollectionModel
"""
raise IncorrectType("identifier", (int, str))
@get_collection.register(int)
def _(self, _id: int) -> CollectionModel:
"""
This method is the implementation of get_collection, in the case that the identifier is an integer.
"""
return self.get_collection_by_id(_id)
@get_collection.register(str)
def _(self, slug: str) -> CollectionModel:
"""
This method is the implementation of get_collection, in the case that the identifier is a string.
"""
return self.get_collection_by_slug(slug)
@Call.collections
def get_collections(self, limit:int = None, offset:int = None, page:int = None) -> List[CollectionModel]:
"""
Fetches a list of all :ref:`collections-label`.
:param limit: Maximum number of results. (defaults to None)
:type limit: int
:param offset: Number of results to displace or skip over. (defaults to None)
:type offset: int
:param page: Number of results of limit length to displace or skip over. (defaults to None)
:type page: int
:returns: List of CollectionModel objects.
:rtype: List[CollectionModel]
"""
self._lop_assert(limit, offset, page)
return self._prepare_url(f"{self._base_url}/collections", limit=limit, offset=offset, page=page)
@Call.collections
def get_user_collections(self, user_id: int) -> List[CollectionModel]:
"""
Fetches a list of :ref:`collections-label` associated with a user.
:param user_id: User id.
:type user_id: int
:raise NonPositive: Raises exception when user_id is not positive.
:returns: List of CollectionModel objects associated with a user identified by the user_id.
:rtype: List[CollectionModel]
"""
self._type_assert(user_id, "user_id", int)
self._id_assert(user_id, "user_id")
return self._prepare_url(f"{self._base_url}/user/{user_id}/collections")
@Call.collection
def get_user_collection(self, user_id: int, slug: str) -> CollectionModel:
"""
Fetches a single :ref:`collection-label` associated with a user.
:param user_id: User id.
:type user_id: int
:param slug: Collection slug.
:type slug: str
:raise NonPositive: Raises exception when user_id is not positive.
:raise IllegalSlug: Raises exception when slug is an empty string, contains non ascii characters or contains multiple words.
:returns: CollectionModel object identified by the slug, from the user identified by the user_id.
:rtype: CollectionModel
"""
self._type_assert(user_id, "user_id", int)
self._type_assert(slug, "slug", str)
self._id_assert(user_id, "user_id")
self._slug_assert(slug, "slug")
return self._prepare_url(f"{self._base_url}/user/{user_id}/collections/{slug}")
| 38.045802
| 132
| 0.663122
|
e95a075b37d06493f60d307539257b25b9a827f9
| 3,897
|
py
|
Python
|
src/oci/apigateway/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/apigateway/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/apigateway/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestError(object):
"""
An error encountered while executing a work request.
"""
def __init__(self, **kwargs):
"""
Initializes a new WorkRequestError object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param code:
The value to assign to the code property of this WorkRequestError.
:type code: str
:param message:
The value to assign to the message property of this WorkRequestError.
:type message: str
:param timestamp:
The value to assign to the timestamp property of this WorkRequestError.
:type timestamp: datetime
"""
self.swagger_types = {
'code': 'str',
'message': 'str',
'timestamp': 'datetime'
}
self.attribute_map = {
'code': 'code',
'message': 'message',
'timestamp': 'timestamp'
}
self._code = None
self._message = None
self._timestamp = None
@property
def code(self):
"""
**[Required]** Gets the code of this WorkRequestError.
        A machine-usable code for the error that occurred. See
<a href=\"/Content/API/References/apierrors.htm\">API Errors</a>.
:return: The code of this WorkRequestError.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""
Sets the code of this WorkRequestError.
        A machine-usable code for the error that occurred. See
<a href=\"/Content/API/References/apierrors.htm\">API Errors</a>.
:param code: The code of this WorkRequestError.
:type: str
"""
self._code = code
@property
def message(self):
"""
**[Required]** Gets the message of this WorkRequestError.
A human-readable description of the issue encountered.
:return: The message of this WorkRequestError.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this WorkRequestError.
A human-readable description of the issue encountered.
:param message: The message of this WorkRequestError.
:type: str
"""
self._message = message
@property
def timestamp(self):
"""
**[Required]** Gets the timestamp of this WorkRequestError.
        The time the error occurred. An RFC3339 formatted datetime string.
:return: The timestamp of this WorkRequestError.
:rtype: datetime
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this WorkRequestError.
        The time the error occurred. An RFC3339 formatted datetime string.
:param timestamp: The timestamp of this WorkRequestError.
:type: datetime
"""
self._timestamp = timestamp
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 28.866667
| 245
| 0.62176
|
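Per its docstring, the model is populated from keyword arguments. A small sketch follows; the import path is assumed to follow the usual oci package layout, and the field values are hypothetical:

```python
import datetime
from oci.apigateway.models import WorkRequestError  # assumed re-export path

err = WorkRequestError(
    code="InternalServerError",           # hypothetical error code
    message="Gateway deployment failed",  # hypothetical message
    timestamp=datetime.datetime(2022, 1, 1, 12, 0, 0),
)
print(err.code, err.message, err.timestamp)
```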
e32c3691bda9c5d923f1504d775f876a0dab2710
| 4,213
|
py
|
Python
|
src/models/TextRank/summarize_textrank.py
|
Carouge/TextSummarization
|
570260dcfccfeaa9ca74fe5b57abd1f81631a74e
|
[
"MIT"
] | 3
|
2018-06-11T11:10:23.000Z
|
2018-12-27T05:18:33.000Z
|
src/models/TextRank/summarize_textrank.py
|
MachineLearningUCU/TextSummarization
|
570260dcfccfeaa9ca74fe5b57abd1f81631a74e
|
[
"MIT"
] | 1
|
2018-12-25T13:04:55.000Z
|
2019-05-15T13:36:24.000Z
|
src/models/TextRank/summarize_textrank.py
|
Carouge/TextSummarization
|
570260dcfccfeaa9ca74fe5b57abd1f81631a74e
|
[
"MIT"
] | null | null | null |
from gensim.summarization.summarizer import summarize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.translate.bleu_score import sentence_bleu
from rouge import Rouge
import pandas as pd
def summarize_doc(content, len_words):
"""
Summarization of one document using TextRank-based model from gensim library.
:param content: text in string format
:param len_words: number of words to 'generate', output summary size
:return: summary and list of cleaned words from summary
"""
summarized = summarize(content, word_count=len_words)
words = summarized.split(' ')
tokenizer = RegexpTokenizer(r'\w+')
filtered_words = [word for word in words if word not in stopwords.words('english')]
filtered_words = tokenizer.tokenize(' '.join(filtered_words))
return summarized, filtered_words
dataset = pd.read_csv('../../../data/papers-2K.csv')
testrank_res = pd.DataFrame(dataset['id'])
testrank_res['BLEU'], testrank_res['ROUGE2_f'], testrank_res['ROUGE1_f'], testrank_res['ROUGE1_p'], testrank_res['ROUGE2_p'] = None, None, None, None, None
testrank_res['BLEU_unfilter'], testrank_res['ROUGE2_f_unfilter'], testrank_res['ROUGE1_f_unfilter'], testrank_res['ROUGE1_p_unfilter'], testrank_res['ROUGE2_p_unfilter'] = None, None, None, None, None
testrank_res['Summary'] = None
testrank_res['ROUGEl_p_unfilter'], testrank_res['ROUGEl_p'], testrank_res['ROUGEl_f'], testrank_res['ROUGEl_f_unfilter'] = None, None, None, None
for index, paper in dataset.iterrows():
try:
content = paper['text']
if len(content) < len(paper['abstract']) * 3:
print("Too small text for paper", paper['id'], index)
raise ValueError
# ratio = round(len(paper['abstract'])/len(content), 3)
num_words = len(paper['abstract'].split(' '))
sum_text, filtered_words = summarize_doc(content, num_words)
abstract = paper['abstract'].split()
tokenizer = RegexpTokenizer(r'\w+')
filtered_abstract = [word for word in abstract if word not in stopwords.words('english')]
filtered_abstract = tokenizer.tokenize(' '.join(filtered_abstract))
bleu_score = sentence_bleu(filtered_abstract, filtered_words)
rouge = Rouge()
rouge_score = rouge.get_scores(' '.join(filtered_words), ' '.join(filtered_abstract))
# print(len(sum_text), len(paper['abstract']), len(' '.join(filtered_abstract)), len(' '.join(filtered_words)))
testrank_res['Summary'].iloc[index] = sum_text
testrank_res['BLEU'].iloc[index] = bleu_score
testrank_res['ROUGE2_f'].iloc[index] = rouge_score[0]['rouge-2']['f']
testrank_res['ROUGE1_f'].iloc[index] = rouge_score[0]['rouge-1']['f']
testrank_res['ROUGE2_p'].iloc[index] = rouge_score[0]['rouge-2']['p']
testrank_res['ROUGE1_p'].iloc[index] = rouge_score[0]['rouge-1']['p']
# Score on not cleaned text
bleu_score_unfilter = sentence_bleu(sum_text.split(' '), abstract)
rouge_unfilter = Rouge()
rouge_score_unfilter = rouge_unfilter.get_scores(sum_text, paper['abstract'])
# print(len(sum_text), len(paper['abstract']), len(' '.join(filtered_abstract)), len(' '.join(filtered_words)))
testrank_res['BLEU_unfilter'].iloc[index] = bleu_score_unfilter
testrank_res['ROUGE2_f_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-2']['f']
testrank_res['ROUGE1_f_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-1']['f']
testrank_res['ROUGE2_p_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-2']['p']
testrank_res['ROUGE1_p_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-1']['p']
testrank_res['ROUGEl_p_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-l']['p']
testrank_res['ROUGEl_p'].iloc[index] = rouge_score[0]['rouge-l']['p']
testrank_res['ROUGEl_f'].iloc[index] = rouge_score[0]['rouge-l']['f']
testrank_res['ROUGEl_f_unfilter'].iloc[index] = rouge_score_unfilter[0]['rouge-l']['f']
print("Iteration: ", index)
except:
pass
print(testrank_res.head(5))
testrank_res.to_csv('textrank_scores.csv', index=False)
| 52.6625
| 200
| 0.68977
|
25eab122ff7f84114f276f0e15407eefb53d09e7
| 4,696
|
py
|
Python
|
opta/commands/deploy.py
|
run-x/opta
|
64606498334f2b1aa79f5a431465eafdf5ca5ed7
|
[
"Apache-2.0"
] | 595
|
2021-05-21T22:30:48.000Z
|
2022-03-31T15:40:25.000Z
|
opta/commands/deploy.py
|
run-x/opta
|
64606498334f2b1aa79f5a431465eafdf5ca5ed7
|
[
"Apache-2.0"
] | 463
|
2021-05-24T21:32:59.000Z
|
2022-03-31T17:12:33.000Z
|
opta/commands/deploy.py
|
run-x/opta
|
64606498334f2b1aa79f5a431465eafdf5ca5ed7
|
[
"Apache-2.0"
] | 29
|
2021-05-21T22:27:52.000Z
|
2022-03-28T16:43:45.000Z
|
import os
from pathlib import Path
from typing import Optional
import click
from opta.amplitude import amplitude_client
from opta.commands.apply import _apply
from opta.commands.local_flag import _clean_tf_folder, _handle_local_flag
from opta.commands.push import _push, is_service_config
from opta.core.terraform import Terraform
from opta.error_constants import USER_ERROR_TF_LOCK
from opta.exceptions import MissingState, UserErrors
from opta.layer import Layer
from opta.pre_check import pre_check
from opta.utils import check_opta_file_exists, fmt_msg, logger
@click.command()
@click.option(
"-i", "--image", required=True, help="Your local image in the for myimage:tag"
)
@click.option("-c", "--config", default="opta.yaml", help="Opta config file.")
@click.option(
"-e", "--env", default=None, help="The env to use when loading the config file."
)
@click.option(
"-t",
"--tag",
default=None,
help="The image tag associated with your docker container. Defaults to your local image tag.",
)
@click.option(
"--auto-approve",
is_flag=True,
default=False,
help="Automatically approve terraform plan.",
)
@click.option(
"--detailed-plan",
is_flag=True,
default=False,
help="Show full terraform plan in detail, not the opta provided summary",
)
@click.option(
"--local",
is_flag=True,
default=False,
help="""Run the service locally on a local Kubernetes cluster for development and testing, irrespective of the environment specified inside the opta service yaml file""",
hidden=False,
)
def deploy(
image: str,
config: str,
env: Optional[str],
tag: Optional[str],
auto_approve: bool,
detailed_plan: bool,
local: Optional[bool],
) -> None:
"""Push your new image to the cloud and deploy it in your environment
You can build your container image locally and then pass it with the -i flag
Examples:
opta deploy -c my_config.yaml -i my_container:latest --auto-approve
opta deploy -c my_config.yaml -i my_container:latest -e prod
opta deploy -c my_config.yaml -i my_container:latest --local
"""
pre_check()
config = check_opta_file_exists(config)
if not is_service_config(config):
raise UserErrors(
fmt_msg(
"""
Opta deploy can only run on service yaml files. This is an environment yaml file.
~See https://docs.opta.dev/getting-started/ for more details.
~
~(We think that this is an environment yaml file, because service yaml must
~specify the "environments" field).
"""
)
)
if local:
adjusted_config = _handle_local_flag(config, False)
if adjusted_config != config: # Only do this for service opta files
config = adjusted_config
localopta_envfile = os.path.join(
Path.home(), ".opta", "local", "localopta.yaml"
)
_apply(
config=localopta_envfile,
auto_approve=True,
local=False,
env="",
refresh=True,
image_tag="",
test=False,
detailed_plan=True,
)
_clean_tf_folder()
layer = Layer.load_from_yaml(config, env)
amplitude_client.send_event(
amplitude_client.DEPLOY_EVENT,
event_properties={"org_name": layer.org_name, "layer_name": layer.name},
)
layer.verify_cloud_credentials()
layer.validate_required_path_dependencies()
if Terraform.download_state(layer):
tf_lock_exists, _ = Terraform.tf_lock_details(layer)
if tf_lock_exists:
raise UserErrors(USER_ERROR_TF_LOCK)
try:
outputs = Terraform.get_outputs(layer)
except MissingState:
outputs = {}
if "docker_repo_url" not in outputs or outputs["docker_repo_url"] == "":
logger.info(
"Did not find docker repository in state, so applying once to create it before deployment"
)
_apply(
config=config,
env=env,
refresh=False,
image_tag=None,
test=False,
local=local,
auto_approve=auto_approve,
stdout_logs=False,
detailed_plan=detailed_plan,
)
image_digest, image_tag = _push(image=image, config=config, env=env, tag=tag)
_apply(
config=config,
env=env,
refresh=False,
image_tag=None,
test=False,
local=local,
auto_approve=auto_approve,
image_digest=image_digest,
detailed_plan=detailed_plan,
)
| 30.69281
| 175
| 0.638416
|
300c26be8f6b33c0cdd4a57e75648e444a25d763
| 3,136
|
py
|
Python
|
python/examples/ocr/rec_web_server.py
|
BohaoWu/Serving
|
fe7fb5465bfbf6dae587aaf080ad27c08a0f7057
|
[
"Apache-2.0"
] | null | null | null |
python/examples/ocr/rec_web_server.py
|
BohaoWu/Serving
|
fe7fb5465bfbf6dae587aaf080ad27c08a0f7057
|
[
"Apache-2.0"
] | null | null | null |
python/examples/ocr/rec_web_server.py
|
BohaoWu/Serving
|
fe7fb5465bfbf6dae587aaf080ad27c08a0f7057
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
from paddle_serving_app.reader import OCRReader
import cv2
import sys
import numpy as np
import os
from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
if sys.argv[1] == 'gpu':
from paddle_serving_server_gpu.web_service import WebService
elif sys.argv[1] == 'cpu':
from paddle_serving_server.web_service import WebService
import time
import re
import base64
class OCRService(WebService):
def init_rec(self):
self.ocr_reader = OCRReader()
def preprocess(self, feed=[], fetch=[]):
# TODO: to handle batch rec images
img_list = []
for feed_data in feed:
data = base64.b64decode(feed_data["image"].encode('utf8'))
data = np.fromstring(data, np.uint8)
im = cv2.imdecode(data, cv2.IMREAD_COLOR)
img_list.append(im)
feed_list = []
max_wh_ratio = 0
for i, boximg in enumerate(img_list):
h, w = boximg.shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
for img in img_list:
norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
#feed = {"image": norm_img}
feed_list.append(norm_img)
if len(feed_list) == 1:
feed_batch = {
"image": np.concatenate(
feed_list, axis=0)[np.newaxis, :]
}
else:
feed_batch = {"image": np.concatenate(feed_list, axis=0)}
fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
return feed_batch, fetch, True
def postprocess(self, feed={}, fetch=[], fetch_map=None):
rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
res_lst = []
for res in rec_res:
res_lst.append(res[0])
res = {"res": res_lst}
return res
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_rec_model")
ocr_service.init_rec()
if sys.argv[1] == 'gpu':
ocr_service.set_gpus("0")
ocr_service.prepare_server(
workdir="workdir", port=9292, device="gpu", gpuid=0)
elif sys.argv[1] == 'cpu':
ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
ocr_service.run_rpc_service()
ocr_service.run_web_service()
| 36.465116
| 97
| 0.676658
|
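The OCRService above exposes recognition over HTTP through paddle_serving's WebService. A minimal client sketch follows, assuming the conventional http://127.0.0.1:9292/ocr/prediction route and a JSON body whose "feed" entries match the preprocess() signature; both the route and the payload layout are assumptions, not shown in the file.
# Hypothetical client for the service above; the URL path and payload layout
# are assumptions based on how paddle_serving web services are usually called.
import base64
import requests

def recognize(image_path, url="http://127.0.0.1:9292/ocr/prediction"):
    with open(image_path, "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf8")
    payload = {"feed": [{"image": image_b64}], "fetch": ["res"]}
    return requests.post(url, json=payload, timeout=10).json()

# print(recognize("crop_9.png"))  # hypothetical image file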
72ec20938f12666aac22b02ba34eff699ba7ab83
| 7,741
|
py
|
Python
|
youtube_dl/extractor/ooyala.py
|
LyleH/youtube-dl
|
7564b09ef5c09454908f78cb91c3bd2d6daacac5
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/ooyala.py
|
LyleH/youtube-dl
|
7564b09ef5c09454908f78cb91c3bd2d6daacac5
|
[
"Unlicense"
] | null | null | null |
youtube_dl/extractor/ooyala.py
|
LyleH/youtube-dl
|
7564b09ef5c09454908f78cb91c3bd2d6daacac5
|
[
"Unlicense"
] | null | null | null |
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
ExtractorError,
unsmuggle_url,
determine_ext,
)
from ..compat import compat_urllib_parse_urlencode
class OoyalaBaseIE(InfoExtractor):
_PLAYER_BASE = 'http://player.ooyala.com/'
_CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
_AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v2/authorization/embed_code/%s/%s?'
def _extract(self, content_tree_url, video_id, domain='example.org'):
content_tree = self._download_json(content_tree_url, video_id)['content_tree']
metadata = content_tree[list(content_tree)[0]]
embed_code = metadata['embed_code']
pcode = metadata.get('asset_pcode') or embed_code
title = metadata['title']
auth_data = self._download_json(
self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
compat_urllib_parse_urlencode({
'domain': domain,
'supportedFormats': 'mp4,rtmp,m3u8,hds',
}), video_id)
cur_auth_data = auth_data['authorization_data'][embed_code]
urls = []
formats = []
if cur_auth_data['authorized']:
for stream in cur_auth_data['streams']:
s_url = base64.b64decode(
stream['url']['data'].encode('ascii')).decode('utf-8')
if s_url in urls:
continue
urls.append(s_url)
ext = determine_ext(s_url, None)
delivery_type = stream['delivery_type']
if delivery_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
re.sub(r'/ip(?:ad|hone)/', '/all/', s_url), embed_code, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif delivery_type == 'hds' or ext == 'f4m':
formats.extend(self._extract_f4m_formats(
s_url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
s_url, embed_code, fatal=False))
else:
formats.append({
'url': s_url,
'ext': ext or stream.get('delivery_type'),
'vcodec': stream.get('video_codec'),
'format_id': delivery_type,
'width': int_or_none(stream.get('width')),
'height': int_or_none(stream.get('height')),
'abr': int_or_none(stream.get('audio_bitrate')),
'vbr': int_or_none(stream.get('video_bitrate')),
'fps': float_or_none(stream.get('framerate')),
})
else:
raise ExtractorError('%s said: %s' % (
self.IE_NAME, cur_auth_data['message']), expected=True)
self._sort_formats(formats)
subtitles = {}
for lang, sub in metadata.get('closed_captions_vtt', {}).get('captions', {}).items():
sub_url = sub.get('url')
if not sub_url:
continue
subtitles[lang] = [{
'url': sub_url,
}]
return {
'id': embed_code,
'title': title,
'description': metadata.get('description'),
'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
'duration': float_or_none(metadata.get('duration'), 1000),
'subtitles': subtitles,
'formats': formats,
}
class OoyalaIE(OoyalaBaseIE):
_VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
_TESTS = [
{
# From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'info_dict': {
'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'ext': 'mp4',
'title': 'Explaining Data Recovery from Hard Drives and SSDs',
'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
'duration': 853.386,
},
# The video in the original webpage now uses PlayWire
'skip': 'Ooyala said: movie expired',
}, {
# Only available for ipad
'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'info_dict': {
'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'ext': 'mp4',
'title': 'Simulation Overview - Levels of Simulation',
'duration': 194.948,
},
},
{
# Information available only through SAS api
# From http://community.plm.automation.siemens.com/t5/News-NX-Manufacturing/Tool-Path-Divide/ba-p/4187
'url': 'http://player.ooyala.com/player.js?embedCode=FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'md5': 'a84001441b35ea492bc03736e59e7935',
'info_dict': {
'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'ext': 'mp4',
'title': 'Divide Tool Path.mp4',
'duration': 204.405,
}
}
]
@staticmethod
def _url_for_embed_code(embed_code):
return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
@classmethod
def _build_url_result(cls, embed_code):
return cls.url_result(cls._url_for_embed_code(embed_code),
ie=cls.ie_key())
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
embed_code = self._match_id(url)
domain = smuggled_data.get('domain')
content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
return self._extract(content_tree_url, embed_code, domain)
class OoyalaExternalIE(OoyalaBaseIE):
_VALID_URL = r'''(?x)
(?:
ooyalaexternal:|
https?://.+?\.ooyala\.com/.*?\bexternalId=
)
(?P<partner_id>[^:]+)
:
(?P<id>.+)
(?:
:|
.*?&pcode=
)
(?P<pcode>.+?)
(?:&|$)
'''
_TEST = {
'url': 'https://player.ooyala.com/player.js?externalId=espn:10365079&pcode=1kNG061cgaoolOncv54OAO1ceO-I&adSetCode=91cDU6NuXTGKz3OdjOxFdAgJVtQcKJnI&callback=handleEvents&hasModuleParams=1&height=968&playerBrandingId=7af3bd04449c444c964f347f11873075&targetReplaceId=videoPlayer&width=1656&wmode=opaque&allowScriptAccess=always',
'info_dict': {
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
'duration': 1302.0,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups()
content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id)
return self._extract(content_tree_url, video_id)
| 41.395722
| 334
| 0.547474
|
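A standalone sketch of how the URL templates in OoyalaBaseIE expand for a given embed code; the embed code is taken from the first test fixture above, and the snippet only mirrors the string formatting, not the download logic.
# Mirrors _CONTENT_TREE_BASE and _AUTHORIZATION_URL_TEMPLATE from the class above.
embed_code = 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8'
player_base = 'http://player.ooyala.com/'
content_tree_url = player_base + 'player_api/v1/content_tree/embed_code/%s/%s' % (embed_code, embed_code)
auth_url = player_base + 'sas/player_api/v2/authorization/embed_code/%s/%s?' % (embed_code, embed_code)
print(content_tree_url)
print(auth_url)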
2972ccf8562d98df247283d9cf2791dd1d527e84
| 2,302
|
py
|
Python
|
fastai2/callback/data.py
|
antoinebon/fastai2
|
c14315d5a73565a54301acd2c41a4f04c4042f54
|
[
"Apache-2.0"
] | null | null | null |
fastai2/callback/data.py
|
antoinebon/fastai2
|
c14315d5a73565a54301acd2c41a4f04c4042f54
|
[
"Apache-2.0"
] | null | null | null |
fastai2/callback/data.py
|
antoinebon/fastai2
|
c14315d5a73565a54301acd2c41a4f04c4042f54
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/14a_callback.data.ipynb (unless otherwise specified).
__all__ = ['CollectDataCallback', 'WeightedDL', 'weighted_dataloaders', 'PartialDL', 'partial_dataloaders']
# Cell
from ..basics import *
# Cell
class CollectDataCallback(Callback):
"Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
def begin_fit(self): self.data = L()
def after_batch(self): self.data.append(to_detach((self.xb,self.yb,self.pred,self.loss)))
# Cell
@log_args(but_as=TfmdDL.__init__)
@delegates()
class WeightedDL(TfmdDL):
def __init__(self, dataset=None, bs=None, wgts=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
wgts = array([1.]*len(dataset) if wgts is None else wgts)
self.wgts = wgts/wgts.sum()
def get_idxs(self):
if self.n==0: return []
if not self.shuffle: return super().get_idxs()
return list(np.random.choice(self.n, self.n, p=self.wgts))
# Cell
@patch
@delegates(Datasets.dataloaders)
def weighted_dataloaders(self:Datasets, wgts, bs=64, **kwargs):
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=WeightedDL, dl_kwargs=({'wgts':wgts}, *xtra_kwargs), **kwargs)
# Cell
@log_args(but_as=TfmdDL.__init__)
@delegates()
class PartialDL(TfmdDL):
"Select randomly partial quantity of data at each epoch"
def __init__(self, dataset=None, bs=None, partial_n=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
self.partial_n = min(partial_n, self.n) if partial_n else None
def get_idxs(self):
if self.partial_n is None: return super().get_idxs()
return list(np.random.choice(self.n, self.partial_n, replace=False))
def __len__(self):
if self.partial_n is None: return super().__len__()
return self.partial_n//self.bs + (0 if self.drop_last or self.partial_n%self.bs==0 else 1)
# Cell
@patch
@delegates(Datasets.dataloaders)
def partial_dataloaders(self:FilteredBase, partial_n, bs=64, **kwargs):
"Create a partial dataloader `PartialDL` for the training set"
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=PartialDL, dl_kwargs=({'partial_n':partial_n}, *xtra_kwargs), **kwargs)
| 39.689655
| 114
| 0.69722
|
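The heart of WeightedDL is the weighted index draw in get_idxs. A standalone sketch of that sampling rule, with toy sizes and weights chosen purely for illustration:
# Standalone illustration of the np.random.choice call used by WeightedDL.get_idxs:
# indices are drawn with probability proportional to the supplied weights.
import numpy as np

n = 8
wgts = np.array([1., 1., 1., 1., 1., 1., 1., 9.])  # last item heavily favoured
p = wgts / wgts.sum()                              # same normalisation as __init__
idxs = list(np.random.choice(n, n, p=p))           # one shuffled "epoch" of indices
print(idxs)                                        # index 7 tends to dominate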
9bd533514b2edeec5ac3f2dfa365f2e254b1d3d8
| 161
|
py
|
Python
|
core/primitive/hex.py
|
ponyatov/metaLpy
|
96149313e8083536ade1c331825242f6996f05b3
|
[
"MIT"
] | null | null | null |
core/primitive/hex.py
|
ponyatov/metaLpy
|
96149313e8083536ade1c331825242f6996f05b3
|
[
"MIT"
] | null | null | null |
core/primitive/hex.py
|
ponyatov/metaLpy
|
96149313e8083536ade1c331825242f6996f05b3
|
[
"MIT"
] | null | null | null |
## @file
from .integer import *
## machine hexadecimal number
## @ingroup primitive
class Hex(Integer):
def __init__(self, V):
super().__init__(V)
| 16.1
| 29
| 0.658385
|
5e710f9f16ace8ecf5a682859004354ab3d05002
| 3,342
|
py
|
Python
|
tests/functional/test_linux_changes.py
|
xianfang-qian/docker_ci
|
0ff773d1e7d9420ef6a05a63bc56b32952b71192
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_linux_changes.py
|
xianfang-qian/docker_ci
|
0ff773d1e7d9420ef6a05a63bc56b32952b71192
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_linux_changes.py
|
xianfang-qian/docker_ci
|
0ff773d1e7d9420ef6a05a63bc56b32952b71192
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
import re
import pytest
@pytest.mark.usefixtures('_is_not_distribution')
@pytest.mark.parametrize('_is_not_distribution', [('base', 'internal_dev',
'custom-no-omz', 'custom-no-cv', 'custom-full')], indirect=True)
class TestLinuxChanges:
@pytest.mark.usefixtures('_is_image_os')
@pytest.mark.parametrize('_is_image_os', [('ubuntu18', 'ubuntu20', 'rhel8')], indirect=True)
def test_linux_deps_change(self, tester, image):
root = pathlib.Path(os.path.realpath(__name__)).parent
image_folder = image.replace('/', '_').replace(':', '_')
log_folder = root / 'logs' / image_folder / 'linux_deps'
linux_deps_file_name = re.search(r'(.*_\d{4}\.\d)', image.split('/')[-1].replace(':', '_'))
if linux_deps_file_name:
linux_deps_file_name = f'{linux_deps_file_name.group(1)}.txt'
if not log_folder.exists():
log_folder.mkdir(parents=True)
kwargs = {
'volumes': {
root / 'tests' / 'resources' / 'linux_deps':
{'bind': '/tmp/linux_deps', 'mode': 'rw'}, # nosec # noqa: S108
log_folder: {'bind': '/tmp/logs', 'mode': 'rw'}, # nosec # noqa: S108
},
}
tester.test_docker_image(
image,
['/bin/bash -ac ". /opt/intel/openvino/install_dependencies/install_openvino_dependencies.sh -p 2>&1 | '
"sed 's/ /\\n/g' | tee /tmp/logs/install_openvino_dependencies_script_packages.log\"",
f'/bin/bash -ac "python3 /tmp/linux_deps/linux_deps_compare.py -i {image} '
f'-e /tmp/linux_deps/{linux_deps_file_name} '
'-c /tmp/logs/install_openvino_dependencies_script_packages.log -l /tmp/logs"',
],
self.test_linux_deps_change.__name__, **kwargs,
)
@pytest.mark.save_deps
@pytest.mark.usefixtures('_is_image_os')
@pytest.mark.parametrize('_is_image_os', [('ubuntu18', 'ubuntu20', 'rhel8')], indirect=True)
def test_save_linux_deps(self, tester, image):
root = pathlib.Path(os.path.realpath(__name__)).parent
image_folder = image.replace('/', '_').replace(':', '_')
log_folder = root / 'logs' / image_folder / 'linux_deps'
linux_deps_file_name = re.search(r'(.*_\d{4}\.\d)', image.split('/')[-1].replace(':', '_'))
if linux_deps_file_name:
linux_deps_file_name = f'{linux_deps_file_name.group(1)}.txt'
if not log_folder.exists():
log_folder.mkdir(parents=True)
kwargs = {
'volumes': {
root / 'tests' / 'resources' / 'linux_deps':
{'bind': '/tmp/linux_deps', 'mode': 'rw'}, # nosec # noqa: S108
log_folder: {'bind': '/tmp/logs/linux_deps', 'mode': 'rw'}, # nosec # noqa: S108
},
}
tester.test_docker_image(
image,
['/bin/bash -ac ". /opt/intel/openvino/install_dependencies/install_openvino_dependencies.sh -p 2>&1 | '
f"sed 's/ /\\n/g' | tee /tmp/logs/linux_deps/{linux_deps_file_name}\"",
],
self.test_save_linux_deps.__name__, **kwargs,
)
| 47.742857
| 116
| 0.581388
|
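Both tests derive the expected dependency-list file name from the image reference with the same regex; a standalone sketch of that derivation, using a hypothetical image string:
# Hypothetical image reference; mirrors the name handling in the tests above.
import re

image = 'registry.example.com/openvino/ubuntu18_dev:2021.4'
image_folder = image.replace('/', '_').replace(':', '_')      # log folder component
stem = image.split('/')[-1].replace(':', '_')                  # 'ubuntu18_dev_2021.4'
match = re.search(r'(.*_\d{4}\.\d)', stem)
linux_deps_file_name = f'{match.group(1)}.txt' if match else None
print(image_folder, linux_deps_file_name)                      # ..._2021.4  ubuntu18_dev_2021.4.txt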
0175fd276c2a3a99e791e0b48b2744b44f849b76
| 5,081
|
py
|
Python
|
tensorflow_model_analysis/metrics/ndcg_test.py
|
hbrylkowski/model-analysis
|
3ffa05396ca8cbb92755a40f58528a8808f63c5b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/metrics/ndcg_test.py
|
hbrylkowski/model-analysis
|
3ffa05396ca8cbb92755a40f58528a8808f63c5b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/metrics/ndcg_test.py
|
hbrylkowski/model-analysis
|
3ffa05396ca8cbb92755a40f58528a8808f63c5b
|
[
"Apache-2.0"
] | 1
|
2020-06-04T23:26:00.000Z
|
2020-06-04T23:26:00.000Z
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NDCG metric."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.metrics import ndcg
class NDCGMetricsTest(testutil.TensorflowModelAnalysisTest):
def testNDCG(self):
metric = ndcg.NDCG(gain_key='gain').computations(
sub_keys=[metric_types.SubKey(top_k=1),
metric_types.SubKey(top_k=2)],
query_key='query')[0]
query1_example1 = {
'labels': np.array([1.0]),
'predictions': np.array([0.2]),
'example_weights': np.array([1.0]),
'features': {
'query': np.array(['query1']),
'gain': np.array([1.0])
}
}
query1_example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.8]),
'example_weights': np.array([1.0]),
'features': {
'query': np.array(['query1']),
'gain': np.array([0.5])
}
}
query2_example1 = {
'labels': np.array([0.0]),
'predictions': np.array([0.5]),
'example_weights': np.array([2.0]),
'features': {
'query': np.array(['query2']),
'gain': np.array([0.5])
}
}
query2_example2 = {
'labels': np.array([1.0]),
'predictions': np.array([0.9]),
'example_weights': np.array([2.0]),
'features': {
'query': np.array(['query2']),
'gain': np.array([1.0])
}
}
query2_example3 = {
'labels': np.array([0.0]),
'predictions': np.array([0.1]),
'example_weights': np.array([2.0]),
'features': {
'query': np.array(['query2']),
'gain': np.array([0.1])
}
}
query3_example1 = {
'labels': np.array([1.0]),
'predictions': np.array([0.9]),
'example_weights': np.array([3.0]),
'features': {
'query': np.array(['query3']),
'gain': np.array([1.0])
}
}
examples = [[query1_example1, query1_example2],
[query2_example1, query2_example2, query2_example3],
[query3_example1]]
def to_standard_metric_inputs_list(list_of_extracts):
return [
metric_util.to_standard_metric_inputs(e, True)
for e in list_of_extracts
]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create(examples)
| 'Process' >> beam.Map(to_standard_metric_inputs_list)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(metric.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
ndcg1_key = metric_types.MetricKey(
name='ndcg', sub_key=metric_types.SubKey(top_k=1))
ndcg2_key = metric_types.MetricKey(
name='ndcg', sub_key=metric_types.SubKey(top_k=2))
# Query1 (weight=1): (p=0.8, g=0.5) (p=0.2, g=1.0)
# Query2 (weight=2): (p=0.9, g=1.0) (p=0.5, g=0.5) (p=0.1, g=0.1)
# Query3 (weight=3): (p=0.9, g=1.0)
#
# DCG@1: 0.5, 1.0, 1.0
# NDCG@1: 0.5, 1.0, 1.0
# Average NDCG@1: (1 * 0.5 + 2 * 1.0 + 3 * 1.0) / (1 + 2 + 3) ~ 0.92
#
          # DCG@2: (0.5 + 1.0/log(3)), (1.0 + 0.5/log(3)), (1.0)
# NDCG@2: (0.5 + 1.0/log(3)) / (1.0 + 0.5/log(3)),
# (1.0 + 0.5/log(3)) / (1.0 + 0.5/log(3)),
# 1.0
# Average NDCG@2: (1 * 0.860 + 2 * 1.0 + 3 * 1.0) / (1 + 2 + 3) ~ 0.97
self.assertDictElementsAlmostEqual(
got_metrics, {
ndcg1_key: 0.9166667,
ndcg2_key: 0.9766198
},
places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
if __name__ == '__main__':
tf.test.main()
| 33.427632
| 80
| 0.562291
|
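The expected values asserted above follow directly from the per-query figures in the test's comments; a standalone check of the weighted averages:
# Reproduces the two asserted metric values from the per-query NDCG figures
# worked out in the test's comments (query weights 1, 2, 3).
import numpy as np

w = np.array([1., 2., 3.])
ndcg_at_1 = np.array([0.5, 1.0, 1.0])
dcg2_q1 = 0.5 + 1.0 / np.log2(3)          # query1 DCG@2
idcg2_q1 = 1.0 + 0.5 / np.log2(3)         # query1 ideal DCG@2
ndcg_at_2 = np.array([dcg2_q1 / idcg2_q1, 1.0, 1.0])
print(np.average(ndcg_at_1, weights=w))   # ~0.9166667
print(np.average(ndcg_at_2, weights=w))   # ~0.9766198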
11b9e1768f2a80246c72fd3440dfe071f9171dcf
| 987
|
py
|
Python
|
blog/admin.py
|
arminadm/django_projects
|
8134e764d87f582f946634942a3d4a1448192dc7
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
arminadm/django_projects
|
8134e764d87f582f946634942a3d4a1448192dc7
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
arminadm/django_projects
|
8134e764d87f582f946634942a3d4a1448192dc7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from blog.models import Post, Category, Comment
from django_summernote.admin import SummernoteModelAdmin
# Register your models here.
class postAdmin(SummernoteModelAdmin):
date_hierarchy = 'created_date'
empty_value_display = '-empty-'
list_display = ('id', 'author', 'title', 'status', 'published_date', 'created_date')
list_filter = ('status',)
search_fields = ['title', 'content']
# exclude = ('content',)
# fields = ('content', 'title')
summernote_fields = ('content',)
admin.site.register(Post, postAdmin)
class categoryAdmin(admin.ModelAdmin):
pass
admin.site.register(Category, categoryAdmin)
class commentAdmin(admin.ModelAdmin):
date_hierarchy = 'created_time'
empty_value_display = '-empty-'
list_display = ('post', 'author', 'approved', 'created_time')
list_filter = ('approved',)
search_fields = ['post', 'author', 'email', 'created_time']
admin.site.register(Comment, commentAdmin)
| 37.961538
| 88
| 0.713273
|
eab06e8abfa4fc3b0feb03ba8f2808416b86dd15
| 174
|
py
|
Python
|
serendipiart/plot/shape.py
|
tobiade/serendipi-art
|
481383c2a8fbeab6955c8250e3e488a47b862d88
|
[
"MIT"
] | null | null | null |
serendipiart/plot/shape.py
|
tobiade/serendipi-art
|
481383c2a8fbeab6955c8250e3e488a47b862d88
|
[
"MIT"
] | null | null | null |
serendipiart/plot/shape.py
|
tobiade/serendipi-art
|
481383c2a8fbeab6955c8250e3e488a47b862d88
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Tuple
@dataclass
class Shape:
xcoords: list[float]
ycoords: list[float]
colour: Tuple[float, float, float]
| 17.4
| 38
| 0.735632
|
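A minimal usage sketch of the dataclass; rendering with matplotlib is only one plausible way to consume the coordinates and colour, not part of this module.
# Assumes Shape is importable from the module above; the matplotlib call is
# illustrative only.
import matplotlib.pyplot as plt
from serendipiart.plot.shape import Shape

triangle = Shape(xcoords=[0.0, 1.0, 0.5], ycoords=[0.0, 0.0, 1.0], colour=(0.2, 0.4, 0.8))
plt.fill(triangle.xcoords, triangle.ycoords, color=triangle.colour)
plt.show()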
3eeedda1b66b938af6ec6ec80c27c6cf424cc7bf
| 12,408
|
py
|
Python
|
lib/datasets/pascal_voc_rbg.py
|
JakeCowton/faster-rcnn.pytorch
|
4d80b91b24a2235eda6dbe5c3faf42bd9d56e07b
|
[
"MIT"
] | null | null | null |
lib/datasets/pascal_voc_rbg.py
|
JakeCowton/faster-rcnn.pytorch
|
4d80b91b24a2235eda6dbe5c3faf42bd9d56e07b
|
[
"MIT"
] | null | null | null |
lib/datasets/pascal_voc_rbg.py
|
JakeCowton/faster-rcnn.pytorch
|
4d80b91b24a2235eda6dbe5c3faf42bd9d56e07b
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import model.utils.cython_bbox
import pickle
import subprocess
import uuid
from .voc_eval import voc_eval
from model.utils.config import cfg
import pdb
class pascal_voc(imdb):
def __init__(self, image_set, year, devkit_path=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
          if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.pascal_voc import pascal_voc
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
| 39.642173
| 89
| 0.543923
|
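A standalone sketch of the annotation-parsing rule in _load_pascal_annotation: VOC stores boxes with 1-based pixel indexes, and the loader shifts them to 0-based. The XML snippet is a hypothetical minimal annotation.
# Hypothetical minimal VOC annotation; mirrors the bndbox handling above.
import xml.etree.ElementTree as ET

xml = ('<annotation><object><name>dog</name><difficult>0</difficult>'
       '<bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>'
       '</object></annotation>')
obj = ET.fromstring(xml).find('object')
bbox = obj.find('bndbox')
box = [float(bbox.find(k).text) - 1 for k in ('xmin', 'ymin', 'xmax', 'ymax')]
print(obj.find('name').text, box)   # dog [47.0, 239.0, 194.0, 370.0]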
b45467dddd8e76fc03339f3af197443120b75cdd
| 6,705
|
py
|
Python
|
python/fastscore/v2/models/active_model_info.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 2
|
2018-06-05T19:14:30.000Z
|
2019-02-06T17:15:10.000Z
|
python/fastscore/v2/models/active_model_info.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 2
|
2018-02-20T21:58:43.000Z
|
2018-10-07T10:10:54.000Z
|
python/fastscore/v2/models/active_model_info.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 1
|
2017-12-29T20:38:06.000Z
|
2017-12-29T20:38:06.000Z
|
# coding: utf-8
"""
FastScore API (proxy)
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ActiveModelInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'mtype': 'str',
'source': 'str',
'slots': 'list[SlotInfo]',
'attachments': 'list[str]',
'snapshots': 'str',
'jets': 'list[JetInfo]'
}
attribute_map = {
'name': 'name',
'mtype': 'mtype',
'source': 'source',
'slots': 'slots',
'attachments': 'attachments',
'snapshots': 'snapshots',
'jets': 'jets'
}
def __init__(self, name=None, mtype=None, source=None, slots=None, attachments=None, snapshots=None, jets=None):
"""
ActiveModelInfo - a model defined in Swagger
"""
self._name = None
self._mtype = None
self._source = None
self._slots = None
self._attachments = None
self._snapshots = None
self._jets = None
if name is not None:
self.name = name
if mtype is not None:
self.mtype = mtype
if source is not None:
self.source = source
if slots is not None:
self.slots = slots
if attachments is not None:
self.attachments = attachments
if snapshots is not None:
self.snapshots = snapshots
if jets is not None:
self.jets = jets
@property
def name(self):
"""
Gets the name of this ActiveModelInfo.
:return: The name of this ActiveModelInfo.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ActiveModelInfo.
:param name: The name of this ActiveModelInfo.
:type: str
"""
self._name = name
@property
def mtype(self):
"""
Gets the mtype of this ActiveModelInfo.
:return: The mtype of this ActiveModelInfo.
:rtype: str
"""
return self._mtype
@mtype.setter
def mtype(self, mtype):
"""
Sets the mtype of this ActiveModelInfo.
:param mtype: The mtype of this ActiveModelInfo.
:type: str
"""
self._mtype = mtype
@property
def source(self):
"""
Gets the source of this ActiveModelInfo.
:return: The source of this ActiveModelInfo.
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""
Sets the source of this ActiveModelInfo.
:param source: The source of this ActiveModelInfo.
:type: str
"""
self._source = source
@property
def slots(self):
"""
Gets the slots of this ActiveModelInfo.
:return: The slots of this ActiveModelInfo.
:rtype: list[SlotInfo]
"""
return self._slots
@slots.setter
def slots(self, slots):
"""
Sets the slots of this ActiveModelInfo.
:param slots: The slots of this ActiveModelInfo.
:type: list[SlotInfo]
"""
self._slots = slots
@property
def attachments(self):
"""
Gets the attachments of this ActiveModelInfo.
:return: The attachments of this ActiveModelInfo.
:rtype: list[str]
"""
return self._attachments
@attachments.setter
def attachments(self, attachments):
"""
Sets the attachments of this ActiveModelInfo.
:param attachments: The attachments of this ActiveModelInfo.
:type: list[str]
"""
self._attachments = attachments
@property
def snapshots(self):
"""
Gets the snapshots of this ActiveModelInfo.
:return: The snapshots of this ActiveModelInfo.
:rtype: str
"""
return self._snapshots
@snapshots.setter
def snapshots(self, snapshots):
"""
Sets the snapshots of this ActiveModelInfo.
:param snapshots: The snapshots of this ActiveModelInfo.
:type: str
"""
self._snapshots = snapshots
@property
def jets(self):
"""
Gets the jets of this ActiveModelInfo.
:return: The jets of this ActiveModelInfo.
:rtype: list[JetInfo]
"""
return self._jets
@jets.setter
def jets(self, jets):
"""
Sets the jets of this ActiveModelInfo.
:param jets: The jets of this ActiveModelInfo.
:type: list[JetInfo]
"""
self._jets = jets
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ActiveModelInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 23.946429
| 116
| 0.542878
|
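A minimal usage sketch of the generated model class; the field values are hypothetical.
# Hypothetical field values; exercises the setters, to_dict() and __eq__ above.
info = ActiveModelInfo(name='iris-classifier', mtype='python', source='def action(x): ...')
info.attachments = ['weights.tar.gz']
print(info.to_dict())                                    # plain dict of the seven attributes
print(info == ActiveModelInfo(name='iris-classifier'))   # False: every attribute is compared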
a82446226f1b451f0a42e792e4a31ed3f9db80dd
| 601
|
py
|
Python
|
collect_ros_data/check_file.py
|
kuriatsu/CarlaAutoLogging
|
0a578e21961abe991c9efdc41bc923dfea00cdd7
|
[
"Apache-2.0"
] | null | null | null |
collect_ros_data/check_file.py
|
kuriatsu/CarlaAutoLogging
|
0a578e21961abe991c9efdc41bc923dfea00cdd7
|
[
"Apache-2.0"
] | null | null | null |
collect_ros_data/check_file.py
|
kuriatsu/CarlaAutoLogging
|
0a578e21961abe991c9efdc41bc923dfea00cdd7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pickle
import sys
if __name__ == "__main__":
with open(sys.argv[1], 'rb') as f:
data = pickle.load(f)
with open("last_int_distance.txt", 'r') as f_2:
        intervention_mileage = float(f_2.read())
    if len(data.get("drive_data")) > 10:
        if intervention_mileage == 0.0:
            print("no intervention occurred")
            sys.exit(0)
        elif intervention_mileage > 0.0:
print("next intervention trial")
sys.exit(1)
else:
print("data collection failed")
sys.exit(-1)
| 23.115385
| 51
| 0.569052
|
d8b1e807afe02ab558ddaec75074e7f39c4e4ebe
| 21,350
|
py
|
Python
|
tests/template_tests/syntax_tests/i18n/test_blocktrans.py
|
andreip/django
|
c61d1361d027a729d07d277879950ff133c19f4c
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2020-09-25T04:02:25.000Z
|
2020-10-15T00:01:00.000Z
|
tests/template_tests/syntax_tests/i18n/test_blocktrans.py
|
djk2/django
|
6b00af50146335485d8414c42efec7d8dd5397fc
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/template_tests/syntax_tests/i18n/test_blocktrans.py
|
djk2/django
|
6b00af50146335485d8414c42efec7d8dd5397fc
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2017-08-03T13:06:19.000Z
|
2017-08-03T13:06:19.000Z
|
import os
from threading import local
from django.template import Context, Template, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real
from ...utils import setup
from .base import MultipleLocaleActivationTestCase, extended_locale_paths, here
class I18nBlockTransTagTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'i18n03': '{% load i18n %}{% blocktrans %}{{ anton }}{% endblocktrans %}'})
def test_i18n03(self):
"""simple translation of a variable"""
output = self.engine.render_to_string('i18n03', {'anton': 'Å'})
self.assertEqual(output, 'Å')
@setup({'i18n04': '{% load i18n %}{% blocktrans with berta=anton|lower %}{{ berta }}{% endblocktrans %}'})
def test_i18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('i18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'legacyi18n04': '{% load i18n %}'
'{% blocktrans with anton|lower as berta %}{{ berta }}{% endblocktrans %}'})
def test_legacyi18n04(self):
"""simple translation of a variable and filter"""
output = self.engine.render_to_string('legacyi18n04', {'anton': 'Å'})
self.assertEqual(output, 'å')
@setup({'i18n05': '{% load i18n %}{% blocktrans %}xxx{{ anton }}xxx{% endblocktrans %}'})
def test_i18n05(self):
"""simple translation of a string with interpolation"""
output = self.engine.render_to_string('i18n05', {'anton': 'yyy'})
self.assertEqual(output, 'xxxyyyxxx')
@setup({'i18n07': '{% load i18n %}'
'{% blocktrans count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_i18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('i18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'legacyi18n07': '{% load i18n %}'
'{% blocktrans count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_legacyi18n07(self):
"""translation of singular form"""
output = self.engine.render_to_string('legacyi18n07', {'number': 1})
self.assertEqual(output, 'singular')
@setup({'i18n08': '{% load i18n %}'
'{% blocktrans count number as counter %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_i18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('i18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'legacyi18n08': '{% load i18n %}'
'{% blocktrans count counter=number %}singular{% plural %}'
'{{ counter }} plural{% endblocktrans %}'})
def test_legacyi18n08(self):
"""translation of plural form"""
output = self.engine.render_to_string('legacyi18n08', {'number': 2})
self.assertEqual(output, '2 plural')
@setup({'i18n17': '{% load i18n %}'
'{% blocktrans with berta=anton|escape %}{{ berta }}{% endblocktrans %}'})
def test_i18n17(self):
"""
Escaping inside blocktrans and trans works as if it was directly in the
template.
"""
output = self.engine.render_to_string('i18n17', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n18': '{% load i18n %}'
'{% blocktrans with berta=anton|force_escape %}{{ berta }}{% endblocktrans %}'})
def test_i18n18(self):
output = self.engine.render_to_string('i18n18', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n19': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'})
def test_i18n19(self):
output = self.engine.render_to_string('i18n19', {'andrew': 'a & b'})
self.assertEqual(output, 'a & b')
@setup({'i18n21': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'})
def test_i18n21(self):
output = self.engine.render_to_string('i18n21', {'andrew': mark_safe('a & b')})
self.assertEqual(output, 'a & b')
@setup({'legacyi18n17': '{% load i18n %}'
'{% blocktrans with anton|escape as berta %}{{ berta }}{% endblocktrans %}'})
def test_legacyi18n17(self):
output = self.engine.render_to_string('legacyi18n17', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'legacyi18n18': '{% load i18n %}'
'{% blocktrans with anton|force_escape as berta %}'
'{{ berta }}{% endblocktrans %}'})
def test_legacyi18n18(self):
output = self.engine.render_to_string('legacyi18n18', {'anton': 'α & β'})
self.assertEqual(output, 'α & β')
@setup({'i18n26': '{% load i18n %}'
'{% blocktrans with extra_field=myextra_field count counter=number %}'
'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'})
def test_i18n26(self):
"""
translation of plural form with extra field in singular form (#13568)
"""
output = self.engine.render_to_string('i18n26', {'myextra_field': 'test', 'number': 1})
self.assertEqual(output, 'singular test')
@setup({'legacyi18n26': '{% load i18n %}'
'{% blocktrans with myextra_field as extra_field count number as counter %}'
'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'})
def test_legacyi18n26(self):
output = self.engine.render_to_string('legacyi18n26', {'myextra_field': 'test', 'number': 1})
self.assertEqual(output, 'singular test')
@setup({'i18n27': '{% load i18n %}{% blocktrans count counter=number %}'
'{{ counter }} result{% plural %}{{ counter }} results'
'{% endblocktrans %}'})
def test_i18n27(self):
"""translation of singular form in Russian (#14126)"""
with translation.override('ru'):
output = self.engine.render_to_string('i18n27', {'number': 1})
self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
@setup({'legacyi18n27': '{% load i18n %}'
'{% blocktrans count number as counter %}{{ counter }} result'
'{% plural %}{{ counter }} results{% endblocktrans %}'})
def test_legacyi18n27(self):
with translation.override('ru'):
output = self.engine.render_to_string('legacyi18n27', {'number': 1})
self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442')
@setup({'i18n28': '{% load i18n %}'
'{% blocktrans with a=anton b=berta %}{{ a }} + {{ b }}{% endblocktrans %}'})
def test_i18n28(self):
"""simple translation of multiple variables"""
output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'})
self.assertEqual(output, 'α + β')
@setup({'legacyi18n28': '{% load i18n %}'
'{% blocktrans with anton as a and berta as b %}'
'{{ a }} + {{ b }}{% endblocktrans %}'})
def test_legacyi18n28(self):
output = self.engine.render_to_string('legacyi18n28', {'anton': 'α', 'berta': 'β'})
self.assertEqual(output, 'α + β')
# blocktrans handling of variables which are not in the context.
# this should work as if blocktrans was not there (#19915)
@setup({'i18n34': '{% load i18n %}{% blocktrans %}{{ missing }}{% endblocktrans %}'})
def test_i18n34(self):
output = self.engine.render_to_string('i18n34')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n34_2': '{% load i18n %}{% blocktrans with a=\'α\' %}{{ missing }}{% endblocktrans %}'})
def test_i18n34_2(self):
output = self.engine.render_to_string('i18n34_2')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n34_3': '{% load i18n %}{% blocktrans with a=anton %}{{ missing }}{% endblocktrans %}'})
def test_i18n34_3(self):
output = self.engine.render_to_string(
'i18n34_3', {'anton': '\xce\xb1'})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'i18n37': '{% load i18n %}'
'{% trans "Page not found" as page_not_found %}'
'{% blocktrans %}Error: {{ page_not_found }}{% endblocktrans %}'})
def test_i18n37(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n37')
self.assertEqual(output, 'Error: Seite nicht gefunden')
# blocktrans tag with asvar
@setup({'i18n39': '{% load i18n %}'
'{% blocktrans asvar page_not_found %}Page not found{% endblocktrans %}'
'>{{ page_not_found }}<'})
def test_i18n39(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n39')
self.assertEqual(output, '>Seite nicht gefunden<')
@setup({'i18n40': '{% load i18n %}'
'{% trans "Page not found" as pg_404 %}'
'{% blocktrans with page_not_found=pg_404 asvar output %}'
'Error: {{ page_not_found }}'
'{% endblocktrans %}'})
def test_i18n40(self):
output = self.engine.render_to_string('i18n40')
self.assertEqual(output, '')
@setup({'i18n41': '{% load i18n %}'
'{% trans "Page not found" as pg_404 %}'
'{% blocktrans with page_not_found=pg_404 asvar output %}'
'Error: {{ page_not_found }}'
'{% endblocktrans %}'
'>{{ output }}<'})
def test_i18n41(self):
with translation.override('de'):
output = self.engine.render_to_string('i18n41')
self.assertEqual(output, '>Error: Seite nicht gefunden<')
@setup({'template': '{% load i18n %}{% blocktrans asvar %}Yes{% endblocktrans %}'})
def test_blocktrans_syntax_error_missing_assignment(self):
msg = "No argument provided to the 'blocktrans' tag for the asvar option."
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string('template')
@setup({'template': '{% load i18n %}{% blocktrans %}%s{% endblocktrans %}'})
def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self):
output = self.engine.render_to_string('template')
self.assertEqual(output, '%s')
class TranslationBlockTransTagTests(SimpleTestCase):
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_template_tags_pgettext(self):
"""{% blocktrans %} takes message contexts into account (#14806)."""
trans_real._active = local()
trans_real._translations = {}
with translation.override('de'):
# Nonexistent context
t = Template('{% load i18n %}{% blocktrans context "nonexistent" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'May')
# Existing context... using a literal
t = Template('{% load i18n %}{% blocktrans context "month name" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context "verb" %}May{% endblocktrans %}')
rendered = t.render(Context())
self.assertEqual(rendered, 'Kann')
# Using a variable
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'month name'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'verb'}))
self.assertEqual(rendered, 'Kann')
# Using a filter
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'MONTH NAME'}))
self.assertEqual(rendered, 'Mai')
t = Template('{% load i18n %}{% blocktrans context message_context|lower %}May{% endblocktrans %}')
rendered = t.render(Context({'message_context': 'VERB'}))
self.assertEqual(rendered, 'Kann')
# Using 'count'
t = Template(
'{% load i18n %}{% blocktrans count number=1 context "super search" %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 Super-Ergebnis')
t = Template(
'{% load i18n %}{% blocktrans count number=2 context "super search" %}{{ number }}'
' super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 Super-Ergebnisse')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=1 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '1 anderen Super-Ergebnis')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=2 %}'
'{{ number }} super result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Using 'with'
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "comment count" %}'
'There are {{ num_comments }} comments{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "other comment count" %}'
'There are {{ num_comments }} comments{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Andere: Es gibt 5 Kommentare')
# Using trimmed
t = Template(
'{% load i18n %}{% blocktrans trimmed %}\n\nThere\n\t are 5 '
'\n\n comments\n{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'There are 5 comments')
t = Template(
'{% load i18n %}{% blocktrans with num_comments=5 context "comment count" trimmed %}\n\n'
'There are \t\n \t {{ num_comments }} comments\n\n{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, 'Es gibt 5 Kommentare')
t = Template(
'{% load i18n %}{% blocktrans context "other super search" count number=2 trimmed %}\n'
'{{ number }} super \n result{% plural %}{{ number }} super results{% endblocktrans %}'
)
rendered = t.render(Context())
self.assertEqual(rendered, '2 andere Super-Ergebnisse')
# Misuses
with self.assertRaises(TemplateSyntaxError):
Template('{% load i18n %}{% blocktrans context with month="May" %}{{ month }}{% endblocktrans %}')
with self.assertRaises(TemplateSyntaxError):
Template('{% load i18n %}{% blocktrans context %}{% endblocktrans %}')
with self.assertRaises(TemplateSyntaxError):
Template(
'{% load i18n %}{% blocktrans count number=2 context %}'
'{{ number }} super result{% plural %}{{ number }}'
' super results{% endblocktrans %}'
)
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_1(self):
"""
Error in translation file should not crash template rendering (#16516).
(%(person)s is translated as %(personne)s in fr.po).
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My name is James.')
@override_settings(LOCALE_PATHS=[os.path.join(here, 'other', 'locale')])
def test_bad_placeholder_2(self):
"""
Error in translation file should not crash template rendering (#18393).
(%(person) misses a 's' in fr.po, causing the string formatting to fail)
.
"""
with translation.override('fr'):
t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')
rendered = t.render(Context({'person': 'James'}))
self.assertEqual(rendered, 'My other name is James.')
class MultipleLocaleActivationBlockTransTests(MultipleLocaleActivationTestCase):
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n
constructs.
"""
with translation.override('fr'):
self.assertEqual(
Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})),
'Oui'
)
def test_multiple_locale_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override(self._old_language), translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_deactivate_btrans(self):
with translation.override('de', deactivate=True):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
def test_multiple_locale_direct_switch_btrans(self):
with translation.override('de'):
t = Template("{% load i18n %}{% blocktrans %}No{% endblocktrans %}")
with translation.override('nl'):
self.assertEqual(t.render(Context({})), 'Nee')
class MiscTests(SimpleTestCase):
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_in_translatable_block(self):
t_sing = Template("{% load i18n %}{% blocktrans %}The result was {{ percent }}%{% endblocktrans %}")
t_plur = Template(
"{% load i18n %}{% blocktrans count num as number %}"
"{{ percent }}% represents {{ num }} object{% plural %}"
"{{ percent }}% represents {{ num }} objects{% endblocktrans %}"
)
with translation.override('de'):
self.assertEqual(t_sing.render(Context({'percent': 42})), 'Das Ergebnis war 42%')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '42% stellt 1 Objekt dar')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '42% stellt 4 Objekte dar')
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_percent_formatting_in_blocktrans(self):
"""
Python's %-formatting is properly escaped in blocktrans, singular, or
plural.
"""
t_sing = Template("{% load i18n %}{% blocktrans %}There are %(num_comments)s comments{% endblocktrans %}")
t_plur = Template(
"{% load i18n %}{% blocktrans count num as number %}"
"%(percent)s% represents {{ num }} object{% plural %}"
"%(percent)s% represents {{ num }} objects{% endblocktrans %}"
)
with translation.override('de'):
# Strings won't get translated as they don't match after escaping %
self.assertEqual(t_sing.render(Context({'num_comments': 42})), 'There are %(num_comments)s comments')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 1})), '%(percent)s% represents 1 object')
self.assertEqual(t_plur.render(Context({'percent': 42, 'num': 4})), '%(percent)s% represents 4 objects')
| 49.193548
| 116
| 0.575597
|
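A minimal blocktrans sketch outside the test harness, assuming it runs inside a configured Django project with the i18n machinery set up; when no matching translation is active, the source string is rendered with the variable substituted.
# Assumes Django settings are already configured (USE_I18N=True); the user
# name is a hypothetical value.
from django.template import Context, Template

t = Template('{% load i18n %}{% blocktrans with name=user %}Hello {{ name }}{% endblocktrans %}')
print(t.render(Context({'user': 'Ada'})))   # 'Hello Ada' when no translation applies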
5382a0486560db333f78c33ad8859217c4f7a4d7
| 2,181
|
py
|
Python
|
mmt/utils/time_handler.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | 1
|
2021-11-08T14:32:24.000Z
|
2021-11-08T14:32:24.000Z
|
mmt/utils/time_handler.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | null | null | null |
mmt/utils/time_handler.py
|
jianzhnie/MultimodalTransformer
|
6cd4ca8034a53da361149745aecead68fbe304a0
|
[
"Apache-2.0"
] | null | null | null |
'''
Author: jianzhnie
Date: 2021-11-18 18:22:32
LastEditTime: 2021-12-03 17:21:47
LastEditors: jianzhnie
Description:
'''
import signal
import time
import torch
import torch.distributed as dist
def timed_generator(gen):
start = time.time()
for g in gen:
end = time.time()
t = end - start
yield g, t
start = time.time()
def timed_function(f):
def _timed_function(*args, **kwargs):
start = time.time()
ret = f(*args, **kwargs)
return ret, time.time() - start
return _timed_function
def first_n(n, generator):
for i, d in zip(range(n), generator):
yield d
class TimeoutHandler:
def __init__(self, sig=signal.SIGTERM):
self.sig = sig
self.device = torch.device('cuda')
@property
def interrupted(self):
if not dist.is_initialized():
return self._interrupted
interrupted = torch.tensor(self._interrupted).int().to(self.device)
dist.broadcast(interrupted, 0)
interrupted = bool(interrupted.item())
return interrupted
def __enter__(self):
self._interrupted = False
self.released = False
self.original_handler = signal.getsignal(self.sig)
def master_handler(signum, frame):
self.release()
self._interrupted = True
print('Received SIGTERM')
def ignoring_handler(signum, frame):
self.release()
print('Received SIGTERM, ignoring')
rank = dist.get_rank() if dist.is_initialized() else 0
if rank == 0:
signal.signal(self.sig, master_handler)
else:
signal.signal(self.sig, ignoring_handler)
return self
def __exit__(self, type, value, tb):
self.release()
def release(self):
if self.released:
return False
signal.signal(self.sig, self.original_handler)
self.released = True
return True
def calc_ips(batch_size, time):
world_size = (
torch.distributed.get_world_size()
if torch.distributed.is_initialized() else 1)
tbs = world_size * batch_size
return tbs / time
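# Illustrative usage sketch (not part of the original module): how the timing
# helpers and TimeoutHandler above are typically combined in a training loop.
# The `dataloader`, `train_step` and `batch_size` names are hypothetical
# placeholders, not taken from this repository.
def _example_timed_training_loop(dataloader, train_step, batch_size):
    """Minimal sketch, assuming a single-process (non-distributed) run."""
    with TimeoutHandler() as timeout_handler:
        # timed_generator yields (batch, seconds spent waiting for the batch)
        for batch, data_time in timed_generator(dataloader):
            # timed_function wraps train_step so it also returns its wall time
            _, step_time = timed_function(train_step)(batch)
            print('images/sec:', calc_ips(batch_size, data_time + step_time))
            if timeout_handler.interrupted:
                break  # SIGTERM received: leave the loop cleanly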
| 23.202128
| 75
| 0.612105
|
9ed80e31be85239a0e1b368c80a8a74d915edbd4
| 3,157
|
py
|
Python
|
qgevalcap/eval.py
|
p208p2002/nqg
|
ee77c279f4276dfde1e37540e8dbd6d3786747d9
|
[
"MIT"
] | 2
|
2021-12-08T06:53:08.000Z
|
2022-03-30T06:51:23.000Z
|
qgevalcap/eval.py
|
p208p2002/nqg
|
ee77c279f4276dfde1e37540e8dbd6d3786747d9
|
[
"MIT"
] | null | null | null |
qgevalcap/eval.py
|
p208p2002/nqg
|
ee77c279f4276dfde1e37540e8dbd6d3786747d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
__author__ = 'xinya'
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import sys
# reload(sys)
# import importlib,sys
# importlib.reload(sys)
# sys.setdefaultencoding('utf-8')
class QGEvalCap:
def __init__(self, gts, res):
self.gts = gts
self.res = res
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(), "ROUGE_L"),
# (Cider(), "CIDEr")
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print ("%s: %0.5f"%(m, sc))
output.append(sc)
else:
print ("%s: %0.5f"%(method, score))
output.append(score)
return output
def eval(out_file, src_file, tgt_file, isDIn = False, num_pairs = 500):
"""
Given a filename, calculate the metric scores for that prediction file
isDin: boolean value to check whether input file is DirectIn.txt
"""
pairs = []
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1]
pairs.append(pair)
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = line[:-1]
cnt += 1
output = []
with open(out_file, 'r') as infile:
for line in infile:
line = line[:-1]
output.append(line)
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
## eval
from eval import QGEvalCap
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']
res[key] = [pair['prediction'].encode('utf-8')]
## gts
gts[key].append(pair['tokenized_question'].encode('utf-8'))
QGEval = QGEvalCap(gts, res)
return QGEval.evaluate()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-out", "--out_file", dest="out_file", default="./output/pred.txt", help="output file to compare")
parser.add_argument("-src", "--src_file", dest="src_file", default="../data/processed/src-test.txt", help="src file")
parser.add_argument("-tgt", "--tgt_file", dest="tgt_file", default="../data/processed/tgt-test.txt", help="target file")
args = parser.parse_args()
print ("scores: \n")
eval(args.out_file, args.src_file, args.tgt_file)
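# Illustrative sketch (not part of the original script): calling eval() from
# Python instead of the command line. The three paths are just the script's
# own defaults; each file holds one tokenized sentence / question / prediction
# per line.
def _example_programmatic_eval():
    """Minimal sketch, assuming the default nqg directory layout."""
    scores = eval(
        out_file='./output/pred.txt',                # model predictions
        src_file='../data/processed/src-test.txt',   # source sentences
        tgt_file='../data/processed/tgt-test.txt')   # reference questions
    # scores is a list: [Bleu_1, Bleu_2, Bleu_3, Bleu_4, METEOR, ROUGE_L]
    return scores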
| 29.231481
| 124
| 0.558125
|
18f1b3248fff8057d1aa6bff4293936a41ce109e
| 2,098
|
py
|
Python
|
VisualPython/lib/pywidgets.py
|
ADKosm/YaRPO
|
b1437a052ecbd3de1a40b108f4718d24b1a67f24
|
[
"MIT"
] | null | null | null |
VisualPython/lib/pywidgets.py
|
ADKosm/YaRPO
|
b1437a052ecbd3de1a40b108f4718d24b1a67f24
|
[
"MIT"
] | null | null | null |
VisualPython/lib/pywidgets.py
|
ADKosm/YaRPO
|
b1437a052ecbd3de1a40b108f4718d24b1a67f24
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.getcwd())
from libpywidgets import *
class Object:
def __init__(self, p, d = True):
self.pointer = p
self.toDel = d
def get_class_name(self):
return Object_GetClassName(self.pointer)
def __del__(self):
if self.toDel:
Object_Delete(self.pointer)
class Application(Object):
def __init__(self):
Object.__init__(self, Application_New())
def exec(self):
return Application_Exec(self.pointer)
class Widget(Object):
def __init__(self, parent = None):
Object.__init__(self, Widget_New()) if parent is None else Object.__init__(self, Widget_New(parent.pointer), False)
def set_layout(self, layout):
Widget_SetLayout(self.pointer, layout.pointer)
def set_window_title(self, title):
Widget_SetWindowTitle(self.pointer, title)
def set_size(self, w, h):
Widget_SetSize(self.pointer, w, h)
def set_visible(self, v):
Widget_SetVisible(self.pointer, v)
class Label(Widget):
def __init__(self, parent = None):
Object.__init__(self, Label_New()) if parent is None else Object.__init__(self, Label_New(parent.pointer), False)
def set_text(self, text):
Label_SetText(self.pointer, text)
class PushButton(Widget):
def __init__(self, parent = None):
self.calls = []
Object.__init__(self, PushButton_New()) if parent is None else Object.__init__(self, PushButton_New(parent.pointer), False)
def set_text(self, text):
PushButton_SetText(self.pointer, text)
def set_on_clicked(self, callback):
def wraped_callback():
callback(self)
self.calls.append(wraped_callback)
PushButton_SetOnClicked(self.pointer, self.calls[-1])
class Layout(Object):
def add_widget(self, widget):
Layout_AddWidget(self.pointer, widget.pointer)
class VBoxLayout(Layout):
def __init__(self, parent = None):
Object.__init__(self, VBoxLayout_New()) if parent is None else Object.__init__(self, VBoxLayout_New(parent.pointer), False)
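# Illustrative usage sketch (not part of the original module): wiring the
# wrappers above into a small window. It assumes the libpywidgets bindings
# imported at the top of this file are importable at run time.
def _example_window():
    """Minimal sketch: a label and a button stacked in a vertical layout."""
    app = Application()
    window = Widget()
    layout = VBoxLayout()
    label = Label()
    label.set_text('Hello')
    button = PushButton()
    button.set_text('Click me')
    # the callback receives the PushButton wrapper that was clicked
    button.set_on_clicked(lambda btn: label.set_text('Clicked'))
    layout.add_widget(label)
    layout.add_widget(button)
    window.set_layout(layout)
    window.set_window_title('pywidgets demo')
    window.set_visible(True)
    return app.exec()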
| 29.971429
| 131
| 0.682555
|
6b96d78af02f6a47affed0bcc608fcecac2397b0
| 2,859
|
py
|
Python
|
jax_dft/jax_dft/np_utils_test.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-19T04:26:12.000Z
|
2022-03-19T04:26:12.000Z
|
jax_dft/jax_dft/np_utils_test.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
jax_dft/jax_dft/np_utils_test.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for jax_dft.np_utils."""
from absl.testing import absltest
from jax.config import config
import jax.numpy as jnp
import numpy as np
from jax_dft import np_utils
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class OnpUtilsTest(absltest.TestCase):
def test_flatten(self):
(tree, shapes), vec = np_utils.flatten(
[(jnp.array([1, 2, 3]), (jnp.array([4, 5]))), jnp.array([99])])
self.assertIsInstance(vec, np.ndarray)
np.testing.assert_allclose(vec, [1., 2., 3., 4., 5., 99.])
self.assertEqual(shapes, [(3,), (2,), (1,)])
# unflatten should convert 1d array back to pytree.
params = np_utils.unflatten((tree, shapes), vec)
self.assertIsInstance(params[0][0], np.ndarray)
np.testing.assert_allclose(params[0][0], [1., 2., 3.])
self.assertIsInstance(params[0][1], np.ndarray)
np.testing.assert_allclose(params[0][1], [4., 5.])
self.assertIsInstance(params[1], np.ndarray)
np.testing.assert_allclose(params[1], [99.])
def test_get_exact_h_atom_density(self):
grids = np.linspace(-10, 10, 1001)
dx = 0.02
displacements = np.array([
grids,
grids - 2,
])
density = np_utils._get_exact_h_atom_density(displacements, dx)
self.assertIsInstance(density, np.ndarray)
self.assertEqual(density.shape, (2, 1001))
np.testing.assert_allclose(np.sum(density, axis=1) * dx, [1, 1])
self.assertAlmostEqual(density[0][501], 0.40758, places=4)
self.assertAlmostEqual(density[1][601], 0.40758, places=4)
def test_get_exact_h_atom_density_wrong_shape(self):
grids = np.linspace(-10, 10, 1001)
dx = 0.02
with self.assertRaisesRegex(
ValueError, 'displacements is expected to have ndim=2, but got 1'):
np_utils._get_exact_h_atom_density(grids, dx)
def test_spherical_superposition_density(self):
density = np_utils.spherical_superposition_density(
grids=np.linspace(-10, 10, 1001),
locations=np.array([0, 2]),
nuclear_charges=np.array([1, 2]))
self.assertIsInstance(density, np.ndarray)
self.assertEqual(density.shape, (1001,))
np.testing.assert_allclose(np.sum(density) * 0.02, 3)
if __name__ == '__main__':
absltest.main()
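# Illustrative sketch (not part of the original test module): flatten/unflatten
# are typically used to hand a pytree of parameters to a flat-vector optimizer
# and then restore the tree afterwards. `update_vector` is a hypothetical
# placeholder with the same length as the flattened parameters.
def _example_flatten_roundtrip(params_pytree, update_vector):
  spec, flat_params = np_utils.flatten(params_pytree)   # spec is (tree, shapes)
  flat_params = flat_params + update_vector             # work in flat 1d space
  return np_utils.unflatten(spec, flat_params)          # back to the pytree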
| 34.865854
| 75
| 0.697796
|
dfbc2c7b65b590e0269c9292750a058ed6a9a25d
| 6,268
|
py
|
Python
|
tree.py
|
cognitobuoy/Search-Algorithms-Python
|
01cf5720488c0fe08a9e604ceae60b562545da18
|
[
"MIT"
] | 1
|
2021-05-13T12:57:12.000Z
|
2021-05-13T12:57:12.000Z
|
tree.py
|
cognitobuoy/Search-Algorithms-Python
|
01cf5720488c0fe08a9e604ceae60b562545da18
|
[
"MIT"
] | 1
|
2021-05-21T23:16:31.000Z
|
2021-05-21T23:16:31.000Z
|
tree.py
|
cognitobuoy/Search-Algorithms-Python
|
01cf5720488c0fe08a9e604ceae60b562545da18
|
[
"MIT"
] | null | null | null |
import time
import math
import heapq
from tkinter import constants
LEFT = 0
RIGHT = 1
DEPTH_FIRST = 0
BREATH_FIRST = 1
UNIFORM_COST = 2
class Node:
def __init__(self, key, x=0, y=0):
self.path_cost = 0
self.parent = None
self.right = None
self.left = None
self.key = key
self.x = x
self.y = y
def __lt__(self, other):
return self.x < other.x and self.y < other.y
def __le__(self, other):
return self.x <= other.x and self.y <= other.y
def getPathCost(self):
return self.path_cost
def setPathCost(self, cost):
self.path_cost = cost
def getX(self):
return self.x
def getY(self):
return self.y
def setX(self, point):
self.x = point
def setY(self, point):
self.y = point
def getKey(self):
return self.key
def hasLeft(self):
return self.left != None
def hasRight(self):
return self.right != None
def getLeft(self):
return self.left
def getRight(self):
return self.right
def setLeft(self, node):
self.left = node
def setRight(self, node):
self.right = node
def getParent(self):
return self.parent
def setParent(self, parent):
self.parent = parent
class BinaryTree:
def __init__(self):
self.root = None
self.found = None
self.visited = None
def getRoot(self):
return self.root
def insert(self, parentKey, node, direction):
if self.root == None:
self.root = node
else:
self.found = None
self.searchPreOrder(self.root, parentKey)
if self.found == None:
return
elif direction == LEFT:
self.found.setLeft(node)
elif direction == RIGHT:
self.found.setRight(node)
if node is not None:
node.setParent(self.found)
cost = abs(((self.found.getX() - node.getX()) ** 2) -
((self.found.getY() - node.getY()) ** 2))
cost = self.found.getPathCost() + math.sqrt(cost)
node.setPathCost(cost)
def search(self, searchKey, algorithm):
start_time = int(round(time.time()*1000))
start = self.root.getKey()
end = searchKey
self.visited = 0
node = None
name = None
if algorithm == DEPTH_FIRST:
node = self.depthFirstSearch(searchKey, visited=0)
name = "Depth First"
elif algorithm == BREATH_FIRST:
node = self.breathFirstSearch(searchKey, visited=0)
name = "Breath First"
elif algorithm == UNIFORM_COST:
node = self.uniformCostSearch(searchKey)
name = "Uniform Cost"
stop_time = int(round(time.time()*1000))
return {
"end": end,
"node": node,
"name": name,
"start": start,
"visited": self.visited,
"time": str(stop_time - start_time) + "ms",
}
def _displayInOrder(self, node):
if node == None:
return
if node.hasLeft():
self._displayInOrder(node.getLeft())
display(node.getKey())
if node.hasRight():
self._displayInOrder(node.getRight())
def displayInOrder(self):
return self._displayInOrder(self.root)
def _displayPreOrder(self, node):
if self.root == None:
return
display(node.getKey())
if node.hasLeft():
self._displayPreOrder(node.getLeft())
if node.hasRight():
self._displayPreOrder(node.getRight())
def displayPreOrder(self):
return self._displayPreOrder(self.root)
def _displayPostOrder(self, node):
if node == None:
return
if node.hasLeft():
self._displayPostOrder(node.getLeft())
if node.hasRight():
self._displayPostOrder(node.getRight())
display(node.getKey())
def displayPostOrder(self):
return self._displayPostOrder(self.root)
def searchPreOrder(self, node, searchKey):
if node == None:
return
if node.getKey() == searchKey:
self.found = node
self.searchPreOrder(node.getLeft(), searchKey)
self.searchPreOrder(node.getRight(), searchKey)
def uniformCostSearch(self, searchKey):
node = self.root
priority_queue = []
visited = []
heapq.heappush(priority_queue, (node.getPathCost(), id(node), node))
while len(priority_queue) != 0:
node = heapq.heappop(priority_queue)[2]
if visited.count(node.getKey()) != 0:
continue
visited.append(node.getKey())
if node.getKey() == searchKey:
self.visited = len(visited)
return node
if node.hasLeft():
heapq.heappush(priority_queue, (node.getLeft().getPathCost(), id(node.getLeft()), node.getLeft()))
if node.hasRight():
                heapq.heappush(priority_queue, (node.getRight().getPathCost(), id(node.getRight()), node.getRight()))
def depthFirstSearch(self, searchKey, visited=0):
lifo=[]
lifo.append(self.root)
self.visited=visited
while len(lifo) != 0:
current=lifo.pop()
self.visited += 1
if current.getKey() != searchKey:
if current.hasLeft():
lifo.append(current.getLeft())
if current.hasRight():
lifo.append(current.getRight())
else:
return current
def breathFirstSearch(self, searchKey, visited=0):
fifo=[]
fifo.append(self.root)
self.visited=visited
while len(fifo) != 0:
current=fifo.pop(0)
self.visited += 1
if current.getKey() != searchKey:
if current.hasLeft():
fifo.append(current.getLeft())
if current.hasRight():
fifo.append(current.getRight())
else:
return current
def display(key):
print(key, end=" -> ")
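# Illustrative usage sketch (not part of the original module): build a small
# tree with (x, y) coordinates and compare the three search strategies. The
# keys and coordinates below are arbitrary example values.
def _example_search_comparison():
    tree = BinaryTree()
    tree.insert(None, Node('A', 0, 0), LEFT)   # first insert becomes the root
    tree.insert('A', Node('B', 1, 2), LEFT)
    tree.insert('A', Node('C', 3, 1), RIGHT)
    tree.insert('B', Node('D', 2, 4), LEFT)
    for algorithm in (DEPTH_FIRST, BREATH_FIRST, UNIFORM_COST):
        result = tree.search('D', algorithm)
        print(result['name'], 'visited', result['visited'], 'in', result['time'])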
| 27.734513
| 116
| 0.547064
|
341e32c301c598958cc6e90a51fe3a0741eee3f2
| 1,239
|
py
|
Python
|
pennylane/templates/subroutines/__init__.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | null | null | null |
pennylane/templates/subroutines/__init__.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | 1
|
2021-05-27T05:36:41.000Z
|
2021-05-27T05:36:41.000Z
|
pennylane/templates/subroutines/__init__.py
|
ryanlevy/pennylane
|
fb03b09d17267ebd0b9050432f9eeb84b5dff200
|
[
"Apache-2.0"
] | 1
|
2021-07-11T11:45:14.000Z
|
2021-07-11T11:45:14.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Subroutines are the most basic template, consisting of a collection of quantum operations, and not fulfilling
any of the characteristics of other templates (i.e. to prepare a specific state, to be repeated or to encode features).
"""
from .arbitrary_unitary import ArbitraryUnitary
from .double_excitation_unitary import DoubleExcitationUnitary
from .interferometer import Interferometer
from .single_excitation_unitary import SingleExcitationUnitary
from .uccsd import UCCSD
from .approx_time_evolution import ApproxTimeEvolution
from .permute import Permute
from .qpe import QuantumPhaseEstimation
from .qmc import QuantumMonteCarlo
| 44.25
| 119
| 0.810331
|
9e568c6bd415300ba400e7c0c28b6d0144fd70ea
| 538
|
py
|
Python
|
programs/pgm04_18.py
|
danielsunzhongyuan/python_practice
|
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
|
[
"Apache-2.0"
] | null | null | null |
programs/pgm04_18.py
|
danielsunzhongyuan/python_practice
|
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
|
[
"Apache-2.0"
] | null | null | null |
programs/pgm04_18.py
|
danielsunzhongyuan/python_practice
|
79bc88db1c52ee2f5607f6f9fec1bbacea2804ff
|
[
"Apache-2.0"
] | null | null | null |
#
# This file contains the Python code from Program 4.18 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm04_18.txt
#
class LinkedList(object):
def __copy__(self):
result = LinkedList()
        ptr = self._head
while ptr is not None:
result.append(ptr._datum)
ptr = ptr._next
return result
# ...
| 24.454545
| 69
| 0.644981
|
548c2d2e8ad39afeec3634c158bb310c65ee64ec
| 1,267
|
py
|
Python
|
bcompiler/tests/test_integration.py
|
hammerheadlemon/bcompiler
|
9cf48352fff24dad1b165e9674cf6c44b1e1fc0f
|
[
"MIT"
] | null | null | null |
bcompiler/tests/test_integration.py
|
hammerheadlemon/bcompiler
|
9cf48352fff24dad1b165e9674cf6c44b1e1fc0f
|
[
"MIT"
] | 7
|
2019-02-28T19:14:01.000Z
|
2020-03-31T05:35:45.000Z
|
bcompiler/tests/test_integration.py
|
yulqen/bcompiler
|
9cf48352fff24dad1b165e9674cf6c44b1e1fc0f
|
[
"MIT"
] | 2
|
2019-02-19T16:51:29.000Z
|
2019-04-21T08:58:38.000Z
|
import csv
import os
import subprocess
from bcompiler import __version__
from ..utils import OUTPUT_DIR
def test_bcompiler_help():
output = subprocess.run(['bcompiler', '-h'], stdout=subprocess.PIPE, encoding='utf-8')
assert output.stdout.startswith('usage')
def test_bcompiler_version():
output = subprocess.run(['bcompiler', '-v'], stdout=subprocess.PIPE, encoding='utf-8')
assert output.stdout.strip() == __version__
def test_bcompiler_count_rows(populated_template):
output = subprocess.run(['bcompiler', '-r'], stdout=subprocess.PIPE, encoding='utf-8')
assert output.stdout.startswith('Workbook')
def test_bcompiler_count_rows_csv(populated_template):
subprocess.run(['bcompiler', '-r', '--csv'])
with open(os.path.join(OUTPUT_DIR, 'row_count.csv'), 'r') as f:
reader = csv.reader(f)
assert next(reader)[0] == 'bicc_template.xlsm'
def test_bcompiler_count_rows_quiet(populated_template):
output = subprocess.run(['bcompiler', '-r', '--quiet'], stdout=subprocess.PIPE, encoding='utf-8')
assert output.stdout.startswith('#')
#def test_bcompiler_populate_all_templates(master):
# output = subprocess.run(['bcompiler', '-a'], stdout=subprocess.PIPE, encoding='utf-8')
# assert output.stdout
| 32.487179
| 101
| 0.716654
|
0cb95376e55d3c2da7318f6156be0fb09988c167
| 3,762
|
py
|
Python
|
order/views.py
|
rzr911/ice_cream_api
|
9382bfeeeeebc71fe7cd6c33ab86420da9c510a5
|
[
"MIT"
] | null | null | null |
order/views.py
|
rzr911/ice_cream_api
|
9382bfeeeeebc71fe7cd6c33ab86420da9c510a5
|
[
"MIT"
] | null | null | null |
order/views.py
|
rzr911/ice_cream_api
|
9382bfeeeeebc71fe7cd6c33ab86420da9c510a5
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework import status, viewsets
from rest_framework.response import Response
from icecream.models import IceCream
from order.models import Order, StatusChoices
from order.serializers import OrderCreateUpdateReadSerializer, OrderCheckoutSerializer
from rest_framework.decorators import action
class OrderViewset(viewsets.ModelViewSet):
queryset = Order.objects.all().prefetch_related("icecreams")
def get_serializer_class(self):
serializers = {
"list": OrderCreateUpdateReadSerializer,
"create": OrderCreateUpdateReadSerializer,
"retrieve": OrderCreateUpdateReadSerializer,
"update": OrderCreateUpdateReadSerializer,
"partial_update": OrderCreateUpdateReadSerializer,
"checkout": OrderCheckoutSerializer,
}
return serializers.get(self.action)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
icecreams = serializer.validated_data.pop("icecreams")
order = Order.objects.create(**serializer.validated_data)
icecreams = IceCream.objects.bulk_create(
[
IceCream(
order=order,
cone_wafer=icecream.get("cone_wafer"),
base_flavour=icecream.get("base_flavour"),
toppings=icecream.get("toppings"),
)
for icecream in icecreams
]
)
serializer = self.get_serializer(order)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
partial = kwargs.pop("partial", False)
data = request.data
order = self.get_object()
serializer = self.get_serializer(data=data, instance=order, partial=partial)
serializer.is_valid(raise_exception=True)
remarks = data.get("remarks") if data.get("remarks") else order.remarks
order.remarks = remarks
# if icecreams are set in request, previously set icecreams are wiped and the following ones are set
if data.get("icecreams"):
icecreams = data.get("icecreams")
icecreams_to_be_created = IceCream.objects.bulk_create(
[
IceCream(
cone_wafer=icecream.get("cone_wafer"),
base_flavour=icecream.get("base_flavour"),
toppings=icecream.get("toppings"),
)
for icecream in icecreams
]
)
order.icecreams.set(icecreams_to_be_created)
order.save()
serializer = self.get_serializer(order)
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True, methods=["post"])
def checkout(self, request, *args, **kwargs):
data = request.data
order = self.get_object()
serializer = self.get_serializer(data=data, instance=order, partial=False)
serializer.is_valid(raise_exception=True)
# Order that is Completed should not be allowed to be modified
if order.status == StatusChoices.COMPLETED:
            return Response({"message": "Completed orders cannot be modified."}, status=status.HTTP_400_BAD_REQUEST)
order.address = serializer.validated_data.get("address")
order.phone_number = serializer.validated_data.get("phone_number")
order.status = StatusChoices.COMPLETED
order.save()
serializer = self.get_serializer(order)
return Response(serializer.data, status=status.HTTP_200_OK)
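# Illustrative wiring sketch (not part of the original module): how this
# viewset is typically exposed through a DRF router in urls.py. The 'orders'
# prefix and basename are assumptions, not taken from this project.
def _example_router_urls():
    from rest_framework.routers import DefaultRouter
    router = DefaultRouter()
    router.register('orders', OrderViewset, basename='order')
    # the @action above becomes POST /orders/<pk>/checkout/
    return router.urls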
| 38.783505
| 108
| 0.64487
|
a79c4b15b0b1b8c629f7975ac7409ff4f520c617
| 2,193
|
py
|
Python
|
transportation.py
|
stscicrawford/travelreform
|
252dfc67a1be5f5876efeb74a6fea88416221e50
|
[
"BSD-3-Clause"
] | null | null | null |
transportation.py
|
stscicrawford/travelreform
|
252dfc67a1be5f5876efeb74a6fea88416221e50
|
[
"BSD-3-Clause"
] | 5
|
2018-05-25T14:19:38.000Z
|
2018-05-25T14:48:15.000Z
|
transportation.py
|
stscicrawford/travelreform
|
252dfc67a1be5f5876efeb74a6fea88416221e50
|
[
"BSD-3-Clause"
] | 3
|
2018-05-25T14:29:04.000Z
|
2018-05-25T14:33:40.000Z
|
from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField, SelectField, DateField
# App config.
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
class TransportationForm(Form):
departure = TextField('Departure City:', validators=[validators.InputRequired()])
arrival = TextField('Arrival City:', validators=[validators.InputRequired()])
    date_depart = DateField('Date of departure (mm/dd/yyyy):', format='%m/%d/%Y',
validators=[validators.InputRequired()])
    date_return = DateField('Date of return (mm/dd/yyyy):', format='%m/%d/%Y',
validators=[validators.InputRequired()])
rental = SelectField('Is a rental car needed?',
choices=[('no_rental', 'No'), ('yes_rental','Yes')],
validators=[validators.InputRequired()])
hotel = SelectField('Is a hotel needed?',
choices=[('no_hotel', 'No'), ('yes_hotel','Yes')],
validators=[validators.InputRequired()])
other = TextField('Other (special requests):')
comments = TextAreaField('Comments:', [validators.Length(max=500)])
@app.route("/", methods=['GET', 'POST'])
def transportation():
form = TransportationForm(request.form)
print(form.errors)
if request.method == 'POST':
#General Transportation
departure = request.form['departure']
arrival = request.form['arrival']
date_depart = request.form['date_depart']
date_return = request.form['date_return']
# Rental Car and Hotel
rental = request.form['rental']
hotel = request.form['hotel']
other = request.form['other']
comments = request.form['comments']
print(departure)
if form.validate():
# Save the comment here.
flash('Hello ')
else:
flash('All the form fields are required. ')
return render_template('transportation.html', form=form)
if __name__ == "__main__":
app.run()
| 36.55
| 112
| 0.622891
|
580a29cb9f3bba2eb18421e001c7cf7b0ef9a075
| 5,424
|
py
|
Python
|
libcloud/storage/drivers/google_storage.py
|
wrigri/libcloud
|
26a54f158392e07e75fb1fd9a808db635ee35e2c
|
[
"Apache-2.0"
] | 3
|
2016-06-03T03:40:18.000Z
|
2018-09-24T05:28:47.000Z
|
libcloud/storage/drivers/google_storage.py
|
wrigri/libcloud
|
26a54f158392e07e75fb1fd9a808db635ee35e2c
|
[
"Apache-2.0"
] | 1
|
2015-10-26T21:29:56.000Z
|
2015-10-27T17:29:20.000Z
|
libcloud/storage/drivers/google_storage.py
|
wrigri/libcloud
|
26a54f158392e07e75fb1fd9a808db635ee35e2c
|
[
"Apache-2.0"
] | 2
|
2018-09-24T05:28:42.000Z
|
2020-12-31T05:11:04.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import email.utils
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.google import GoogleAuthType
from libcloud.common.google import GoogleOAuth2Credential
from libcloud.storage.drivers.s3 import BaseS3Connection
from libcloud.storage.drivers.s3 import BaseS3StorageDriver
from libcloud.storage.drivers.s3 import S3RawResponse
from libcloud.storage.drivers.s3 import S3Response
# Docs are a lie. The actual namespace returned is different from the one listed
# in the docs.
SIGNATURE_IDENTIFIER = 'GOOG1'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION)
class GoogleStorageConnection(ConnectionUserAndKey):
"""
Represents a single connection to the Google storage API endpoint.
This can either authenticate via the Google OAuth2 methods or via
the S3 HMAC interoperability method.
"""
host = 'storage.googleapis.com'
responseCls = S3Response
rawResponseCls = S3RawResponse
PROJECT_ID_HEADER = 'x-goog-project-id'
def __init__(self, user_id, key, secure, auth_type=None,
credential_file=None, **kwargs):
self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
if GoogleAuthType.is_oauth2(self.auth_type):
self.oauth2_credential = GoogleOAuth2Credential(
user_id, key, self.auth_type, credential_file, **kwargs)
else:
self.oauth2_credential = None
super(GoogleStorageConnection, self).__init__(user_id, key, secure,
**kwargs)
def add_default_headers(self, headers):
date = email.utils.formatdate(usegmt=True)
headers['Date'] = date
project = self.get_project()
if project:
headers[self.PROJECT_ID_HEADER] = project
return headers
def get_project(self):
return getattr(self.driver, 'project')
def pre_connect_hook(self, params, headers):
if self.auth_type == GoogleAuthType.GCS_S3:
signature = self._get_s3_auth_signature(params, headers)
headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER,
self.user_id, signature)
else:
headers['Authorization'] = ('Bearer ' +
self.oauth2_credential.access_token)
return params, headers
def _get_s3_auth_signature(self, params, headers):
"""Hacky wrapper to work with S3's get_auth_signature."""
headers_copy = {}
params_copy = copy.deepcopy(params)
# Lowercase all headers except 'date' and Google header values
for k, v in headers.items():
k_lower = k.lower()
if (k_lower == 'date' or k_lower.startswith(
GoogleStorageDriver.http_vendor_prefix) or
not isinstance(v, str)):
headers_copy[k_lower] = v
else:
headers_copy[k_lower] = v.lower()
return BaseS3Connection.get_auth_signature(
method=self.method,
headers=headers_copy,
params=params_copy,
expires=None,
secret_key=self.key,
path=self.action,
vendor_prefix=GoogleStorageDriver.http_vendor_prefix)
class GoogleStorageDriver(BaseS3StorageDriver):
"""
Driver for Google Cloud Storage.
Can authenticate via standard Google Cloud methods (Service Accounts,
Installed App credentials, and GCE instance service accounts)
Examples:
Service Accounts::
driver = GoogleStorageDriver(key=client_email, secret=private_key, ...)
Installed Application::
driver = GoogleStorageDriver(key=client_id, secret=client_secret, ...)
From GCE instance::
driver = GoogleStorageDriver(key=foo , secret=bar, ...)
Can also authenticate via Google Cloud Storage's S3 HMAC interoperability
API. S3 user keys are 20 alphanumeric characters, starting with GOOG.
Example::
driver = GoogleStorageDriver(key='GOOG0123456789ABCXYZ',
secret=key_secret)
"""
name = 'Google Storage'
website = 'http://cloud.google.com/'
connectionCls = GoogleStorageConnection
hash_type = 'md5'
namespace = NAMESPACE
supports_chunked_encoding = False
supports_s3_multipart_upload = False
http_vendor_prefix = 'x-goog'
def __init__(self, key, secret=None, project=None, **kwargs):
self.project = project
super(GoogleStorageDriver, self).__init__(key, secret, **kwargs)
| 37.150685
| 79
| 0.673304
|
990eeb49e158a1c7d0a3acdd5368ac91e18d1c20
| 996
|
py
|
Python
|
agent/src/agent/cli/prompt/pipeline/sage.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 16
|
2019-04-03T08:31:54.000Z
|
2021-01-24T17:12:04.000Z
|
agent/src/agent/cli/prompt/pipeline/sage.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 10
|
2020-01-20T14:59:06.000Z
|
2022-01-21T10:19:16.000Z
|
agent/src/agent/cli/prompt/pipeline/sage.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | 5
|
2021-01-08T19:23:03.000Z
|
2021-11-09T14:15:49.000Z
|
import click
from .schemaless import SchemalessPrompter
class SagePrompter(SchemalessPrompter):
def prompt_config(self):
self.set_query()
self.prompt_delay()
self.prompt_interval()
self.prompt_days_to_backfill()
# self.data_preview()
self.set_values()
self.prompt_measurement_names()
self.set_dimensions()
self.prompt_static_dimensions()
self.prompt_tags()
def set_query(self):
self.config['query_file'] = click.prompt('Query file path', type=click.Path(exists=True, dir_okay=False),
default=self.default_config.get('query_file'))
with open(self.config['query_file']) as f:
self.config['query'] = f.read()
def set_dimensions(self):
self.config['dimensions'] = self.prompt_dimensions('Dimensions',
default_value=self.default_config.get('dimensions', []))
| 34.344828
| 115
| 0.601406
|
60e8bad3a1515020a39e1fbbccf489256ef99f65
| 19,765
|
py
|
Python
|
pypeit/deprecated/ech_coadd.py
|
ykwang1/PypeIt
|
a96cff699f1284905ce7ef19d06a9027cd333c63
|
[
"BSD-3-Clause"
] | 107
|
2018-08-06T07:07:20.000Z
|
2022-02-28T14:33:42.000Z
|
pypeit/deprecated/ech_coadd.py
|
ykwang1/PypeIt
|
a96cff699f1284905ce7ef19d06a9027cd333c63
|
[
"BSD-3-Clause"
] | 889
|
2018-07-26T12:14:33.000Z
|
2022-03-18T22:49:42.000Z
|
pypeit/deprecated/ech_coadd.py
|
ykwang1/PypeIt
|
a96cff699f1284905ce7ef19d06a9027cd333c63
|
[
"BSD-3-Clause"
] | 74
|
2018-09-25T17:03:07.000Z
|
2022-03-10T23:59:24.000Z
|
""" Routine for Echelle coaddition
"""
import numpy as np
from astropy import stats
from astropy.io import fits
from astropy import units
import matplotlib.pyplot as plt
from pypeit.core import coadd
from pypeit.core import load
from pypeit import msgs
from linetools.spectra.utils import collate
from linetools.spectra.xspectrum1d import XSpectrum1D
from pkg_resources import resource_filename
# setting plot parameters
plt.rcdefaults()
plt.rcParams['font.family'] = 'times new roman'
plt.rcParams["xtick.top"] = True
plt.rcParams["ytick.right"] = True
plt.rcParams["xtick.minor.visible"] = True
plt.rcParams["ytick.minor.visible"] = True
plt.rcParams["ytick.direction"] = 'in'
plt.rcParams["xtick.direction"] = 'in'
plt.rcParams["xtick.labelsize"] = 17
plt.rcParams["ytick.labelsize"] = 17
plt.rcParams["axes.labelsize"] = 17
def spec_from_array(wave,flux,sig,**kwargs):
"""
Make an XSpectrum1D from numpy arrays of wave, flux and sig
    Parameters
    ----------
    wave, flux, sig : ndarray
        If wave is unitless, Angstroms are assumed.
        If flux is unitless, it is made dimensionless.
        The units for sig are taken from flux.
    Returns
    -------
    XSpectrum1D spectrum built from the input arrays.
"""
# Get rid of 0 wavelength
good_wave = (wave>1.0*units.AA)
wave,flux,sig = wave[good_wave],flux[good_wave],sig[good_wave]
ituple = (wave, flux, sig)
spectrum = XSpectrum1D.from_tuple(ituple, **kwargs)
# Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
# the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
np.abs(spectrum.flux) > 1e30,
spectrum.sig ** 2 > 1e10,
], axis=0)
if np.sum(bad_flux):
msgs.warn("There are some bad flux values in this spectrum. Will zero them out and mask them (not ideal)")
spectrum.data['flux'][spectrum.select][bad_flux] = 0.
spectrum.data['sig'][spectrum.select][bad_flux] = 0.
return spectrum
def order_phot_scale(spectra, phot_scale_dicts, nsig=3.0, niter=5, debug=False):
'''
Scale coadded spectra with photometric data.
Parameters:
spectra: XSpectrum1D spectra (longslit) or spectra list (echelle)
phot_scale_dicts: A dict contains photometric information of each orders (if echelle).
An example is given below.
phot_scale_dicts = {0: {'filter': None, 'mag': None, 'mag_type': None, 'masks': None},
1: {'filter': 'UKIRT-Y', 'mag': 20.33, 'mag_type': 'AB', 'masks': None},
2: {'filter': 'UKIRT-J', 'mag': 20.19, 'mag_type': 'AB', 'masks': None},
3: {'filter': 'UKIRT-H', 'mag': 20.02, 'mag_type': 'AB', 'masks': None},
4: {'filter': 'UKIRT-K', 'mag': 19.92, 'mag_type': 'AB', 'masks': None}}
Show QA plot if debug=True
Return a new scaled XSpectrum1D spectra
'''
from pypeit.core.flux_calib import scale_in_filter
norder = spectra.nspec
# scaling spectrum order by order.
spectra_list_new = []
for iord in range(norder):
phot_scale_dict = phot_scale_dicts[iord]
if (phot_scale_dict['filter'] is not None) & (phot_scale_dict['mag'] is not None):
speci = scale_in_filter(spectra[iord], phot_scale_dict)
else:
#ToDo: Think a better way to do the following
try:
spec0 = scale_in_filter(spectra[iord-1], phot_scale_dicts[iord-1])
speci = spectra[iord]
med_flux = spec0.data['flux'] / speci.data['flux']
mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, iters=niter)
med_scale = np.minimum(med_scale, 5.0)
                speci.data['flux'] *= med_scale
                speci.data['sig'] *= med_scale
msgs.warn("Not enough photometric information given. Scaled order {:d} to order {:d}".format(iord, iord-1))
except KeyError:
msgs.warn("Not enough photometric information given. Scale order {:d} to order {:d} failed".format(iord, iord-1))
try:
spec0 = scale_in_filter(spectra[iord + 1], phot_scale_dicts[iord + 1])
speci = spectra[iord]
med_flux = spec0.data['flux'] / speci.data['flux']
mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, iters=niter)
med_scale = np.minimum(med_scale, 5.0)
speci.data['flux'] *= med_scale
speci.data['sig'] *= med_scale
msgs.warn("Not enough photometric information given. Scaled order {:d} to order {:d}".format(iord, iord+1))
except:
msgs.warn("Not enough photometric information given. No scaling on order {:d}".format(iord))
speci = spectra[iord]
spectra_list_new.append(speci)
if debug:
gdp = speci.sig>0
plt.plot(spectra[iord].wavelength[gdp], spectra[iord].flux[gdp], 'k-', label='raw spectrum')
plt.plot(speci.wavelength[gdp], speci.flux[gdp], 'b-',
label='scaled spectrum')
mny, medy, stdy = stats.sigma_clipped_stats(speci.flux[gdp], sigma=3, iters=5)
plt.ylim([0.1 * medy, 4.0 * medy])
plt.legend()
plt.xlabel('wavelength')
plt.ylabel('Flux')
plt.show()
return collate(spectra_list_new)
def order_median_scale(spectra, nsig=3.0, niter=5, overlapfrac=0.03, num_min_pixels=50, SN_MIN_MEDSCALE=0.5, debug=False):
'''
Scale different orders using the median of overlap regions. It starts from the reddest order, i.e. scale H to K,
and then scale J to H+K, etc.
Parameters:
spectra: XSpectrum1D spectra
nsig: float
sigma used for sigma_clipping median
niter: int
number of iterations for sigma_clipping median
overlapfrac: float
        minimum overlap fraction (number of overlapped pixels divided by the number of pixels of the whole spectrum) between orders.
num_min_pixels: int
        minimum number of required good pixels. The code only scales orders when the overlapped
pixels > max(num_min_pixels,overlapfrac*len(wave))
SN_MIN_MEDSCALE: float
        Minimum RMS S/N required to automatically apply median scaling
Show QA plot if debug=True
Return:
No return, but the spectra is already scaled after executing this function.
'''
norder = spectra.nspec
fluxes, sigs, wave = coadd.unpack_spec(spectra, all_wave=False)
fluxes_raw = fluxes.copy()
    # scaling spectrum order by order. We use the reddest order as the reference since slit loss is smaller in redder orders
for i in range(norder - 1):
iord = norder - i - 1
sn_iord_iref = fluxes[iord] * (1. / sigs[iord])
sn_iord_scale = fluxes[iord - 1] * (1. / sigs[iord - 1])
allok = (sigs[iord - 1, :] > 0) & (sigs[iord, :] > 0) & (sn_iord_iref > SN_MIN_MEDSCALE) & (
sn_iord_scale > SN_MIN_MEDSCALE)
if sum(allok) > np.maximum(num_min_pixels, len(wave) * overlapfrac):
# Ratio
med_flux = spectra.data['flux'][iord, allok] / spectra.data['flux'][iord - 1, allok]
# Clip
mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, iters=niter)
med_scale = np.minimum(med_scale, 5.0)
spectra.data['flux'][iord - 1, :] *= med_scale
spectra.data['sig'][iord - 1, :] *= med_scale
            msgs.info('Scaled order %s by a factor of %s'%(iord,str(med_scale)))
if debug:
plt.plot(wave, spectra.data['flux'][iord], 'r-', label='reference spectrum')
plt.plot(wave, fluxes_raw[iord - 1], 'k-', label='raw spectrum')
plt.plot(spectra.data['wave'][iord - 1, :], spectra.data['flux'][iord - 1, :], 'b-',
label='scaled spectrum')
mny, medy, stdy = stats.sigma_clipped_stats(fluxes[iord, allok], sigma=nsig, iters=niter)
plt.ylim([0.1 * medy, 4.0 * medy])
plt.xlim([np.min(wave[sigs[iord - 1, :] > 0]), np.max(wave[sigs[iord, :] > 0])])
plt.legend()
plt.xlabel('wavelength')
plt.ylabel('Flux')
plt.show()
else:
            msgs.warn('Not enough overlap region for stitching different orders.')
def ech_coadd(files,objids=None,extract='OPT',flux=True,giantcoadd=False,orderscale='median',mergeorder=True,
wave_grid_method='velocity', niter=5,wave_grid_min=None, wave_grid_max=None,v_pix=None,
scale_method='auto', do_offset=False, sigrej_final=3.,do_var_corr=False,
SN_MIN_MEDSCALE = 0.5, overlapfrac = 0.01, num_min_pixels=10,phot_scale_dicts=None,
qafile=None, outfile=None,do_cr=True, debug=False,**kwargs):
"""
routines for coadding spectra observed with echelle spectrograph.
parameters:
files (list): file names
objids (str): objid
extract (str): 'OPT' or 'BOX'
flux (bool): fluxed or not
giantcoadd (bool): coadding order by order or do it at once?
wave_grid_method (str): default velocity
niter (int): number of iteration for rejections
wave_grid_min (float): min wavelength, None means it will find the min value from your spectra
wave_grid_max (float): max wavelength, None means it will find the max value from your spectra
v_pix (float): delta velocity, see coadd.py
scale_method (str): see coadd.py
do_offset (str): see coadd.py, not implemented yet.
sigrej_final (float): see coadd.py
do_var_corr (bool): see coadd.py, default False. It seems True will results in a large error
SN_MIN_MEDSCALE (float): minimum SNR for scaling different orders
overlapfrac (float): minimum overlap fraction for scaling different orders.
qafile (str): name of qafile
outfile (str): name of coadded spectrum
do_cr (bool): remove cosmic rays?
debug (bool): show debug plots?
kwargs: see coadd.py
returns:
spec1d: coadded XSpectrum1D
"""
nfile = len(files)
if nfile <=1:
        msgs.info('Only one spectrum exists, exiting coadding...')
return
fname = files[0]
ext_final = fits.getheader(fname, -1)
norder = ext_final['ECHORDER'] + 1
msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')
if giantcoadd:
msgs.info('Coadding all orders and exposures at once')
spectra = load.ech_load_spec(files, objid=objids,order=None, extract=extract, flux=flux)
wave_grid = np.zeros((2,spectra.nspec))
for i in range(spectra.nspec):
wave_grid[0, i] = spectra[i].wvmin.value
wave_grid[1, i] = spectra[i].wvmax.value
ech_kwargs = {'echelle': True, 'wave_grid_min': np.min(wave_grid), 'wave_grid_max': np.max(wave_grid),
'v_pix': v_pix}
kwargs.update(ech_kwargs)
# Coadding
spec1d = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
do_cr=do_cr, debug=debug,**kwargs)
else:
msgs.info('Coadding individual orders first and then merge order')
spectra_list = []
# Keywords for Table
rsp_kwargs = {}
rsp_kwargs['wave_tag'] = '{:s}_WAVE'.format(extract)
rsp_kwargs['flux_tag'] = '{:s}_FLAM'.format(extract)
rsp_kwargs['sig_tag'] = '{:s}_FLAM_SIG'.format(extract)
#wave_grid = np.zeros((2,norder))
for iord in range(norder):
spectra = load.ech_load_spec(files, objid=objids, order=iord, extract=extract, flux=flux)
ech_kwargs = {'echelle': False, 'wave_grid_min': spectra.wvmin.value, 'wave_grid_max': spectra.wvmax.value, 'v_pix': v_pix}
#wave_grid[0,iord] = spectra.wvmin.value
#wave_grid[1,iord] = spectra.wvmax.value
kwargs.update(ech_kwargs)
# Coadding the individual orders
if qafile is not None:
qafile_iord = qafile+'_%s'%str(iord)
else:
qafile_iord = None
spec1d_iord = coadd.coadd_spectra(spectra, wave_grid_method=wave_grid_method, niter=niter,
scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
do_var_corr=do_var_corr, qafile=qafile_iord, outfile=None,
do_cr=do_cr, debug=debug, **kwargs)
spectrum = spec_from_array(spec1d_iord.wavelength, spec1d_iord.flux, spec1d_iord.sig,**rsp_kwargs)
spectra_list.append(spectrum)
spectra_coadd = collate(spectra_list)
# Rebin the spectra
# ToDo: we should read in JFH's wavelength grid here.
# Join into one XSpectrum1D object
# Final wavelength array
kwargs['wave_grid_min'] = np.min(spectra_coadd.data['wave'][spectra_coadd.data['wave'] > 0])
kwargs['wave_grid_max'] = np.max(spectra_coadd.data['wave'][spectra_coadd.data['wave'] > 0])
wave_final = coadd.new_wave_grid(spectra_coadd.data['wave'], wave_method=wave_grid_method, **kwargs)
# The rebin function in linetools can not work on collated spectra (i.e. filled 0).
# Thus I have to rebin the spectra first and then collate again.
spectra_list_new = []
for i in range(spectra_coadd.nspec):
speci = spectra_list[i].rebin(wave_final * units.AA, all=True, do_sig=True, grow_bad_sig=True,
masking='none')
spectra_list_new.append(speci)
spectra_coadd_rebin = collate(spectra_list_new)
## Note
if orderscale == 'photometry':
# Only tested on NIRES.
if phot_scale_dicts is not None:
spectra_coadd_rebin = order_phot_scale(spectra_coadd_rebin, phot_scale_dicts, debug=debug)
else:
msgs.warn('No photometric information is provided. Will use median scale.')
orderscale = 'median'
elif orderscale == 'median':
#rmask = spectra_coadd_rebin.data['sig'].filled(0.) > 0.
#sn2, weights = coadd.sn_weights(fluxes, sigs, rmask, wave)
## scaling different orders
order_median_scale(spectra_coadd_rebin, nsig=sigrej_final, niter=niter, overlapfrac=overlapfrac,
num_min_pixels=num_min_pixels, SN_MIN_MEDSCALE=SN_MIN_MEDSCALE, debug=debug)
else:
msgs.warn('No any scaling is performed between different orders.')
if mergeorder:
fluxes, sigs, wave = coadd.unpack_spec(spectra_coadd_rebin, all_wave=False)
            ## Merging orders
            msgs.info('Merging different orders')
            ## ToDo: Joe claimed not to use pixel dependent weighting.
weights = 1.0 / sigs**2
weights[~np.isfinite(weights)] = 0.0
weight_combine = np.sum(weights, axis=0)
weight_norm = weights / weight_combine
weight_norm[np.isnan(weight_norm)] = 1.0
flux_final = np.sum(fluxes * weight_norm, axis=0)
sig_final = np.sqrt(np.sum((weight_norm * sigs) ** 2, axis=0))
spec1d_final = spec_from_array(wave_final * units.AA,flux_final,sig_final,**rsp_kwargs)
if outfile is not None:
msgs.info('Saving the final calibrated spectrum as {:s}'.format(outfile))
coadd.write_to_disk(spec1d_final, outfile)
if (qafile is not None) or (debug):
# plot and save qa
plt.figure(figsize=(12, 6))
ax1 = plt.axes([0.07, 0.13, 0.9, 0.4])
ax2 = plt.axes([0.07, 0.55, 0.9, 0.4])
plt.setp(ax2.get_xticklabels(), visible=False)
medf = np.median(spec1d_final.flux)
ylim = (np.sort([0. - 0.3 * medf, 5 * medf]))
cmap = plt.get_cmap('RdYlBu_r')
for idx in range(spectra_coadd_rebin.nspec):
spectra_coadd_rebin.select = idx
color = cmap(float(idx) / spectra_coadd_rebin.nspec)
ind_good = spectra_coadd_rebin.sig > 0
ax1.plot(spectra_coadd_rebin.wavelength[ind_good], spectra_coadd_rebin.flux[ind_good], color=color)
if (np.max(spec1d_final.wavelength) > (9000.0 * units.AA)):
skytrans_file = resource_filename('pypeit', '/data/skisim/atm_transmission_secz1.5_1.6mm.dat')
skycat = np.genfromtxt(skytrans_file, dtype='float')
scale = 0.85 * ylim[1]
ax2.plot(skycat[:, 0] * 1e4, skycat[:, 1] * scale, 'm-', alpha=0.5)
ax2.plot(spec1d_final.wavelength, spec1d_final.sig, ls='steps-', color='0.7')
ax2.plot(spec1d_final.wavelength, spec1d_final.flux, ls='steps-', color='b')
ax1.set_xlim([np.min(spec1d_final.wavelength.value), np.max(spec1d_final.wavelength.value)])
ax2.set_xlim([np.min(spec1d_final.wavelength.value), np.max(spec1d_final.wavelength.value)])
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
ax1.set_xlabel('Wavelength (Angstrom)')
ax1.set_ylabel('Flux')
ax2.set_ylabel('Flux')
plt.tight_layout(pad=0.2, h_pad=0., w_pad=0.2)
if len(qafile.split('.')) == 1:
msgs.info("No fomat given for the qafile, save to PDF format.")
qafile = qafile + '.pdf'
if qafile:
plt.savefig(qafile)
msgs.info("Wrote coadd QA: {:s}".format(qafile))
if debug:
plt.show()
plt.close()
            ### Do NOT remove this part although it is deprecated.
            # we may need to go back to using this piece of code after fixing the coadd.coadd_spectra problem on the first order.
#kwargs['echelle'] = True
#kwargs['wave_grid_min'] = np.min(wave_grid)
#kwargs['wave_grid_max'] = np.max(wave_grid)
#spec1d_final = coadd.coadd_spectra(spectra_coadd_rebin, wave_grid_method=wave_grid_method, niter=niter,
# scale_method=scale_method, do_offset=do_offset, sigrej_final=sigrej_final,
# do_var_corr=do_var_corr, qafile=qafile, outfile=outfile,
# do_cr=do_cr, debug=debug, **kwargs)
return spec1d_final
else:
msgs.warn('Skipped merging orders')
if outfile is not None:
for iord in range(len(spectra_list)):
outfile_iord = outfile.replace('.fits','_ORDER{:04d}.fits'.format(iord))
msgs.info('Saving the final calibrated spectrum of order {:d} as {:s}'.format(iord,outfile))
spectra_list[iord].write_to_fits(outfile_iord)
return spectra_list
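# Illustrative call sketch (not part of the original module): a typical
# invocation of ech_coadd on two fluxed spec1d files of the same object. The
# file names and objids are hypothetical placeholders; the keyword values are
# either the documented defaults or plausible choices, not prescriptions.
def _example_ech_coadd_call():
    files = ['spec1d_exp1.fits', 'spec1d_exp2.fits']
    objids = ['OBJ0001', 'OBJ0001']
    return ech_coadd(files, objids=objids, extract='OPT', flux=True,
                     giantcoadd=False, orderscale='median', mergeorder=True,
                     wave_grid_method='velocity', qafile='coadd_qa',
                     outfile='coadd.fits', debug=False)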
| 51.204663
| 135
| 0.601366
|
8b0f281127dcf9d8d01f930a5ee6894777028820
| 1,592
|
py
|
Python
|
lpips/base_model.py
|
stevenwudi/stylegan2-pytorch-toonify
|
c03330d316159a3ced3d41c0f6a3f377955fcc54
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2022-01-11T12:58:14.000Z
|
2022-01-11T12:58:14.000Z
|
lpips/base_model.py
|
stevenwudi/stylegan2-pytorch-toonify
|
c03330d316159a3ced3d41c0f6a3f377955fcc54
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
lpips/base_model.py
|
stevenwudi/stylegan2-pytorch-toonify
|
c03330d316159a3ced3d41c0f6a3f377955fcc54
|
[
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2021-07-19T03:40:32.000Z
|
2021-07-19T03:40:32.000Z
|
import os
import numpy as np
import torch
from torch.autograd import Variable
from pdb import set_trace as st
class BaseModel():
def __init__(self):
        pass
def name(self):
return 'BaseModel'
def initialize(self, use_gpu=True, gpu_ids=[0]):
self.use_gpu = use_gpu
self.gpu_ids = gpu_ids
def forward(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, path, network_label, epoch_label):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(path, save_filename)
torch.save(network.state_dict(), save_path)
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
print('Loading network from %s'%save_path)
network.load_state_dict(torch.load(save_path))
    def update_learning_rate(self):
pass
def get_image_paths(self):
return self.image_paths
def save_done(self, flag=False):
np.save(os.path.join(self.save_dir, 'done_flag'),flag)
np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
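# Illustrative subclass sketch (not part of the original module): how the
# save_network/load_network helpers above are meant to be used by a concrete
# model. The network label 'net' and the save directory are assumptions.
class _ExampleModel(BaseModel):
    def initialize(self, net, save_dir, use_gpu=True, gpu_ids=[0]):
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
        self.net = net
        self.save_dir = save_dir          # load_network reads self.save_dir
    def save(self, label):
        # writes '<label>_net_net.pth' into self.save_dir
        self.save_network(self.net, self.save_dir, 'net', label)
    def load(self, label):
        self.load_network(self.net, 'net', label)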
| 27.448276
| 77
| 0.65892
|
bbccf0c7c97187561b67023aeb51ce30864b8a98
| 21,099
|
py
|
Python
|
bips/workflows/gablab/wips/fmri/first_level/first_level_ev.py
|
akeshavan/BrainImagingPipelines
|
39915b5a313d049a0bb3ccd3a82a9a83e1241b11
|
[
"Apache-2.0"
] | 1
|
2017-12-03T00:12:26.000Z
|
2017-12-03T00:12:26.000Z
|
bips/workflows/gablab/wips/fmri/first_level/first_level_ev.py
|
akeshavan/BrainImagingPipelines
|
39915b5a313d049a0bb3ccd3a82a9a83e1241b11
|
[
"Apache-2.0"
] | null | null | null |
bips/workflows/gablab/wips/fmri/first_level/first_level_ev.py
|
akeshavan/BrainImagingPipelines
|
39915b5a313d049a0bb3ccd3a82a9a83e1241b11
|
[
"Apache-2.0"
] | null | null | null |
# Import Stuff
from ...scripts.base import create_first
import os
from .....base import MetaWorkflow, load_config, register_workflow
from traits.api import HasTraits, Directory, Bool
import traits.api as traits
from .....flexible_datagrabber import Data, DataBase
"""
Part 1: Define a MetaWorkflow
"""
mwf = MetaWorkflow()
mwf.uuid = '02e050aa486911e2b88d00259080ab1a'
mwf.help="""
First-Level Workflow - ev files
===============================
"""
mwf.tags=['fMRI','First Level','ev files']
"""
Part 2: Define the config class & create_config function
"""
class config(HasTraits):
uuid = traits.Str(desc="UUID")
desc = traits.Str(desc="Workflow Description")
# Directories
working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
sink_dir = Directory(os.path.abspath('.'), mandatory=True, desc="Location where the BIP will store the results")
crash_dir = Directory(mandatory=False, desc="Location to store crash files")
json_sink = Directory(mandatory=False, desc= "Location to store json_files")
surf_dir = Directory(mandatory=True, desc= "Freesurfer subjects directory")
save_script_only = traits.Bool(False)
# Execution
run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
plugin = traits.Enum("PBS", "MultiProc", "SGE", "Condor",
usedefault=True,
desc="plugin to use, if run_using_plugin=True")
plugin_args = traits.Dict({"qsub_args": "-q many"},
usedefault=True, desc='Plugin arguments.')
test_mode = Bool(False, mandatory=False, usedefault=True,
                     desc='Affects whether and where the workflow keeps its \
                     intermediary files. True to keep intermediary files. ')
timeout = traits.Float(14.0)
# Subjects
#subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
# desc="Subject id's. Note: These MUST match the subject id's in the \
# Freesurfer directory. For simplicity, the subject id's should \
# also match with the location of individual functional files.")
datagrabber = traits.Instance(Data, ())
# First Level
interscan_interval = traits.Float()
film_threshold = traits.Float()
input_units = traits.Enum('scans','secs')
is_sparse = traits.Bool(False)
model_hrf = traits.Bool(True)
stimuli_as_impulses = traits.Bool(True)
use_temporal_deriv = traits.Bool(True)
volumes_in_cluster = traits.Int(1)
ta = traits.Float()
tr = traits.Float()
hpcutoff = traits.Float()
scan_onset = traits.Int(0)
scale_regressors = traits.Bool(True)
#bases = traits.Dict({'dgamma':{'derivs': False}},use_default=True)
bases = traits.Dict({'dgamma':{'derivs': False}},use_default=True)#traits.Enum('dgamma','gamma','none'), traits.Enum(traits.Dict(traits.Enum('derivs',None), traits.Bool),None), desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}")
# preprocessing info
preproc_config = traits.File(desc="preproc config file")
use_compcor = traits.Bool(desc="use noise components from CompCor")
#advanced_options
use_advanced_options = Bool(False)
advanced_options = traits.Code()
def create_config():
c = config()
c.uuid = mwf.uuid
c.desc = mwf.help
c.datagrabber = create_datagrabber_config()
return c
def create_datagrabber_config():
dg = Data(['noise_components',
'motion_parameters',
'highpassed_files',
'outlier_files','ev_files','contrasts'])
foo = DataBase()
foo.name="subject_id"
foo.iterable = True
foo.values=["sub01","sub02"]
bar = DataBase()
bar.name = 'fwhm'
bar.iterable =True
bar.values = ['0', '6.0']
dg.template= '*'
dg.field_template = dict(noise_components='%s/preproc/noise_components/*noise_components.txt',
motion_parameters='%s/preproc/motion/*.par',
highpassed_files='%s/preproc/output/bandpassed/fwhm_%s/*.nii*',
outlier_files='%s/preproc/art/*_outliers.txt',
ev_files='%s/model/*',
contrasts='%s/contrasts/*')
dg.template_args = dict(noise_components=[['subject_id']],
motion_parameters=[['subject_id']],
highpassed_files=[['subject_id','fwhm']],
outlier_files=[['subject_id']],
ev_files=[['subject_id']],
contrasts=[['subject_id']])
dg.fields = [foo, bar]
return dg
mwf.config_ui = create_config
"""
Part 3: Create a View
"""
def create_view():
from traitsui.api import View, Item, Group
from traitsui.menu import OKButton, CancelButton
view = View(Group(Item(name='uuid', style='readonly'),
Item(name='desc', style='readonly'),
label='Description', show_border=True),
Group(Item(name='working_dir'),
Item(name='sink_dir'),
Item(name='crash_dir'),
Item(name='json_sink'),
label='Directories', show_border=True),
Group(Item(name='run_using_plugin',enabled_when='save_script_only'),Item('save_script_only'),
Item(name='plugin', enabled_when="run_using_plugin"),
Item(name='plugin_args', enabled_when="run_using_plugin"),
Item(name='test_mode'), Item(name="timeout"),
label='Execution Options', show_border=True),
Group(Item(name='datagrabber'),
label='Subjects', show_border=True),
Group(Item(name='interscan_interval'),
Item(name='film_threshold'),
Item(name='input_units'),
Item("is_sparse"),
Item("ta",enabled_when="is_sparse"),
Item("tr"),
Item("hpcutoff"),
Item("model_hrf",enabled_when="is_sparse"),
Item("stimuli_as_impulses",enabled_when="is_sparse"),
Item("use_temporal_deriv",enabled_when="is_sparse"),
Item("volumes_in_cluster",enabled_when="is_sparse"),
Item("scan_onset",enabled_when="is_sparse"),
Item("scale_regressors",enabled_when="is_sparse"),
Item("bases"),
label = 'First Level'),
Group(Item(name='preproc_config'),
Item(name="use_compcor"),
label = 'Preprocessing Info'),
Group(Item(name='use_advanced_options'),
Item(name="advanced_options", enabled_when="use_advanced_options"),
label="Advanced Options", show_border=True),
buttons = [OKButton, CancelButton],
resizable=True,
width=1050)
return view
mwf.config_view = create_view
"""
Part 4: Workflow Construction
"""
def extract_contrasts(con_file):
import numpy as np
cons = np.genfromtxt(con_file,dtype=str)
contrasts = []
for c in cons:
contrasts.append((c[0],c[1],c[2].split(','),np.array(c[3].split(',')).astype(float).tolist()))
return contrasts
def gen_info(run_event_files):
"""Generate subject_info structure from a list of event files
"""
from nipype.interfaces.base import Bunch
import numpy as np
import os
info = []
if not isinstance(run_event_files[0],list):
run_event_files = [run_event_files]
for i, event_files in enumerate(run_event_files):
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[],tmod=None, pmod=None, regressor_names=None, regressors=None)
for event_file in event_files:
_, name = os.path.split(event_file)
if name.endswith('.txt'):
name, _ = name.split('.txt')
if '.run' in name:
name, _ = name.split('.run')
#elif '.txt' in name:
# name, _ = name.split('.txt')
runinfo.conditions.append(name)
event_info = np.loadtxt(event_file)
if len(event_info.shape) == 1:
event_info = event_info[:,None].T
runinfo.onsets.append(event_info[:, 0].tolist())
if event_info.shape[1] > 1:
runinfo.durations.append(event_info[:, 1].tolist())
else:
runinfo.durations.append([0])
if event_info.shape[1] > 2:
runinfo.amplitudes.append(event_info[:, 2].tolist())
else:
delattr(runinfo, 'amplitudes')
info.append(runinfo)
return info
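# Illustrative note (not part of the original file): each event file is expected
# to be named after its condition (e.g. 'faces.run001.txt' -> condition 'faces')
# and to hold one event per row as 'onset [duration [amplitude]]'; missing
# columns fall back to a duration of 0 and no amplitudes, as handled above.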
def trad_mot(subinfo,files):
# modified to work with only one regressor at a time...
import numpy as np
motion_params = []
mot_par_names = ['Pitch (rad)','Roll (rad)','Yaw (rad)','Tx (mm)','Ty (mm)','Tz (mm)']
if not isinstance(files,list):
files = [files]
if not isinstance(subinfo,list):
subinfo = [subinfo]
for j,i in enumerate(files):
motion_params.append([[],[],[],[],[],[]])
#k = map(lambda x: float(x), filter(lambda y: y!='',open(i,'r').read().replace('\n',' ').split(' ')))
        print(i)
a = np.genfromtxt(i)
for z in range(6):
motion_params[j][z] = a[:,z].tolist()#k[z:len(k):6]
for j,i in enumerate(subinfo):
        if i.regressor_names is None: i.regressor_names = []
        if i.regressors is None: i.regressors = []
for j3, i3 in enumerate(motion_params[j]):
i.regressor_names.append(mot_par_names[j3])
i.regressors.append(i3)
return subinfo
def noise_mot(subinfo,files,num_noise_components,use_compcor):
if use_compcor:
noi_reg_names = map(lambda x: 'noise_comp_'+str(x+1),range(num_noise_components))
noise_regressors = []
if not isinstance(files,list):
files = [files]
for j,i in enumerate(files):
noise_regressors.append([[]]*num_noise_components)
k = map(lambda x: float(x), filter(lambda y: y!='',open(i,'r').read().replace('\n',' ').split(' ')))
for z in range(num_noise_components):
noise_regressors[j][z] = k[z:len(k):num_noise_components]
for j,i in enumerate(subinfo):
        if i.regressor_names is None: i.regressor_names = []
        if i.regressors is None: i.regressors = []
for j3,i3 in enumerate(noise_regressors[j]):
i.regressor_names.append(noi_reg_names[j3])
i.regressors.append(i3)
return subinfo
# First level modeling
from ...scripts.workflow1 import create_config as prep_config
foo = prep_config()
def combine_wkflw(c,prep_c=foo, name='work_dir'):
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.io as nio # input/output
from nipype.algorithms.modelgen import SpecifyModel, SpecifySparseModel
import numpy as np
modelflow = pe.Workflow(name=name)
modelflow.base_dir = os.path.join(c.working_dir)
preproc = c.datagrabber.create_dataflow()#preproc_datagrabber(prep_c)
#infosource = pe.Node(util.IdentityInterface(fields=['subject_id']),
# name='subject_names')
#if c.test_mode:
# infosource.iterables = ('subject_id', [c.subjects[0]])
#else:
# infosource.iterables = ('subject_id', c.subjects)
infosource = preproc.get_node('subject_id_iterable')
#modelflow.connect(infosource,'subject_id',preproc,'subject_id')
#preproc.iterables = ('fwhm', prep_c.fwhm)
subjectinfo = pe.Node(util.Function(input_names=['run_event_files'], output_names=['info'],function=gen_info), name='subjectinfo')
#subjectinfo.inputs.function_str = c.subjectinfo
def getsubs(subject_id,cons,info,fwhm):
#from config import getcontrasts, get_run_numbers, subjectinfo, fwhm
subs = [('_subject_id_%s/'%subject_id,''),
('_plot_type_',''),
('_fwhm','fwhm'),
('_dtype_mcf_mask_mean','_mean'),
('_dtype_mcf_mask_smooth_mask_gms_tempfilt','_smoothed_preprocessed'),
('_dtype_mcf_mask_gms_tempfilt','_unsmoothed_preprocessed'),
('_dtype_mcf','_mcf')]
for i in range(4):
subs.append(('_plot_motion%d'%i, ''))
subs.append(('_highpass%d/'%i, ''))
subs.append(('_realign%d/'%i, ''))
subs.append(('_meanfunc2%d/'%i, ''))
runs = range(len(info))
for i, run in enumerate(runs):
subs.append(('_modelestimate%d/'%i, '_run_%d_%02d_'%(i,run)))
subs.append(('_modelgen%d/'%i, '_run_%d_%02d_'%(i,run)))
subs.append(('_conestimate%d/'%i,'_run_%d_%02d_'%(i,run)))
for i, con in enumerate(cons):
subs.append(('cope%d.'%(i+1), 'cope%02d_%s.'%(i+1,con[0])))
subs.append(('varcope%d.'%(i+1), 'varcope%02d_%s.'%(i+1,con[0])))
subs.append(('zstat%d.'%(i+1), 'zstat%02d_%s.'%(i+1,con[0])))
subs.append(('tstat%d.'%(i+1), 'tstat%02d_%s.'%(i+1,con[0])))
"""for i, name in enumerate(info[0].conditions):
subs.append(('pe%d.'%(i+1), 'pe%02d_%s.'%(i+1,name)))
for i in range(len(info[0].conditions), 256):
subs.append(('pe%d.'%(i+1), 'others/pe%02d.'%(i+1)))"""
for i in fwhm:
subs.append(('_register%d/'%(i),''))
return subs
get_substitutions = pe.Node(util.Function(input_names=['subject_id',
'cons',
'info',
'fwhm'],
output_names=['subs'], function=getsubs), name='getsubs')
# create a node to create the subject info
if not c.is_sparse:
s = pe.Node(SpecifyModel(),name='s')
else:
s = pe.Node(SpecifySparseModel(model_hrf=c.model_hrf,
stimuli_as_impulses=c.stimuli_as_impulses,
use_temporal_deriv=c.use_temporal_deriv,
volumes_in_cluster=c.volumes_in_cluster,
scan_onset=c.scan_onset,scale_regressors=c.scale_regressors),
name='s')
s.inputs.time_acquisition = c.ta
s.inputs.input_units = c.input_units
s.inputs.time_repetition = c.tr
if c.hpcutoff < 0:
c.hpcutoff = np.inf
s.inputs.high_pass_filter_cutoff = c.hpcutoff
#subjinfo = subjectinfo(subj)
# create a node to add the traditional (MCFLIRT-derived) motion regressors to
# the subject info
trad_motn = pe.Node(util.Function(input_names=['subinfo',
'files'],
output_names=['subinfo'],
function=trad_mot),
name='trad_motn')
#subjinfo = pe.Node(interface=util.Function(input_names=['subject_id','get_run_numbers'], output_names=['output'], function = c.subjectinfo), name='subjectinfo')
#subjinfo.inputs.get_run_numbers = c.get_run_numbers
#modelflow.connect(infosource,'subject_id',
# subjinfo,'subject_id' )
#modelflow.connect(subjinfo, 'output',
# trad_motn, 'subinfo')
#modelflow.connect(infosource, ('subject_id',subjectinfo), trad_motn, 'subinfo')
modelflow.connect(preproc,'datagrabber.ev_files', subjectinfo, 'run_event_files')
modelflow.connect(subjectinfo, 'info', trad_motn, 'subinfo')
    # create a node to add the principal components of the noise regressors to
# the subject info
noise_motn = pe.Node(util.Function(input_names=['subinfo',
'files',
'num_noise_components',
"use_compcor"],
output_names=['subinfo'],
function=noise_mot),
name='noise_motn')
    noise_motn.inputs.use_compcor = c.use_compcor
# generate first level analysis workflow
modelfit = create_first()
modelfit.inputs.inputspec.interscan_interval = c.interscan_interval
modelfit.inputs.inputspec.film_threshold = c.film_threshold
contrasts = pe.Node(util.Function(input_names=['con_file'], output_names=['contrasts'],function=extract_contrasts), name='getcontrasts')
#contrasts.inputs.function_str = c.contrasts
modelflow.connect(preproc,'datagrabber.contrasts',
contrasts, 'con_file')
modelflow.connect(contrasts,'contrasts', modelfit, 'inputspec.contrasts')
modelfit.inputs.inputspec.bases = c.bases
modelfit.inputs.inputspec.model_serial_correlations = True
noise_motn.inputs.num_noise_components = prep_c.num_noise_components
# make a data sink
sinkd = pe.Node(nio.DataSink(), name='sinkd')
sinkd.inputs.base_directory = os.path.join(c.sink_dir)
modelflow.connect(infosource, 'subject_id', sinkd, 'container')
#modelflow.connect(infosource, ('subject_id',getsubs, getcontrasts, subjectinfo, prep_c.fwhm), sinkd, 'substitutions')
modelflow.connect(infosource, 'subject_id', get_substitutions, 'subject_id')
modelflow.connect(contrasts, 'contrasts', get_substitutions, 'cons')
modelflow.connect(subjectinfo,'info',get_substitutions,'info')
get_substitutions.inputs.fwhm = prep_c.fwhm
modelflow.connect(get_substitutions,'subs', sinkd, 'substitutions')
sinkd.inputs.regexp_substitutions = [('mask/fwhm_%d/_threshold([0-9]*)/.*nii'%x,'mask/fwhm_%d/funcmask.nii'%x) for x in prep_c.fwhm]
sinkd.inputs.regexp_substitutions.append(('realigned/fwhm_([0-9])/_copy_geom([0-9]*)/','realigned/'))
sinkd.inputs.regexp_substitutions.append(('motion/fwhm_([0-9])/','motion/'))
sinkd.inputs.regexp_substitutions.append(('bbreg/fwhm_([0-9])/','bbreg/'))
# make connections
modelflow.connect(preproc, 'datagrabber.motion_parameters', trad_motn, 'files')
modelflow.connect(preproc, 'datagrabber.noise_components', noise_motn, 'files')
modelflow.connect(preproc, 'datagrabber.highpassed_files', s, 'functional_runs')
modelflow.connect(preproc, 'datagrabber.highpassed_files', modelfit, 'inputspec.functional_data')
modelflow.connect(preproc, 'datagrabber.outlier_files', s, 'outlier_files')
modelflow.connect(trad_motn,'subinfo', noise_motn, 'subinfo')
modelflow.connect(noise_motn,'subinfo', s, 'subject_info')
modelflow.connect(s,'session_info', modelfit, 'inputspec.session_info')
modelflow.connect(modelfit, 'outputspec.parameter_estimates', sinkd, 'modelfit.estimates')
modelflow.connect(modelfit, 'outputspec.sigmasquareds', sinkd, 'modelfit.estimates.@sigsq')
modelflow.connect(modelfit, 'outputspec.dof_file', sinkd, 'modelfit.dofs')
modelflow.connect(modelfit, 'outputspec.copes', sinkd, 'modelfit.contrasts.@copes')
modelflow.connect(modelfit, 'outputspec.varcopes', sinkd, 'modelfit.contrasts.@varcopes')
modelflow.connect(modelfit, 'outputspec.zstats', sinkd, 'modelfit.contrasts.@zstats')
modelflow.connect(modelfit, 'outputspec.tstats', sinkd, 'modelfit.contrasts.@tstats')
modelflow.connect(modelfit, 'outputspec.design_image', sinkd, 'modelfit.design')
modelflow.connect(modelfit, 'outputspec.design_cov', sinkd, 'modelfit.design.@cov')
modelflow.connect(modelfit, 'outputspec.design_file', sinkd, 'modelfit.design.@matrix')
modelflow.connect(modelfit, 'outputspec.pfiles', sinkd, 'modelfit.contrasts.@pstats')
return modelflow
mwf.workflow_function = combine_wkflw
"""
Part 5: Define the main function
"""
def main(config_file):
c = load_config(config_file, create_config)
prep_c = load_config(c.preproc_config, prep_config)
first_level = combine_wkflw(c, prep_c)
first_level.config = {'execution' : {'crashdump_dir' : c.crash_dir, "job_finished_timeout": c.timeout}}
first_level.base_dir = c.working_dir
if c.use_advanced_options:
        exec(c.advanced_options)
if c.test_mode:
first_level.write_graph()
from nipype.utils.filemanip import fname_presuffix
first_level.export(fname_presuffix(config_file,'','_script_').replace('.json',''))
if c.save_script_only:
return 0
if c.run_using_plugin:
first_level.run(plugin=c.plugin, plugin_args = c.plugin_args)
else:
first_level.run()
mwf.workflow_main_function = main
"""
Part 6: Register the Workflow
"""
register_workflow(mwf)
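# Typical entry point (sketch; the configuration filename is illustrative): once a
# JSON config has been saved from the view above, the workflow can be run with
#
#     main('first_level_config.json')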
| 43.68323
| 258
| 0.610977
|
30fee36ee591cab0b842ce988516c663f87581ea
| 1,131
|
py
|
Python
|
plgx-esp-ui/polylogyx/dao/rules_dao.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp-ui/polylogyx/dao/rules_dao.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp-ui/polylogyx/dao/rules_dao.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | 2
|
2021-11-12T10:25:02.000Z
|
2022-03-30T06:33:52.000Z
|
from polylogyx.models import db, Rule
def get_rule_by_id(rule_id):
return Rule.query.filter(Rule.id == rule_id).first()
def get_rule_name_by_id(rule_id):
    rule = Rule.query.filter(Rule.id == rule_id).first()
return rule.name
def get_all_rules():
return Rule.query.all()
def get_rule_by_name(rule_name):
return Rule.query.filter(Rule.name == rule_name).first()
def edit_rule_by_id(rule_id, name, alerters, description, conditions, status, updated_at,
                    recon_queries, severity, type_ip, tactics, technique_id):
    rule = get_rule_by_id(rule_id)
    return rule.update(name=name, alerters=alerters, description=description,
                       conditions=conditions, status=status, updated_at=updated_at,
                       recon_queries=recon_queries, severity=severity, type_ip=type_ip,
                       tactics=tactics, technique_id=technique_id)
def create_rule_object(name, alerters, description, conditions, status, type_ip, tactics,
                       technique_id, updated_at, recon_queries, severity):
    return Rule(name=name, alerters=alerters, description=description, conditions=conditions,
                status=status, type=type_ip, tactics=tactics, technique_id=technique_id,
                recon_queries=recon_queries, severity=severity)
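# Usage sketch (field values are illustrative, not taken from the project):
#
#     rule = create_rule_object(name='suspicious_login', alerters='email',
#                               description='Flag off-hours logins', conditions='{}',
#                               status='ACTIVE', type_ip='MITRE',
#                               tactics='initial-access', technique_id='T1078',
#                               updated_at=None, recon_queries='[]',
#                               severity='WARNING')
#     db.session.add(rule)
#     db.session.commit()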
| 51.409091
| 237
| 0.821397
|
7663a193b4d2f0d448037793d402dc0d0564872b
| 10,285
|
py
|
Python
|
change_detection/xbd_hi_resolution_attempt/model/damage_classification.py
|
ai4er-cdt/gtc-exposure
|
f0504d8c40c3553ba1466faef3d802ced09bd984
|
[
"RSA-MD"
] | 1
|
2022-01-05T14:27:19.000Z
|
2022-01-05T14:27:19.000Z
|
change_detection/xbd_hi_resolution_attempt/model/damage_classification.py
|
Riya0001/gtc-exposure
|
f0504d8c40c3553ba1466faef3d802ced09bd984
|
[
"RSA-MD"
] | 12
|
2020-12-11T10:12:10.000Z
|
2021-01-29T10:05:30.000Z
|
change_detection/xbd_hi_resolution_attempt/model/damage_classification.py
|
Riya0001/gtc-exposure
|
f0504d8c40c3553ba1466faef3d802ced09bd984
|
[
"RSA-MD"
] | 3
|
2020-12-10T11:25:19.000Z
|
2022-01-05T14:26:28.000Z
|
#####################################################################################################################################################################
# xView2 #
# Copyright 2019 Carnegie Mellon University. #
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO #
# WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, #
# EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, #
# TRADEMARK, OR COPYRIGHT INFRINGEMENT. #
# Released under a MIT (SEI)-style license, please see LICENSE.md or contact permission@sei.cmu.edu for full terms. #
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use #
# and distribution. #
# This Software includes and/or makes use of the following Third-Party Software subject to its own license: #
# 1. SpaceNet (https://github.com/motokimura/spacenet_building_detection/blob/master/LICENSE) Copyright 2017 Motoki Kimura. #
# DM19-0988 #
#####################################################################################################################################################################
from PIL import Image
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import math
import random
import argparse
import logging
import json
import cv2
import datetime
from sklearn.metrics import f1_score
from sklearn.utils.class_weight import compute_class_weight
import shapely.wkt
import shapely
from shapely.geometry import Polygon
from collections import defaultdict
import tensorflow as tf
import keras
import ast
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Add, Input, Concatenate
from keras.models import Model
from keras.applications.resnet50 import ResNet50
from keras import backend as K
from model import *
logging.basicConfig(level=logging.INFO)
# Configurations
NUM_WORKERS = 4
NUM_CLASSES = 4
BATCH_SIZE = 64
NUM_EPOCHS = 100
LEARNING_RATE = 0.0001
RANDOM_SEED = 123
LOG_STEP = 150
LOG_DIR = '/gws/nopw/j04/ai4er/users/jl2182/gtc-exposure/change_detection/xBD/xView2_baseline/model/tensorboard_logs' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
damage_intensity_encoding = dict()
damage_intensity_encoding[3] = '3'
damage_intensity_encoding[2] = '2'
damage_intensity_encoding[1] = '1'
damage_intensity_encoding[0] = '0'
###
# Function to compute unweighted f1 scores, just for reference
###
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
###
# Creates data generator for validation set
###
def validation_generator(test_csv, test_dir):
df = pd.read_csv(test_csv)
df = df.replace({"labels" : damage_intensity_encoding })
gen = keras.preprocessing.image.ImageDataGenerator(
rescale=1/255.)
return gen.flow_from_dataframe(dataframe=df,
directory=test_dir,
x_col='uuid',
y_col='labels',
batch_size=BATCH_SIZE,
shuffle=False,
seed=RANDOM_SEED,
class_mode="categorical",
target_size=(128, 128))
###
# Applies random transformations to training data
###
def augment_data(df, in_dir):
df = df.replace({"labels" : damage_intensity_encoding })
gen = keras.preprocessing.image.ImageDataGenerator(horizontal_flip=True,
vertical_flip=True,
width_shift_range=0.1,
height_shift_range=0.1,
rescale=1/255.)
return gen.flow_from_dataframe(dataframe=df,
directory=in_dir,
x_col='uuid',
y_col='labels',
batch_size=BATCH_SIZE,
seed=RANDOM_SEED,
class_mode="categorical",
target_size=(128, 128))
# Run training and evaluation based on existing or new model
def train_model(train_data, train_csv, test_data, test_csv, model_in, model_out):
model = generate_xBD_baseline_model()
# Add model weights if provided by user
if model_in is not None:
model.load_weights(model_in)
df = pd.read_csv(train_csv)
    class_weights = compute_class_weight('balanced', np.unique(df['labels'].to_list()), df['labels'].to_list())
d_class_weights = dict(enumerate(class_weights))
samples = df['uuid'].count()
steps = np.ceil(samples/BATCH_SIZE)
# Augments the training data
train_gen_flow = augment_data(df, train_data)
#Set up tensorboard logging
tensorboard_callbacks = keras.callbacks.TensorBoard(log_dir=LOG_DIR,
batch_size=BATCH_SIZE)
#Filepath to save model weights
filepath = model_out + "-saved-model-{epoch:02d}-{accuracy:.2f}.hdf5"
checkpoints = keras.callbacks.ModelCheckpoint(filepath,
monitor=['loss', 'accuracy'],
verbose=1,
save_best_only=False,
mode='max')
#Adds adam optimizer
adam = keras.optimizers.Adam(lr=LEARNING_RATE,
beta_1=0.9,
beta_2=0.999,
decay=0.0,
amsgrad=False)
model.compile(loss=ordinal_loss, optimizer=adam, metrics=['accuracy', f1])
#Training begins
model.fit_generator(generator=train_gen_flow,
steps_per_epoch=steps,
epochs=NUM_EPOCHS,
workers=NUM_WORKERS,
use_multiprocessing=True,
class_weight=d_class_weights,
callbacks=[tensorboard_callbacks, checkpoints],
verbose=1)
    # Evaluate the weighted f1 score on the validation set
validation_gen = validation_generator(test_csv, test_data)
predictions = model.predict(validation_gen)
val_trues = validation_gen.classes
val_pred = np.argmax(predictions, axis=-1)
f1_weighted = f1_score(val_trues, val_pred, average='weighted')
print(f1_weighted)
def main():
parser = argparse.ArgumentParser(description='Run Building Damage Classification Training & Evaluation')
parser.add_argument('--train_data',
required=True,
metavar="/path/to/xBD_train",
help="Full path to the train data directory")
parser.add_argument('--train_csv',
required=True,
metavar="/path/to/xBD_split",
help="Full path to the train csv")
parser.add_argument('--test_data',
required=True,
metavar="/path/to/xBD_test",
help="Full path to the test data directory")
parser.add_argument('--test_csv',
required=True,
metavar="/path/to/xBD_split",
help="Full path to the test csv")
parser.add_argument('--model_in',
default=None,
metavar='/path/to/input_model',
help="Path to save model")
parser.add_argument('--model_out',
required=True,
metavar='/path/to/save_model',
help="Path to save model")
args = parser.parse_args()
train_model(args.train_data, args.train_csv, args.test_data, args.test_csv, args.model_in, args.model_out)
if __name__ == '__main__':
main()
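# Example invocation (all paths are placeholders, not from the original repository):
#
#     python damage_classification.py \
#         --train_data /path/to/xBD_train --train_csv /path/to/train.csv \
#         --test_data /path/to/xBD_test --test_csv /path/to/test.csv \
#         --model_out /path/to/checkpoints/damage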
| 41.979592
| 170
| 0.537385
|
d5265de74e9d816b5bd866e7f4e0b34d39c11ddb
| 568
|
py
|
Python
|
setup.py
|
dani0805/auprico-auth
|
5965381da1d2c468f1afd532deb85c0a23c4ff93
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dani0805/auprico-auth
|
5965381da1d2c468f1afd532deb85c0a23c4ff93
|
[
"Apache-2.0"
] | 8
|
2019-12-04T22:44:16.000Z
|
2022-02-10T08:21:31.000Z
|
setup.py
|
dani0805/auprico-auth
|
5965381da1d2c468f1afd532deb85c0a23c4ff93
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
setup(
name='auprico-auth',
version='0.1',
packages=['auprico_auth'],
description='automated processes and intelligent components - auth package',
long_description=README,
author='Daniele Bernardini',
author_email='daniele.bernardini@aismart.co',
url='https://github.com/dani0805/auprico-auth/',
license='Apache',
install_requires=[
'Django>=2.1', 'auprico-core>=0.1'
]
)
| 28.4
| 80
| 0.684859
|
965530022f6062f67a9566a63afc1c4e324a69f2
| 4,367
|
py
|
Python
|
tests/test_test.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 70
|
2015-12-05T12:33:10.000Z
|
2022-03-03T04:56:58.000Z
|
tests/test_test.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 711
|
2015-10-06T11:01:48.000Z
|
2022-02-09T12:40:47.000Z
|
tests/test_test.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 37
|
2015-10-13T11:00:51.000Z
|
2022-02-08T07:28:11.000Z
|
from .utils import (
TestCase,
CustomException,
run_tests_in_session,
make_runnable_tests,
)
import itertools
import slash
class TestTest(TestCase):
"""
Test the :class:`Test` class, which is the quickest way to create test classes in Slash
"""
def test_test_class(self):
events = []
class Test(slash.Test):
def before(self):
events.append("before")
def after(self):
events.append("after")
def test_1(self):
events.append("test_1")
def test_2(self):
events.append("test_2")
with slash.Session():
tests = make_runnable_tests(Test)
for test in tests:
self.assertIsInstance(test, Test)
self.assertEqual(len(tests), 2)
tests.sort(key=lambda test: test._test_method_name) # pylint: disable=protected-access
for test in tests:
test.run()
self.assertEqual(events, ["before", "test_1", "after", "before", "test_2", "after"])
def test_before_failures(self):
"Check that exceptions during before() prevent after() from happening"
events = []
class Test(slash.Test):
def before(self):
raise CustomException()
def test(self):
events.append("test")
def after(self):
events.append("after")
with slash.Session():
[test] = make_runnable_tests(Test) # pylint: disable=unbalanced-tuple-unpacking
with self.assertRaises(CustomException):
test.run()
self.assertEqual(events, [])
def test_after_failures(self):
class Test(slash.Test):
def test(self):
assert False, "msg1"
def after(self):
assert False, "msg2"
session = run_tests_in_session(Test)
self.assertFalse(session.results.is_success())
[result] = session.results.iter_test_results()
self.assertEqual(len(result.get_failures()), 2)
def test_after_gets_called(self):
"If before() is successful, after() always gets called"
events = []
class Test(slash.Test):
def before(self):
events.append("before")
def test_1(self):
events.append("test")
raise CustomException(1)
def after(self):
events.append("after")
with slash.Session():
[test] = make_runnable_tests(Test) # pylint: disable=unbalanced-tuple-unpacking
with self.assertRaises(CustomException):
test.run()
self.assertEqual(events, ["before", "test", "after"])
class AbstractTestTest(TestCase):
def test_abstract_tests(self):
@slash.abstract_test_class
class Abstract(slash.Test):
def test1(self):
pass
def test2(self):
pass
def test3(self):
pass
with slash.Session():
self.assertEqual(list(make_runnable_tests(Abstract)), [])
class Derived(Abstract):
pass
with slash.Session():
self.assertEqual(len(list(make_runnable_tests(Derived))), 3)
class TestParametersTest(TestCase):
def test_parameters(self):
variations = []
a_values = [1, 2]
b_values = [3, 4]
c_values = [5, 6]
d_values = [7, 8]
class Parameterized(slash.Test):
@slash.parameters.iterate(a=a_values)
def before(self, a): # pylint: disable=arguments-differ
variations.append([a])
@slash.parameters.iterate(b=b_values, c=c_values)
def test(self, b, c):
variations[-1].extend([b, c])
@slash.parameters.iterate(d=d_values)
def after(self, d): # pylint: disable=arguments-differ
variations[-1].append(d)
with slash.Session():
for test in make_runnable_tests(Parameterized):
test.run()
self.assertEqual(
set(tuple(x) for x in variations),
set(itertools.product(
a_values,
b_values,
c_values,
d_values
)))
| 34.385827
| 99
| 0.54866
|
0af35e4f31a36db9e77c724c71a7a362f5801b27
| 74
|
py
|
Python
|
src/data/__init__.py
|
silasbrack/metric-laplace-approximation
|
50f82c2e0b5a0b859751c37724998f77652207ab
|
[
"MIT"
] | null | null | null |
src/data/__init__.py
|
silasbrack/metric-laplace-approximation
|
50f82c2e0b5a0b859751c37724998f77652207ab
|
[
"MIT"
] | null | null | null |
src/data/__init__.py
|
silasbrack/metric-laplace-approximation
|
50f82c2e0b5a0b859751c37724998f77652207ab
|
[
"MIT"
] | null | null | null |
from .cifar100 import CIFAR100DataModule
from .svhn import SVHNDataModule
| 24.666667
| 40
| 0.864865
|
ecc86a1f7b403f57821dde2a2b4f0619c0d6cae3
| 1,288
|
py
|
Python
|
netdissect/autoeval.py
|
mcartagenah/ganspace
|
f297c090257939dce1eef0eb87e6d9c4c19928a8
|
[
"Apache-2.0"
] | 1,711
|
2018-11-22T08:56:26.000Z
|
2022-03-30T21:03:58.000Z
|
netdissect/autoeval.py
|
mcartagenah/ganspace
|
f297c090257939dce1eef0eb87e6d9c4c19928a8
|
[
"Apache-2.0"
] | 54
|
2020-04-07T23:32:19.000Z
|
2022-03-27T15:06:26.000Z
|
netdissect/autoeval.py
|
mcartagenah/ganspace
|
f297c090257939dce1eef0eb87e6d9c4c19928a8
|
[
"Apache-2.0"
] | 284
|
2018-11-27T02:05:21.000Z
|
2021-12-19T05:41:21.000Z
|
from collections import defaultdict
from importlib import import_module
def autoimport_eval(term):
'''
Used to evaluate an arbitrary command-line constructor specifying
a class, with automatic import of global module names.
'''
class DictNamespace(object):
def __init__(self, d):
self.__d__ = d
def __getattr__(self, key):
return self.__d__[key]
class AutoImportDict(defaultdict):
def __init__(self, wrapped=None, parent=None):
super().__init__()
self.wrapped = wrapped
self.parent = parent
def __missing__(self, key):
if self.wrapped is not None:
if key in self.wrapped:
return self.wrapped[key]
if self.parent is not None:
key = self.parent + '.' + key
if key in __builtins__:
return __builtins__[key]
mdl = import_module(key)
# Return an AutoImportDict for any namespace packages
if hasattr(mdl, '__path__'): # and not hasattr(mdl, '__file__'):
return DictNamespace(
AutoImportDict(wrapped=mdl.__dict__, parent=key))
return mdl
return eval(term, {}, AutoImportDict())
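# Usage sketch (the constructor string is illustrative): module names inside the
# expression are imported on first use, so something like
#
#     obj = autoimport_eval("collections.OrderedDict(a=1)")
#
# works without importing collections in the caller's namespace.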
| 33.894737
| 76
| 0.583851
|
1de24c2c186880753c271803fbc4d3396a5ff1ab
| 1,038
|
py
|
Python
|
src/wizard/controller/frmRequiredComboValidator.py
|
UCHIC/ODMStreamingDataLoader
|
711886167a068754efff6dfbd604e3e19e545253
|
[
"BSD-3-Clause"
] | null | null | null |
src/wizard/controller/frmRequiredComboValidator.py
|
UCHIC/ODMStreamingDataLoader
|
711886167a068754efff6dfbd604e3e19e545253
|
[
"BSD-3-Clause"
] | null | null | null |
src/wizard/controller/frmRequiredComboValidator.py
|
UCHIC/ODMStreamingDataLoader
|
711886167a068754efff6dfbd604e3e19e545253
|
[
"BSD-3-Clause"
] | null | null | null |
import string
import wx
class RequiredComboValidator(wx.PyValidator):
'''
This validator will ensure that whatever it's added to gets
filled out.
'''
def __init__(self):
super(RequiredComboValidator, self).__init__()
self.Bind(wx.EVT_COMBOBOX, self.onCombo)
def Clone(self):
'''
Every validator must implement a Clone method.
'''
return RequiredComboValidator()
def Validate(self, win):
comboControl = self.GetWindow()
value = comboControl.GetSelection()
if value == wx.NOT_FOUND:
comboControl.SetBackgroundColour('#FFFAC5')
return False
return True
def onCombo(self, event):
control = self.GetWindow()
control.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
control.Refresh()
event.Skip()
return
def TransferToWindow(self):
return True
def TransferFromWindow(self):
return True
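# Minimal usage sketch (the parent window and choices are illustrative, not from
# this module): the validator is attached when the combo box is created, and
# Validate() highlights the control if nothing is selected.
#
#     combo = wx.ComboBox(parent, choices=['Logger A', 'Logger B'],
#                         style=wx.CB_READONLY,
#                         validator=RequiredComboValidator())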
| 25.317073
| 86
| 0.615607
|
8666c5515b6c4f630762311d5b9c5085d4702569
| 2,391
|
py
|
Python
|
NeuralNetwork/SchoolAdmissions/gradient.py
|
marcelaldecoa/DLND-Labs
|
2aab4c693dbb0989e19d90fd717ce88ced29a07c
|
[
"MIT"
] | null | null | null |
NeuralNetwork/SchoolAdmissions/gradient.py
|
marcelaldecoa/DLND-Labs
|
2aab4c693dbb0989e19d90fd717ce88ced29a07c
|
[
"MIT"
] | null | null | null |
NeuralNetwork/SchoolAdmissions/gradient.py
|
marcelaldecoa/DLND-Labs
|
2aab4c693dbb0989e19d90fd717ce88ced29a07c
|
[
"MIT"
] | null | null | null |
import numpy as np
from data_prep import features, targets, features_test, targets_test
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
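# Hint (not part of the original code): the derivative of the sigmoid satisfies
# sigmoid'(h) = sigmoid(h) * (1 - sigmoid(h)), so the loop below reuses the
# already-computed `output` as `error * output * (1 - output)` instead of
# calling a separate sigmoid_prime().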
# Use the same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = y - output
# The error term
        # Notice we calculate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error * output * (1 - output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
| 33.676056
| 82
| 0.66123
|
7417c6f39654076ebc0568baf67107b4d808e995
| 8,845
|
py
|
Python
|
tests/catalyst/metrics/functional/test_cmc_metric.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 206
|
2018-10-05T19:16:47.000Z
|
2019-01-19T21:10:41.000Z
|
tests/catalyst/metrics/functional/test_cmc_metric.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 20
|
2018-10-07T06:30:49.000Z
|
2019-01-17T17:26:15.000Z
|
tests/catalyst/metrics/functional/test_cmc_metric.py
|
tadejsv/catalyst
|
2553ce8fd7cecc025ad88819aea73faf8abb229b
|
[
"Apache-2.0"
] | 22
|
2018-10-06T12:34:08.000Z
|
2019-01-10T16:00:48.000Z
|
# flake8: noqa
from typing import List, Tuple
from itertools import chain
import numpy as np
import pytest
import torch
from catalyst.metrics.functional._cmc_score import (
cmc_score,
cmc_score_count,
masked_cmc_score,
)
EPS = 1e-4
TEST_DATA_SIMPLE = (
# (distance_matrix, conformity_matrix, topk, expected_value)
(torch.tensor([[1, 2], [2, 1]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.0),
(torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.5),
(torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 2, 1),
(
torch.tensor([[1, 0.5, 0.2], [2, 3, 4], [0.4, 3, 4]]),
torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
2,
1 / 3,
),
(torch.randn((10, 10)), torch.ones((10, 10)), 1, 1),
)
TEST_DATA_LESS_SMALL = (
(torch.rand((10, 10)) + torch.tril(torch.ones((10, 10))), torch.eye(10), i, i / 10)
for i in range(1, 10)
)
TEST_DATA_GREATER_SMALL = (
(
torch.rand((10, 10)) + torch.triu(torch.ones((10, 10)), diagonal=1),
torch.eye(10),
i,
i / 10,
)
for i in range(1, 10)
)
TEST_DATA_LESS_BIG = (
(
torch.rand((100, 100)) + torch.tril(torch.ones((100, 100))),
torch.eye(100),
i,
i / 100,
)
for i in range(1, 101, 10)
)
@pytest.mark.parametrize(
"distance_matrix,conformity_matrix,topk,expected", TEST_DATA_SIMPLE
)
def test_metric_count(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert np.isclose(out, expected)
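# Worked reading of the first TEST_DATA_SIMPLE case (restating the fixture, not
# new data): with distances [[1, 2], [2, 1]] each query's nearest gallery item
# lies on the diagonal, but conformity [[0, 1], [1, 0]] marks only the
# off-diagonal items as true matches, so neither query is answered within top-1
# and the score is 0.0.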
@pytest.mark.parametrize(
"distance_matrix,conformity_matrix,topk,expected",
chain(TEST_DATA_LESS_SMALL, TEST_DATA_LESS_BIG),
)
def test_metric_less(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert out - EPS <= expected
@pytest.mark.parametrize(
"distance_matrix,conformity_matrix,topk,expected", chain(TEST_DATA_GREATER_SMALL)
)
def test_metric_greater(distance_matrix, conformity_matrix, topk, expected):
"""Simple test"""
out = cmc_score_count(
distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk
)
assert out + EPS >= expected
@pytest.fixture
def generate_samples_for_cmc_score() -> List[
Tuple[float, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
]:
"""
Generate list of query and gallery data for cmc score testing.
"""
data = []
for error_rate in [
0.05,
0.1,
0.15,
0.2,
0.25,
]:
# generate params of the datasets
class_number = np.random.randint(low=2, high=10)
kq = np.random.randint(low=1000, high=1500)
kg = np.random.randint(low=500, high=1000)
def generate_samples(n_labels, samples_per_label):
samples = []
labels = []
# for each label generate dots that will be close to each other and
# distanced from samples of other classes
for i in range(n_labels):
tmp_samples = np.random.uniform(
low=2 * i, high=2 * i + 0.2, size=(samples_per_label,)
)
samples = np.concatenate((samples, tmp_samples))
labels = np.concatenate((labels, [i] * samples_per_label))
return samples.reshape((-1, 1)), labels
query_embs, query_labels = generate_samples(
n_labels=class_number, samples_per_label=kq
)
gallery_embs, gallery_labels = generate_samples(
n_labels=class_number, samples_per_label=kg
)
# spoil generated gallery dataset: for each sample from data change
# label to any other one with probability error_rate
def confuse_labels(labels, error_rate):
unique_labels = set(labels)
size = len(labels)
for i in range(size):
if np.random.binomial(n=1, p=error_rate, size=1)[0]:
labels[i] = np.random.choice(list(unique_labels - {labels[i]}))
return labels
gallery_labels = confuse_labels(gallery_labels, error_rate=error_rate)
query_embs = torch.tensor(query_embs)
gallery_embs = torch.tensor(gallery_embs)
query_labels = torch.tensor(query_labels, dtype=torch.long)
gallery_labels = torch.tensor(gallery_labels, dtype=torch.long)
data.append((error_rate, query_embs, query_labels, gallery_embs, gallery_labels))
return data
def test_cmc_score_with_samples(generate_samples_for_cmc_score):
"""
    Compute the CMC score for sets of well-separated data clusters whose labels
    are corrupted with probability error_rate.
"""
for (
error_rate,
query_embs,
query_labels,
gallery_embs,
gallery_labels,
) in generate_samples_for_cmc_score:
true_cmc_01 = 1 - error_rate
conformity_matrix = (query_labels.reshape((-1, 1)) == gallery_labels).to(
torch.bool
)
cmc = cmc_score(
query_embeddings=query_embs,
gallery_embeddings=gallery_embs,
conformity_matrix=conformity_matrix,
topk=1,
)
assert abs(cmc - true_cmc_01) <= 0.05
@pytest.mark.parametrize(
(
"query_embeddings",
"gallery_embeddings",
"conformity_matrix",
"available_samples",
"topk",
"expected",
),
(
(
torch.tensor(
[[1, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 1], [0, 0, 1, 1]]
).float(),
torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0]]).float(),
torch.tensor(
[
[True, False, False],
[True, False, False],
[False, True, True],
[False, True, True],
]
),
torch.tensor(
[
[False, True, True],
[True, True, True],
[True, False, True],
[True, True, True],
]
),
1,
0.75,
),
(
torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]).float(),
torch.tensor([[0, 1, 0], [0, 0, 1], [1, 0, 1]]).float(),
torch.tensor(
[
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True],
]
),
torch.tensor(
[
[True, True, True],
[False, True, True],
[True, False, True],
[True, True, False],
]
),
1,
0.25,
),
),
)
def test_masked_cmc_score(
query_embeddings,
gallery_embeddings,
conformity_matrix,
available_samples,
topk,
expected,
):
score = masked_cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
available_samples=available_samples,
topk=topk,
)
assert score == expected
@pytest.mark.parametrize(
(
"query_embeddings",
"gallery_embeddings",
"conformity_matrix",
"available_samples",
"topk",
),
(
(
torch.rand(size=(query_size, 32)).float(),
torch.rand(size=(gallery_size, 32)).float(),
torch.randint(low=0, high=2, size=(query_size, gallery_size)).bool(),
torch.ones(size=(query_size, gallery_size)).bool(),
k,
)
for query_size, gallery_size, k in zip(
list(range(10, 20)), list(range(25, 35)), list(range(1, 11))
)
),
)
def test_no_mask_cmc_score(
query_embeddings, gallery_embeddings, conformity_matrix, available_samples, topk
) -> None:
"""
In this test we just check that masked_cmc_score is equal to cmc_score
    when all the samples are available for scoring.
"""
masked_score = masked_cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
available_samples=available_samples,
topk=topk,
)
score = cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=conformity_matrix,
topk=topk,
)
assert masked_score == score
| 29.781145
| 89
| 0.562691
|
2474e1ff3056bb33e73940415fc89f13a98f3904
| 1,321
|
py
|
Python
|
test/test_reports_report_subreports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_reports_report_subreports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_reports_report_subreports.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.reports_report_subreports import ReportsReportSubreports
class TestReportsReportSubreports(unittest.TestCase):
""" ReportsReportSubreports unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testReportsReportSubreports(self):
"""
Test ReportsReportSubreports
"""
model = swagger_client.models.reports_report_subreports.ReportsReportSubreports()
if __name__ == '__main__':
unittest.main()
| 26.959184
| 89
| 0.74489
|
f6174a67fee2e6a0c65fc1d6ce4ba4b1887712e7
| 368
|
py
|
Python
|
src/molecule/test/scenarios/cleanup/molecule/default/tests/test_default.py
|
gardar/molecule
|
756fc33512a294ebbdb1e0de8aac2dabb642609e
|
[
"MIT"
] | 1,599
|
2015-11-18T01:40:26.000Z
|
2018-10-29T16:42:52.000Z
|
src/molecule/test/scenarios/cleanup/molecule/default/tests/test_default.py
|
gardar/molecule
|
756fc33512a294ebbdb1e0de8aac2dabb642609e
|
[
"MIT"
] | 1,232
|
2015-11-18T16:56:02.000Z
|
2018-10-27T03:51:50.000Z
|
molecule/test/scenarios/cleanup/molecule/default/tests/test_default.py
|
corserp/molecule
|
48921fa43c3c4647a3f835c79290959af945a522
|
[
"MIT"
] | 290
|
2015-11-19T18:16:41.000Z
|
2018-10-29T18:09:13.000Z
|
"""Testinfra tests."""
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
def test_hosts_file(host):
"""Validate host file."""
f = host.file("/etc/hosts")
assert f.exists
assert f.user == "root"
assert f.group == "root"
| 19.368421
| 63
| 0.692935
|
416b3a56a73aba623f2a49d4a24bde74fa343eee
| 5,172
|
py
|
Python
|
serveradmin/graphite/views.py
|
dreis2211/serveradmin
|
39b286e3df96f8c007662ff8bec1d33ed1778698
|
[
"MIT"
] | 1
|
2021-07-13T16:07:22.000Z
|
2021-07-13T16:07:22.000Z
|
serveradmin/graphite/views.py
|
dreis2211/serveradmin
|
39b286e3df96f8c007662ff8bec1d33ed1778698
|
[
"MIT"
] | null | null | null |
serveradmin/graphite/views.py
|
dreis2211/serveradmin
|
39b286e3df96f8c007662ff8bec1d33ed1778698
|
[
"MIT"
] | null | null | null |
from urllib.request import (
HTTPBasicAuthHandler,
HTTPPasswordMgrWithDefaultRealm,
build_opener
)
from django.http import (
Http404, HttpResponse, HttpResponseBadRequest, HttpResponseServerError
)
from django.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.conf import settings
from adminapi.base import DatasetError, MultiAttr
from serveradmin.graphite.models import (
GRAPHITE_ATTRIBUTE_ID,
Collection,
format_attribute_value,
)
from serveradmin.dataset import Query
@login_required # NOQA: C901
@ensure_csrf_cookie
def graph_table(request):
"""Graph table page"""
hostnames = [h for h in request.GET.getlist('hostname') if h]
if len(hostnames) == 0:
return HttpResponseBadRequest('No hostname provided')
# For convenience we will cache the servers in a dictionary.
servers = {}
for hostname in hostnames:
try:
servers[hostname] = Query({'hostname': hostname}).get()
except DatasetError:
raise Http404
# Find the collections which are related with all of the hostnames.
# If there are two collections with same match, use only the one which
# is not an overview.
collections = []
for collection in Collection.objects.order_by('overview', 'sort_order'):
if any(collection.name == c.name for c in collections):
continue
for hostname in hostnames:
if GRAPHITE_ATTRIBUTE_ID not in servers[hostname]:
break # The server hasn't got this attribute at all.
value = servers[hostname][GRAPHITE_ATTRIBUTE_ID]
assert isinstance(value, MultiAttr)
if not any(collection.name == v for v in value):
break # The server hasn't got this attribute value.
else:
collections.append(collection)
# Prepare the graph descriptions
descriptions = []
for collection in collections:
for template in collection.template_set.all():
descriptions += (
[(template.name, template.description)] * len(hostnames)
)
# Prepare the graph tables for all hosts
graph_tables = []
for hostname in hostnames:
graph_table = []
if request.GET.get('action') == 'Submit':
custom_params = request.GET.urlencode()
for collection in collections:
column = collection.graph_column(
servers[hostname], custom_params
)
graph_table += [(k, [('Custom', v)]) for k, v in column]
else:
for collection in collections:
graph_table += collection.graph_table(servers[hostname])
graph_tables.append(graph_table)
grafana_link_params = '?'
    if len(hostnames) > 1:
# Add hostname to the titles
for order, hostname in enumerate(hostnames):
graph_tables[order] = [
(k + ' on ' + hostname, v) for k, v in graph_tables[order]
]
grafana_link_params += 'var-SERVER={}&'.format(
format_attribute_value(hostname)
)
# Combine them
graph_table = []
for combined_tables in zip(*graph_tables):
graph_table += list(combined_tables)
return TemplateResponse(request, 'graphite/graph_table.html', {
'hostnames': hostnames,
'descriptions': descriptions,
'graph_table': graph_table,
'is_ajax': request.is_ajax(),
'base_template': 'empty.html' if request.is_ajax() else 'base.html',
'link': request.get_full_path(),
'from': request.GET.get('from', '-24h'),
'until': request.GET.get('until', 'now'),
'grafana_link': settings.GRAFANA_DASHBOARD + grafana_link_params
})
@login_required
@ensure_csrf_cookie
def graph(request):
"""Proxy Graphite graphs
We don't want to bother the user with authenticating to Graphite.
Instead here we download the graph using our credentials and pass
it to the user.
"""
password_mgr = HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(
None,
settings.GRAPHITE_URL,
settings.GRAPHITE_USER,
settings.GRAPHITE_PASSWORD,
)
auth_handler = HTTPBasicAuthHandler(password_mgr)
url = '{0}/render?{1}'.format(
settings.GRAPHITE_URL, request.GET.urlencode()
)
    # If the Graphite server fails, we return a proper server error to the
    # user instead of crashing. This makes little difference to the user, who
    # would get a 500 either way, but it matters for the server. We expect any
    # kind of error here, though socket errors are the most likely; Graphite
    # also has a tendency to return an empty result with a 200 status instead
    # of a proper error code.
try:
with build_opener(auth_handler).open(url) as response:
return HttpResponse(response.read(), content_type='image/png')
except IOError as error:
return HttpResponseServerError(str(error))
| 36.422535
| 76
| 0.651392
|
de1946191f13d17b3224021705be368279a2b07f
| 10,173
|
py
|
Python
|
sharppy/viz/ship.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 163
|
2015-01-05T06:57:16.000Z
|
2022-03-15T04:19:42.000Z
|
sharppy/viz/ship.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 187
|
2015-01-20T05:30:55.000Z
|
2022-03-28T17:50:38.000Z
|
sharppy/viz/ship.py
|
skovic/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
[
"BSD-3-Clause"
] | 110
|
2015-01-06T05:55:47.000Z
|
2022-03-15T18:40:21.000Z
|
import numpy as np
import os
from qtpy import QtGui, QtCore, QtWidgets
import sharppy.sharptab as tab
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
import platform
## routine written by Kelton Halbert and Greg Blumberg
## keltonhalbert@ou.edu and wblumberg@ou.edu
__all__ = ['backgroundSHIP', 'plotSHIP']
class backgroundSHIP(QtWidgets.QFrame):
'''
    Draw the background frame and lines for the SHIP plot frame
'''
def __init__(self):
super(backgroundSHIP, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
        ## such as padding, width, height, and
## min/max plot axes
self.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 1px;"
" border-style: solid;"
" border-color: #3399CC;}")
self.textpad = 5
self.font_ratio = 0.0512
fsize1 = round(self.size().height() * self.font_ratio) + 2
fsize2 = round(self.size().height() * self.font_ratio)
self.plot_font = QtGui.QFont('Helvetica', fsize1 )
self.box_font = QtGui.QFont('Helvetica', fsize2)
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.box_metrics = QtGui.QFontMetrics(self.box_font)
if platform.system() == "Windows":
fsize1 -= self.plot_metrics.descent()
fsize2 -= self.box_metrics.descent()
self.plot_font = QtGui.QFont('Helvetica', fsize1 )
self.box_font = QtGui.QFont('Helvetica', fsize2)
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.box_metrics = QtGui.QFontMetrics(self.box_font)
self.plot_height = self.plot_metrics.xHeight()# + self.textpad
self.box_height = self.box_metrics.xHeight() + self.textpad
self.tpad = self.plot_height + 15;
self.bpad = self.plot_height + 2
self.lpad = 0.; self.rpad = 0.
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad;
self.brx = self.wid;
self.bry = self.hgt - self.bpad#+ round(self.font_ratio * self.hgt)
self.shipmax = 5.; self.shipmin = 0.
self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
self.plotBitMap.fill(self.bg_color)
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized
'''
self.initUI()
def plotBackground(self):
'''
Handles painting the frame.
'''
## initialize a painter object and draw the frame
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
self.draw_frame(qp)
qp.end()
def draw_frame(self, qp):
'''
Draw the background frame.
qp: QtGui.QPainter object
'''
## set a new pen to draw with
pen = QtGui.QPen(self.fg_color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.plot_font)
rect1 = QtCore.QRectF(1.5,6, self.brx, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'Significant Hail Param (SHIP)')
pen = QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)
qp.setPen(pen)
spacing = self.bry / 6.
ytick_fontsize = round(self.font_ratio * self.hgt)
y_ticks_font = QtGui.QFont('Helvetica', ytick_fontsize)
qp.setFont(y_ticks_font)
ship_inset_data = inset_data.shipData()
texts = ship_inset_data['ship_ytexts']
for i in range(len(texts)):
pen = QtGui.QPen(self.line_color, 1, QtCore.Qt.DashLine)
qp.setPen(pen)
try:
qp.drawLine(self.tlx, self.ship_to_pix(int(texts[i])), self.brx, self.ship_to_pix(int(texts[i])))
except:
continue
color = QtGui.QColor(self.bg_color)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
ypos = spacing*(i+1) - (spacing/4.)
ypos = self.ship_to_pix(int(texts[i])) - ytick_fontsize/2
rect = QtCore.QRect(self.tlx, ypos, 20, ytick_fontsize)
pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
ef = ship_inset_data['ship_dist']
width = self.brx / 3.7
spacing = self.brx / 3
center = np.arange(spacing, self.brx, spacing)
texts = ship_inset_data['ship_xtexts']
ef = self.ship_to_pix(ef)
qp.setFont(QtGui.QFont('Helvetica', round(self.font_ratio * self.hgt)))
for i in range(ef.shape[0]):
# Set green pen to draw box and whisker plots
pen = QtGui.QPen(self.box_color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
# Draw lower whisker
qp.drawLine(center[i], ef[i,0], center[i], ef[i,1])
# Draw box
qp.drawLine(center[i] - width/2., ef[i,3], center[i] + width/2., ef[i,3])
qp.drawLine(center[i] - width/2., ef[i,1], center[i] + width/2., ef[i,1])
qp.drawLine(center[i] - width/2., ef[i,1], center[i] - width/2., ef[i,3])
qp.drawLine(center[i] + width/2., ef[i,1], center[i] + width/2., ef[i,3])
# Draw median
#qp.drawLine(center[i] - width/2., ef[i,2], center[i] + width/2., ef[i,2])
# Draw upper whisker
qp.drawLine(center[i], ef[i,3], center[i], ef[i,4])
# Set black transparent pen to draw a rectangle
color = QtGui.QColor(self.bg_color)
color.setAlpha(0)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
rect = QtCore.QRectF(center[i] - width/2., self.bry + self.bpad/2, width, self.bpad)
# Change to a white pen to draw the text below the box and whisker plot
pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
def ship_to_pix(self, ship):
scl1 = self.shipmax - self.shipmin
scl2 = self.shipmin + ship
return self.bry - (scl2 / scl1) * (self.bry - self.tpad)
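    # Worked example (values are illustrative, not from the code): with
    # shipmin=0, shipmax=5, tpad=20 and bry=220, ship_to_pix(2.5) gives
    # 220 - (2.5 / 5.0) * (220 - 20) = 120, i.e. halfway up the plot area.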
class plotSHIP(backgroundSHIP):
'''
Plot the data on the frame. Inherits the background class that
plots the frame.
'''
def __init__(self):
self.bg_color = QtGui.QColor('#000000')
self.fg_color = QtGui.QColor('#ffffff')
self.box_color = QtGui.QColor('#00ff00')
self.line_color = QtGui.QColor('#0080ff')
self.alert_colors = [
QtGui.QColor('#775000'),
QtGui.QColor('#996600'),
QtGui.QColor('#ffffff'),
QtGui.QColor('#ffff00'),
QtGui.QColor('#ff0000'),
QtGui.QColor('#e700df'),
]
super(plotSHIP, self).__init__()
self.prof = None
def setProf(self, prof):
self.prof = prof
self.ship = prof.ship
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def setPreferences(self, update_gui=True, **prefs):
self.bg_color = QtGui.QColor(prefs['bg_color'])
self.fg_color = QtGui.QColor(prefs['fg_color'])
self.box_color = QtGui.QColor(prefs['stp_box_color'])
self.line_color = QtGui.QColor(prefs['stp_line_color'])
self.alert_colors = [
QtGui.QColor(prefs['alert_l1_color']),
QtGui.QColor(prefs['alert_l2_color']),
QtGui.QColor(prefs['alert_l3_color']),
QtGui.QColor(prefs['alert_l4_color']),
QtGui.QColor(prefs['alert_l5_color']),
QtGui.QColor(prefs['alert_l6_color']),
]
if update_gui:
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def resizeEvent(self, e):
'''
Handles when the window is resized
'''
super(plotSHIP, self).resizeEvent(e)
self.plotData()
def paintEvent(self, e):
super(plotSHIP, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(1, 1, self.plotBitMap)
qp.end()
def clearData(self):
'''
Handles the clearing of the pixmap
in the frame.
'''
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.plotBitMap.fill(self.bg_color)
def plotData(self):
'''
Handles painting on the frame
'''
if self.prof is None:
return
## this function handles painting the plot
## create a new painter obkect
qp = QtGui.QPainter()
self.draw_ship(qp)
def draw_ship(self, qp):
if not tab.utils.QC(self.ship):
return
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
if self.ship < self.shipmin:
self.ship = self.shipmin
elif self.ship > self.shipmax:
self.ship = self.shipmax
color = self.ship_color(self.ship)
ef = self.ship_to_pix(self.ship)
pen = QtGui.QPen(color, 1.5, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(0, ef, self.wid, ef)
qp.end()
def ship_color(self, ship):
color_list = self.alert_colors
if float(ship) >= 5:
color = color_list[5]
elif float(ship) >= 2:
color = color_list[4]
elif float(ship) >= 1:
color = color_list[3]
elif float(ship) >= .5:
color = color_list[2]
else:
color = color_list[0]
return color
if __name__ == '__main__':
    app_frame = QtWidgets.QApplication([])
tester = plotSHIP()
tester.show()
app_frame.exec_()
| 35.694737
| 113
| 0.578394
|
bc810abe075ca43d20d03f6dab9590064a1aff50
| 4,424
|
py
|
Python
|
userbot/modules/quotly.py
|
Musthofaalfian/toxic
|
1663255a586d6a0119cb94b670f927c30f694f91
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/quotly.py
|
Musthofaalfian/toxic
|
1663255a586d6a0119cb94b670f927c30f694f91
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/quotly.py
|
Musthofaalfian/toxic
|
1663255a586d6a0119cb94b670f927c30f694f91
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Port From UniBorg to UserBot by MoveAngel
import random
import requests
from asyncio.exceptions import TimeoutError
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from userbot import CMD_HELP, bot
from userbot.events import register
if 1 == 1:
strings = {
"name": "Quotes",
"api_token_cfg_doc": "API Key/Token for Quotes.",
"api_url_cfg_doc": "API URL for Quotes.",
"colors_cfg_doc": "Username colors",
"default_username_color_cfg_doc": "Default color for the username.",
"no_reply": "You didn't reply to a message.",
"no_template": "You didn't specify the template.",
"delimiter": "</code>, <code>",
"server_error": "Server error. Please report to developer.",
"invalid_token": "You've set an invalid token, get it from `http://antiddos.systems`.",
"unauthorized": "You're unauthorized to do this.",
"not_enough_permissions": "Wrong template. You can use only the default one.",
"templates": "Available Templates: <code>{}</code>",
"cannot_send_stickers": "You cannot send stickers in this chat.",
"admin": "admin",
"creator": "creator",
"hidden": "hidden",
"channel": "Channel"}
config = {"api_url": "http://api.antiddos.systems",
"username_colors": ["#fb6169", "#faa357", "#b48bf2", "#85de85",
"#62d4e3", "#65bdf3", "#ff5694"],
"default_username_color": "#b48bf2"}
@register(outgoing=True, pattern=r"^\.q")
async def quotess(qotli):
if qotli.fwd_from:
return
if not qotli.reply_to_msg_id:
return await qotli.edit("```Balas di Pesan GANTENG!!.```")
reply_message = await qotli.get_reply_message()
if not reply_message.text:
return await qotli.edit("```Balas di Pesan GANTENG!!```")
chat = "@QuotLyBot"
if reply_message.sender.bot:
return await qotli.edit("```Balas di Pesan GANTENG!!.```")
await qotli.edit("```Pim Pim Pom Jadi tikel duar pantekk......```")
try:
async with bot.conversation(chat) as conv:
try:
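                # subscribe to @QuotLyBot's reply before forwarding the
                # message so the response event cannot be missed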
response = conv.wait_event(
events.NewMessage(
incoming=True,
from_users=1031952739))
msg = await bot.forward_messages(chat, reply_message)
response = await response
""" - don't spam notif - """
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
return await qotli.reply("```Please unblock @QuotLyBot and try again```")
if response.text.startswith("Hi!"):
await qotli.edit("```Can you kindly disable your forward privacy settings for good?```")
else:
await qotli.delete()
await bot.forward_messages(qotli.chat_id, response.message)
await bot.send_read_acknowledge(qotli.chat_id)
""" - cleanup chat after completed - """
await qotli.client.delete_messages(conv.chat_id,
[msg.id, response.id])
except TimeoutError:
        await qotli.edit("```Error: @QuotLyBot is not responding.```")
@register(outgoing=True, pattern="^.xquote(?: |$)(.*)")
async def quote_search(event):
if event.fwd_from:
return
await event.edit("Processing...")
search_string = event.pattern_match.group(1)
input_url = "https://bots.shrimadhavuk.me/Telegram/GoodReadsQuotesBot/?q={}".format(
search_string)
headers = {"USER-AGENT": "UniBorg"}
try:
response = requests.get(input_url, headers=headers).json()
except BaseException:
response = None
if response is not None:
result = random.choice(response).get(
"input_message_content").get("message_text")
else:
result = None
if result:
await event.edit(result.replace("<code>", "`").replace("</code>", "`"))
else:
await event.edit("Zero results found")
CMD_HELP.update({
"quotly":
"`.q`\
\nUsage: Enhance ur text to sticker.\
\n\n`.xquote`\
\nUsage: Enhance ur text to stickers."
})
| 38.807018
| 104
| 0.605108
|
1489d40e03eb36d9d00625138e1da9c41d5245a1
| 2,086
|
py
|
Python
|
yt/data_objects/tests/test_ellipsoid.py
|
semeraro/yt
|
0511823034e63a9cde02372b1258225b527cb154
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-15T08:17:43.000Z
|
2021-09-15T08:17:43.000Z
|
yt/data_objects/tests/test_ellipsoid.py
|
semeraro/yt
|
0511823034e63a9cde02372b1258225b527cb154
|
[
"BSD-3-Clause-Clear"
] | 2
|
2021-09-15T16:10:39.000Z
|
2021-09-16T14:23:31.000Z
|
yt/data_objects/tests/test_ellipsoid.py
|
stonnes/yt
|
aad3cfa3b4ebab7838352ab467275a27c26ff363
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-04-21T07:01:51.000Z
|
2021-04-21T07:01:51.000Z
|
import numpy as np
from yt.testing import assert_array_less, fake_random_ds
def setup():
from yt.config import ytcfg
ytcfg["yt", "log_level"] = 50
ytcfg["yt", "internals", "within_testing"] = True
def _difference(x1, x2, dw):
rel = x1 - x2
rel[rel > dw / 2.0] -= dw
rel[rel < -dw / 2.0] += dw
return rel
def test_ellipsoid():
# We decompose in different ways
cs = [
np.array([0.5, 0.5, 0.5]),
np.array([0.1, 0.2, 0.3]),
np.array([0.8, 0.8, 0.8]),
]
np.random.seed(int(0x4D3D3D3))
for nprocs in [1, 2, 4, 8]:
ds = fake_random_ds(64, nprocs=nprocs)
DW = ds.domain_right_edge - ds.domain_left_edge
min_dx = 2.0 / ds.domain_dimensions
ABC = np.random.random((3, 12)) * 0.1
e0s = np.random.random((3, 12))
tilts = np.random.random(12)
ABC[:, 0] = 0.1
for i in range(12):
for c in cs:
A, B, C = reversed(sorted(ABC[:, i]))
A = max(A, min_dx[0])
B = max(B, min_dx[1])
C = max(C, min_dx[2])
e0 = e0s[:, i]
tilt = tilts[i]
ell = ds.ellipsoid(c, A, B, C, e0, tilt)
assert_array_less(ell["radius"], A)
p = np.array([ell[ax] for ax in "xyz"])
dot_evec = [np.zeros_like(ell["radius"]) for i in range(3)]
vecs = [ell._e0, ell._e1, ell._e2]
mags = [ell._A, ell._B, ell._C]
my_c = np.array([c] * p.shape[1]).transpose()
dot_evec = [de.to_ndarray() for de in dot_evec]
mags = [m.to_ndarray() for m in mags]
for ax_i in range(3):
dist = _difference(p[ax_i, :], my_c[ax_i, :], DW[ax_i])
for ax_j in range(3):
dot_evec[ax_j] += dist * vecs[ax_j][ax_i]
dist = 0
for ax_i in range(3):
dist += dot_evec[ax_i] ** 2.0 / mags[ax_i] ** 2.0
assert_array_less(dist, 1.0)
| 34.196721
| 75
| 0.477948
|
6f301367e51ffd502026ca6bfbf19bdb0e3da6c9
| 3,615
|
py
|
Python
|
auto_push.py
|
ariua91/auto-push-to-vd
|
7fbc58a0feb952d96adf9965c3b5097258761bb2
|
[
"MIT"
] | null | null | null |
auto_push.py
|
ariua91/auto-push-to-vd
|
7fbc58a0feb952d96adf9965c3b5097258761bb2
|
[
"MIT"
] | 1
|
2021-06-01T23:15:58.000Z
|
2021-06-01T23:15:58.000Z
|
auto_push.py
|
ariua91/auto-push-to-vd
|
7fbc58a0feb952d96adf9965c3b5097258761bb2
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import requests
from secret_keys import ENV, CLIENT_ID, CLIENT_SECRET, COOKIE, RANDOM_TASK_ID
LOGFN = 'logs/' + datetime.strftime(datetime.utcnow() + timedelta(hours=8),
'%y-%U-logs.txt')
VERBOSE = True
def write_to_log(logfn, logtext):
with open(logfn, 'a') as f:
f.write(datetime.strftime(
datetime.utcnow() + timedelta(hours=8),
'%y-%m-%dT%H:%M:%S+08:00 | '
))
f.write(logtext + '\n')
def get_times(tzoffset):
'''
Given a tzoffset int in hours (6hours for this client)
returns:
- FROM time object
- TO time object
- SUBMIT ALL DATE
KNOWN BUGS: Negative Time Zones
'''
current_dt = datetime.utcnow() + timedelta(hours=tzoffset)
return (datetime.strftime(current_dt,
'%Y-%m-%dT06:00:00+08:00'),
datetime.strftime(current_dt + timedelta(days=1),
'%Y-%m-%dT05:59:59+08:00'),
datetime.strftime(current_dt + timedelta(days=1),
'%Y-%m-%d')
)
def get_num_tasks(varibales, time_from, time_to, per_pg, archived):
'''
Get all tasks
'''
env, c_id, c_secret = variables['env'], variables['c_id'], variables['c_secret']
pg = 1
acted_on, total_tasks = 0, 999999999
while acted_on < total_tasks:
tmp = requests.get(
"https://{!s}.versafleet.co/api/tasks?"
"client_id={!s}&client_secret={!s}"
"&page={!s}&per_page={!s}"
"&state=assigned"
"&from_datetime={!s}&to_datetime={!s}&archived={!s}".format(
env,
c_id,
c_secret,
pg,
per_pg,
time_from,
time_to,
archived
)
)
acted_on += len(tmp.json()['tasks'])
total_tasks = tmp.json()['meta']['total']
pg += 1
if VERBOSE:
print "GOT {!s} / {!s} tasks".format(acted_on, total_tasks)
print tmp
return acted_on
def push_vd(variables, push_date):
'Push push_date tasks to VD'
env, cookie = (
variables['env'],
variables['cookie']
)
tmp = requests.put(
'https://{}.versafleet.co/tasks/submit_all'.format(env),
json= {"date":push_date},
headers={"cookie":cookie}
)
return tmp
def random_get(variables):
'Do a random GET to refresh cookie'
env, cookie, rand_task_id = (
variables['env'],
variables['cookie'],
variables['rand_task_id']
)
tmp = requests.get(
'https://{!s}.versafleet.co/tasks/{!s}'.format(env, rand_task_id),
headers={"cookie":cookie}
)
return tmp
variables = {
'env': ENV,
'c_id': CLIENT_ID,
'c_secret': CLIENT_SECRET,
'cookie': COOKIE,
'rand_task_id': RANDOM_TASK_ID
}
time_from, time_to, push_date = get_times(2)
task_cnt = get_num_tasks(variables, time_from, time_to, 20, 0)
if task_cnt > 0:
tmp = push_vd(variables, push_date)
if VERBOSE:
print "SUBMIT ALL PUT"
write_to_log(LOGFN,
"Num Assigned: {!s}. Made a SUBMIT ALL call. Status:{!s}".format(
task_cnt, tmp.status_code
))
else:
tmp = random_get(variables)
if VERBOSE:
print "Just a GET to refresh"
write_to_log(LOGFN,
"Num Assigned: {!s}. GET to refresh. Status:{!s}".format(
task_cnt, tmp.status_code
))
| 27.180451
| 84
| 0.541355
|
837059149243e65f4207d9d88cd8c335b99fc501
| 912
|
py
|
Python
|
icon_validator/rules/workflow_validators/workflow_profanity_validator.py
|
rapid7/icon-integrations-validators
|
673e588f8c6aa02bdb6c5e82556fdc59fe3a7280
|
[
"MIT"
] | 6
|
2020-11-10T03:07:00.000Z
|
2022-02-24T18:07:57.000Z
|
icon_validator/rules/workflow_validators/workflow_profanity_validator.py
|
rapid7/icon-integrations-validators
|
673e588f8c6aa02bdb6c5e82556fdc59fe3a7280
|
[
"MIT"
] | 17
|
2020-01-21T16:02:04.000Z
|
2022-01-12T15:11:26.000Z
|
icon_validator/rules/workflow_validators/workflow_profanity_validator.py
|
rapid7/icon-integrations-validators
|
673e588f8c6aa02bdb6c5e82556fdc59fe3a7280
|
[
"MIT"
] | 2
|
2020-12-26T11:33:23.000Z
|
2021-09-30T22:22:43.000Z
|
from icon_validator.rules.validator import KomandPluginValidator
from icon_validator.exceptions import ValidationException
from icon_validator.rules.lists.lists import profanity_list
class WorkflowProfanityValidator(KomandPluginValidator):
@staticmethod
def validate_profanity(spec):
"""
Check that yaml and help do not contain banned words.
"""
raw_spec = spec.raw_spec()
spec_words = raw_spec.split()
for word in spec_words:
if word in profanity_list:
raise ValidationException(f"{spec.spec_file_name} contains banned word: {word}.")
help_lst = spec.raw_help().split()
for word in help_lst:
if word in profanity_list:
raise ValidationException(f"help.md contains banned word: {word}.")
def validate(self, spec):
WorkflowProfanityValidator.validate_profanity(spec)
| 33.777778
| 97
| 0.690789
|
f252070393abf13f19a83accd551841a228e4a35
| 2,711
|
py
|
Python
|
mutagene/profiles/profile.py
|
neksa/pymutagene
|
1122d64a5ab843a4960124933f78f3c2e388a792
|
[
"CC0-1.0"
] | 3
|
2020-05-18T07:00:46.000Z
|
2022-02-20T02:55:48.000Z
|
mutagene/profiles/profile.py
|
neksa/pymutagene
|
1122d64a5ab843a4960124933f78f3c2e388a792
|
[
"CC0-1.0"
] | 31
|
2020-03-13T16:28:34.000Z
|
2021-02-27T22:12:15.000Z
|
mutagene/profiles/profile.py
|
neksa/pymutagene
|
1122d64a5ab843a4960124933f78f3c2e388a792
|
[
"CC0-1.0"
] | 3
|
2020-03-24T20:01:44.000Z
|
2020-11-26T17:30:39.000Z
|
# from collections import defaultdict
# from mutagene.dna import complementary_nucleotide
from collections import Counter
from operator import add
from functools import reduce
import numpy as np
from numpy.random import multinomial
from sklearn.utils import resample
from mutagene.io.profile import write_profile_file, get_profile_attributes_dict
from mutagene.io.mutations_profile import read_auto_profile
import logging
logger = logging.getLogger(__name__)
def calc_profile(infile, outfile, genome, fmt='auto'):
all_mutations = {}
for f in infile:
mutations, processing_stats = read_auto_profile(f, fmt=fmt, asm=genome)
msg = "Loaded {} mutations".format(processing_stats['loaded'])
if processing_stats['skipped'] > 0:
msg += " skipped {} mutations due to mismatches with the reference genome".format(processing_stats['skipped'])
logger.info(msg)
all_mutations = {k: all_mutations.get(k, 0) + mutations.get(k, 0) for k in set(all_mutations) | set(mutations)}
if sum(all_mutations.values()) == 0:
        logger.warning('Cannot create profile')
return
profile = get_mutational_profile(all_mutations, counts=True)
write_profile_file(outfile, profile)
def get_mutational_profile(mutational_profile_dict, counts=False):
attrib = get_profile_attributes_dict()
values = []
total_mut_number = sum(mutational_profile_dict.values())
for i, attr in enumerate(attrib):
number = mutational_profile_dict.get(attr['context'] + attr['mutation'], 0)
# freq = 0.000001 * number / total_mut_number
if counts:
freq = number
else:
freq = number / float(total_mut_number)
# trinucleotide = attr['context'][0] + attr['mutation'][0] + attr['context'][1]
# trinucleotide_freq = exome_trinucleotide_freq[trinucleotide]
# values.append(3.0 * freq / trinucleotide_freq)
values.append(freq)
return values
def get_multisample_mutational_profile(samples_mutations, counts=False):
samples_profiles = {}
for sample, mutations in samples_mutations.items():
samples_profiles[sample] = get_mutational_profile(mutations, counts)
return samples_profiles
def get_pooled_multisample_mutational_profile(samples_mutations, counts=False):
mutational_profile_dict = reduce(add, (Counter(dict(mutations)) for mutations in samples_mutations.values()))
return get_mutational_profile(mutational_profile_dict, counts)
def generate_resampled_profiles(profile, k):
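    # Draw k resampled profiles from a multinomial distribution that uses the
    # observed profile as probabilities, preserving the total mutation count N.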
profile = np.array(profile)
N = np.sum(profile)
new_profiles = multinomial(N, profile / N, size=k)
for i in range(k):
yield new_profiles[i]
| 37.136986
| 122
| 0.722243
|
7bf84dd64131d00e60a5ff0f65ee26562715a947
| 6,141
|
py
|
Python
|
raspberry-pi-env/default_methods.py
|
kb2623/raspi-trash-bin
|
8b695a167eddbb9f189cd1663af8158ad61e421a
|
[
"MIT"
] | null | null | null |
raspberry-pi-env/default_methods.py
|
kb2623/raspi-trash-bin
|
8b695a167eddbb9f189cd1663af8158ad61e421a
|
[
"MIT"
] | null | null | null |
raspberry-pi-env/default_methods.py
|
kb2623/raspi-trash-bin
|
8b695a167eddbb9f189cd1663af8158ad61e421a
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime
from time import sleep
from multiprocessing import Lock
import numpy as np
import gpiozero as gpio
from default_server import data_dir
# LED diodes
led1, led2 = gpio.RGBLED(red=9, green=10, blue=11), gpio.RGBLED(red=16, green=20, blue=21)
# Multi-function buttons
button1, button2, button3 = gpio.Button(3), gpio.Button(4), gpio.Button(17)
# Lid state switches (open/closed)
buttonPO, buttonPZ = gpio.Button(2), gpio.Button(26)
# Obstacle detection while driving (front/rear)
buttonOZ, buttonOS = gpio.Button(14), gpio.Button(15)
# Distance sensor
distance_sensor = gpio.DistanceSensor(23, 24)
# Motor for moving the lid
lid_motor = gpio.Motor(forward=27, backward=22)
# Motors for moving the bin
robot = gpio.Robot(left=(5, 6), right=(13, 19))
# Lock for the lid
lid_lock = Lock()
# Lock for movement
robot_lock = Lock()
# Maximum motor speed in m/s
motor_max_speed = .5
# Radius between the wheels in m
wheel_radius = .2
# Depth of the bin in meters
height = 2
# Date format
date_format = '%b-%d-%Y %H:%M:%S'
# Logs
log_out = ''
log_err = ''
def prut_fun(fun, rep_time=18000, *args, **kwargs):
r"""Utility function for creating preriodic rutines.
Args:
fun (Callable[[list, dict], None]): Function for periodic activation.
		rep_time (float): Time in seconds to wait between repeated calls.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
while True:
fun(*args, **kwargs)
sleep(rep_time)
def open_lid(*args, **kwargs):
r"""Open lid of garbage can.
Args:
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
lid_lock.acquire()
if not buttonPZ.is_pressed:
lid_motor.backward()
buttonPZ.wait_for_press()
lid_motor.stop()
lid_motor.forward()
buttonPO.wait_for_press()
lid_motor.stop()
lid_lock.release()
def close_lid(*args, **kwargs):
r"""Close lid of garbage can.
Args:
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
lid_lock.acquire()
if not buttonPO.is_pressed:
lid_motor.forward()
buttonPO.wait_for_press()
lid_motor.stop()
lid_motor.backward()
buttonPZ.wait_for_press()
lid_motor.stop()
lid_lock.release()
def __move_time(distance, speed):
r"""Get time to apply the movement.
Args:
		distance (float): Distance in meters.
speed (float): Speed of movement in meters per second.
"""
return distance / speed
def move_forward(distance, *args, **kwargs):
r"""Move the robot forward for some distance in meters.
Args:
distance (float): Distance in meters to move the robot.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
Keyword Arguments:
		speed (float): Speed ratio in [0, 1].
"""
speed_ratio = kwargs.get('speed', 1)
speed = speed_ratio * motor_max_speed
time_of_movement = __move_time(distance, speed)
robot_lock.acquire()
robot.forward(speed_ratio)
sleep(time_of_movement)
robot.stop()
robot_lock.release()
def move_backword(distance, *args, **kwargs):
r"""Move the robot backword.
Args:
distance (float): Distance in meters to move the robot.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
Keyword Arguments:
		speed (float): Speed ratio in [0, 1].
"""
speed_ratio = kwargs.get('speed', 1)
speed = speed_ratio * motor_max_speed
time_of_movement = __move_time(distance, speed)
robot_lock.acquire()
robot.backward(speed_ratio)
sleep(time_of_movement)
robot.stop()
robot_lock.release()
def __rotation_distance(degrees):
r"""Get the distance of rotation.
Args:
		degrees (float): Degrees to rotate the object.
	Returns:
		float: Distance in meters.
"""
return (2 * np.pi * wheel_radius * degrees) / 360
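# Example: with wheel_radius = 0.2 m, a 90 degree turn corresponds to an arc of
# 2 * pi * 0.2 * 90 / 360 ~= 0.314 m travelled by the wheels.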
def rotate_right(degrees, *args, **kwargs):
r"""Rotate the robot clock wise.
Args:
degrees (float): Rotation in degrees.
*args (list): Additional arguments.
**kwargs (dict): Keyword arguments.
Keyword Arguments:
		speed (float): Speed ratio in [0, 1].
"""
speed_ratio = kwargs.get('speed', 1)
speed = speed_ratio * motor_max_speed
distance = __rotation_distance(degrees)
time_of_movement = __move_time(distance, speed)
robot_lock.acquire()
robot.right(speed_ratio)
sleep(time_of_movement)
robot.stop()
robot_lock.release()
def rotate_left(degrees, *args, **kwargs):
r"""Rotate the robot counter clock wise.
Args:
degrees (float): Rotation in degrees.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
Keyword Arguments:
		speed (float): Speed ratio in [0, 1].
"""
speed_ratio = kwargs.get('speed', 1)
speed = speed_ratio * motor_max_speed
distance = __rotation_distance(degrees)
time_of_movement = __move_time(distance, speed)
robot_lock.acquire()
robot.left(speed_ratio)
sleep(time_of_movement)
robot.stop()
robot_lock.release()
def mesure_garbage(*args, **kwargs):
r"""Get the distance mesured from lid to garbage.
Args:
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
Returns:
		float: Distance from lid to garbage, as a fraction of the bin depth.
"""
return distance_sensor.distance / height
def current_milli_time():
r"""Get current time in miliseconds.
Returns:
int: Current time in miliseconds.
"""
return datetime.now().microsecond
def mesure_garbage_rut(rep_time=18000, file_prefix='garbage_', format_tmp='#DATETIME# #GARBAGE#', *args, **kwargs):
r"""Periodicy Mesure garbage in the trashcan.
Args:
file_prefix (str): Prefix for file names of the garbage collection.
args (list): Additional arguments.
kwargs (dict): Keyword arguments.
"""
	today = datetime.now()
mdate, time, formated_date = today.strftime('%d/%m/%Y'), today.strftime('%H:%M:%S'), today.strftime(date_format)
file_name = '%s_%d' % (file_prefix, current_milli_time())
data = '%s' % format_tmp
data = data.replace('#FILENAME#', file_name)
data = data.replace('#DATE#', mdate)
data = data.replace('#TIME#', time)
	data = data.replace('#DATETIME#', '%s %s' % (mdate, time))
data = data.replace('#FORMATEDDATE#', formated_date)
	data = data.replace('#GARBAGE#', str(mesure_garbage()))
with open(file_name, 'w') as file: file.write(data)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 24.762097
| 115
| 0.721218
|
1651ddd6b04a74f30787fa1575dbb8209e1e87c5
| 2,106
|
py
|
Python
|
django_pluggableappsettings/tests/travis_ci.py
|
kocunop/django-pluggableappsettings
|
d4fd7c4b1c979d46cc8590f85fd0504107ae33e5
|
[
"MIT"
] | null | null | null |
django_pluggableappsettings/tests/travis_ci.py
|
kocunop/django-pluggableappsettings
|
d4fd7c4b1c979d46cc8590f85fd0504107ae33e5
|
[
"MIT"
] | null | null | null |
django_pluggableappsettings/tests/travis_ci.py
|
kocunop/django-pluggableappsettings
|
d4fd7c4b1c979d46cc8590f85fd0504107ae33e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
__author__ = 'Tim Schneider <tim.schneider@northbridge-development.de>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "mail@northbridge-development.de"
__status__ = "Development"
logger = logging.getLogger(__name__)
import glob
import os
import sys
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, os.path.abspath(BASE_DIR))
import django
from django.conf import settings
from django.core.management import execute_from_command_line
# Unfortunately, apps can not be installed via ``modify_settings``
# decorator, because it would miss the database setup.
INSTALLED_APPS = (
)
settings.configure(
SECRET_KEY="django_tests_secret_key",
DEBUG=False,
TEMPLATE_DEBUG=False,
ALLOWED_HOSTS=[],
INSTALLED_APPS=INSTALLED_APPS,
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
LANGUAGE_CODE='en-us',
TIME_ZONE='UTC',
USE_I18N=True,
USE_L10N=True,
USE_TZ=True,
STATIC_URL='/static/',
# Use a fast hasher to speed up tests.
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
FIXTURE_DIRS=glob.glob(BASE_DIR + '/' + '*/fixtures/'),
SETTING_THAT_WE_CAN_TEST=object()
)
try:
# Django >=1.7 needs this, but other versions dont.
django.setup()
except AttributeError:
pass
args = [sys.argv[0], 'test']
# Current module (``tests``) and its submodules.
test_cases = '.'
# Allow accessing test options from the command line.
offset = 1
try:
sys.argv[1]
except IndexError:
pass
else:
option = sys.argv[1].startswith('-')
if not option:
test_cases = sys.argv[1]
offset = 2
args.append(test_cases)
# ``verbosity`` can be overwritten from command line.
#args.append('--verbosity=2')
args.extend(sys.argv[offset:])
execute_from_command_line(args)
| 23.931818
| 80
| 0.694682
|
af295c5a9cfec2b61f6a63aded7cc2c7e89274dc
| 1,706
|
py
|
Python
|
tests/bootstrap/success/filtering_tests.py
|
varajala/microtest
|
f8399fc6eeab2fcbabba09716f9808311c056ab6
|
[
"MIT"
] | null | null | null |
tests/bootstrap/success/filtering_tests.py
|
varajala/microtest
|
f8399fc6eeab2fcbabba09716f9808311c056ab6
|
[
"MIT"
] | null | null | null |
tests/bootstrap/success/filtering_tests.py
|
varajala/microtest
|
f8399fc6eeab2fcbabba09716f9808311c056ab6
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
import microtest
import os
import tempfile
def run_microtest_as_module(directory: str) -> str:
cmd = [sys.executable, '-m', 'microtest', directory]
stream = tempfile.TemporaryFile(mode='w+')
proc = subprocess.Popen(cmd, stdout = stream, cwd = directory)
proc.wait()
stream.seek(0)
data = stream.read()
stream.close()
return data
def join_asset_path(*args):
path = os.path.dirname(os.path.abspath(__file__))
path = os.path.dirname(path)
path = os.path.dirname(path)
path = os.path.join(path, 'assets')
for name in args:
path = os.path.join(path, name)
return path
@microtest.test
def test_exclude_modules():
output = run_microtest_as_module(join_asset_path('module_filtering', 'exclude_modules'))
assert 'config executed' in output
assert 'run_this_test' in output
@microtest.test
def test_only_modules():
output = run_microtest_as_module(join_asset_path('module_filtering', 'only_modules'))
assert 'config executed' in output
assert 'run_this_test' in output
assert 'exec_this_test' in output
@microtest.test
def test_exclude_groups():
os.environ['MICROTEST_ENTRYPOINT'] = 'exclude_slow.py'
output = run_microtest_as_module(join_asset_path('test_filtering'))
assert 'config executed' in output
assert 'slow_test' not in output
assert 'normal_test' in output
@microtest.test
def test_only_groups():
os.environ['MICROTEST_ENTRYPOINT'] = 'only_slow.py'
output = run_microtest_as_module(join_asset_path('test_filtering'))
assert 'config executed' in output
assert 'slow_test' in output
assert 'normal_test' not in output
| 27.079365
| 92
| 0.714537
|
3344522306e3df32145630511589a4b2e56d3767
| 5,171
|
py
|
Python
|
tests/asset_viewer/files/models/misc/compress.py
|
duhnnie/3-dreams-of-black
|
15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e
|
[
"Apache-2.0"
] | 475
|
2015-01-02T07:49:46.000Z
|
2022-03-17T04:01:47.000Z
|
tests/asset_viewer/files/models/misc/compress.py
|
duhnnie/3-dreams-of-black
|
15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e
|
[
"Apache-2.0"
] | 3
|
2015-03-06T10:51:03.000Z
|
2019-09-10T19:39:39.000Z
|
tests/asset_viewer/files/models/misc/compress.py
|
duhnnie/3-dreams-of-black
|
15aded97f57a82e5a4c95c4e74bcd603b3fc6e1e
|
[
"Apache-2.0"
] | 130
|
2015-01-15T02:08:21.000Z
|
2021-12-20T19:15:22.000Z
|
#!/usr/bin/python2.6
# v0.2
import sys
import os
import simplejson as json
import re
BITS = 9
TARGET_SCALE = (1 << (BITS - 1) - 1)
JS_TPL = """
var req = new XMLHttpRequest();
req.open('GET', "%s", false);
req.send(null);
if (req.status == 200 || req.status == 0) {
var numVertices = %s;
var numMorphTargets = model.morphTargets.length;
var scale = %s;
model.vertices = new Float32Array(numVertices);
for (var j = 0; j < numMorphTargets; ++j) {
model.morphTargets[j].vertices = new Float32Array(numVertices);
}
var untransposed = new Int16Array(numVertices);
var transposeOffset = numVertices / 3;
var prevX = 0, prevY = 0, prevZ = 0;
for (var i = 0; i < transposeOffset; ++i) {
var x = req.responseText.charCodeAt(i);
x = (x >> 1) ^ (-(x & 1));
prevX += x;
untransposed[3*i] = prevX;
var y = req.responseText.charCodeAt(transposeOffset + i);
y = (y >> 1) ^ (-(y & 1));
prevY += y;
untransposed[3*i + 1] = prevY;
var z = req.responseText.charCodeAt(2*transposeOffset + i);
z = (z >> 1) ^ (-(z & 1));
prevZ += z;
untransposed[3*i + 2] = prevZ;
}
for (var i = 0; i < numVertices; ++i) {
var word = untransposed[i];
model.vertices[i] = scale * word;
var prev = word;
for (var j = 0; j < numMorphTargets; ++j) {
var offset = (j + 1) * numVertices;
var delta = req.responseText.charCodeAt(offset + i);
delta = (delta >> 1) ^ (-(delta & 1));
prev += delta;
model.morphTargets[j].vertices[i] = scale * prev;
}
}
var faceOffset = numVertices * (numMorphTargets + 1);
var numFaces = %s;
model.faces = new Uint16Array(numFaces);
for (var i = 0; i < numFaces; ++i) {
model.faces[i] = req.responseText.charCodeAt(faceOffset + i);
}
}"""
# Unused face compression.
FACE_DELTA_TPL = """
var prevFace = 0
for (var i = 0; i < numFaces; ++i) {
var face = req.responseText.charCodeAt(faceOffset + i);
face = (face >> 1) ^ (-(face & 1));
prevFace += face;
model.faces[i] = prevFace
}
"""
def ZigZag(word):
return ((word << 1) ^ (word >> 15)) & 0xFFFF
def ZigZagOut(arr, out):
for word in arr:
u = unichr(ZigZag(word)).encode('utf-8')
out.write(u)
def UnsignedOut(arr, out):
for word in arr:
u = unichr(word).encode('utf-8')
out.write(u)
def removeVertexAndFaceValues(data):
def process(line):
if '"vertices": [' in line or '"faces": [' in line:
line = re.sub('\[.*?\]','[]',line)
return line
lines = data.split('\n')
lines = map(process, lines)
return '\n'.join(lines)
def main(json_file, utf8_out):
# Load JSON
model = open(json_file, 'r').read()
jsonStart = model.find("model = ")+8
jsonEnd = model.find("};")+1
modelHead = model[0:jsonStart]
modelTail = model[jsonEnd:]
model = model[jsonStart:jsonEnd]
obj = json.loads(model)
verts = obj['vertices']
assert (len(verts) % 3 == 0)
morphTargets = obj['morphTargets']
morphVertsList = [morphTarget['vertices'] for morphTarget in morphTargets]
for morphVerts in morphVertsList:
assert(len(verts) == len(morphVerts))
faces = obj['faces']
print "faces: ", len(faces)
# Compute Scale
scale = 0
for vert in verts:
abs_vert = abs(vert)
if abs_vert > scale: scale = abs_vert
for morphVerts in morphVertsList:
for vert in morphVerts:
abs_vert = abs(vert)
if abs_vert > scale: scale = abs_vert
scale_factor = TARGET_SCALE / scale
print "scale_factor: ", scale_factor, 1.0/scale_factor
# Rescale original
verts = map(lambda x: int(x * scale_factor), verts)
# Rescale and delta morphs
prevVerts = verts
deltaMorphVertsList = []
for morphVerts in morphVertsList:
newVerts = map(lambda x: int(x * scale_factor), morphVerts)
deltaMorphVertsList.append(map(lambda x, prev: x - prev,
newVerts,
prevVerts))
prevVerts = newVerts
outCompressed = open(utf8_out, 'w')
# Delta compress across vertices.
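    # Vertices are stored transposed: all x deltas first, then all y deltas,
    # then all z deltas, matching the transposeOffset layout the JS loader expects.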
prevX = 0
prevY = 0
prevZ = 0
transposeOffset = len(verts) / 3
deltaVerts = []
deltaVerts.extend(verts)
for i in range(transposeOffset):
x = verts[3*i]
deltaVerts[i] = x - prevX;
prevX = x
y = verts[3*i + 1]
deltaVerts[transposeOffset + i] = y - prevY
prevY = y
z = verts[3*i + 2]
deltaVerts[2*transposeOffset + i] = z - prevZ
prevZ = z
# Delta compress face indices.
prev = 0
deltaFaces = []
for face in faces:
deltaFaces.append(face - prev)
prev = face
ZigZagOut(deltaVerts, outCompressed)
for morphVerts in deltaMorphVertsList:
ZigZagOut(morphVerts, outCompressed)
UnsignedOut(faces, outCompressed)
#ZigZagOut(deltaFaces, outCompressed)
jsLoader = JS_TPL % (os.path.basename(utf8_out),
len(verts),
1.0/scale_factor,
len(faces))
model = removeVertexAndFaceValues(model)
model = modelHead + model + ";" + jsLoader + modelTail[1:]
outJson = open(utf8_out.replace("txt","js"), 'w')
outJson.write(model)
if __name__ == '__main__':
assert(len(sys.argv) == 3)
main(sys.argv[1], sys.argv[2])
| 25.984925
| 76
| 0.612067
|
9353d05b51f8a35958215df7530fdf1b7108275b
| 1,418
|
py
|
Python
|
httpstan/routes.py
|
mjcarter95/httpstan
|
4a15b0316d7cb0a50193555d80fb1785f557f645
|
[
"ISC"
] | 35
|
2017-05-18T21:30:06.000Z
|
2022-03-12T08:26:07.000Z
|
httpstan/routes.py
|
mjcarter95/httpstan
|
4a15b0316d7cb0a50193555d80fb1785f557f645
|
[
"ISC"
] | 420
|
2017-05-18T18:43:41.000Z
|
2022-03-07T16:39:53.000Z
|
httpstan/routes.py
|
mjcarter95/httpstan
|
4a15b0316d7cb0a50193555d80fb1785f557f645
|
[
"ISC"
] | 15
|
2017-06-10T20:47:56.000Z
|
2022-03-02T15:24:10.000Z
|
"""Routes for httpstan.
Routes for the HTTP server are defined here.
"""
import aiohttp.web
import httpstan.views as views
def setup_routes(app: aiohttp.web.Application) -> None:
"""Add routes to Application.
Arguments:
app (aiohttp.Application): Application to which routes should be added.
"""
# Note: changes here must be mirrored in `openapi.py`.
app.router.add_get("/v1/health", views.handle_health)
app.router.add_post("/v1/models", views.handle_create_model)
app.router.add_get("/v1/models", views.handle_list_models)
app.router.add_delete("/v1/models/{model_id}", views.handle_delete_model)
app.router.add_post("/v1/models/{model_id}/params", views.handle_show_params)
app.router.add_post("/v1/models/{model_id}/log_prob", views.handle_log_prob)
app.router.add_post("/v1/models/{model_id}/log_prob_grad", views.handle_log_prob_grad)
app.router.add_post("/v1/models/{model_id}/write_array", views.handle_write_array)
app.router.add_post("/v1/models/{model_id}/transform_inits", views.handle_transform_inits)
app.router.add_post("/v1/models/{model_id}/fits", views.handle_create_fit)
app.router.add_get("/v1/models/{model_id}/fits/{fit_id}", views.handle_get_fit)
app.router.add_delete("/v1/models/{model_id}/fits/{fit_id}", views.handle_delete_fit)
app.router.add_get("/v1/operations/{operation_id}", views.handle_get_operation)
| 45.741935
| 94
| 0.74189
|
2563c314268f81d1bdcbf7af02f0c282de4fc66c
| 661
|
py
|
Python
|
venv/bin/rst2html.py
|
anthodemorais/spaces
|
e2f4b70bf2438a39ce1e1bd954f8dc98bea5280d
|
[
"MIT"
] | null | null | null |
venv/bin/rst2html.py
|
anthodemorais/spaces
|
e2f4b70bf2438a39ce1e1bd954f8dc98bea5280d
|
[
"MIT"
] | null | null | null |
venv/bin/rst2html.py
|
anthodemorais/spaces
|
e2f4b70bf2438a39ce1e1bd954f8dc98bea5280d
|
[
"MIT"
] | null | null | null |
#!/Users/anthonydemorais/Documents/travail/supinternet/python/spaces/venv/bin/python3
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| 27.541667
| 85
| 0.748865
|
b08b9775941e4f217aed30bc5436a2f0294daa4e
| 585
|
py
|
Python
|
app/news/migrations/0002_newsentry_pub_date.py
|
mooja/ssip3
|
d42daee2b3b9d692a1fda4163bc6f0c6e970b79b
|
[
"MIT"
] | null | null | null |
app/news/migrations/0002_newsentry_pub_date.py
|
mooja/ssip3
|
d42daee2b3b9d692a1fda4163bc6f0c6e970b79b
|
[
"MIT"
] | null | null | null |
app/news/migrations/0002_newsentry_pub_date.py
|
mooja/ssip3
|
d42daee2b3b9d692a1fda4163bc6f0c6e970b79b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-14 00:24
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='newsentry',
name='pub_date',
field=models.DateField(default=datetime.datetime(2016, 5, 14, 0, 24, 11, 524933, tzinfo=utc)),
preserve_default=False,
),
]
| 24.375
| 106
| 0.634188
|
cd8760fb624bf50dc3139f4c7543817702c659f8
| 1,613
|
py
|
Python
|
distribution/service/demandservice.py
|
beehive-spg/beehive-drone-distribution
|
dc3cb6036497680bb839fa7a1d3995c02cc70873
|
[
"Apache-2.0"
] | null | null | null |
distribution/service/demandservice.py
|
beehive-spg/beehive-drone-distribution
|
dc3cb6036497680bb839fa7a1d3995c02cc70873
|
[
"Apache-2.0"
] | null | null | null |
distribution/service/demandservice.py
|
beehive-spg/beehive-drone-distribution
|
dc3cb6036497680bb839fa7a1d3995c02cc70873
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import json
from foundation.logger import Logger
from distribution.service import buildingservice, routeservice, hiveservice, distributionservice
logger = Logger(__name__)
def update_demand(message):
decoded_message = message.decode("utf-8")
loaded_message = json.loads(decoded_message)
logger.info("Received message: " + str(loaded_message))
event_type = loaded_message['type']
if (event_type == "dep"):
return
route_id = int(loaded_message['route_id'])
hop_id = int(loaded_message['hop_id'])
route = routeservice.get_route_by(route_id)
hop = routeservice.get_hop_in_route(route, hop_id)
hives_to_update = get_new_demand(route, hop)
for hive in hives_to_update:
logger.info("Hive to update: " + str(hive.to_primitive()))
hiveservice.update_demands(hives_to_update)
def get_new_demand(route, hop):
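	# the destination hive's demand decreases as the drone progresses along the
	# route, while the origin hive's demand increases when this is the first hop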
end_hive = get_end_hop_demand(route, hop)
start_hop = route.hops[0]
if (is_start_hop(hop, start_hop)):
start_hive = get_start_hop_demand(start_hop)
if (route.origin.ident != "route.origin/distribution"):
distributionservice.evaluate_hive(start_hive)
return [ start_hive, end_hive ]
return [ end_hive ]
def is_start_hop(message_hop, route_start):
return message_hop.id == route_start.id
def get_start_hop_demand(start_hop):
start_hive = hiveservice.get_hive_by(start_hop.start.id)
start_hive.demand += 1
return start_hive
def get_end_hop_demand(route, hop):
end_hop = route.hops[len(route.hops)-1]
endhop_hive = hiveservice.get_hive_by(end_hop.end.id)
endhop_hive.demand -= routeservice.get_route_distance_progress(route, hop)
return endhop_hive
| 31.627451
| 96
| 0.781773
|
c634d9dd675c4072ffe69668c814f7613c9d4005
| 8,516
|
py
|
Python
|
model/unetdsbn.py
|
zzzqzhou/Dual-Normalization
|
b9831b6e2662a950600ba37ada087ba8ce93f60c
|
[
"MIT"
] | 12
|
2022-03-10T09:24:41.000Z
|
2022-03-30T03:36:51.000Z
|
model/unetdsbn.py
|
zzzqzhou/Dual-Normalization
|
b9831b6e2662a950600ba37ada087ba8ce93f60c
|
[
"MIT"
] | 1
|
2022-03-30T09:41:23.000Z
|
2022-03-30T09:41:23.000Z
|
model/unetdsbn.py
|
zzzqzhou/Dual-Normalization
|
b9831b6e2662a950600ba37ada087ba8ce93f60c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
2D Unet-like architecture code in Pytorch
"""
import math
import numpy as np
from model.layers import *
from model.dsbn import DomainSpecificBatchNorm2d
import torch.nn as nn
import torch.nn.functional as F
import torch
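# MyUpsample2 performs 2x nearest-neighbour upsampling by inserting singleton
# dimensions, expanding them to size 2 and reshaping, instead of using nn.Upsample.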
class MyUpsample2(nn.Module):
def forward(self, x):
return x[:, :, :, None, :, None].expand(-1, -1, -1, 2, -1, 2).reshape(x.size(0), x.size(1), x.size(2)*2, x.size(3)*2)
def normalization(planes, norm='gn', num_domains=None, momentum=0.1):
if norm == 'dsbn':
m = DomainSpecificBatchNorm2d(planes, num_domains=num_domains, momentum=momentum)
elif norm == 'bn':
m = nn.BatchNorm2d(planes)
elif norm == 'gn':
m = nn.GroupNorm(1, planes)
elif norm == 'in':
m = nn.InstanceNorm2d(planes)
else:
        raise ValueError('Normalization type {} is not supported'.format(norm))
return m
#### Note: All are functional units except the norms, which are sequential
class ConvD(nn.Module):
def __init__(self, inplanes, planes, norm='bn', first=False, num_domains=None, momentum=0.1):
super(ConvD, self).__init__()
self.first = first
self.conv1 = nn.Conv2d(inplanes, planes, 3, 1, 1, bias=True)
self.bn1 = normalization(planes, norm, num_domains, momentum=momentum)
self.conv2 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
self.bn2 = normalization(planes, norm, num_domains, momentum=momentum)
self.conv3 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
self.bn3 = normalization(planes, norm, num_domains, momentum=momentum)
def forward(self, x, weights=None, layer_idx=None, domain_label=None):
if weights == None:
weight_1, bias_1 = self.conv1.weight, self.conv1.bias
weight_2, bias_2 = self.conv2.weight, self.conv2.bias
weight_3, bias_3 = self.conv3.weight, self.conv3.bias
else:
weight_1, bias_1 = weights[layer_idx+'.conv1.weight'], weights[layer_idx+'.conv1.bias']
weight_2, bias_2 = weights[layer_idx+'.conv2.weight'], weights[layer_idx+'.conv2.bias']
weight_3, bias_3 = weights[layer_idx+'.conv3.weight'], weights[layer_idx+'.conv3.bias']
if not self.first:
x = maxpool2D(x, kernel_size=2)
#layer 1 conv, bn
x = conv2d(x, weight_1, bias_1)
if domain_label is not None:
x, _ = self.bn1(x, domain_label)
else:
x = self.bn1(x)
#layer 2 conv, bn, relu
y = conv2d(x, weight_2, bias_2)
if domain_label is not None:
y, _ = self.bn2(y, domain_label)
else:
y = self.bn2(y)
y = relu(y)
#layer 3 conv, bn
z = conv2d(y, weight_3, bias_3)
if domain_label is not None:
z, _ = self.bn3(z, domain_label)
else:
z = self.bn3(z)
z = relu(z)
return z
class ConvU(nn.Module):
def __init__(self, planes, norm='bn', first=False, num_domains=None, momentum=0.1):
super(ConvU, self).__init__()
self.first = first
if not self.first:
self.conv1 = nn.Conv2d(2*planes, planes, 3, 1, 1, bias=True)
self.bn1 = normalization(planes, norm, num_domains, momentum=momentum)
self.pool = MyUpsample2()
self.conv2 = nn.Conv2d(planes, planes//2, 1, 1, 0, bias=True)
self.bn2 = normalization(planes//2, norm, num_domains, momentum=momentum)
self.conv3 = nn.Conv2d(planes, planes, 3, 1, 1, bias=True)
self.bn3 = normalization(planes, norm, num_domains, momentum=momentum)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, prev, weights=None, layer_idx=None, domain_label=None):
if weights == None:
if not self.first:
weight_1, bias_1 = self.conv1.weight, self.conv1.bias
weight_2, bias_2 = self.conv2.weight, self.conv2.bias
weight_3, bias_3 = self.conv3.weight, self.conv3.bias
else:
if not self.first:
weight_1, bias_1 = weights[layer_idx+'.conv1.weight'], weights[layer_idx+'.conv1.bias']
weight_2, bias_2 = weights[layer_idx+'.conv2.weight'], weights[layer_idx+'.conv2.bias']
weight_3, bias_3 = weights[layer_idx+'.conv3.weight'], weights[layer_idx+'.conv3.bias']
#layer 1 conv, bn, relu
if not self.first:
x = conv2d(x, weight_1, bias_1, )
if domain_label is not None:
x, _ = self.bn1(x, domain_label)
else:
x = self.bn1(x)
x = relu(x)
#upsample, layer 2 conv, bn, relu
y = self.pool(x)
y = conv2d(y, weight_2, bias_2, kernel_size=1, stride=1, padding=0)
if domain_label is not None:
y, _ = self.bn2(y, domain_label)
else:
y = self.bn2(y)
y = relu(y)
#concatenation of two layers
y = torch.cat([prev, y], 1)
#layer 3 conv, bn
y = conv2d(y, weight_3, bias_3)
if domain_label is not None:
y, _ = self.bn3(y, domain_label)
else:
y = self.bn3(y)
y = relu(y)
return y
class Unet2D(nn.Module):
def __init__(self, c=1, n=16, norm='bn', num_classes=2, num_domains=4, momentum=0.1):
super(Unet2D, self).__init__()
self.convd1 = ConvD(c, n, norm, first=True, num_domains=num_domains, momentum=momentum)
self.convd2 = ConvD(n, 2*n, norm, num_domains=num_domains, momentum=momentum)
self.convd3 = ConvD(2*n, 4*n, norm, num_domains=num_domains, momentum=momentum)
self.convd4 = ConvD(4*n, 8*n, norm, num_domains=num_domains, momentum=momentum)
self.convd5 = ConvD(8*n,16*n, norm, num_domains=num_domains, momentum=momentum)
self.convu4 = ConvU(16*n, norm, first=True, num_domains=num_domains, momentum=momentum)
self.convu3 = ConvU(8*n, norm, num_domains=num_domains, momentum=momentum)
self.convu2 = ConvU(4*n, norm, num_domains=num_domains, momentum=momentum)
self.convu1 = ConvU(2*n, norm, num_domains=num_domains, momentum=momentum)
self.seg1 = nn.Conv2d(2*n, num_classes, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x, weights=None, domain_label=None):
if weights == None:
x1 = self.convd1(x, domain_label=domain_label)
x2 = self.convd2(x1, domain_label=domain_label)
x3 = self.convd3(x2, domain_label=domain_label)
x4 = self.convd4(x3, domain_label=domain_label)
x5 = self.convd5(x4, domain_label=domain_label)
y4 = self.convu4(x5, x4, domain_label=domain_label)
y3 = self.convu3(y4, x3, domain_label=domain_label)
y2 = self.convu2(y3, x2, domain_label=domain_label)
y1 = self.convu1(y2, x1, domain_label=domain_label)
y1_pred = conv2d(y1, self.seg1.weight, self.seg1.bias, kernel_size=None, stride=1, padding=0)
else:
x1 = self.convd1(x, weights=weights, layer_idx='module.convd1', domain_label=domain_label)
x2 = self.convd2(x1, weights=weights, layer_idx='module.convd2', domain_label=domain_label)
x3 = self.convd3(x2, weights=weights, layer_idx='module.convd3', domain_label=domain_label)
x4 = self.convd4(x3, weights=weights, layer_idx='module.convd4', domain_label=domain_label)
x5 = self.convd5(x4, weights=weights, layer_idx='module.convd5', domain_label=domain_label)
y4 = self.convu4(x5, x4, weights=weights, layer_idx='module.convu4', domain_label=domain_label)
y3 = self.convu3(y4, x3, weights=weights, layer_idx='module.convu3', domain_label=domain_label)
y2 = self.convu2(y3, x2, weights=weights, layer_idx='module.convu2', domain_label=domain_label)
y1 = self.convu1(y2, x1, weights=weights, layer_idx='module.convu1', domain_label=domain_label)
y1_pred = conv2d(y1, weights['module.seg1.weight'], weights['module.seg1.bias'], kernel_size=None, stride=1, padding=0)
predictions = torch.sigmoid(input=y1_pred)
return predictions
| 41.950739
| 131
| 0.616721
|
5a1241e9fac3d2e57827aed8785350cc88f5ae3d
| 689
|
py
|
Python
|
.circleci/compare_runs.py
|
KalyanovD/daal4py
|
7b75aa795863415a1ae35e24ac4357ab7b6e2faa
|
[
"Apache-2.0"
] | null | null | null |
.circleci/compare_runs.py
|
KalyanovD/daal4py
|
7b75aa795863415a1ae35e24ac4357ab7b6e2faa
|
[
"Apache-2.0"
] | null | null | null |
.circleci/compare_runs.py
|
KalyanovD/daal4py
|
7b75aa795863415a1ae35e24ac4357ab7b6e2faa
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import os
import sys
import operator
qoi_list = ['failed', 'passed', 'xpassed', 'xfailed', 'skipped', 'deselected']
def get_counts(tkn):
data = [x.split() for x in os.getenv(tkn).split(',')]
counts = {x[-1]: int(x[-2]) for x in data if len(x) > 0 and x[-1] in qoi_list}
return counts
def sum_of_attributes(counts, keys):
if isinstance(keys, str):
return counts.get(keys, 0)
return sum([counts.get(k, 0) for k in keys])
d4p = get_counts('D4P')
if d4p and sum_of_attributes(d4p, 'failed') == 0:
print("Patched scikit-learn passed the compatibility check")
sys.exit(0)
else:
print("Patched run: {}".format(d4p))
sys.exit(1)
| 23.758621
| 82
| 0.640058
|
0f9645f7a5a8612d98de2550706151a52c2cbeaf
| 248
|
py
|
Python
|
templates_advanced/pythons_auth/urls.py
|
DiyanKalaydzhiev23/Django-Framework
|
e20a58e123add27e70faee641e35279b07474ed8
|
[
"MIT"
] | null | null | null |
templates_advanced/pythons_auth/urls.py
|
DiyanKalaydzhiev23/Django-Framework
|
e20a58e123add27e70faee641e35279b07474ed8
|
[
"MIT"
] | null | null | null |
templates_advanced/pythons_auth/urls.py
|
DiyanKalaydzhiev23/Django-Framework
|
e20a58e123add27e70faee641e35279b07474ed8
|
[
"MIT"
] | null | null | null |
from django.urls import path
from pythons_auth import views
urlpatterns = [
path('register/', views.register_view, name="register"),
path('login/', views.login_view, name="login"),
path('logout/', views.logout_view, name="logout"),
]
| 24.8
| 60
| 0.697581
|
48d3ef115fb4cdb4a2cc13e556947fc5770756a4
| 811
|
py
|
Python
|
setup.py
|
EvaSDK/freezegun
|
9f416a85a12a8aad327b85e5dbdba4506cb6e915
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
EvaSDK/freezegun
|
9f416a85a12a8aad327b85e5dbdba4506cb6e915
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
EvaSDK/freezegun
|
9f416a85a12a8aad327b85e5dbdba4506cb6e915
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
requires = ['six']
if sys.version_info[0] == 2:
requires += ['python-dateutil>=1.0, != 2.0']
else:
# Py3k
requires += ['python-dateutil>=2.0']
setup(
name='freezegun',
version='0.3.1',
description='Let your Python tests travel through time',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/freezegun',
packages=['freezegun'],
install_requires=requires,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
| 25.34375
| 60
| 0.621455
|
6251e286d1c084845cd8d8b657ea0cd032477ba4
| 728
|
py
|
Python
|
tests/unit/util/iterables/test_index_of.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | 33
|
2018-01-16T02:04:51.000Z
|
2022-03-22T22:57:29.000Z
|
tests/unit/util/iterables/test_index_of.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | 7
|
2019-06-16T22:02:03.000Z
|
2021-10-02T13:45:31.000Z
|
tests/unit/util/iterables/test_index_of.py
|
GSH-LAN/byceps
|
ab8918634e90aaa8574bd1bb85627759cef122fe
|
[
"BSD-3-Clause"
] | 14
|
2019-06-01T21:39:24.000Z
|
2022-03-14T17:56:43.000Z
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import pytest
from byceps.util.iterables import index_of
@pytest.mark.parametrize(
'iterable, predicate, expected',
[
(
[],
lambda x: x > 3,
None,
),
(
[2, 3, 4, 5],
lambda x: x > 1,
0,
),
(
[2, 3, 4, 5],
lambda x: x > 3,
2,
),
(
[2, 3, 4, 5],
lambda x: x > 6,
None,
),
],
)
def test_index_of(iterable, predicate, expected):
actual = index_of(iterable, predicate)
assert actual == expected
| 18.666667
| 54
| 0.442308
|
a5f6ea066e7632f57688d44f3ef272358ee5db02
| 8,929
|
py
|
Python
|
myapp/search_engine/index_ranking.py
|
estherflores/IRWA-final-project
|
ac076f8481f1c15d65f04c43c82510825ec86351
|
[
"Apache-2.0"
] | null | null | null |
myapp/search_engine/index_ranking.py
|
estherflores/IRWA-final-project
|
ac076f8481f1c15d65f04c43c82510825ec86351
|
[
"Apache-2.0"
] | null | null | null |
myapp/search_engine/index_ranking.py
|
estherflores/IRWA-final-project
|
ac076f8481f1c15d65f04c43c82510825ec86351
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append('myapp')
from myapp.search_engine.text_processing import json_data, json_data1, cleanText, all_tweets
from myapp.search_engine.search_engine import DocumentInfo
import math
from array import array
import numpy as np
import collections
from numpy import linalg as la
from collections import defaultdict
def create_index_tfidf(lines, num_documents, json_data):
"""
Implement the inverted index and compute tf, df and idf
Argument:
    lines -- collection of tweets
num_documents -- total number of documents
Returns:
index - the inverted index (implemented through a Python dictionary) containing terms as keys and the corresponding
list of document these keys appears in (and the positions) as values.
tf - normalized term frequency for each term in each document
df - number of documents each term appear in
idf - inverse document frequency of each term
"""
index = defaultdict(list)
tf = defaultdict(list) #term frequencies of terms in documents (documents in the same order as in the main index)
df = defaultdict(int) #document frequencies of terms in the corpus
idf = defaultdict(float)
for i in range(len(lines)): # Remember, lines contain all tweets
page_id = json_data[str(i)]['id']
terms = json_data[str(i)]['full_text']
## ===============================================================
## create the index for the **current page** and store it in current_page_index
## current_page_index ==> { ‘term1’: [current_doc, [list of positions]], ...,‘term_n’: [current_doc, [list of positions]]}
## Example: if the curr_doc has id 1 and his text is
##"web retrieval information retrieval":
## current_page_index ==> { ‘web’: [1, [0]], ‘retrieval’: [1, [1,4]], ‘information’: [1, [2]]}
## the term ‘web’ appears in document 1 in positions 0,
## the term ‘retrieval’ appears in document 1 in positions 1 and 4
## ===============================================================
current_page_index = {}
for position, term in enumerate(terms): ## terms contains page_title + page_text
try:
# if the term is already in the dict append the position to the corresponding list
current_page_index[term][1].append(position)
except:
# Add the new term as dict key and initialize the array of positions and add the position
current_page_index[term]=[page_id, array('I',[position])] #'I' indicates unsigned int (int in Python)
#normalize term frequencies
# Compute the denominator to normalize term frequencies (formula 2 above)
# norm is the same for all terms of a document.
norm = 0
for term, posting in current_page_index.items():
# posting will contain the list of positions for current term in current document.
# posting ==> [current_doc, [list of positions]]
# you can use it to infer the frequency of current term.
norm += len(posting[1]) ** 2
norm = math.sqrt(norm)
#calculate the tf(dividing the term frequency by the above computed norm) and df weights
for term, posting in current_page_index.items():
# append the tf for current term (tf = term frequency in current doc/norm)
tf[term].append(np.round(len(posting[1])/norm,4)) ## SEE formula (1) above
#increment the document frequency of current term (number of documents containing the current term)
df[term]+=1 # increment DF for current term
#merge the current page index with the main index
for term_page, posting_page in current_page_index.items():
index[term_page].append(posting_page)
# Compute IDF following the formula (3) above. HINT: use np.log
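    # Example: with 1000 documents and a term appearing in 10 of them,
    # idf = log(1000 / 10) ~= 4.6052 (natural log, rounded to 4 decimals below).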
for term in df:
idf[term] = np.round(np.log(float(num_documents/ df[term])), 4)
return index, tf, df, idf
def rank_documents(terms, docs, index, idf, tf):
"""
Perform the ranking of the results of a search based on the tf-idf weights
Argument:
terms -- list of query terms
docs -- list of documents, to rank, matching the query
index -- inverted index data structure
idf -- inverted document frequencies
tf -- term frequencies
    Returns:
    result_docs -- the list of matching document ids ranked by cosine similarity
"""
# We are interested only on the element of the docVector corresponding to the query terms
# The remaining elements would became 0 when multiplied to the query_vector
doc_vectors = defaultdict(lambda: [0] * len(terms)) # We call doc_vectors[k] for a nonexistent key k, the key-value pair (k,[0]*len(terms)) will be automatically added to the dictionary
query_vector = [0] * len(terms)
# compute the norm for the query tf
query_terms_count = collections.Counter(terms) # get the frequency of each term in the query.
# Example: collections.Counter(["hello","hello","world"]) --> Counter({'hello': 2, 'world': 1})
#HINT: use when computing tf for query_vector
query_norm = la.norm(list(query_terms_count.values()))
for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query
if term not in index:
continue
## Compute tf*idf(normalize TF as done with documents)
query_vector[termIndex]=query_terms_count[term]/query_norm * idf[term]
# Generate doc_vectors for matching docs
for doc_index, (doc, postings) in enumerate(index[term]):
# Example of [doc_index, (doc, postings)]
# 0 (26, array('I', [1, 4, 12, 15, 22, 28, 32, 43, 51, 68, 333, 337]))
# 1 (33, array('I', [26, 33, 57, 71, 87, 104, 109]))
# term is in doc 26 in positions 1,4, .....
# term is in doc 33 in positions 26,33, .....
#tf[term][0] will contain the tf of the term "term" in the doc 26
if doc in docs:
doc_vectors[doc][termIndex] = tf[term][doc_index] * idf[term] # TODO: check if multiply for idf
# Calculate the score of each doc
# compute the cosine similarity between queyVector and each docVector:
# HINT: you can use the dot product because in case of normalized vectors it corresponds to the cosine similarity
# see np.dot
doc_scores=[[np.dot(curDocVec, query_vector), doc] for doc, curDocVec in doc_vectors.items() ]
doc_scores.sort(reverse=True)
result_docs = [x[1] for x in doc_scores]
#print document titles instead if document id's
#result_docs=[ title_index[x] for x in result_docs ]
if len(result_docs) == 0:
print("No results found, try again")
#print ('\n'.join(result_docs), '\n')
return result_docs
def search_tf_idf(query, index, top):
query = cleanText(query)
docs = set([posting[0] for posting in index[query[0]]])
for term in query[1:]:
try:
# store in term_docs the ids of the docs that contain "term"
term_docs=[posting[0] for posting in index[term]]
# docs = docs Union term_docs
docs = docs.intersection(set(term_docs))
except:
#term is not in index
pass
docs = list(docs)
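    # idf and tf are the module-level structures built by create_index_tfidf at import time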
ranked_docs = rank_documents(query, docs, index, idf, tf)
    if ranked_docs:
        print(ranked_docs[0])
return retrieve_docs(ranked_docs, top)
def retrieve_docs(docs, top):
"""
Retrieve the documents in the required format
Argument:
docs -- collection of tweets
top -- the number of tweets to retrieve
Returns:
doc_info - the collection of top tweets retrieved in the required format
"""
doc_info = []
for d_id in docs[:top]:
for j in json_data1.keys():
if(d_id == json_data1[j]['id']):
if 'media' in json_data1[j]['entities'].keys():
title= json_data1[j]["full_text"][:10]
description = json_data1[j]["full_text"]
doc_date = json_data1[j]["created_at"]
url = json_data1[j]["entities"]["media"][0]['url']
doc_info.append(DocumentInfo(title, description, doc_date, url))
else:
title= json_data1[j]["full_text"][:10]
description = json_data1[j]["full_text"]
doc_date = json_data1[j]["created_at"]
doc_info.append(DocumentInfo(title, description, doc_date))
return doc_info
search_query= 'covid19'
index, tf, df, idf = create_index_tfidf(all_tweets, len(all_tweets),json_data)
results = search_tf_idf(search_query, index, 10)  # an explicit top value is required; 10 is an arbitrary choice here
| 43.556098
| 189
| 0.628402
|
6f79e8844f8ef087bb3f3d10d9b76b12b119d149
| 954
|
py
|
Python
|
bwi_logging/src/get_distance.py
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | null | null | null |
bwi_logging/src/get_distance.py
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | null | null | null |
bwi_logging/src/get_distance.py
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import rosbag
import rospy
import datetime
if rospy.has_param('bag_file'):
bag_file = rospy.get_param('bag_file', '')
print('bag_file: ' + bag_file)
else:
exit('please set the bag_file first: "rosparam set bag_file /path/to/your/bag_file"')
bag = rosbag.Bag(bag_file)
time_start = -1
time_consumed = 0
distance_traveled = 0.0
x = None
y = None
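# accumulate the planar Euclidean distance between consecutive odometry samples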
for topic, msg, t in bag.read_messages(topics=['odom']):
time_current = int(msg.header.stamp.secs)
x_current = float(msg.pose.pose.position.x)
y_current = float(msg.pose.pose.position.y)
if time_start < 0:
x = x_current
y = y_current
time_start = time_current
continue
distance_traveled += ((x_current - x)**2 + (y_current - y)**2)**0.5
x = x_current
y = y_current
time_consumed = time_current - time_start
print('time_consumed: ' + str(datetime.timedelta(seconds=time_consumed)))
print('distance_traveled: ' + str(distance_traveled))
bag.close()
| 21.2
| 87
| 0.706499
|
329b4b247ec9ac3694f2f6e241fe3ddd33da54bd
| 1,074
|
py
|
Python
|
xmnlp/sentiment/__init__.py
|
mokeyish/xmnlp
|
7b47dddb7dfcdcf26031370f7f6ea9cbcd2b8a4c
|
[
"Apache-2.0"
] | null | null | null |
xmnlp/sentiment/__init__.py
|
mokeyish/xmnlp
|
7b47dddb7dfcdcf26031370f7f6ea9cbcd2b8a4c
|
[
"Apache-2.0"
] | null | null | null |
xmnlp/sentiment/__init__.py
|
mokeyish/xmnlp
|
7b47dddb7dfcdcf26031370f7f6ea9cbcd2b8a4c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# -------------------------------------------#
# author: sean lee #
# email: xmlee97@gmail.com #
# -------------------------------------------#
import os
import threading
from typing import Tuple
from xmnlp import config
from xmnlp.sentiment.sentiment_model import SentimentModel
sentiment_model = None
lock = threading.Lock()
def load_sentiment(reload: bool = False) -> None:
with lock:
global sentiment_model
if sentiment_model is None or reload:
if config.MODEL_DIR is None:
raise ValueError("Error: 模型地址未设置,请根据文档「安装」 -> 「下载模型」指引下载并配置模型。")
print('Lazy load sentiment...')
sentiment_model = SentimentModel(
os.path.join(config.MODEL_DIR, 'sentiment'))
def sentiment(doc: str) -> Tuple[float, float]:
""" 情感分类
Args:
doc: str
Return:
      Tuple[float, float], [probability of negative, probability of positive]
"""
load_sentiment()
doc = doc.strip()
return sentiment_model.predict(doc)
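# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes config.MODEL_DIR already points at the downloaded xmnlp models; the input
# text and the 0.5 decision threshold below are placeholder assumptions.
if __name__ == '__main__':
    neg_proba, pos_proba = sentiment('这部电影真好看')
    label = 'positive' if pos_proba >= 0.5 else 'negative'
    print('negative=%.3f positive=%.3f -> %s' % (neg_proba, pos_proba, label))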
| 25.571429
| 80
| 0.55959
|
e0f1546d5f21dd2fc6842646d89974f74205c5b5
| 2,435
|
py
|
Python
|
maltego_trx/server.py
|
tlansec/maltego-trx
|
10040637d68e594933566cf2064b4121581cd02c
|
[
"MIT"
] | null | null | null |
maltego_trx/server.py
|
tlansec/maltego-trx
|
10040637d68e594933566cf2064b4121581cd02c
|
[
"MIT"
] | null | null | null |
maltego_trx/server.py
|
tlansec/maltego-trx
|
10040637d68e594933566cf2064b4121581cd02c
|
[
"MIT"
] | null | null | null |
import logging
from flask import Flask, request
from maltego_trx.maltego import MaltegoMsg
from .registry import mapping
log = logging.getLogger("maltego.server")
logging.basicConfig(level=logging.DEBUG)
URL_TEMPLATE = '/run/<transform_name>/'
def get_exception_message(msg="An exception occurred with the transform. Check the logs for more details."):
return """<MaltegoMessage>
<MaltegoTransformResponseMessage>
<Entities>
</Entities>
<UIMessages>
<UIMessage MessageType='PartialError'>
%s
</UIMessage>
</UIMessages>
</MaltegoTransformResponseMessage>
</MaltegoMessage>""" % msg
def print_transforms():
print("= Transform Server URLs =")
for path in mapping:
print(URL_TEMPLATE.replace("<transform_name>", path) + ": " + mapping[path].__name__)
print("\n")
print("= Local Transform Names =")
for path in mapping:
print(path + ": " + mapping[path].__name__)
print("\n")
def run_transform(transform_name, client_msg):
transform_method = mapping[transform_name]
try:
if hasattr(transform_method, "run_transform"):
return transform_method.run_transform(client_msg), 200 # Transform class
else:
return transform_method(client_msg), 200 # Transform method
except Exception as e:
log.error("An exception occurred while executing your transform code.")
log.error(e, exc_info=True)
return get_exception_message(), 200
app = Flask(__name__)
application = app # application variable for usage with apache mod wsgi
@app.route(URL_TEMPLATE, methods=['GET', 'POST'])
def transform_runner(transform_name):
transform_name = transform_name.lower()
if transform_name in mapping:
if request.method == 'POST':
client_msg = MaltegoMsg(request.data)
return run_transform(transform_name, client_msg)
else:
return "Transform found with name '%s', you will need to send a POST request to run it." % transform_name, 200
else:
log.info("No transform found with the name '%s'." % transform_name)
log.info("Available transforms are:\n %s" % str(list(mapping.keys())))
return "No transform found with the name '%s'." % transform_name, 404
@app.route('/', methods=['GET', 'POST'])
def index():
return "You have reached a Maltego Transform Server.", 200
| 32.905405
| 122
| 0.670226
|
9318b4928476f52bc0f04d74f44c5a6286706e88
| 3,329
|
py
|
Python
|
pubnub/endpoints/push/add_channels_to_push.py
|
natekspencer/pubnub-python
|
453ba34104b4067077546d5d9ba3b206559494d1
|
[
"MIT"
] | 146
|
2015-01-05T03:14:53.000Z
|
2022-03-16T16:51:52.000Z
|
pubnub/endpoints/push/add_channels_to_push.py
|
natekspencer/pubnub-python
|
453ba34104b4067077546d5d9ba3b206559494d1
|
[
"MIT"
] | 48
|
2015-01-15T15:27:41.000Z
|
2022-03-21T14:17:05.000Z
|
pubnub/endpoints/push/add_channels_to_push.py
|
natekspencer/pubnub-python
|
453ba34104b4067077546d5d9ba3b206559494d1
|
[
"MIT"
] | 128
|
2015-01-05T03:40:59.000Z
|
2022-03-02T20:50:58.000Z
|
from pubnub.endpoints.endpoint import Endpoint
from pubnub.errors import PNERR_CHANNEL_MISSING, PNERR_PUSH_DEVICE_MISSING, PNERROR_PUSH_TYPE_MISSING, \
PNERR_PUSH_TOPIC_MISSING
from pubnub.exceptions import PubNubException
from pubnub.enums import HttpMethod, PNOperationType, PNPushType, PNPushEnvironment
from pubnub.models.consumer.push import PNPushAddChannelResult
from pubnub import utils
class AddChannelsToPush(Endpoint):
# v1/push/sub-key/{subKey}/devices/{pushToken}
ADD_PATH = "/v1/push/sub-key/%s/devices/%s"
# v2/push/sub-key/{subKey}/devices-apns2/{deviceApns2}
ADD_PATH_APNS2 = "/v2/push/sub-key/%s/devices-apns2/%s"
def __init__(self, pubnub):
Endpoint.__init__(self, pubnub)
self._channels = None
self._device_id = None
self._push_type = None
self._topic = None
self._environment = None
def channels(self, channels):
self._channels = channels
return self
def device_id(self, device_id):
self._device_id = device_id
return self
def push_type(self, push_type):
self._push_type = push_type
return self
def topic(self, topic):
self._topic = topic
return self
def environment(self, environment):
self._environment = environment
return self
def custom_params(self):
params = {}
params['add'] = utils.join_items(self._channels)
if self._push_type != PNPushType.APNS2:
params['type'] = utils.push_type_to_string(self._push_type)
else:
if self._environment is None:
self._environment = PNPushEnvironment.DEVELOPMENT
params['environment'] = self._environment
params['topic'] = self._topic
return params
def build_path(self):
if self._push_type != PNPushType.APNS2:
return AddChannelsToPush.ADD_PATH % (
self.pubnub.config.subscribe_key, self._device_id)
else:
return AddChannelsToPush.ADD_PATH_APNS2 % (
self.pubnub.config.subscribe_key, self._device_id)
def http_method(self):
return HttpMethod.GET
def validate_params(self):
self.validate_subscribe_key()
if not isinstance(self._channels, list) or len(self._channels) == 0:
raise PubNubException(pn_error=PNERR_CHANNEL_MISSING)
if not isinstance(self._device_id, str) or len(self._device_id) == 0:
raise PubNubException(pn_error=PNERR_PUSH_DEVICE_MISSING)
if self._push_type is None:
raise PubNubException(pn_error=PNERROR_PUSH_TYPE_MISSING)
if self._push_type == PNPushType.APNS2:
if not isinstance(self._topic, str) or len(self._topic) == 0:
raise PubNubException(pn_error=PNERR_PUSH_TOPIC_MISSING)
def create_response(self, envelope):
return PNPushAddChannelResult()
def is_auth_required(self):
return True
def request_timeout(self):
return self.pubnub.config.non_subscribe_request_timeout
def connect_timeout(self):
return self.pubnub.config.connect_timeout
def operation_type(self):
return PNOperationType.PNAddPushNotificationsOnChannelsOperation
def name(self):
return "AddChannelsToPush"
| 32.009615
| 104
| 0.67678
|
07ff2c4df2b4784a00f87f2311775242c12db59c
| 6,540
|
py
|
Python
|
aerosandbox/numpy/array.py
|
scivm/AeroSandbox
|
616c579e49bc13c3023364773705eaac7df10da7
|
[
"MIT"
] | 1
|
2021-04-07T08:59:31.000Z
|
2021-04-07T08:59:31.000Z
|
aerosandbox/numpy/array.py
|
scivm/AeroSandbox
|
616c579e49bc13c3023364773705eaac7df10da7
|
[
"MIT"
] | null | null | null |
aerosandbox/numpy/array.py
|
scivm/AeroSandbox
|
616c579e49bc13c3023364773705eaac7df10da7
|
[
"MIT"
] | null | null | null |
import numpy as _onp
import casadi as _cas
from typing import List, Tuple
from aerosandbox.numpy.determine_type import is_casadi_type
def array(array_like, dtype=None):
"""
Initializes a new array. Creates a NumPy array if possible; if not, creates a CasADi array.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.array.html
"""
if is_casadi_type(array_like, recursive=False): # If you were literally given a CasADi array, just return it
# Handles inputs like cas.DM([1, 2, 3])
return array_like
elif not is_casadi_type(array_like,
recursive=True): # If you were given a list of iterables that don't have CasADi types:
# Handles inputs like [[1, 2, 3], [4, 5, 6]]
return _onp.array(array_like, dtype=dtype)
else:
# Handles inputs like [[opti_var_1, opti_var_2], [opti_var_3, opti_var_4]]
def make_row(contents: List):
try:
return _cas.horzcat(*contents)
            except Exception:
return contents
return _cas.vertcat(
*[
make_row(row)
for row in array_like
]
)
def concatenate(arrays: Tuple, axis: int = 0):
"""
Join a sequence of arrays along an existing axis. Returns a NumPy array if possible; if not, returns a CasADi array.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html
"""
if not is_casadi_type(arrays, recursive=True):
return _onp.concatenate(arrays, axis=axis)
else:
if axis == 0:
return _cas.vertcat(*arrays)
elif axis == 1:
return _cas.horzcat(*arrays)
else:
raise ValueError("CasADi-backend arrays can only be 1D or 2D, so `axis` must be 0 or 1.")
def stack(arrays: Tuple, axis: int = 0):
"""
Join a sequence of arrays along a new axis. Returns a NumPy array if possible; if not, returns a CasADi array.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.stack.html
"""
if not is_casadi_type(arrays, recursive=True):
return _onp.stack(arrays, axis=axis)
else:
### Validate stackability
for array in arrays:
if is_casadi_type(array, recursive=False):
if not array.shape[1] == 1:
raise ValueError("Can only stack Nx1 CasADi arrays!")
else:
if not len(array.shape) == 1:
raise ValueError("Can only stack 1D NumPy ndarrays alongside CasADi arrays!")
if axis == 0 or axis == -2:
return _cas.transpose(_cas.horzcat(*arrays))
elif axis == 1 or axis == -1:
return _cas.horzcat(*arrays)
else:
raise ValueError("CasADi-backend arrays can only be 1D or 2D, so `axis` must be 0 or 1.")
def hstack(arrays):
if not is_casadi_type(arrays, recursive=True):
return _onp.hstack(arrays)
else:
raise ValueError(
"Use `np.stack()` or `np.concatenate()` instead of `np.hstack()` when dealing with mixed-backend arrays.")
def vstack(arrays):
if not is_casadi_type(arrays, recursive=True):
return _onp.vstack(arrays)
else:
raise ValueError(
"Use `np.stack()` or `np.concatenate()` instead of `np.vstack()` when dealing with mixed-backend arrays.")
def dstack(arrays):
if not is_casadi_type(arrays, recursive=True):
return _onp.dstack(arrays)
else:
raise ValueError(
"Use `np.stack()` or `np.concatenate()` instead of `np.dstack()` when dealing with mixed-backend arrays.")
def length(array) -> int:
"""
Returns the length of an 1D-array-like object. An extension of len() with slightly different functionality.
Args:
array:
Returns:
"""
if not is_casadi_type(array):
try:
return len(array)
except TypeError:
return 1
else:
if array.shape[0] != 1:
return array.shape[0]
else:
return array.shape[1]
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.diag.html
"""
if not is_casadi_type(v):
return _onp.diag(v, k=k)
else:
if k != 0:
raise NotImplementedError("Should be super possible, just haven't had the need yet.")
if 1 in v.shape:
return _cas.diag(v)
elif v.shape[0] == v.shape[1]:
raise NotImplementedError("Should be super possible, just haven't had the need yet.")
else:
raise ValueError("Cannot return the diagonal of a non-square matrix.")
def roll(a, shift, axis: int = None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at the first.
See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.roll.html
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
Returns
-------
res : ndarray
Output array, with the same shape as a.
"""
if not is_casadi_type(a):
return _onp.roll(a, shift, axis=axis)
else: # TODO add some checking to make sure shift < len(a), or shift is modulo'd down by len(a).
# assert shift < a.shape[axis]
if 1 in a.shape and axis == 0:
return _cas.vertcat(a[-shift, :], a[:-shift, :])
elif axis == 0:
return _cas.vertcat(a.T[:, -shift], a.T[:, :-shift]).T
elif axis == 1:
return _cas.horzcat(a[:, -shift], a[:, :-shift])
elif axis is None:
return roll(a, shift=shift, axis=0)
else:
raise Exception("CasADi types can only be up to 2D, so `axis` must be None, 0, or 1.")
def max(a):
"""
Returns the maximum value of an array
"""
try:
return _onp.max(a)
except TypeError:
return _cas.mmax(a)
def min(a):
"""
Returns the minimum value of an array
"""
try:
return _onp.min(a)
except TypeError:
return _cas.mmin(a)
def reshape(a, newshape):
"""Gives a new shape to an array without changing its data."""
if not is_casadi_type(a):
return _onp.reshape(a, newshape)
else:
return _cas.reshape(a, newshape)
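# --- Backend-dispatch demonstration (illustrative; not part of the module API) ---
# Purely numeric inputs stay on the NumPy path, while anything containing a CasADi
# symbol is routed to the CasADi constructors defined above.
def _demo_backend_dispatch():
    a = array([[1.0, 2.0], [3.0, 4.0]])                       # plain numpy.ndarray
    x = _cas.MX.sym("x")                                      # symbolic CasADi scalar
    b = array([[x, 2.0], [3.0, 4.0]])                         # built via horzcat/vertcat
    c = concatenate((array([1.0, 2.0]), array([3.0, 4.0])))   # numpy path, axis=0
    return type(a), type(b), type(c)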
| 30.138249
| 120
| 0.600612
|
6db82d600df199c54243326ce341288eb4a9fb0f
| 720
|
py
|
Python
|
flask_hougou.py
|
ColdFc/test
|
a0be1d71cee99351763205efb6224142ec208687
|
[
"MIT"
] | 9
|
2019-05-24T07:01:18.000Z
|
2019-05-24T12:35:20.000Z
|
flask_hougou.py
|
ColdFc/test
|
a0be1d71cee99351763205efb6224142ec208687
|
[
"MIT"
] | null | null | null |
flask_hougou.py
|
ColdFc/test
|
a0be1d71cee99351763205efb6224142ec208687
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import request
app = Flask(__name__, static_url_path="", static_folder="static")
class Config(object):
DEBUG = True
app.config.from_object(Config)
@app.route("/", methods=["GET"])
def index():
with open("templates/home.html", "rb") as f:
content = f.read()
return content
@app.route("/login.html", methods=["GET", "POST"])
def login():
with open("templates/login.html", "rb") as f:
content = f.read()
return content
@app.route("/register.html", methods=["POST", "GET"])
def register():
with open("templates/register.html", "rb") as f:
content = f.read()
return content
if __name__ == '__main__':
app.run(debug=True)
| 19.459459
| 65
| 0.634722
|
d299a8e08b4562ae6130f10390f98c2d94a6764a
| 4,432
|
py
|
Python
|
client/tools/interop_cli.py
|
derekbt96/interop
|
13f044c831d81745f75c7baa46d0aba6694f01a9
|
[
"Apache-2.0"
] | null | null | null |
client/tools/interop_cli.py
|
derekbt96/interop
|
13f044c831d81745f75c7baa46d0aba6694f01a9
|
[
"Apache-2.0"
] | null | null | null |
client/tools/interop_cli.py
|
derekbt96/interop
|
13f044c831d81745f75c7baa46d0aba6694f01a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# CLI for interacting with interop server.
from __future__ import print_function
import argparse
import datetime
import getpass
import logging
import pprint
import sys
import time
from auvsi_suas.client.client import AsyncClient
from auvsi_suas.client.types import Telemetry
from mavlink_proxy import MavlinkProxy
from upload_odlcs import upload_odlcs
logger = logging.getLogger(__name__)
def missions(args, client):
missions = client.get_missions().result()
for m in missions:
pprint.pprint(m.serialize())
def odlcs(args, client):
if args.odlc_dir:
upload_odlcs(client, args.odlc_dir, args.team_id,
args.actionable_override)
else:
odlcs = client.get_odlcs().result()
for odlc in odlcs:
pprint.pprint(odlc.serialize())
def probe(args, client):
while True:
start_time = datetime.datetime.now()
telemetry = Telemetry(0, 0, 0, 0)
telemetry_resp = client.post_telemetry(telemetry).result()
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
logger.info('Executed interop. Total latency: %f', elapsed_time)
delay_time = args.interop_time - elapsed_time
if delay_time > 0:
try:
time.sleep(delay_time)
except KeyboardInterrupt:
sys.exit(0)
def mavlink(args, client):
proxy = MavlinkProxy(args.device, client)
proxy.proxy()
def main():
# Setup logging
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format='%(asctime)s: %(name)s: %(levelname)s: %(message)s')
# Parse command line args.
parser = argparse.ArgumentParser(description='AUVSI SUAS Interop CLI.')
parser.add_argument(
'--url', required=True, help='URL for interoperability.')
parser.add_argument(
'--username', required=True, help='Username for interoperability.')
parser.add_argument('--password', help='Password for interoperability.')
subparsers = parser.add_subparsers(help='Sub-command help.')
subparser = subparsers.add_parser('missions', help='Get missions.')
subparser.set_defaults(func=missions)
subparser = subparsers.add_parser(
'odlcs',
help='Upload odlcs.',
description='''Download or upload odlcs to/from the interoperability
server.
Without extra arguments, this prints all odlcs that have been uploaded to the
server.
With --odlc_dir, this uploads new odlcs to the server.
This tool searches for odlc JSON and images files within --odlc_dir
conforming to the 2017 Object File Format and uploads the odlc
characteristics and thumbnails to the interoperability server.
There is no deduplication logic. Odlcs will be uploaded multiple times, as
unique odlcs, if the tool is run multiple times.''',
formatter_class=argparse.RawDescriptionHelpFormatter)
subparser.set_defaults(func=odlcs)
subparser.add_argument(
'--odlc_dir',
help='Enables odlc upload. Directory containing odlc data.')
subparser.add_argument(
'--team_id',
help='''The username of the team on whose behalf to submit odlcs.
Must be admin user to specify.''')
subparser.add_argument(
'--actionable_override',
help='''Manually sets all the odlcs in the odlc dir to be
actionable. Must be admin user to specify.''')
subparser = subparsers.add_parser('probe', help='Send dummy requests.')
subparser.set_defaults(func=probe)
subparser.add_argument(
'--interop_time',
type=float,
default=1.0,
help='Time between sent requests (sec).')
subparser = subparsers.add_parser(
'mavlink',
help='''Receive MAVLink GLOBAL_POSITION_INT packets and
forward as telemetry to interop server.''')
subparser.set_defaults(func=mavlink)
subparser.add_argument(
'--device',
type=str,
help='pymavlink device name to read from. E.g. tcp:localhost:8080.')
# Parse args, get password if not provided.
args = parser.parse_args()
if args.password:
password = args.password
else:
password = getpass.getpass('Interoperability Password: ')
# Create client and dispatch subcommand.
client = AsyncClient(args.url, args.username, password)
args.func(args, client)
if __name__ == '__main__':
main()
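# --- Example invocations (illustrative; the URL, username and paths are placeholders) ---
#   ./interop_cli.py --url http://localhost:8000 --username testuser missions
#   ./interop_cli.py --url http://localhost:8000 --username testuser odlcs --odlc_dir /tmp/odlcs
#   ./interop_cli.py --url http://localhost:8000 --username testuser probe --interop_time 0.5
#   ./interop_cli.py --url http://localhost:8000 --username testuser mavlink --device tcp:localhost:8080
# When --password is omitted, the password is prompted for interactively (see main()).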
| 30.777778
| 77
| 0.684341
|
e5348b0815c09d1c91c919bfe004799a2a2ab442
| 3,680
|
py
|
Python
|
src/dab_tuning_lib.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | null | null | null |
src/dab_tuning_lib.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | null | null | null |
src/dab_tuning_lib.py
|
Opendigitalradio/ODR-StaticPrecorrection
|
984c14bf46ebd7dc66954a653c8f17212ed97efb
|
[
"MIT"
] | 1
|
2019-06-20T02:37:34.000Z
|
2019-06-20T02:37:34.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import src.dab_util as du
def calc_signal_sholder_ratio(fft, sampling_rate, debug = False, debug_path="", suffix=""):
fft_size = fft.shape[0]
n_sig = (du.freq_to_fft_sample(-du.c["bw"]/2., fft_size, sampling_rate),
du.freq_to_fft_sample( du.c["bw"]/2., fft_size, sampling_rate))
sig = np.mean(fft[n_sig[0]:n_sig[1]])
n_noise = (du.freq_to_fft_sample(-3000000., fft_size, sampling_rate),
du.freq_to_fft_sample(-2500000, fft_size, sampling_rate))
noise = np.mean(fft[n_noise[0]:n_noise[1]])
n_sholder = (du.freq_to_fft_sample(-1500000, fft_size, sampling_rate),
du.freq_to_fft_sample(-du.c["bw"]/2, fft_size, sampling_rate))
sholder = np.mean(fft[n_sholder[0]:n_sholder[1]])
if debug == True:
print(n_sig, n_sholder, n_noise)
plt.plot(fft)
plt.plot((n_sig[0], n_sig[1]), (sig, sig), linewidth=5, color='g')
plt.plot((n_noise[0], n_noise[1]), (noise, noise), linewidth=5, color='r')
plt.plot((n_sholder[0], n_sholder[1]), (sholder, sholder), linewidth=5, color='y')
        if debug_path: plt.savefig(debug_path + "/" + str(sholder) + suffix + ".png")
plt.show()
plt.clf()
return sholder
def calc_signal_sholder_peak_ratio(fft, sampling_rate, debug = False, debug_path="", suffix=""):
fft_size = fft.shape[0]
n_sig = (du.freq_to_fft_sample(-du.c["bw"]/2., fft_size, sampling_rate),
du.freq_to_fft_sample( du.c["bw"]/2., fft_size, sampling_rate))
sig = np.mean(fft[n_sig[0]:n_sig[1]])
n_noise = (du.freq_to_fft_sample(-3000000., fft_size, sampling_rate),
du.freq_to_fft_sample(-2500000, fft_size, sampling_rate))
noise = np.mean(fft[n_noise[0]:n_noise[1]])
n_sholder = (du.freq_to_fft_sample(-1500000, fft_size, sampling_rate),
du.freq_to_fft_sample(-du.c["bw"]/2, fft_size, sampling_rate))
sholder = np.mean(fft[n_sholder[0]:n_sholder[1]])
loss = sholder/sig
if debug == True:
print(n_sig, n_sholder, n_noise)
plt.plot(fft)
plt.plot((n_sig[0], n_sig[1]), (sig, sig), linewidth=5, color='g')
plt.plot((n_noise[0], n_noise[1]), (noise, noise), linewidth=5, color='r')
plt.plot((n_sholder[0], n_sholder[1]), (sholder, sholder), linewidth=5, color='y')
if debug_path: plt.savefig(debug_path + "/" + str(loss) + suffix + ".png")
plt.show()
plt.clf()
return loss
def calc_max_in_freq_range(fft, sampling_rate, f_start, f_end, debug = False, debug_path="", suffix=""):
fft_size = fft.shape[0]
n_sig = (du.freq_to_fft_sample(f_start, fft_size, sampling_rate),
du.freq_to_fft_sample(f_end, fft_size, sampling_rate))
sig = np.max(fft[n_sig[0]:n_sig[1]])
if debug == True:
print(n_sig)
plt.plot(fft)
plt.plot((n_sig[0], n_sig[1]), (sig, sig), linewidth=5, color='g')
        if debug_path: plt.savefig(debug_path + "/" + str(sig) + suffix + ".png")
plt.show()
plt.clf()
return sig
def calc_mean_in_freq_range(fft, sampling_rate, f_start, f_end, debug = False, debug_path="", suffix=""):
fft_size = fft.shape[0]
n_sig = (du.freq_to_fft_sample(f_start, fft_size, sampling_rate),
du.freq_to_fft_sample(f_end, fft_size, sampling_rate))
sig = np.mean(fft[n_sig[0]:n_sig[1]])
if debug == True:
print(n_sig)
plt.plot(fft)
plt.plot((n_sig[0], n_sig[1]), (sig, sig), linewidth=5, color='g')
        if debug_path: plt.savefig(debug_path + "/" + str(sig) + suffix + ".png")
plt.show()
plt.clf()
return sig
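# --- Usage sketch (illustrative): feed a magnitude spectrum into the metrics above ---
# The sampling rate and the synthetic white-noise capture are assumptions for
# demonstration only; in practice `spectrum` comes from a recorded DAB baseband signal.
def _demo_shoulder_metrics(sampling_rate=8192000.0, n_samples=8192):
    rng = np.random.default_rng(0)
    samples = rng.standard_normal(n_samples) + 1j * rng.standard_normal(n_samples)
    spectrum = np.abs(np.fft.fftshift(np.fft.fft(samples)))
    shoulder = calc_signal_sholder_ratio(spectrum, sampling_rate)
    ratio = calc_signal_sholder_peak_ratio(spectrum, sampling_rate)
    return shoulder, ratio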
| 39.569892
| 105
| 0.625815
|
b01162212f2ec87fd88da8b75c2b06a24c351949
| 31,643
|
py
|
Python
|
sec_certs/dataset/common_criteria.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | 2
|
2021-03-24T11:56:15.000Z
|
2021-04-12T12:22:16.000Z
|
sec_certs/dataset/common_criteria.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | 73
|
2021-04-12T14:04:04.000Z
|
2022-03-31T15:40:26.000Z
|
sec_certs/dataset/common_criteria.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | 3
|
2021-03-26T16:15:49.000Z
|
2021-05-10T07:26:23.000Z
|
import copy
import itertools
import json
import locale
import shutil
import tempfile
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Callable, ClassVar, Dict, Iterator, List, Mapping, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup, Tag
from sec_certs import helpers as helpers
from sec_certs import parallel_processing as cert_processing
from sec_certs.config.configuration import config
from sec_certs.dataset.dataset import Dataset, logger
from sec_certs.dataset.protection_profile import ProtectionProfileDataset
from sec_certs.model.dependency_finder import DependencyFinder
from sec_certs.sample.cc_maintenance_update import CommonCriteriaMaintenanceUpdate
from sec_certs.sample.certificate import Certificate
from sec_certs.sample.common_criteria import CommonCriteriaCert
from sec_certs.sample.protection_profile import ProtectionProfile
from sec_certs.serialization.json import ComplexSerializableType, CustomJSONDecoder, serialize
class CCDataset(Dataset, ComplexSerializableType):
@dataclass
class DatasetInternalState(ComplexSerializableType):
meta_sources_parsed: bool = False
pdfs_downloaded: bool = False
pdfs_converted: bool = False
certs_analyzed: bool = False
def __bool__(self):
return any(vars(self))
certs: Dict[str, "CommonCriteriaCert"]
# TODO: Figure out how to type this. The problem is that this breaks covariance of the types, which mypy doesn't allow.
def __init__(
self,
certs: Mapping[str, "Certificate"],
root_dir: Path,
name: str = "dataset name",
description: str = "dataset_description",
state: Optional[DatasetInternalState] = None,
):
super().__init__(certs, root_dir, name, description)
if state is None:
state = self.DatasetInternalState()
self.state = state
def __iter__(self) -> Iterator[CommonCriteriaCert]:
yield from self.certs.values()
def to_dict(self):
return {**{"state": self.state}, **super().to_dict()}
def to_pandas(self):
df = pd.DataFrame([x.pandas_tuple for x in self.certs.values()], columns=CommonCriteriaCert.pandas_columns)
df = df.set_index("dgst")
df.not_valid_before = pd.to_datetime(df.not_valid_before, infer_datetime_format=True)
df.not_valid_after = pd.to_datetime(df.not_valid_after, infer_datetime_format=True)
df = df.astype({"category": "category", "status": "category", "scheme": "category"})
df = df.fillna(value=np.nan)
return df
@classmethod
def from_dict(cls, dct: Dict):
dset = super().from_dict(dct)
dset.state = copy.deepcopy(dct["state"])
return dset
@Dataset.root_dir.setter # type: ignore
def root_dir(self, new_dir: Union[str, Path]):
old_dset = copy.deepcopy(self)
Dataset.root_dir.fset(self, new_dir) # type: ignore
self.set_local_paths()
if self.state and old_dset.root_dir != Path(".."):
logger.info(f"Changing root dir of partially processed dataset. All contents will get copied to {new_dir}")
self.copy_dataset_contents(old_dset)
self.to_json()
def copy_dataset_contents(self, old_dset: "CCDataset"):
if old_dset.state.meta_sources_parsed:
try:
shutil.copytree(old_dset.web_dir, self.web_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
if old_dset.state.pdfs_downloaded:
try:
shutil.copytree(old_dset.certs_dir, self.certs_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
if old_dset.state.certs_analyzed:
try:
shutil.copytree(old_dset.auxillary_datasets_dir, self.auxillary_datasets_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
@property
def certs_dir(self) -> Path:
return self.root_dir / "certs"
@property
def reports_dir(self) -> Path:
return self.certs_dir / "reports"
@property
def reports_pdf_dir(self) -> Path:
return self.reports_dir / "pdf"
@property
def reports_txt_dir(self) -> Path:
return self.reports_dir / "txt"
@property
def targets_dir(self) -> Path:
return self.certs_dir / "targets"
@property
def targets_pdf_dir(self) -> Path:
return self.targets_dir / "pdf"
@property
def targets_txt_dir(self) -> Path:
return self.targets_dir / "txt"
@property
def pp_dataset_path(self) -> Path:
return self.auxillary_datasets_dir / "pp_dataset.json"
BASE_URL: ClassVar[str] = "https://www.commoncriteriaportal.org"
HTML_PRODUCTS_URL = {
"cc_products_active.html": BASE_URL + "/products/",
"cc_products_archived.html": BASE_URL + "/products/index.cfm?archived=1",
}
HTML_LABS_URL = {"cc_labs.html": BASE_URL + "/labs"}
CSV_PRODUCTS_URL = {
"cc_products_active.csv": BASE_URL + "/products/certified_products.csv",
"cc_products_archived.csv": BASE_URL + "/products/certified_products-archived.csv",
}
PP_URL = {
"cc_pp_active.html": BASE_URL + "/pps/",
"cc_pp_collaborative.html": BASE_URL + "/pps/collaborativePP.cfm?cpp=1",
"cc_pp_archived.html": BASE_URL + "/pps/index.cfm?archived=1",
}
PP_CSV = {"cc_pp_active.csv": BASE_URL + "/pps/pps.csv", "cc_pp_archived.csv": BASE_URL + "/pps/pps-archived.csv"}
@property
def active_html_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.HTML_PRODUCTS_URL.items() if "active" in y]
@property
def archived_html_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.HTML_PRODUCTS_URL.items() if "archived" in y]
@property
def active_csv_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.CSV_PRODUCTS_URL.items() if "active" in y]
@property
def archived_csv_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.CSV_PRODUCTS_URL.items() if "archived" in y]
@classmethod
def from_web_latest(cls):
with tempfile.TemporaryDirectory() as tmp_dir:
dset_path = Path(tmp_dir) / "cc_latest_dataset.json"
helpers.download_file(config.cc_latest_snapshot, dset_path)
return cls.from_json(dset_path)
def set_local_paths(self):
for cert in self:
cert.set_local_paths(self.reports_pdf_dir, self.targets_pdf_dir, self.reports_txt_dir, self.targets_txt_dir)
def _merge_certs(self, certs: Dict[str, "CommonCriteriaCert"], cert_source: Optional[str] = None):
"""
Merges dictionary of certificates into the dataset. Assuming they all are CommonCriteria certificates
"""
new_certs = {x.dgst: x for x in certs.values() if x not in self}
certs_to_merge = [x for x in certs.values() if x in self]
self.certs.update(new_certs)
for crt in certs_to_merge:
self[crt.dgst].merge(crt, cert_source)
logger.info(f"Added {len(new_certs)} new and merged further {len(certs_to_merge)} certificates to the dataset.")
def download_csv_html_resources(self, get_active: bool = True, get_archived: bool = True):
self.web_dir.mkdir(parents=True, exist_ok=True)
html_items = []
csv_items = []
if get_active is True:
html_items.extend(self.active_html_tuples)
csv_items.extend(self.active_csv_tuples)
if get_archived is True:
html_items.extend(self.archived_html_tuples)
csv_items.extend(self.archived_csv_tuples)
html_urls, html_paths = [x[0] for x in html_items], [x[1] for x in html_items]
csv_urls, csv_paths = [x[0] for x in csv_items], [x[1] for x in csv_items]
logger.info("Downloading required csv and html files.")
self._download_parallel(html_urls, html_paths)
self._download_parallel(csv_urls, csv_paths)
@serialize
def process_protection_profiles(self, to_download: bool = True, keep_metadata: bool = True):
logger.info("Processing protection profiles.")
constructor: Dict[bool, Callable[..., ProtectionProfileDataset]] = {
True: ProtectionProfileDataset.from_web,
False: ProtectionProfileDataset.from_json,
}
if to_download is True and not self.auxillary_datasets_dir.exists():
self.auxillary_datasets_dir.mkdir()
pp_dataset = constructor[to_download](self.pp_dataset_path)
for cert in self:
if cert.protection_profiles is None:
raise RuntimeError("Building of the dataset probably failed - this should not be happening.")
cert.protection_profiles = {pp_dataset.pps.get((x.pp_name, x.pp_link), x) for x in cert.protection_profiles}
if not keep_metadata:
self.pp_dataset_path.unlink()
@serialize
def get_certs_from_web(
self, to_download: bool = True, keep_metadata: bool = True, get_active: bool = True, get_archived: bool = True
):
"""
Parses all metadata about certificates
"""
if to_download is True:
self.download_csv_html_resources(get_active, get_archived)
logger.info("Adding CSV certificates to CommonCriteria dataset.")
csv_certs = self._get_all_certs_from_csv(get_active, get_archived)
self._merge_certs(csv_certs, cert_source="csv")
# TODO: Someway along the way, 3 certificates get lost. Investigate and fix.
logger.info("Adding HTML certificates to CommonCriteria dataset.")
html_certs = self._get_all_certs_from_html(get_active, get_archived)
self._merge_certs(html_certs, cert_source="html")
logger.info(f"The resulting dataset has {len(self)} certificates.")
if not keep_metadata:
shutil.rmtree(self.web_dir)
self.set_local_paths()
self.state.meta_sources_parsed = True
def _get_all_certs_from_csv(self, get_active: bool, get_archived: bool) -> Dict[str, "CommonCriteriaCert"]:
"""
Creates dictionary of new certificates from csv sources.
"""
csv_sources = list(self.CSV_PRODUCTS_URL.keys())
csv_sources = [x for x in csv_sources if "active" not in x or get_active]
csv_sources = [x for x in csv_sources if "archived" not in x or get_archived]
new_certs = {}
for file in csv_sources:
partial_certs = self._parse_single_csv(self.web_dir / file)
logger.info(f"Parsed {len(partial_certs)} certificates from: {file}")
new_certs.update(partial_certs)
return new_certs
@staticmethod
def _parse_single_csv(file: Path) -> Dict[str, "CommonCriteriaCert"]:
"""
Using pandas, this parses a single CSV file.
"""
def map_ip_to_hostname(url: str) -> str:
if not url:
return url
tokens = url.split("/")
relative_path = "/" + "/".join(tokens[3:])
return CCDataset.BASE_URL + relative_path
def _get_primary_key_str(row: Tag):
prim_key = row["category"] + row["cert_name"] + row["report_link"]
return prim_key
if "active" in str(file):
cert_status = "active"
else:
cert_status = "archived"
csv_header = [
"category",
"cert_name",
"manufacturer",
"scheme",
"security_level",
"protection_profiles",
"not_valid_before",
"not_valid_after",
"report_link",
"st_link",
"maintenance_date",
"maintenance_title",
"maintenance_report_link",
"maintenance_st_link",
]
# TODO: Now skipping bad lines, smarter heuristics to be built for dumb files
df = pd.read_csv(file, engine="python", encoding="windows-1252", error_bad_lines=False)
df = df.rename(columns={x: y for (x, y) in zip(list(df.columns), csv_header)})
df["is_maintenance"] = ~df.maintenance_title.isnull()
df = df.fillna(value="")
df[["not_valid_before", "not_valid_after", "maintenance_date"]] = df[
["not_valid_before", "not_valid_after", "maintenance_date"]
].apply(pd.to_datetime)
df["dgst"] = df.apply(lambda row: helpers.get_first_16_bytes_sha256(_get_primary_key_str(row)), axis=1)
df_base = df.loc[~df.is_maintenance].copy()
df_main = df.loc[df.is_maintenance].copy()
df_base.report_link = df_base.report_link.map(map_ip_to_hostname)
df_base.st_link = df_base.st_link.map(map_ip_to_hostname)
df_main.maintenance_report_link = df_main.maintenance_report_link.map(map_ip_to_hostname)
df_main.maintenance_st_link = df_main.maintenance_st_link.map(map_ip_to_hostname)
n_all = len(df_base)
n_deduplicated = len(df_base.drop_duplicates(subset=["dgst"]))
if (n_dup := n_all - n_deduplicated) > 0:
logger.warning(f"The CSV {file} contains {n_dup} duplicates by the primary key.")
df_base = df_base.drop_duplicates(subset=["dgst"])
df_main = df_main.drop_duplicates()
profiles = {
x.dgst: set(
[ProtectionProfile(pp_name=y) for y in helpers.sanitize_protection_profiles(x.protection_profiles)]
)
for x in df_base.itertuples()
}
updates: Dict[str, Set] = {x.dgst: set() for x in df_base.itertuples()}
for x in df_main.itertuples():
updates[x.dgst].add(
CommonCriteriaCert.MaintenanceReport(
x.maintenance_date.date(), x.maintenance_title, x.maintenance_report_link, x.maintenance_st_link
)
)
certs = {
x.dgst: CommonCriteriaCert(
cert_status,
x.category,
x.cert_name,
x.manufacturer,
x.scheme,
x.security_level,
x.not_valid_before,
x.not_valid_after,
x.report_link,
x.st_link,
None,
None,
profiles.get(x.dgst, None),
updates.get(x.dgst, None),
None,
None,
None,
)
for x in df_base.itertuples()
}
return certs
def _get_all_certs_from_html(self, get_active: bool, get_archived: bool) -> Dict[str, "CommonCriteriaCert"]:
"""
Prepares dictionary of certificates from all html files.
"""
html_sources = list(self.HTML_PRODUCTS_URL.keys())
if get_active is False:
html_sources = [x for x in html_sources if "active" not in x]
if get_archived is False:
html_sources = [x for x in html_sources if "archived" not in x]
new_certs = {}
for file in html_sources:
partial_certs = self._parse_single_html(self.web_dir / file)
logger.info(f"Parsed {len(partial_certs)} certificates from: {file}")
new_certs.update(partial_certs)
return new_certs
@staticmethod
def _parse_single_html(file: Path) -> Dict[str, "CommonCriteriaCert"]:
"""
Prepares a dictionary of certificates from a single html file.
"""
def _get_timestamp_from_footer(footer):
locale.setlocale(locale.LC_ALL, "en_US")
footer_text = list(footer.stripped_strings)[0]
date_string = footer_text.split(",")[1:3]
time_string = footer_text.split(",")[3].split(" at ")[1]
formatted_datetime = date_string[0] + date_string[1] + " " + time_string
return datetime.strptime(formatted_datetime, " %B %d %Y %I:%M %p")
def _parse_table(
soup: BeautifulSoup, cert_status: str, table_id: str, category_string: str
) -> Dict[str, "CommonCriteriaCert"]:
tables = soup.find_all("table", id=table_id)
assert len(tables) <= 1
if not tables:
return {}
table = tables[0]
rows = list(table.find_all("tr"))
# header, footer = rows[0], rows[1]
body = rows[2:]
# TODO: It's possible to obtain timestamp of the moment when the list was generated. It's identical for each table and should thus only be obtained once. Not necessarily in each table
# timestamp = _get_timestamp_from_footer(footer)
            # TODO: Do we have use for number of expected certs? We get rid of duplicates, so no use for assert expected == actual
# caption_str = str(table.findAll('caption'))
# n_expected_certs = int(caption_str.split(category_string + ' – ')[1].split(' Certified Products')[0])
table_certs = {
x.dgst: x for x in [CommonCriteriaCert.from_html_row(row, cert_status, category_string) for row in body]
}
return table_certs
if "active" in str(file):
cert_status = "active"
else:
cert_status = "archived"
cc_cat_abbreviations = ["AC", "BP", "DP", "DB", "DD", "IC", "KM", "MD", "MF", "NS", "OS", "OD", "DG", "TC"]
cc_table_ids = ["tbl" + x for x in cc_cat_abbreviations]
cc_categories = [
"Access Control Devices and Systems",
"Boundary Protection Devices and Systems",
"Data Protection",
"Databases",
"Detection Devices and Systems",
"ICs, Smart Cards and Smart Card-Related Devices and Systems",
"Key Management Systems",
"Mobility",
"Multi-Function Devices",
"Network and Network-Related Devices and Systems",
"Operating Systems",
"Other Devices and Systems",
"Products for Digital Signatures",
"Trusted Computing",
]
cat_dict = {x: y for (x, y) in zip(cc_table_ids, cc_categories)}
with file.open("r") as handle:
soup = BeautifulSoup(handle, "html5lib")
certs = {}
for key, val in cat_dict.items():
certs.update(_parse_table(soup, cert_status, key, val))
return certs
def _download_reports(self, fresh=True):
self.reports_pdf_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_download(fresh) and x.report_link]
cert_processing.process_parallel(
CommonCriteriaCert.download_pdf_report,
certs_to_process,
config.n_threads,
progress_bar_desc="Downloading reports",
)
def _download_targets(self, fresh=True):
self.targets_pdf_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_download(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.download_pdf_target,
certs_to_process,
config.n_threads,
progress_bar_desc="Downloading targets",
)
@serialize
def download_all_pdfs(self, fresh: bool = True):
if self.state.meta_sources_parsed is False:
logger.error("Attempting to download pdfs while not having csv/html meta-sources parsed. Returning.")
return
logger.info("Downloading CC sample reports")
self._download_reports(fresh)
logger.info("Downloading CC security targets")
self._download_targets(fresh)
if fresh is True:
logger.info("Attempting to re-download failed report links.")
self._download_reports(False)
logger.info("Attempting to re-download failed security target links.")
self._download_targets(False)
self.state.pdfs_downloaded = True
def _convert_reports_to_txt(self, fresh: bool = True):
self.reports_txt_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_convert(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.convert_report_pdf,
certs_to_process,
config.n_threads,
progress_bar_desc="Converting reports to txt",
)
def _convert_targets_to_txt(self, fresh: bool = True):
self.targets_txt_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.st_is_ok_to_convert(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.convert_target_pdf,
certs_to_process,
config.n_threads,
progress_bar_desc="Converting targets to txt",
)
@serialize
def convert_all_pdfs(self, fresh: bool = True):
if self.state.pdfs_downloaded is False:
logger.info("Attempting to convert pdf while not having them downloaded. Returning.")
return
logger.info("Converting CC sample reports to .txt")
self._convert_reports_to_txt(fresh)
logger.info("Converting CC security targets to .txt")
self._convert_targets_to_txt(fresh)
if fresh is True:
logger.info("Attempting to re-convert failed report pdfs")
self._convert_reports_to_txt(False)
logger.info("Attempting to re-convert failed target pdfs")
self._convert_targets_to_txt(False)
self.state.pdfs_converted = True
def update_with_certs(self, certs: List[CommonCriteriaCert]):
if any([x not in self for x in certs]):
logger.warning("Updating dataset with certificates outside of the dataset!")
self.certs.update({x.dgst: x for x in certs})
def _extract_report_metadata(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_metadata,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report metadata",
)
self.update_with_certs(processed_certs)
def _extract_targets_metadata(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_metadata,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target metadata",
)
self.update_with_certs(processed_certs)
def extract_pdf_metadata(self, fresh: bool = True):
logger.info("Extracting pdf metadata from CC dataset")
self._extract_report_metadata(fresh)
self._extract_targets_metadata(fresh)
def _extract_report_frontpage(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_frontpage,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report frontpages",
)
self.update_with_certs(processed_certs)
def _extract_targets_frontpage(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_frontpage,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target frontpages",
)
self.update_with_certs(processed_certs)
def extract_pdf_frontpage(self, fresh: bool = True):
logger.info("Extracting pdf frontpages from CC dataset.")
self._extract_report_frontpage(fresh)
self._extract_targets_frontpage(fresh)
def _extract_report_keywords(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_keywords,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report keywords",
)
self.update_with_certs(processed_certs)
def _extract_targets_keywords(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_keywords,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target keywords",
)
self.update_with_certs(processed_certs)
def extract_pdf_keywords(self, fresh: bool = True):
logger.info("Extracting pdf keywords from CC dataset.")
self._extract_report_keywords(fresh)
self._extract_targets_keywords(fresh)
def _extract_data(self, fresh: bool = True):
logger.info("Extracting various stuff from converted txt filed from CC dataset.")
self.extract_pdf_metadata(fresh)
self.extract_pdf_frontpage(fresh)
self.extract_pdf_keywords(fresh)
if fresh is True:
logger.info("Attempting to re-extract failed data from report txts")
self._extract_report_metadata(False)
self._extract_report_frontpage(False)
self._extract_report_keywords(False)
logger.info("Attempting to re-extract failed data from ST txts")
self._extract_targets_metadata(False)
self._extract_targets_frontpage(False)
self._extract_targets_keywords(False)
def _compute_cert_labs(self):
logger.info("Deriving information about laboratories involved in certification.")
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze()]
for cert in certs_to_process:
cert.compute_heuristics_cert_lab()
def _compute_cert_ids(self):
logger.info("Deriving information about sample ids from pdf scan.")
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze()]
for cert in certs_to_process:
cert.compute_heuristics_cert_id()
def _compute_heuristics(self, use_nist_cpe_matching_dict: bool = True):
self._compute_cert_labs()
self._compute_cert_ids()
self._compute_dependencies()
self.compute_cpe_heuristics()
self.compute_related_cves(use_nist_cpe_matching_dict=use_nist_cpe_matching_dict)
def _compute_dependencies(self):
finder = DependencyFinder()
finder.fit(self.certs)
for dgst in self.certs:
self.certs[dgst].heuristics.directly_affecting = finder.get_directly_affecting(dgst)
self.certs[dgst].heuristics.indirectly_affecting = finder.get_indirectly_affecting(dgst)
self.certs[dgst].heuristics.directly_affected_by = finder.get_directly_affected_by(dgst)
self.certs[dgst].heuristics.indirectly_affected_by = finder.get_indirectly_affected_by(dgst)
@serialize
def analyze_certificates(self, fresh: bool = True):
if self.state.pdfs_converted is False:
logger.info(
"Attempting run analysis of txt files while not having the pdf->txt conversion done. Returning."
)
return
self._extract_data(fresh)
self._compute_heuristics()
self.state.certs_analyzed = True
def get_certs_from_name(self, cert_name: str) -> List[Certificate]:
return [crt for crt in self if crt.name == cert_name]
def process_maintenance_updates(self):
maintained_certs: List[CommonCriteriaCert] = [x for x in self if x.maintenance_updates]
updates = list(
itertools.chain.from_iterable(
[CommonCriteriaMaintenanceUpdate.get_updates_from_cc_cert(x) for x in maintained_certs]
)
)
update_dset: CCDatasetMaintenanceUpdates = CCDatasetMaintenanceUpdates(
{x.dgst: x for x in updates}, root_dir=self.certs_dir / "maintenance", name="Maintenance updates"
)
update_dset.set_local_paths()
update_dset.download_all_pdfs()
update_dset.convert_all_pdfs()
update_dset._extract_data()
def generate_cert_name_keywords(self) -> Set[str]:
df = self.to_pandas()
certificate_names = set(df["name"])
keywords = set(itertools.chain.from_iterable([x.lower().split(" ") for x in certificate_names]))
keywords.add("1.02.013")
return {x for x in keywords if len(x) > config.minimal_token_length}
class CCDatasetMaintenanceUpdates(CCDataset, ComplexSerializableType):
"""
Should be used merely for actions related to Maintenance updates: download pdfs, convert pdfs, extract data from pdfs
"""
# TODO: Types - if I use dictionary in CCDataset, I can't use more specific dictionary here (otherwise the CCDataset
# one would have to be a Mapping - not mutable)
certs: Dict[str, "CommonCriteriaMaintenanceUpdate"] # type: ignore
def __init__(
self,
certs: Mapping[str, "Certificate"],
root_dir: Path,
name: str = "dataset name",
description: str = "dataset_description",
state: Optional[CCDataset.DatasetInternalState] = None,
):
super().__init__(certs, root_dir, name, description, state)
self.state.meta_sources_parsed = True
@property
def certs_dir(self) -> Path:
return self.root_dir
def __iter__(self) -> Iterator[CommonCriteriaMaintenanceUpdate]:
yield from self.certs.values()
def _compute_heuristics(self, download_fresh_cpes: bool = False):
raise NotImplementedError
def compute_related_cves(self, download_fresh_cves: bool = False):
raise NotImplementedError
@classmethod
def from_json(cls, input_path: Union[str, Path]):
input_path = Path(input_path)
with input_path.open("r") as handle:
dset = json.load(handle, cls=CustomJSONDecoder)
return dset
def to_pandas(self):
df = pd.DataFrame(
[x.pandas_tuple for x in self.certs.values()], columns=CommonCriteriaMaintenanceUpdate.pandas_columns
)
df = df.set_index("dgst")
df.index.name = "dgst"
df.maintenance_date = pd.to_datetime(df.maintenance_date, infer_datetime_format=True)
df = df.fillna(value=np.nan)
return df
@classmethod
def from_web_latest(cls):
with tempfile.TemporaryDirectory() as tmp_dir:
dset_path = Path(tmp_dir) / "cc_maintenances_latest_dataset.json"
helpers.download_file(config.cc_maintenances_latest_snapshot, dset_path)
return cls.from_json(dset_path)
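# --- End-to-end pipeline sketch (illustrative; the root directory is a placeholder and
# every step downloads and processes a substantial amount of data) ---
def _build_cc_dataset(root: Path) -> CCDataset:
    dset = CCDataset({}, root_dir=root, name="cc_dataset", description="CC certificates")
    dset.get_certs_from_web()           # parse csv/html metadata from the CC portal
    dset.process_protection_profiles()  # attach protection-profile information
    dset.download_all_pdfs()            # certification reports + security targets
    dset.convert_all_pdfs()             # pdf -> txt
    dset.analyze_certificates()         # metadata, frontpages, keywords, heuristics
    return dset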
| 40.207116
| 195
| 0.650349
|
12d8a599b02540c52a40a84d7f4c7e59b412d3b8
| 2,023
|
py
|
Python
|
src/si/util/metrics.py
|
mpereira19/Si
|
09a85512724e4cd3a7f65e297f26149586e80fc0
|
[
"Apache-2.0"
] | null | null | null |
src/si/util/metrics.py
|
mpereira19/Si
|
09a85512724e4cd3a7f65e297f26149586e80fc0
|
[
"Apache-2.0"
] | null | null | null |
src/si/util/metrics.py
|
mpereira19/Si
|
09a85512724e4cd3a7f65e297f26149586e80fc0
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
__all__ = ['accuracy_score', 'mse', 'mse_prime', 'cross_entropy', 'cross_entropy_prime', 'r2_score']
def accuracy_score(y_true, y_pred):
"""
Classification performance metric that computes the accuracy of y_true
and y_pred.
:param numpy.array y_true: array-like of shape (n_samples,) Ground truth correct labels.
:param numpy.array y_pred: array-like of shape (n_samples,) Estimated target values.
:returns: C (float) Accuracy score.
"""
correct = 0
for true, pred in zip(y_true, y_pred):
if true == pred:
correct += 1
accuracy = correct/len(y_true)
return accuracy
def mse(y_true, y_pred):
"""
Mean squared error regression loss function.
Parameters
:param numpy.array y_true: array-like of shape (n_samples,)
Ground truth (correct) target values.
:param numpy.array y_pred: array-like of shape (n_samples,)
Estimated target values.
:returns: loss (float) A non-negative floating point value (the best value is 0.0).
"""
return np.mean(np.power(y_true-y_pred, 2))
def mse_prime(y_true, y_pred):
return 2*(y_pred-y_true)/y_true.size
def cross_entropy(y_true, y_pred):
return -(y_true * np.log(y_pred)).sum()
def cross_entropy_prime(y_true, y_pred):
return y_pred - y_true
def r2_score(y_true, y_pred):
"""
R^2 regression score function.
R^2 = 1 - SS_res / SS_tot
where SS_res is the residual sum of squares and SS_tot is the total
sum of squares.
:param numpy.array y_true : array-like of shape (n_samples,) Ground truth (correct) target values.
:param numpy.array y_pred : array-like of shape (n_samples,) Estimated target values.
:returns: score (float) R^2 score.
"""
# Residual sum of squares.
numerator = ((y_true - y_pred) ** 2).sum(axis=0)
# Total sum of squares.
denominator = ((y_true - np.average(y_true, axis=0)) ** 2).sum(axis=0)
# R^2.
score = 1 - numerator / denominator
return score
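# --- Tiny worked example (illustrative values only) ---
def _demo_metrics():
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    return {
        'accuracy': accuracy_score([1, 0, 1, 1], [1, 1, 1, 1]),  # 3 of 4 labels match -> 0.75
        'mse': mse(y_true, y_pred),                              # mean of squared residuals -> 0.375
        'r2': r2_score(y_true, y_pred),                          # 1 - SS_res/SS_tot -> ~0.9486
    }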
| 31.123077
| 102
| 0.669303
|
1de7ba497471d8458956563bc44026d5c4c4be12
| 3,544
|
py
|
Python
|
src/olympia/reviewers/tests/test_review_scenarios.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/reviewers/tests/test_review_scenarios.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/reviewers/tests/test_review_scenarios.py
|
gijsk/addons-server
|
7c38f379e3a0b4a5ca231f98ac0c049450c224bd
|
[
"BSD-3-Clause"
] | null | null | null |
"""Real life review scenarios.
For different add-on and file statuses, test reviewing them, and make sure they
end up in the correct state.
"""
import pytest
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.tests import user_factory
from olympia.files.models import File
from olympia.reviewers.utils import ReviewAddon, ReviewFiles, ReviewHelper
from olympia.versions.models import Version
@pytest.fixture
def mock_request(rf, db): # rf is a RequestFactory provided by pytest-django.
request = rf.get('/')
request.user = user_factory()
return request
@pytest.fixture
def addon_with_files(db):
"""Return an add-on with one version and four files.
By default the add-on is public, and the files are: beta, disabled,
unreviewed, unreviewed.
"""
addon = Addon.objects.create(name='My Addon', slug='my-addon')
version = Version.objects.create(addon=addon)
for status in [amo.STATUS_BETA, amo.STATUS_DISABLED,
amo.STATUS_AWAITING_REVIEW, amo.STATUS_AWAITING_REVIEW]:
File.objects.create(version=version, status=status)
return addon
@pytest.mark.parametrize(
'review_action,addon_status,file_status,review_class,review_type,'
'final_addon_status,final_file_status',
[
# New addon request full.
# scenario0: should succeed, files approved.
('process_public', amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
ReviewAddon, 'nominated', amo.STATUS_PUBLIC,
amo.STATUS_PUBLIC),
# scenario1: should succeed, files rejected.
('process_sandbox', amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
ReviewAddon, 'nominated', amo.STATUS_NULL,
amo.STATUS_DISABLED),
# Approved addon with a new file.
# scenario2: should succeed, files approved.
('process_public', amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
ReviewFiles, 'pending', amo.STATUS_PUBLIC,
amo.STATUS_PUBLIC),
# scenario3: should succeed, files rejected.
('process_sandbox', amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
ReviewFiles, 'pending', amo.STATUS_NOMINATED,
amo.STATUS_DISABLED),
])
def test_review_scenario(mock_request, addon_with_files, review_action,
addon_status, file_status, review_class, review_type,
final_addon_status, final_file_status):
# Setup the addon and files.
addon = addon_with_files
addon.update(status=addon_status)
version = addon.versions.get()
version.files.filter(
status=amo.STATUS_AWAITING_REVIEW).update(status=file_status)
# Get the review helper.
helper = ReviewHelper(mock_request, addon, version)
assert isinstance(helper.handler, review_class)
helper.set_review_handler(mock_request)
assert helper.handler.review_type == review_type
helper.set_data({'comments': 'testing review scenarios'})
# Run the action (process_public, process_sandbox).
try:
getattr(helper.handler, review_action)()
except AssertionError:
# Some scenarios are expected to fail. We don't need to check it here,
# the scenario has the final statuses, and those are the ones we want
# to check.
pass
# Check the final statuses.
assert addon.reload().status == final_addon_status
assert list(version.files.values_list('status', flat=True)) == (
[amo.STATUS_BETA, amo.STATUS_DISABLED, final_file_status,
final_file_status])
| 39.377778
| 79
| 0.705418
|
e9bb21e414dffe8a1185aad71f2ff5c8f19c1257
| 513
|
py
|
Python
|
start.py
|
nanotech-empa/aiidalab-empa-molecules
|
ca2c5a2f1811c17702c032776ebdd51e9a2deecb
|
[
"MIT"
] | null | null | null |
start.py
|
nanotech-empa/aiidalab-empa-molecules
|
ca2c5a2f1811c17702c032776ebdd51e9a2deecb
|
[
"MIT"
] | 6
|
2022-01-14T13:54:40.000Z
|
2022-03-04T09:48:45.000Z
|
start.py
|
nanotech-empa/aiidalab-empa-molecules
|
ca2c5a2f1811c17702c032776ebdd51e9a2deecb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import ipywidgets as ipw
template = """
<table>
<tr>
<td valign="top"><ul>
<li><a href="{appbase}/spin_calculation.ipynb" target="_blank">Spin calculations</a></li>
</ul></td>
<td valign="top"><ul>
<li><a href="{appbase}/search.ipynb" target="_blank">Search results</a></li>
</ul></td>
</tr>
</table>
"""
def get_start_widget(appbase, jupbase, notebase):
html = template.format(appbase=appbase, jupbase=jupbase, notebase=notebase)
return ipw.HTML(html)
# EOF
| 20.52
| 93
| 0.641326
|
a16f15baa121f434b0889b0d3194ba6749caad3a
| 14,631
|
py
|
Python
|
sdk/python/pulumi_azure_native/servicefabric/v20190301/application_type_version.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/servicefabric/v20190301/application_type_version.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/servicefabric/v20190301/application_type_version.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ApplicationTypeVersionArgs', 'ApplicationTypeVersion']
@pulumi.input_type
class ApplicationTypeVersionArgs:
def __init__(__self__, *,
app_package_url: pulumi.Input[str],
application_type_name: pulumi.Input[str],
cluster_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ApplicationTypeVersion resource.
:param pulumi.Input[str] app_package_url: The URL to the application package
:param pulumi.Input[str] application_type_name: The name of the application type name resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] location: It will be deprecated in New API, resource location depends on the parent resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
:param pulumi.Input[str] version: The application type version.
"""
pulumi.set(__self__, "app_package_url", app_package_url)
pulumi.set(__self__, "application_type_name", application_type_name)
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="appPackageUrl")
def app_package_url(self) -> pulumi.Input[str]:
"""
The URL to the application package
"""
return pulumi.get(self, "app_package_url")
@app_package_url.setter
def app_package_url(self, value: pulumi.Input[str]):
pulumi.set(self, "app_package_url", value)
@property
@pulumi.getter(name="applicationTypeName")
def application_type_name(self) -> pulumi.Input[str]:
"""
The name of the application type name resource.
"""
return pulumi.get(self, "application_type_name")
@application_type_name.setter
def application_type_name(self, value: pulumi.Input[str]):
pulumi.set(self, "application_type_name", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the cluster resource.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
It will be deprecated in New API, resource location depends on the parent resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
The application type version.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class ApplicationTypeVersion(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_package_url: Optional[pulumi.Input[str]] = None,
application_type_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An application type version resource for the specified application type name resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_package_url: The URL to the application package
:param pulumi.Input[str] application_type_name: The name of the application type name resource.
:param pulumi.Input[str] cluster_name: The name of the cluster resource.
:param pulumi.Input[str] location: It will be deprecated in New API, resource location depends on the parent resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Azure resource tags.
:param pulumi.Input[str] version: The application type version.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApplicationTypeVersionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An application type version resource for the specified application type name resource.
:param str resource_name: The name of the resource.
:param ApplicationTypeVersionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApplicationTypeVersionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_package_url: Optional[pulumi.Input[str]] = None,
application_type_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApplicationTypeVersionArgs.__new__(ApplicationTypeVersionArgs)
if app_package_url is None and not opts.urn:
raise TypeError("Missing required property 'app_package_url'")
__props__.__dict__["app_package_url"] = app_package_url
if application_type_name is None and not opts.urn:
raise TypeError("Missing required property 'application_type_name'")
__props__.__dict__["application_type_name"] = application_type_name
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["version"] = version
__props__.__dict__["default_parameter_list"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20170701preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20170701preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20190301preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190301preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20190601preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20190601preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20191101preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20191101preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20200301:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20200301:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20201201preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20201201preview:ApplicationTypeVersion"), pulumi.Alias(type_="azure-native:servicefabric/v20210601:ApplicationTypeVersion"), pulumi.Alias(type_="azure-nextgen:servicefabric/v20210601:ApplicationTypeVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApplicationTypeVersion, __self__).__init__(
'azure-native:servicefabric/v20190301:ApplicationTypeVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationTypeVersion':
"""
Get an existing ApplicationTypeVersion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApplicationTypeVersionArgs.__new__(ApplicationTypeVersionArgs)
__props__.__dict__["app_package_url"] = None
__props__.__dict__["default_parameter_list"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ApplicationTypeVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appPackageUrl")
def app_package_url(self) -> pulumi.Output[str]:
"""
The URL to the application package
"""
return pulumi.get(self, "app_package_url")
@property
@pulumi.getter(name="defaultParameterList")
def default_parameter_list(self) -> pulumi.Output[Mapping[str, str]]:
"""
List of application type parameters that can be overridden when creating or updating the application.
"""
return pulumi.get(self, "default_parameter_list")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Azure resource etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
It will be deprecated in New API, resource location depends on the parent resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
| 46.300633
| 1,523
| 0.6653
|
64fd40f3d22abae8926e17a0df01ed050cddd0b2
| 10,992
|
py
|
Python
|
robloxapi/group.py
|
cringxy/robloxapi
|
2cf130803874d23f1ad009facff4506d7429c800
|
[
"MIT"
] | 1
|
2020-07-21T14:22:54.000Z
|
2020-07-21T14:22:54.000Z
|
robloxapi/group.py
|
g-h-0-s-t/robloxapi
|
c586b6408696ccd71d84c3eeabf61fff7e82b1c6
|
[
"MIT"
] | null | null | null |
robloxapi/group.py
|
g-h-0-s-t/robloxapi
|
c586b6408696ccd71d84c3eeabf61fff7e82b1c6
|
[
"MIT"
] | null | null | null |
import json
import logging
import re
import asyncio
from typing import List, Tuple
from bs4 import BeautifulSoup
from .utils.errors import RoleError, NotFound
from .utils.classes import Role, Shout, WallPost, Action
from .joinrequest import JoinRequest
from .groupmember import GroupMember
from .user import User
from .auth import Auth
class Group:
"""
Represents a group.
"""
def __init__(self, request, group_id, group_name, description, member_count, shout, owner_id=None, owner_username=None):
"""
Construct a new group class.
:param request: Used to send requests
:param group_id: The id of the group
:param group_name: The name of the group
:param description: The group description
:param member_count: The amount of members in a group
:param shout: The group shout
:param owner_id: The id of the owner
:param owner_username: The username of the owner
"""
self.request = request
self.id = group_id
self.name = group_name
self.description = description
if owner_id and owner_username:
self.owner = User(self.request, owner_id, owner_username)
self.member_count = member_count
self.shout = shout
async def pay(self, user_id: int, amount: int) -> int:
"""
Pays a user.
:param user_id: The user to pay
:param amount: How much to pay the user
:return: StatusCode
"""
data = json.dumps({
"PayoutType": "FixedAmount",
"Recipients": [
{
"recipientId": user_id,
"recipientType": "User",
"amount": amount
}
]
})
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/payouts', data=data, method="POST")
return r.status_code
async def exile(self, user_id: int) -> int:
"""
Exiles a user from the group.
        :param user_id: The user's id
:return: StatusCode
"""
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/users/{user_id}', method='DELETE')
return r.status_code
async def set_rank(self, user_id: int, rank_id: int) -> int:
"""
        Set a user's rank in the group.
        :param user_id: The user's id
:param rank_id: The rank id
:return: StatusCode
"""
data = json.dumps({
'roleId': rank_id
})
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/users/{user_id}', method="PATCH", data=data)
return r.status_code
async def promote(self, user_id: int) -> Tuple[Role, Role]:
"""
        Moves the user's role up by one.
        :param user_id: The user's id
        :return: the old role and the new role as a tuple
"""
return await self.change_rank(user_id, 1)
async def demote(self, user_id: int) -> Tuple[Role, Role]:
"""
        Moves the user's role down by one.
        :param user_id: The user's id
        :return: the old role and the new role as a tuple
"""
return await self.change_rank(user_id, -1)
async def change_rank(self, user_id: int, change: int) -> Tuple[Role, Role]:
"""
Changes the rank down or up by a specified amount.
        :param user_id: The user's id
        :param change: How much to change the user's role by, e.g. -5 or 5
        :return: the old role and the new role as a tuple
"""
roles = await self.get_group_roles()
roles.sort(key=lambda r: r.rank)
role = await self.get_role_in_group(user_id)
user_role = -1
for r in roles:
user_role = user_role + 1
if r.id == role.id:
break
new_user_role = user_role + change
if len(roles) < new_user_role or int(roles[new_user_role].rank) == 255:
raise RoleError("The role is over 255 or does not exist")
await self.set_rank(user_id, roles[new_user_role].id)
return role, roles[new_user_role]
async def set_rank_by_id(self, user_id: int, role_id: int) -> int:
"""
        Sets the user's role using a role id.
        :param user_id: The user's id
        :param role_id: The role id (254, 1, etc)
        :return: StatusCode
"""
roles = await self.get_group_roles()
choose = None
for role in roles:
if role.rank == role_id:
choose = role
if not choose:
raise NotFound(f'Role {role_id} does not exist.')
return await self.set_rank(user_id, choose.id)
async def get_group_roles(self) -> List[Role]:
"""
Get all of the group roles.
:return: A list of Role classes
"""
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/roles', method='GET')
roles = []
for role in r.json().get('roles'):
roles.append(Role(role['id'], role['name'], role['rank'], role['memberCount']))
roles.sort(key=lambda r: r.rank)
return roles
async def get_role_in_group(self, user_id) -> Role:
"""
        Get a user's role in a group.
        :param user_id: The user's id
:return: A role class
"""
r = await self.request.request(url=f'https://groups.roblox.com/v1/users/{user_id}/groups/roles', method='GET')
data = r.json()
user_role = None
for group in data['data']:
if group['group']['id'] == self.id:
user_role = group
break
if not user_role:
raise NotFound('The user is not in that group.')
return Role(user_role['role']['id'], user_role['role']['name'], user_role['role']['rank'], user_role['role']['memberCount'])
async def post_shout(self, message: str) -> Shout:
"""
Post a shout to a group.
:param message: The message to post
:return: A shout class
"""
data = {'message': message}
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/status', method='PATCH', data=json.dumps(data))
shout = r.json()
return Shout(message, shout['poster']['username'], shout['poster']['userId'], shout['created'], shout['updated'])
async def get_funds(self) -> int:
"""
Get the amount of robux a group has.
:return: The amount of robux as an int
"""
r = await self.request.request(url=f'https://economy.roblox.com/v1/groups/{self.id}/currency', method='GET')
return int(r.json().get('robux'))
# TODO: Use https://groups.roblox.com/v1/groups/{groupId}/join-requests
async def get_join_requests(self) -> List[JoinRequest]:
"""
Gets the join requests of a group.
:return: A list of Join request classes.
"""
r = await self.request.request(url=f"https://groups.roblox.com/v1/groups/{self.id}/join-requests/", method="GET")
data = r.json()
requests = []
for request in data["data"]:
requests.append(JoinRequest(self.request, self.id, request["requester"]["username"], request["requester"]["userId"]))
return requests
async def get_audit_logs(self, action=None):
"""
Gets actions in the audit log.
        :param action: Filter by which action type.
        :return: A list of Action objects
"""
r = await self.request.request(url=f"https://groups.roblox.com/v1/groups/{self.id}/audit-log?actionType={action or 'all'}&limit=100&sortOrder=Asc", method="GET")
data = r.json()
logs = []
for a in data['data']:
actor = User(self.request, a["actor"]["user"]["userId"], a["actor"]["user"]["username"])
description = None
target = None
if a['actionType'] == "Delete Post":
description = WallPost(a["description"]["PostDesc"], User(self.request, a["description"]["TargetId"], a["description"]["TargetName"]))
if a['actionType'] == "Remove Member":
description = User(self.request, a["description"]["TargetId"], a["description"]["TargetName"])
if a['actionType'] == "Accept Join Request" or a['actionType'] == "Decline Join Request":
description = JoinRequest(self.request, self.id, a["description"]["TargetName"], a["description"]["TargetId"])
if a['actionType'] == "Post Status":
description = Shout(a["description"]["Text"], actor.name, actor.id, a["created"], a["created"])
if a['actionType'] == "Change Rank":
description = (Role(a["description"]["OldRoleSetId"], a["description"]["OldRoleSetName"]), Role(a["description"]["NewRoleSetId"], a["description"]["NewRoleSetName"]))
target = User(self.request, a["description"]["TargetId"], a["description"]["TargetName"])
logs.append(Action(a['actionType'], actor, description, target))
return logs
async def get_members(self):
"""
Get all members of a group.
:return: A list of user classes
"""
cursor = ""
while True:
r = await self.request.request(url=f"https://groups.roblox.com/v1/groups/{self.id}/users?limit=100&sortOrder=Desc&cursor={cursor}", method="GET")
response = r.json()
for user in response['data']:
yield GroupMember(self.request, user["user"]["userId"], user["user"]["username"], self.id, Role(user['role']['id'], user['role']['name'], user['role']['rank'], user['role']['memberCount']))
if not response["nextPageCursor"]:
break
cursor = response["nextPageCursor"]
return
async def join(self, captcha: str) -> int:
"""
Join a group.
:param captcha: A 2captcha token to solve the captcha.
:return: StatusCode
"""
auth = Captcha(self.request, captcha, pkey="63E4117F-E727-42B4-6DAA-C8448E9B137F")
token = ''
data, status = await auth.create_task()
if status == 200:
while True:
r, s = await auth.check_task(data["request"])
if r['request'] != "CAPCHA_NOT_READY":
token = r['request']
break
await asyncio.sleep(1.5)
data = json.dumps({
'captchaProvider': 'PROVIDER_ARKOSE_LABS',
'captchaToken': token
})
r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/users', data=data, method="POST")
return r.status_code
async def leave(self) -> int:
"""
Leaves a group
:return: StatusCode
"""
r = await self.request.request(url="https://groups.roblox.com/v1/groups/3788537/users/109503558", method="DELETE")
return r.status_code
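# Usage sketch (illustrative only): driving the Group helpers from an async
# client. `client.get_group` and the ids are hypothetical; Group instances are
# normally constructed by the library rather than by hand.
#
# import asyncio
#
# async def main():
#     group = await client.get_group(123456)          # hypothetical client call
#     roles = await group.get_group_roles()           # list of Role, sorted by rank
#     old_role, new_role = await group.promote(1234)  # move a member up one rank
#     await group.post_shout("Promotions processed")  # update the group shout
#
# asyncio.run(main())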
| 40.862454
| 205
| 0.579694
|
2df0a55ad1038f9c58ae836c77e28984b145da97
| 1,362
|
py
|
Python
|
output/models/ibm_data/valid/d4_3_15/d4_3_15v15_xsd/d4_3_15v15.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ibm_data/valid/d4_3_15/d4_3_15v15_xsd/d4_3_15v15.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ibm_data/valid/d4_3_15/d4_3_15v15_xsd/d4_3_15v15.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class ElementType1:
class Meta:
name = "elementType1"
sub_element1: List["RootType"] = field(
default_factory=list,
metadata={
"name": "subElement1",
"type": "Element",
"namespace": "",
}
)
attr1: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class ElementType2:
class Meta:
name = "elementType2"
sub_element2: List["RootType"] = field(
default_factory=list,
metadata={
"name": "subElement2",
"type": "Element",
"namespace": "",
}
)
attr2: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class RootType:
class Meta:
name = "rootType"
ele1: List[ElementType1] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
ele2: List[ElementType2] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class Root(RootType):
class Meta:
name = "root"
| 18.916667
| 43
| 0.501468
|
ed738c6e6e37bff439f01d01d7d3e698f27f2924
| 2,293
|
py
|
Python
|
uiautomator2/version.py
|
jihunim/uiautomator2
|
b8a6f8d75931774c586b0b45a40f240b0a4cc34b
|
[
"MIT"
] | null | null | null |
uiautomator2/version.py
|
jihunim/uiautomator2
|
b8a6f8d75931774c586b0b45a40f240b0a4cc34b
|
[
"MIT"
] | null | null | null |
uiautomator2/version.py
|
jihunim/uiautomator2
|
b8a6f8d75931774c586b0b45a40f240b0a4cc34b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
__apk_version__ = '1.1.7'
# 1.1.7 fix dumpHierarchy XML character error
# 1.1.6 fix android P support
# 1.1.5 waitForExists use UiObject2 method first then fallback to UiObject.waitForExists
# 1.1.4 add ADB_EDITOR_CODE broadcast support, fix bug (toast capture caused the app to crash)
# 1.1.3 use thread to make watchers.watched faster, try to fix input method type multi
# 1.1.2 fix count error when the element has children && sync watched, to prevent watchers.remove error
# 1.1.1 support toast capture
# 1.1.0 update uiautomator-v18:2.1.2 -> uiautomator-v18:2.1.3 (This version fixed setWaitIdleTimeout not working bug)
# 1.0.14 catch NullException, add gps mock support
# 1.0.13 whatsinput support, but it does not work very well
# 1.0.12 add toast support
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix
__atx_agent_version__ = '0.4.9'
# 0.4.9 update for go1.11
# 0.4.8 add /wlan/ip and /packages REST API for package install
# 0.4.6 fix download dns resolve error (sometimes)
# 0.4.5 add http log, change atx-agent -d into atx-agent server -d
# 0.4.4 this version is gone
# 0.4.3 ignore sigint to prevent atx-agent quit
# 0.4.2 hot fix, close upgrade-self
# 0.4.1 fix app-download time.Timer panic error, use safe-time.Timer instead.
# 0.4.0 add go-daemon lib. use safe-time.Timer to prevent panic error. this will make it run longer
# 0.3.6 support upload zip and unzip, fix minicap rotation error when atx-agent is killed -9
# 0.3.5 hot fix for session
# 0.3.4 fix session() sometimes can not get mainActivity error
# 0.3.3 /shell support timeout
# 0.3.2 fix dns resolve error when network changes
# 0.3.0 use github.com/codeskyblue/heartbeat library instead of websocket, add /whatsinput
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always returning the same image (BUG in 0.1.7), add /shell/stream, add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap buildin
| 48.787234
| 117
| 0.746184
|
f1bd93ebbee1a438268c08367cdebbc7e2761c69
| 6,613
|
py
|
Python
|
generate/poems.py
|
krimkus/pyambic-pentameter
|
f2355f78a9c073a27acaff23398da2511ac95a42
|
[
"MIT"
] | null | null | null |
generate/poems.py
|
krimkus/pyambic-pentameter
|
f2355f78a9c073a27acaff23398da2511ac95a42
|
[
"MIT"
] | null | null | null |
generate/poems.py
|
krimkus/pyambic-pentameter
|
f2355f78a9c073a27acaff23398da2511ac95a42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import random
from .syllables import count_syllables, fulfills_scansion, remaining_scheme, rhyme_fingerprint, valid_option
def get_file(filepath):
'''
Read a file and return a list of chunks of text, separated by paragraph
breaks (two empty lines in a row).
When making a Markov model from the output, text chunks will be considered
separate (they will all go into the model, but the last word of one chunk
will not be connected to the first word of the next).
'''
with open(filepath, 'r') as f:
text = f.read()
return text.split('\n\n')
def build_models(data):
'''
    builds and returns a Markov dictionary, a reverse dictionary, and a dictionary
    of rhyme seeds (rhyme fingerprint -> words) built from the input text
data is a list of seed strings; each chunk of text may be unrelated (e.g.
lyrics from different songs)
'''
d = {}
reverse_d = {}
word_set = set()
for text in data:
strip_chars = '.,()-?!":*'
words = [word.strip(strip_chars).lower() for word in text.split() if word.strip(strip_chars)]
word_set.update(words)
for i, word in enumerate(words[:-1]):
if not word in d:
d[word] = []
d[word].append(words[i+1])
# we are also gonna build a backwards markov, so we can start with a rhyme and
# fill in the lines from there
words.reverse()
for i, word in enumerate(words[:-1]):
if not word in reverse_d:
reverse_d[word] = []
reverse_d[word].append(words[i+1])
# we can seed off of words that have at least one matching rhyme
seeds = {}
for word in word_set:
rf = rhyme_fingerprint(word)
if rf is None:
continue
if not rf in seeds:
seeds[rf] = []
seeds[rf].append(word)
rhyme_seeds = {key:value for key, value in seeds.items() if len(value) >= 2}
return d, reverse_d, rhyme_seeds
def find_scansion_with_backtrack(word, scansion_pattern, d):
if fulfills_scansion(word, scansion_pattern):
# success!
return [word]
if not valid_option(word, scansion_pattern):
return None
rest_pattern = remaining_scheme(word, scansion_pattern)
options = set([w for w in d.get(word, []) if valid_option(w, rest_pattern)])
if not options:
# failure!
return None
# otherwise, we need to keep looking
options = list(options)
random.shuffle(options)
for option in options:
rest = find_scansion_with_backtrack(option, rest_pattern, d)
if rest is not None:
# a good way to debug
#print(' '.join([word] + rest))
return [word] + rest
# whoops
return None
def find_syllables_with_backtrack(word, num_syllables, d):
word_syllables = count_syllables(word)
if word_syllables == num_syllables:
# success!
return [word]
if word_syllables > num_syllables:
return None
remaining_syllables = num_syllables - word_syllables
options = set([w for w in d.get(word, []) if count_syllables(w) <= remaining_syllables])
if not options:
# failure!
return None
options = list(options)
random.shuffle(options)
for option in options:
rest = find_syllables_with_backtrack(option, remaining_syllables, d)
if rest is not None:
return [word] + rest
return None
def generate_pattern(seed_words, pattern, d, k=2):
lines = []
for seed in seed_words:
line = find_scansion_with_backtrack(seed, pattern, d)
if line is not None:
lines.append(' '.join(line[::-1]))
if len(lines) == k:
return lines
return None
def generate_syllables(num_syllables, d, preseed=None):
line = None
while line is None:
if preseed is None:
seed = random.choice(list(d.keys()))
else:
seed = random.choice(d.get(preseed, list(d.keys())))
line = find_syllables_with_backtrack(seed, num_syllables, d)
return ' '.join(line)
def generate_haiku(d, **kwargs):
haiku = []
haiku.append(generate_syllables(5, d))
haiku.append(generate_syllables(7, d, preseed=haiku[-1].split()[-1]))
haiku.append(generate_syllables(5, d, preseed=haiku[-1].split()[-1]))
return haiku
def generate_poem(pattern, definitions, rev_d, seeds, **kwargs):
'''
Build your own poem
pattern: a string describing a rhyme pattern e.g., ABABCC. Use a space
to indicate line breaks
definitions: a dictionary with keys corresponding to each rhyme line e.g.
'A' and values describing the syllable pattern e.g. '01101101'
'''
if not all(p in definitions for p in pattern if p != ' '):
raise ValueError('Must define all rhymes used')
# Generate the appropriate number of matching lines for each pattern
distinct_rhymes = set(pattern)
if ' ' in distinct_rhymes:
distinct_rhymes.remove(' ')
rhymes = {}
for p in distinct_rhymes:
rhyme = None
while rhyme is None:
rhyme_sound = random.choice(list(seeds.keys()))
rhyme = generate_pattern(seeds[rhyme_sound], definitions[p], rev_d, k=pattern.count(p))
rhymes[p] = rhyme
# Assemble them
output = []
line_output = []
for rhyme in pattern:
if rhyme == ' ':
output.append(' '.join(line_output))
line_output = []
else:
line_output.append(rhymes[rhyme].pop())
output.append(' '.join(line_output))
return output
def generate_raven_verse(rev_d, seeds, **kwargs):
segment = '10101010'
segment_short = '1010101'
return generate_poem(
'AA BC DD DC EC C',
{
'A': segment,
'B': segment,
'C': segment_short,
'D': segment,
'E': segment,
},
rev_d,
seeds,
**kwargs)
def generate_limerick(rev_d, seeds, **kwargs):
return generate_poem(
'A A B B A',
{
'A': '01001001',
'B': '01001',
},
rev_d,
seeds,
**kwargs)
def generate_sonnet(rev_d, seeds, **kwargs):
i_p = '01' * 5 # iambic pentameter
return generate_poem(
'A B A B C D C D E F E F G G',
{
'A': i_p,
'B': i_p,
'C': i_p,
'D': i_p,
'E': i_p,
'F': i_p,
'G': i_p,
},
rev_d,
seeds,
**kwargs)
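# Usage sketch (illustrative only): wiring the helpers above together. The
# corpus path is a hypothetical placeholder.
#
# if __name__ == '__main__':
#     data = get_file('corpus/lyrics.txt')
#     d, rev_d, seeds = build_models(data)
#     print('\n'.join(generate_haiku(d)))
#     print('\n'.join(generate_limerick(rev_d, seeds)))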
| 27.102459
| 108
| 0.590806
|
0181103f00bb13c396d743c7dc15a4ce73e2258e
| 1,512
|
py
|
Python
|
tests/integration_tests/test_esc50_script.py
|
karolpiczak/echonet
|
11b633105a4459972c65ace906974b47a810fedf
|
[
"MIT"
] | 3
|
2020-03-21T04:24:22.000Z
|
2021-07-26T10:36:11.000Z
|
tests/integration_tests/test_esc50_script.py
|
karoldvl/audionet
|
11b633105a4459972c65ace906974b47a810fedf
|
[
"MIT"
] | null | null | null |
tests/integration_tests/test_esc50_script.py
|
karoldvl/audionet
|
11b633105a4459972c65ace906974b47a810fedf
|
[
"MIT"
] | 1
|
2020-07-28T10:43:09.000Z
|
2020-07-28T10:43:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test reproducibility of the original implementation of the ESC-ConvNet paper."""
import os
import re
import subprocess
from subprocess import PIPE, STDOUT
import pytest
@pytest.mark.timeout(2400)
def test_results_reproducibility(device):
if device == 'gpu0':
expected_results = [
r'Epoch: 0 (.*) | Train: 12.00 % | Validation: 21.70 % | Test: 25.50 %',
r'Epoch: 1 (.*) | Train: 24.20 % | Validation: 28.50 % | Test: 32.00 %',
r'Epoch: 2 (.*) | Train: 32.10 % | Validation: 33.20 % | Test: 37.50 %',
]
elif device == 'cpu':
expected_results = [
r'Epoch: 0 (.*) | Train: 9.90 % | Validation: 20.20 % | Test: 23.50 %',
r'Epoch: 1 (.*) | Train: 21.20 % | Validation: 28.00 % | Test: 30.70 %',
r'Epoch: 2 (.*) | Train: 28.00 % | Validation: 33.00 % | Test: 35.00 %',
]
os.chdir('experiments')
cmd = ['./esc50.py', '-D', device, 'test-esc50']
popen = subprocess.Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
verified_epochs = 0
for line in iter(popen.stdout.readline, ""):
if line.startswith('Epoch: '):
assert re.search(expected_results[verified_epochs], line) is not None
verified_epochs += 1
if (device == 'cpu' and verified_epochs > 1) or verified_epochs > 2:
break
if __name__ == '__main__':
pytest.main([__file__])
| 32.869565
| 89
| 0.562169
|
f83117dd71f80a4fd403b408e6d76ac032acda2f
| 7,150
|
py
|
Python
|
tree/tree/tree/tree.py
|
Sewar-web/data-structures-and-algorithms1
|
d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21
|
[
"MIT"
] | null | null | null |
tree/tree/tree/tree.py
|
Sewar-web/data-structures-and-algorithms1
|
d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21
|
[
"MIT"
] | null | null | null |
tree/tree/tree/tree.py
|
Sewar-web/data-structures-and-algorithms1
|
d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21
|
[
"MIT"
] | 1
|
2021-06-26T09:19:43.000Z
|
2021-06-26T09:19:43.000Z
|
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class Binary_Tree:
def __init__(self):
self.root = None
def pre_order(self):
output = []
try:
if not self.root:
return output
def _traverse(node):
                nonlocal output  # let the nested function append to the enclosing output list
output.append(node.value) # Root
if node.left:
_traverse(node.left)
if node.right:
_traverse(node.right)
return output
_traverse(self.root)
return output
except:
return "you have error with insertion by pre_order"
def in_order(self):
output = []
try:
if not self.root:
return output
def _traverse(node):
nonlocal output
if node.left:
_traverse(node.left)
output.append(node.value)
if node.right:
_traverse(node.right)
return output
_traverse(self.root)
return output
except:
return "you have error with insertion by in_order"
def post_order(self):
output = []
try:
if not self.root:
return output
def _traverse(node):
nonlocal output
if node.left:
_traverse(node.left)
if node.right:
_traverse(node.right)
output.append(node.value)
return output
_traverse(self.root)
return output
except:
return "you have error with insertion by post_order"
def max_value(self):
self.node=self.root
self.max=self.root.value
try:
def _max(node):
if self.max < node.value:
self.max=node.value
if node.right:
_max(node.right)
if node.left:
_max(node.left)
_max(self.node)
return self.max
except:
return "you have an error"
#///////////////////////////////////code challenge 17 ////////////////////////////////////////////
    def breadth_first(self, tree):
queue=[]
output=[]
if self.root:
queue.append(self.root)
while queue:
node = queue.pop(0)
output.append(node.value)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
return output
else:
return 'the tree is empty '
#//////////////////////////////////////////////code challenge 18/////////////////////////////////////////////
def fizzbuzz(n):
if n % 3 == 0 and n % 5 == 0:
return 'FizzBuzz'
elif n % 3 == 0:
return 'Fizz'
elif n % 5 == 0:
return 'Buzz'
else:
return str(n)
def Fizz_Buzz_Tree(k_ary):
tree = Binary_Tree()
    if k_ary.root is None:
return tree
def _tree(current):
node = Node(fizzbuzz(current.value))
if current.left:
node.left = _tree(current.left)
if current.right:
node.right = _tree(current.right)
return node
tree.root = _tree(k_ary.root)
return tree
#///////////////////////////////////////////////////////////////////////////////////////////////////////////
class Binary_Search_Tree(Binary_Tree):
def __init__(self):
super().__init__()
    def Add(self, value):
try:
if self.root == None :
self.root = Node(value)
else:
def _traverse(node):
if value > node.value :
if node.right == None:
node.right = Node(value)
return
else:
_traverse(node.right)
else:
if node.left == None:
node.left = Node(value)
return
else:
_traverse(node.left)
_traverse(self.root)
except:
return "you have error with insertion value "
    def Contains(self, value):
try:
if self.root==None:
return False
else:
node = self.root
def _iscontains(node):
if value == node.value:
return True
elif value < node.value:
node = node.left
if node:
return _iscontains(node)
elif value > node.value:
node = node.right
if node:
return _iscontains(node)
if _iscontains(node) == True:
return True
else:
return False
except:
return "you have error value"
if __name__=='__main__':
tree = Binary_Search_Tree()
tree.Add(5)
tree.Add(10)
tree.Add(1)
tree.Add(7)
tree.Add(4)
tree.Add(12)
tree.Add(0)
tree.Add(50)
print(tree.Contains(5))
print(tree.Contains(30))
print(tree.Contains(12))
print('\n')
print('/'*50)
print(tree.pre_order())
print(tree.in_order())
print(tree.post_order())
print('\n')
print('/'*50)
max=Binary_Tree()
max.root = Node(10)
max.root.right = Node(15)
max.root.left = Node(11)
max.root.right.left = Node(17)
max.root.left.left = Node(20)
max.root.right.right = Node(3)
print(max.max_value())
print('\n')
print('/'*50)
breadth=Binary_Tree()
breadth.root = Node(1)
breadth.root.left = Node(2)
breadth.root.right = Node(3)
breadth.root.left.left = Node(4)
breadth.root.right.left = Node(5)
breadth.root.right.right = Node(6)
print(breadth.breadth_first(breadth))
print('\n')
print('/'*50)
fizzBuzz = Binary_Tree()
fizzBuzz.root = Node(1)
fizzBuzz.root.left = Node(5)
fizzBuzz.root.right = Node(25)
fizzBuzz.root.left.left = Node(2)
fizzBuzz.root.left.right = Node(18)
fizzBuzz.root.right.right = Node(15)
print(Fizz_Buzz_Tree(fizzBuzz).pre_order())
print(Fizz_Buzz_Tree(fizzBuzz).in_order())
print(Fizz_Buzz_Tree(fizzBuzz).post_order())
| 23.754153
| 109
| 0.437622
|
47dadf94cc0553e0ab004c656c2eb1e9811fed5d
| 67
|
py
|
Python
|
src/lesson_language_tools/inspect_getsource_class.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | 3
|
2018-08-14T09:33:52.000Z
|
2022-03-21T12:31:58.000Z
|
src/lesson_language_tools/inspect_getsource_class.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
src/lesson_language_tools/inspect_getsource_class.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import example
print(inspect.getsource(example.A))
| 13.4
| 35
| 0.820896
|
517ddb8c76755a08da77e0ed0863c64c22e5e50a
| 2,123
|
py
|
Python
|
commentarea/models.py
|
wxzsan/pQper
|
c1c76153fadb2dfd01fac1dca2a8d2a27f0e0f2c
|
[
"MIT"
] | 7
|
2020-10-25T14:54:52.000Z
|
2022-03-09T13:30:24.000Z
|
commentarea/models.py
|
wxzsan/pQper
|
c1c76153fadb2dfd01fac1dca2a8d2a27f0e0f2c
|
[
"MIT"
] | 12
|
2020-12-11T03:13:35.000Z
|
2020-12-30T14:45:12.000Z
|
commentarea/models.py
|
wxzsan/pQper
|
c1c76153fadb2dfd01fac1dca2a8d2a27f0e0f2c
|
[
"MIT"
] | 4
|
2020-10-25T15:07:35.000Z
|
2021-01-02T11:23:33.000Z
|
from django.db import models
# Create your models here.
class Paper(models.Model):
title = models.CharField(max_length=255)
path = models.CharField(max_length=255)
#paper = models.FileField(upload_to = 'papers')
def __str__(self):
return str(self.id) + '--' + self.title
class PaperFile(models.Model):
title = models.CharField(max_length=255)
paper = models.FileField(upload_to = 'papers')
class ShortComment(models.Model):
poster = models.ForeignKey('user.User', on_delete=models.CASCADE, related_name = 'post_short_comment')
post_time = models.DateTimeField(auto_now_add=True)
content = models.CharField(max_length=255)
rose_number = models.IntegerField(default=0)
egg_number = models.IntegerField(default=0)
rose_user_list = models.ManyToManyField('user.User', related_name='rose_short_comment')
egg_user_list = models.ManyToManyField('user.User', related_name='egg_short_comment')
class LongComment(models.Model):
poster = models.ForeignKey('user.User', on_delete=models.CASCADE, related_name='post_long_comment')
post_time = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length = 255)
star_number = models.IntegerField(default=0)
star_user_list = models.ManyToManyField('user.User', related_name='star_long_comment')
content = models.TextField()
short_comment_list = models.ManyToManyField(ShortComment)
class CreateRequest(models.Model):
requestor = models.ForeignKey('user.User', on_delete=models.CASCADE)
paper = models.ForeignKey(Paper, on_delete=models.CASCADE)
paper_title = models.CharField(max_length=255)
class CommentArea(models.Model):
name = models.CharField(max_length=255)
master = models.ForeignKey('user.User', on_delete=models.CASCADE)
paper = models.ForeignKey(Paper, on_delete=models.CASCADE)
long_comment_list = models.ManyToManyField(LongComment)
short_comment_list = models.ManyToManyField(ShortComment)
star_number = models.IntegerField(default = 0)
star_user_list = models.ManyToManyField('user.User', related_name='star_comment_area')
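# Usage sketch (illustrative only, assumes a configured Django project and an
# existing 'user.User' instance bound to `poster`):
#
# paper = Paper.objects.create(title="An Example Paper", path="papers/example.pdf")
# area = CommentArea.objects.create(name=paper.title, master=poster, paper=paper)
# short = ShortComment.objects.create(poster=poster, content="Great paper!")
# area.short_comment_list.add(short)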
| 45.170213
| 106
| 0.757419
|
38ef71003625ad41e60815c775f0cde74ee80f22
| 618
|
py
|
Python
|
manage.py
|
xiaojieluo/flask_restapi_template
|
70b93c90d3cf9dd53a4dcbeb05bf2f67b81bfc01
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
xiaojieluo/flask_restapi_template
|
70b93c90d3cf9dd53a4dcbeb05bf2f67b81bfc01
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
xiaojieluo/flask_restapi_template
|
70b93c90d3cf9dd53a4dcbeb05bf2f67b81bfc01
|
[
"Apache-2.0"
] | null | null | null |
from werkzeug.exceptions import HTTPException
from app import create_app
from app.libs.error import APIException
from app.libs.error_code import ServerError
app = create_app()
@app.errorhandler(Exception)
def framework_error(e):
    '''Globally intercept and handle exceptions.'''
if isinstance(e, APIException):
return e
if isinstance(e, HTTPException):
code = e.code
msg = e.description
error_code = 1007
return APIException(msg, code, error_code)
else:
if app.config['DEBUG']:
return ServerError()
else:
raise e
if __name__ == '__main__':
app.run()
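# Usage sketch (illustrative only): a view registered on `app` can simply raise
# an APIException subclass and framework_error above will return it to the client.
#
# @app.route('/v1/example')
# def example():
#     raise ServerError()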
| 22.888889
| 50
| 0.647249
|