Dataset schema: one row per source file. Column types and observed ranges (⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 65d6993d98b839d0b1c35f622ef912360fb8837f | size: 3,564 | ext: py | lang: Python
repo (stars/issues/forks columns identical): majj/Rexroth @ 079f2890a39e34776627d53c00d7debbfbd13146 | path: ws_server/ws_server2.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
# Get torque data and send it to the browser over WebSockets.
import time
import asyncio
import datetime
import random
import websockets
import ast
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler #LoggingEventHandler,
import threading
import queue
q1 = queue.Queue()
q2 = queue.Queue()
import json
class EventHandler(FileSystemEventHandler):
"""
handler for file create event
['event_type', 'is_directory', 'key', 'src_path']
"""
#def __init__(self):
# pass
def on_created(self, event):
""" test """
#print(dir(event))
if not event.is_directory:
#print(event.key)
#print(event.event_type)
print(event.src_path)
file_path = event.src_path
with open(file_path, 'r') as fh:
data = fh.read()
d = ast.literal_eval(data)
v = d['tightening steps'][2]['angle threshold']['act']
print(v)
q1.put({"angle act":v})
q1.put({"path":file_path})
print("=="*20)
def on_moved(self, event):
pass
def on_modified(self, event):
pass
def timer():
"""
thread, timer for browser
"""
while True:
now = datetime.datetime.utcnow().isoformat() + 'Z'
q1.put({"now":now})
time.sleep(3)
def watcher():
"""
thread, read torque data
"""
path = '../data'
event_handler = EventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def opc_ua_client():
"""
thread, opc ua client.
"""
try:
while True:
msg = q2.get()
print("opc client")
print(msg)
time.sleep(1)
except KeyboardInterrupt:
pass
connected = set()
async def handler(websocket, path):
"""
websockets handler
"""
connected.add(websocket)
try:
while True:
data = q1.get()
print(data)
print(connected)
print(">>>")
try:
await websocket.send(json.dumps(data))
ack = await websocket.recv()
q2.put(ack)
print(ack)
            except Exception as ex:
                # The connection is gone: drop it and stop the send loop for this socket.
                connected.discard(websocket)
                print(ex)
                break
    finally:
        # Unregister; discard() is safe even if the socket was already removed above.
        print("remove")
        connected.discard(websocket)
print('hhh')
def main():
"""
"""
t1 = threading.Thread(target=timer)
t1.start()
t2 = threading.Thread(target=watcher)
t2.start()
t3 = threading.Thread(target=opc_ua_client)
t3.start()
start_server = websockets.serve(handler, '127.0.0.1', 5678)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main()
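The server above pushes JSON frames to every connected browser and then blocks on a reply. As a hedged illustration (not part of the original file), a minimal Python-side consumer could look like the sketch below; the address comes from main(), while the literal 'ack' payload is an assumption since handler() only requires that something is sent back.

```python
# Hypothetical client for the server above: connect to the address used in
# main(), print each JSON frame, and answer so handler()'s recv() returns.
import asyncio
import json
import websockets

async def consume():
    async with websockets.connect('ws://127.0.0.1:5678') as ws:
        while True:
            data = json.loads(await ws.recv())   # e.g. {"now": "..."} or {"angle act": ...}
            print(data)
            await ws.send('ack')                 # handler() waits for this reply

asyncio.get_event_loop().run_until_complete(consume())
```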
avg_line_length: 18.5625 | max_line_length: 72 | alphanum_fraction: 0.477553

hexsha: a6d4c39fc4bc73848b8ad4d958a10d6423ddd1af | size: 55 | ext: py | lang: Python
repo (stars/issues/forks columns identical): ht-932/MeltSortGrow @ 8cbd7af82dc79c98a7d318efb8a35472ff35a088 | path: MeltSortGrow/Main.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
import GUI as g
def main():
g.main()
main()
avg_line_length: 6.875 | max_line_length: 15 | alphanum_fraction: 0.509091

hexsha: a0f5a7cd81eed08df5f490f11b18e5adb780a622 | size: 1,245 | ext: py | lang: Python
repo (stars/issues/forks columns identical): nicohein/dash-okta-auth @ 216761d8ea9241603574ee3d02fd90b56999fc8d | path: app.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
from flask import Flask, session
from config import FLASK_SECRET_KEY
from dash_okta_auth.okta_oauth import OktaOAuth
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# configure app
server = Flask(__name__)
server.secret_key = FLASK_SECRET_KEY
app = dash.Dash(
__name__,
server=server,
url_base_pathname='/'
)
auth = OktaOAuth(app)
app.layout = html.Div(children=[
html.H1(children="Private Dash App"),
html.Div(id='placeholder', style={'display': 'none'}),
html.Div(id='welcome'),
dcc.Graph(
id='example-graph',
figure={
'data': [
{'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},
{'x': [1, 2, 3], 'y': [2, 4, 6], 'type': 'bar', 'name': 'Montreal'},
],
'layout': {
'title': 'Dash Data Visualization'
}
}
)
])
@app.callback(
Output('welcome', 'children'),
[Input('placeholder', 'children')]
)
def on_load(value):
# we need value for it to render (be in the tree)
return "Welcome, {}!".format(session['email'])
if __name__ == '__main__':
app.run_server(host='localhost')
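The app imports FLASK_SECRET_KEY from a config module that is not included in this record. A minimal hypothetical config.py is sketched below; dash_okta_auth will also need Okta client settings, but their exact names are not visible in this file and are therefore omitted.

```python
# Hypothetical config.py for the app above (only the symbol imported here is shown).
FLASK_SECRET_KEY = 'replace-with-a-long-random-string'
```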
avg_line_length: 22.636364 | max_line_length: 84 | alphanum_fraction: 0.591165

hexsha: ad0e79045135732d9146c94cacb4baec2b99fdc4 | size: 733 | ext: py | lang: Python
repo (stars/issues/forks columns identical): Nyash-Mauro/awwards @ aa8200a4d2a26a9d022f0ebda280379c1b3761c3 | path: .history/award/models_20201129155153.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
from django.db import models
import datetime as dt
from django.core.validators import FileExtensionValidator
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from django.db.models.signals import post_save
from django.dispatch import receiver
from cloudinary.models import CloudinaryField
class Profile(models.Model):
First_Name = models.CharField(max_length=50)
Last_Name = models.CharField(max_length=50)
Email = models.EmailField(max_length=50)
bio = HTMLField()
profile_pic = CloudinaryField('image', default="media/", validators=[FileExtensionValidator(['png', 'jpg', 'jpeg'])], blank=True)
user = models.OneToOneField(User,on_delete=models.CASCADE,primary_key=True)
avg_line_length: 43.117647 | max_line_length: 133 | alphanum_fraction: 0.784447

hexsha: 8d6e2f079119af5721b55c0cfff3ec8e405fdbf5 | size: 7,010 | ext: py | lang: Python
repo (stars/issues/forks columns identical): interactiveaudiolab/voogle @ 9654d8602b97bbe5d161092edbe9e4abd9833eeb | path: model/SiameseStyle.py | licenses: ["MIT"]
max_stars_count: 28 (2019-07-25T08:23:45.000Z to 2022-03-27T06:14:32.000Z) | max_issues_count: null | max_forks_count: 1 (2021-09-21T08:26:28.000Z to 2021-09-21T08:26:28.000Z)
import librosa
import numpy as np
import os
import tensorflow as tf
from keras.models import load_model
from model.QueryByVoiceModel import QueryByVoiceModel
class SiameseStyle(QueryByVoiceModel):
'''
A siamese-style neural network for query-by-voice applications.
citation: Y. Zhang, B. Pardo, and Z. Duan, "Siamese Style Convolutional
Neural Networks for Sound Search by Vocal Imitation," in IEEE/ACM
Transactions on Audio, Speech, and Language Processing, pp. 99-112,
2018.
'''
def __init__(
self,
model_filepath,
parametric_representation=False,
uses_windowing=True,
window_length=4.0,
hop_length=2.0):
'''
SiameseStyle model constructor.
Arguments:
model_filepath: A string. The path to the model weight file on
disk.
            parametric_representation: A boolean. True if the audio
representations depend on the model weights.
            uses_windowing: A boolean. Indicates whether the model slices the
                representation into fixed-length windows.
window_length: A float. The window length in seconds. Unused if
uses_windowing is False.
hop_length: A float. The hop length between windows in seconds.
Unused if uses_windowing is False.
'''
super().__init__(
model_filepath,
parametric_representation,
uses_windowing,
window_length,
hop_length)
def construct_representation(self, audio_list, sampling_rates, is_query):
'''
Constructs the audio representation used during inference. Audio
files from the dataset are constructed only once and cached for
later reuse.
Arguments:
audio_list: A python list of 1D numpy arrays. Each array represents
one variable-length mono audio file.
            sampling_rates: A python list of ints. The corresponding sampling
rate of each element of audio_list.
is_query: A boolean. True only if audio is a user query.
Returns:
A python list of audio representations. The list order should be
the same as in audio_list.
'''
# Siamese-style network requires different representation of query
# and dataset audio
if is_query:
representation = self._construct_representation_query(
audio_list[0], sampling_rates[0])
else:
representation = self._construct_representation_dataset(
audio_list, sampling_rates)
return representation
def measure_similarity(self, query, items):
'''
Runs model inference on the query.
Arguments:
query: A numpy array. An audio representation as defined by
construct_representation. The user's vocal query.
items: A numpy array. The audio representations as defined by
construct_representation. The dataset of potential matches for
the user's query.
Returns:
A python list of floats. The similarity score of the query and each
element in the dataset. The list order should be the same as
in dataset.
'''
if not self.model:
raise RuntimeError('No model loaded during call to \
measure_similarity.')
# run model inference
with self.graph.as_default():
self.logger.debug('Running inference')
return np.array(self.model.predict(
[query, items], batch_size=len(query), verbose=1),
dtype='float64')
def _load_model(self):
'''
Loads the model weights from disk. Prepares the model to be able to
make predictions.
'''
self.logger.info(
'Loading model weights from {}'.format(self.model_filepath))
self.model = load_model(self.model_filepath)
self.graph = tf.get_default_graph()
def _construct_representation_query(self, query, sampling_rate):
self.logger.debug('Constructing query representation')
# resample query at 16k
new_sampling_rate = 16000
query = librosa.resample(query, sampling_rate, new_sampling_rate)
sampling_rate = new_sampling_rate
if self.uses_windowing:
windows = self._window(query, sampling_rate)
else:
windows = [
librosa.util.fix_length(
query, self.window_length * sampling_rate)]
# construct the logmelspectrogram of the signal
representation = []
for window in windows:
melspec = librosa.feature.melspectrogram(
window, sr=sampling_rate, n_fft=133,
hop_length=133, power=2, n_mels=39,
fmin=0.0, fmax=5000)
melspec = melspec[:, :482]
logmelspec = librosa.power_to_db(melspec, ref=np.max)
representation.append(logmelspec)
# normalize to zero mean and unit variance
representation = np.array(representation)
representation = self._normalize(representation).astype('float32')
return [representation]
def _construct_representation_dataset(self, dataset, sampling_rates):
new_sampling_rate = 44100
representations = []
for audio, sampling_rate in zip(dataset, sampling_rates):
# resample audio at 44.1k
audio = librosa.resample(audio, sampling_rate, new_sampling_rate)
sampling_rate = new_sampling_rate
if self.uses_windowing:
windows = self._window(audio, sampling_rate)
else:
windows = [
librosa.util.fix_length(
audio, self.window_length * sampling_rate)]
representation = []
for window in windows:
# construct the logmelspectrogram of the signal
melspec = librosa.feature.melspectrogram(
window,
sr=sampling_rate,
n_fft=1024,
hop_length=1024,
power=2)
melspec = melspec[:, 0:128]
logmelspec = librosa.power_to_db(melspec, ref=np.max)
representation.append(logmelspec)
# normalize to zero mean and unit variance
representation = np.array(representation)
representation = self._normalize(representation).astype('float32')
representation = np.expand_dims(representation, axis=1)
representations.append(representation)
return representations
def _normalize(self, x):
# normalize to zero mean and unit variance
mean = x.mean(keepdims=True)
std = x.std(keepdims=True)
return (x - mean) / std
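A hedged sketch of how this class might be driven end to end. The weight-file path and audio arrays are placeholders, and whether the QueryByVoiceModel base class already calls _load_model itself is not visible in this file, so the explicit call is an assumption.

```python
# Illustrative only; paths, arrays, and the explicit _load_model() call are assumptions.
import numpy as np

model = SiameseStyle('weights/siamese_style.h5')   # placeholder weight file
model._load_model()                                # may already happen in the base class

query_audio = np.random.randn(16000).astype('float32')        # stand-in vocal query
dataset_audio = [np.random.randn(44100).astype('float32')]    # stand-in dataset clip

query_rep = model.construct_representation([query_audio], [16000], is_query=True)
item_reps = model.construct_representation(dataset_audio, [44100], is_query=False)
scores = model.measure_similarity(query_rep[0], item_reps[0])
```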
avg_line_length: 37.486631 | max_line_length: 79 | alphanum_fraction: 0.607703

hexsha: b1348d5c0cdf6aa9ac28a1f12a677192e2ff2fa5 | size: 2,434 | ext: py | lang: Python
repo (stars/issues/forks columns identical): sdk-team/aliyun-openapi-python-sdk @ 384730d707e6720d1676ccb8f552e6a7b330ec86 | path: aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribeInstanceMaintenanceAttributesRequest.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeInstanceMaintenanceAttributesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeInstanceMaintenanceAttributes')
self.set_method('POST')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InstanceIds(self):
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceIds):
for depth1 in range(len(InstanceIds)):
if InstanceIds[depth1] is not None:
self.add_query_param('InstanceId.' + str(depth1 + 1) , InstanceIds[depth1])
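A hedged usage sketch for the request class above. AcsClient and do_action_with_exception are the standard aliyunsdkcore entry points; the credentials, region, and instance IDs are placeholders.

```python
# Illustrative only; credentials, region, and instance IDs are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = DescribeInstanceMaintenanceAttributesRequest()
request.set_InstanceIds(['i-aaa', 'i-bbb'])   # serialized as InstanceId.1, InstanceId.2
request.set_PageSize(10)
print(client.do_action_with_exception(request))
```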
avg_line_length: 34.771429 | max_line_length: 90 | alphanum_fraction: 0.767872

hexsha: 9ac8c04e18ec7cef51c292c62d1c33ca48ce4201 | size: 1,664 | ext: py | lang: Python
repo (stars/issues/forks columns identical): nathants/c-utils @ 5c8408870182cc48da487547397d266888ffded2 | path: test/bsort_f64_test.py | licenses: ["MIT"]
max_stars_count: 29 (2019-10-10T21:15:31.000Z to 2021-11-09T11:03:10.000Z) | max_issues_count: 1 (2020-05-14T22:26:11.000Z to 2020-05-14T22:26:11.000Z) | max_forks_count: 2 (2020-03-01T04:13:26.000Z to 2020-05-14T22:20:38.000Z)
import pytest
import os
import string
import shell
from hypothesis.database import ExampleDatabase
from hypothesis import given, settings
from hypothesis.strategies import text, lists, composite, integers, floats
from test_util import run, rm_whitespace, clone_source
def setup_module(m):
m.tempdir = clone_source()
m.orig = os.getcwd()
m.path = os.environ['PATH']
os.chdir(m.tempdir)
os.environ['PATH'] = f'{os.getcwd()}/bin:/usr/bin:/usr/local/bin:/sbin:/usr/sbin:/bin'
shell.run('make clean && make bsv csv bcut bsort bschema', stream=True)
def teardown_module(m):
os.chdir(m.orig)
os.environ['PATH'] = m.path
assert m.tempdir.startswith('/tmp/') or m.tempdir.startswith('/private/var/folders/')
shell.run('rm -rf', m.tempdir)
@composite
def inputs(draw):
num_columns = draw(integers(min_value=1, max_value=3))
column = floats(allow_nan=False, min_value=1e-10, max_value=1e10)
line = lists(column, min_size=num_columns, max_size=num_columns)
lines = draw(lists(line))
lines = [','.join(map(str, line)) for line in lines]
return '\n'.join(lines) + '\n'
def expected(csv):
xs = csv.splitlines()
xs = [float(x.split(',')[0]) for x in xs if x]
xs = sorted(xs)
return [round(x, 2) for x in xs]
@given(inputs())
@settings(database=ExampleDatabase(':memory:'), max_examples=100 * int(os.environ.get('TEST_FACTOR', 1)), deadline=os.environ.get("TEST_DEADLINE", 1000 * 60)) # type: ignore
def test_props(csv):
result = expected(csv)
assert result == [round(float(x), 2) for x in run(csv, 'bsv | bschema a:f64,... | bsort f64 | bcut 1 | bschema f64:a | csv').splitlines() if x]
avg_line_length: 37.818182 | max_line_length: 173 | alphanum_fraction: 0.683293

hexsha: 2d237e4cdb86b510107f323b30377960072b257d | size: 2,397 | ext: py | lang: Python
repo (stars/issues/forks columns identical): vj1494/kindred @ 06d73448c35e65e727cbcaf7b754efa2f725bd5a | path: kindred/Relation.py | licenses: ["MIT"]
max_stars_count: 141 (2017-08-03T15:51:42.000Z to 2022-01-20T05:16:20.000Z) | max_issues_count: 24 (2017-08-04T12:07:54.000Z to 2021-06-22T12:40:53.000Z) | max_forks_count: 34 (2017-08-22T21:44:36.000Z to 2022-03-27T11:24:19.000Z)
import kindred
import six
class Relation:
"""
Describes relationship between entities (including relation type and argument names if applicable).
:ivar relationType: Type of relation
:ivar entities: List of entities in relation
:ivar argNames: Names of relation argument associated with each entity
:ivar probability: Optional probability for predicted relations
:ivar sourceRelationID: Relation ID used in source document
"""
def __init__(self,relationType=None,entities=None,argNames=None,probability=None,sourceRelationID=None):
"""
Constructor for Relation class
:param relationType: Type of relation
:param entities: List of entities in relation
:param argNames: Names of relation argument associated with each entity
:param probability: Optional probability for predicted relations
:param sourceRelationID: Relation ID used in source document
:type relationType: str
:type entities: list of kindred.Entity
:type argNames: list of str
:type probability: float
:type sourceRelationID: str
"""
if entities is None:
entities = []
assert relationType is None or isinstance(relationType, six.string_types), "relationType must be a string"
self.relationType = relationType
assert isinstance(entities,list), "entities must be a list of kindred.Entity"
for entity in entities:
assert isinstance(entity, kindred.Entity), "entities must be a list of kindred.Entity"
self.entities = entities
        if argNames is None:
self.argNames = None
else:
assert len(argNames) == len(entities)
self.argNames = [ str(a) for a in argNames ]
        if probability is not None:
assert isinstance(probability, float)
self.probability = probability
self.sourceRelationID = sourceRelationID
def __eq__(self, other):
"""Override the default Equals behavior"""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
"""Define a non-equality test"""
return not self.__eq__(other)
def __str__(self):
return "<Relation %s %s %s>" % (self.relationType,str(self.entities),str(self.argNames))
def __repr__(self):
return self.__str__()
def __hash__(self):
if self.argNames is None:
return hash((self.relationType,tuple(self.entities),self.probability))
else:
return hash((self.relationType,tuple(self.entities),tuple(self.argNames),self.probability))
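A short illustrative construction of the class above. The two kindred.Entity objects are stand-ins for instances created elsewhere in the package; their constructor is not shown in this file.

```python
# Illustrative only; e1 and e2 stand in for existing kindred.Entity instances.
rel = Relation(relationType='treats',
               entities=[e1, e2],
               argNames=['drug', 'disease'],
               probability=0.87)
print(rel)    # <Relation treats [...] ['drug', 'disease']>
```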
avg_line_length: 31.12987 | max_line_length: 108 | alphanum_fraction: 0.751356

hexsha: 3ff3cf88b7c3fa5713ffba3cb8ab6ba2ad529338 | size: 6,352 | ext: py | lang: Python
repo (stars/issues/forks columns identical): CMU-TRP/podd-api @ 6eb5c4598f848f75d131287163cd9babf2a0a0fc | path: covid/models.py | licenses: ["MIT"]
max_stars_count: 3 (2020-04-26T06:28:50.000Z to 2021-04-05T08:02:26.000Z) | max_issues_count: 10 (2020-06-05T17:36:10.000Z to 2022-03-11T23:16:42.000Z) | max_forks_count: 5 (2021-04-08T08:43:49.000Z to 2021-11-27T06:36:46.000Z)
# -*- encoding: utf-8 -*-
import datetime
import json
import sys
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from accounts.models import Authority, User
from reports.models import Report
class MonitoringReport(models.Model):
authority = models.ForeignKey(Authority, on_delete=models.PROTECT)
reporter = models.ForeignKey(User, on_delete=models.PROTECT)
report = models.ForeignKey(Report, on_delete=models.PROTECT)
village_no = models.IntegerField()
until = models.DateField()
active = models.BooleanField(default=True)
started_at = models.DateField()
last_updated = models.DateTimeField(auto_now=True)
terminate_cause = models.TextField(max_length=255, blank=True, null=True)
name = models.TextField(max_length=255, default="")
report_latest_state_code = models.TextField(max_length=50, blank=True, null=True)
latest_followup_date = models.DateField(blank=True, null=True)
followup_count = models.IntegerField(default=0)
def __unicode__(self):
return "%s %s" % (self.id, self.name,)
@classmethod
def sync_from_report(cls, instance):
if not MonitoringReport.objects.filter(report_id=instance.id).exists():
form_data = json.loads(instance.form_data)
report_latest_state_code = instance.state.code
flag_active = True
terminate_cause = None
if settings.COVID_FOLLOWUP_TERMINATE_FIELD_NAME in form_data:
followup_status = form_data[settings.COVID_FOLLOWUP_TERMINATE_FIELD_NAME]
if followup_status.find(settings.COVID_FOLLOWUP_CONFIRMED_CASE_PATTERN) != -1:
report_latest_state_code = "ConfirmedCase"
flag_active = False
terminate_cause = form_data[settings.COVID_FOLLOWUP_TERMINATE_FIELD_NAME]
MonitoringReport.objects.create(
authority=instance.administration_area.authority,
reporter=instance.created_by,
village_no=form_data['village_no'] or 0,
report=instance,
started_at=instance.incident_date,
name=form_data['name'] if 'name' in form_data else '',
until=instance.incident_date + timedelta(days=settings.COVID_FOLLOWUP_DAYS or 14),
report_latest_state_code=report_latest_state_code,
active=flag_active,
terminate_cause=terminate_cause,
)
else:
monitoring = MonitoringReport.objects.get(report_id=instance.id)
if monitoring.report_latest_state_code != instance.state.code and monitoring.active:
monitoring.report_latest_state_code = instance.state.code
monitoring.save()
@classmethod
def sync_from_followup(cls, instance):
form_data = json.loads(instance.form_data)
parent_id = instance.parent_id
assert parent_id is not None, "followup report must have parent id"
monitoring = MonitoringReport.objects.get(report_id=parent_id)
followup_status = form_data[settings.COVID_FOLLOWUP_TERMINATE_FIELD_NAME]
if followup_status is not None:
if followup_status.find(
settings.COVID_FOLLOWUP_TERMINATE_14_DAYS_PATTERN) != -1 or followup_status.find(
settings.COVID_FOLLOWUP_TERMINATE_DEPARTURE_PATTERN) != -1 or followup_status.find(
settings.COVID_FOLLOWUP_CONFIRMED_CASE_PATTERN) != -1:
monitoring.until = datetime.datetime.now()
if followup_status.find(settings.COVID_FOLLOWUP_CONFIRMED_CASE_PATTERN) != -1:
monitoring.report_latest_state_code = "ConfirmedCase"
monitoring.terminate_cause = followup_status
monitoring.active = False
monitoring.followup_count = monitoring.followup_count + 1
monitoring.latest_followup_date = instance.incident_date
monitoring.save()
@receiver(post_save, sender=Report)
def covid_monitoring_handler(sender, instance, **kwargs):
if not settings.COVID_MONITORING_ENABLE:
return
if not instance.test_flag:
try:
if instance.type.code == settings.COVID_REPORT_TYPE_CODE:
MonitoringReport.sync_from_report(instance)
elif instance.type.code == settings.COVID_FOLLOWUP_TYPE_CODE:
MonitoringReport.sync_from_followup(instance)
except:
print(sys.exc_info()[0])
class DailySummary(models.Model):
authority = models.ForeignKey(Authority, on_delete=models.PROTECT)
date = models.DateField()
qty_new_case = models.IntegerField(default=0)
qty_new_monitoring = models.IntegerField(default=0)
qty_ongoing_monitoring = models.IntegerField(default=0)
qty_acc_finished = models.IntegerField(default=0)
class Meta:
unique_together = ("authority", "date")
@property
def qty_total(self):
return self.qty_ongoing_monitoring + self.qty_acc_finished
class DailySummaryByVillage(models.Model):
authority = models.ForeignKey(Authority, on_delete=models.PROTECT)
date = models.DateField()
village_no = models.IntegerField()
low_risk = models.IntegerField(default=0)
medium_risk = models.IntegerField(default=0)
high_risk = models.IntegerField(default=0)
confirmed = models.IntegerField(default=0)
confirmed_found_in_14 = models.BooleanField(default=False)
class Meta:
unique_together = ("authority", "date", "village_no")
@property
def total(self):
return self.low_risk + self.medium_risk + self.high_risk
@property
def risk_type(self):
if self.confirmed_found_in_14 or self.confirmed > 0:
return 'confirmed'
elif self.high_risk + self.medium_risk > 10:
return 'high'
elif self.high_risk + self.medium_risk > 0:
return 'medium'
elif self.low_risk > 0:
return 'low'
return 'none'
class AuthorityInfo(models.Model):
authority = models.ForeignKey(Authority, on_delete=models.PROTECT, unique=True)
line_notify_token = models.TextField(max_length=255, blank=True, null=True)
avg_line_length: 42.346667 | max_line_length: 103 | alphanum_fraction: 0.689232

hexsha: 1ac7dcbdf2f1f1701da496ce640ee106b3773c2d | size: 474 | ext: py | lang: Python
repo (stars/issues/forks columns identical): ClaySheffler/dbt @ 588851ac1cd2ef9856706f14cee573dedbc46d8c | path: plugins/snowflake/dbt/adapters/snowflake/__init__.py | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-08-11T08:44:33.000Z to 2020-08-11T08:44:33.000Z) | max_issues_count: null | max_forks_count: 1 (2019-04-16T10:51:10.000Z to 2019-04-16T10:51:10.000Z)
from dbt.adapters.snowflake.connections import SnowflakeConnectionManager
from dbt.adapters.snowflake.connections import SnowflakeCredentials
from dbt.adapters.snowflake.relation import SnowflakeRelation
from dbt.adapters.snowflake.impl import SnowflakeAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import snowflake
Plugin = AdapterPlugin(
adapter=SnowflakeAdapter,
credentials=SnowflakeCredentials,
include_path=snowflake.PACKAGE_PATH)
avg_line_length: 36.461538 | max_line_length: 73 | alphanum_fraction: 0.85654

hexsha: 61720cb09baa7498fdff14059ea78ee9dc360557 | size: 2,159 | ext: py | lang: Python
max_stars repo: tudinfse/intel_mpx_explained @ 9a3d7b060742d8fe89c1b56898f2b2e3617b670b | path: tests/test_parsec.py | licenses: ["MIT"] | count: 15 (2018-07-31T07:24:36.000Z to 2020-05-31T03:18:25.000Z)
max_issues repo: OleksiiOleksenko/intel_mpx_explained @ dd6da57e0fcf22df358d1a742079b414620a7c88 | path: tests/test_parsec.py | licenses: ["MIT"] | count: 1 (2020-02-01T00:29:32.000Z to 2020-02-04T14:25:57.000Z)
max_forks repo: OleksiiOleksenko/intel_mpx_explained @ dd6da57e0fcf22df358d1a742079b414620a7c88 | path: tests/test_parsec.py | licenses: ["MIT"] | count: 3 (2017-02-08T04:02:51.000Z to 2018-03-30T07:58:45.000Z)
# ATTENTION: the tests are supposed to be run inside a container!
# all dependencies have to be installed before running the tests by `./entrypoint install -n benchmark_name`
from .abstract_test import BuildAndRun, INPUT_PATH
from .abstract_acceptance_test import Acceptance
import sys
import logging
import unittest
class TestParsec(unittest.TestCase, BuildAndRun):
# tested applications
benchmarks = {
# benchmark name: (path, test input)
'blackscholes': ('src/parsec/blackscholes', '1 %s/parsec/blackscholes/in_4.txt prices.txt' % INPUT_PATH),
'bodytrack': ('src/parsec/bodytrack', '%s/parsec/bodytrack/sequenceB_1 4 1 4000 5 0 1' % INPUT_PATH),
'canneal': ('src/parsec/canneal', '1 15000 2000 %s/parsec/canneal/10.nets 6000' % INPUT_PATH),
'dedup': ('src/parsec/dedup', '-c -p -t 1 -i %s/parsec/dedup/test.dat -o output.dat.ddp' % INPUT_PATH),
'facesim': ('src/parsec/facesim', '-timing -threads 1 -data_dir %s/parsec/facesim/test/' % INPUT_PATH),
'ferret': ('src/parsec/ferret', '{0}/parsec/ferret/test/corel lsh {0}/parsec/ferret/test/queries 5 5 1 output.txt'.format(INPUT_PATH)),
'fluidanimate': ('src/parsec/fluidanimate', '1 500 %s/parsec/fluidanimate/in_5K.fluid out.fluid' % INPUT_PATH),
'raytrace': ('src/parsec/raytrace', '%s/parsec/raytrace/thai_statue.obj -automove -nthreads 1 -frames 20 -res 360 480' % INPUT_PATH),
'streamcluster': ('src/parsec/streamcluster', '10 20 128 1000 200 5000 none output.txt 1'),
'swaptions': ('src/parsec/swaptions', '-ns 128 -sm 100 -nt 1'),
'vips': ('src/parsec/vips', 'im_benchmark %s/parsec/vips/barbados_256x288.v output.v' % INPUT_PATH),
'x264': ('src/parsec/x264', '--quiet --qp 20 --partitions b8x8,i4x4 --ref 5 --direct auto --b-pyramid --weightb --mixed-refs --no-fast-pskip --me umh --subme 7 --analyse b8x8,i4x4 --threads 1 -o eledream.264 %s/parsec/x264/eledream_32x18_1.y4m' % INPUT_PATH),
}
class TestParsecAcceptance(TestParsec, Acceptance):
pass
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr)
logging.getLogger("Test").setLevel(logging.DEBUG)
avg_line_length: 61.685714 | max_line_length: 267 | alphanum_fraction: 0.692913

hexsha: 759314409b221fbe606468ba933ed1c570428af6 | size: 1,612 | ext: py | lang: Python
repo (stars/issues/forks columns identical): chance-nelson/SnowStorm @ 65352080c8590aa0ec5fa17ce625bd95c16264a7 | path: api/library.py | licenses: ["MIT"]
max_stars_count: 6 (2019-02-21T08:25:28.000Z to 2020-07-22T10:59:24.000Z) | max_issues_count: 12 (2019-02-21T20:46:07.000Z to 2019-10-27T14:28:47.000Z) | max_forks_count: 14 (2019-10-02T01:42:21.000Z to 2019-10-27T14:28:59.000Z)
import base64
from io import BytesIO
from flask import request, make_response, Blueprint, jsonify
from mongoengine.errors import DoesNotExist
from mutagen.mp3 import MP3
from models.song import Song
LIBRARY = Blueprint('library', __name__, url_prefix='/library')
def _resolve_songs(*songs):
return [
{
'id': i.id,
'title': i.title,
'artist': i.artist,
'runtime': i.runtime,
}
for i in songs
]
@LIBRARY.route('/', defaults={'song_id': None}, methods=['GET'])
@LIBRARY.route('/<string:song_id>/', methods=['GET'])
def get_songs(song_id: str):
songs = []
if song_id:
try:
songs.append(Song.objects.get(id=song_id))
except DoesNotExist:
return make_response('', 404)
else:
songs = Song.objects()
return make_response(jsonify(_resolve_songs(*songs)), 200)
@LIBRARY.route('/', methods=['PUT'])
def add_song():
r = request.get_json()
song_data = base64.b64decode(r.get('song').encode('utf-8'))
# Read song metadata
metadata = MP3(BytesIO(song_data))
title = str(metadata.get('TIT2'))
album = str(metadata.get('TALB'))
artist = str(metadata.get('TPE1'))
runtime = int(metadata.info.length)
bitrate = int(metadata.info.bitrate)
new_song = Song(
title=title,
artist=artist,
album=album,
runtime=runtime,
bitrate=bitrate,
)
new_song.song.put(BytesIO(song_data), content_type='audio/mpeg')
new_song.save()
return make_response(jsonify(_resolve_songs(new_song)), 200)
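A hypothetical client-side call against the blueprint above, assuming it is registered on a Flask app served at localhost:5000 (the host and port are not specified in this file, and the requests dependency is an assumption). The payload shape mirrors what add_song() reads: a base64-encoded MP3 under the key "song".

```python
# Illustrative only; host, port, file name, and the requests library are assumptions.
import base64
import requests

with open('track.mp3', 'rb') as fh:
    payload = {'song': base64.b64encode(fh.read()).decode('utf-8')}

resp = requests.put('http://localhost:5000/library/', json=payload)
print(resp.status_code, resp.json())
```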
avg_line_length: 23.705882 | max_line_length: 68 | alphanum_fraction: 0.622829

hexsha: 49110335b8c7edfeab298f3164c5565b2f9eb44a | size: 347 | ext: py | lang: Python
repo (stars/issues/forks columns identical): GnetworkGnome/Class @ b57212622b6fae278d048d09f847d7001238987e | path: Class1/Class1X7.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
# Import Modules
import yaml, json
# Read YAML File
with open("gnome_list.yml") as f:
    gnome_yaml = yaml.safe_load(f)  # safe_load avoids constructing arbitrary objects
# Read JSON File
with open("gnome_list.json") as f:
gnome_json = json.load(f)
# Print Out Variations
print("The following is the YAML List:")
print(gnome_yaml)
print("\nThe follwing is the JSON List:")
print(gnome_json)
avg_line_length: 16.52381 | max_line_length: 41 | alphanum_fraction: 0.708934

hexsha: a78b60d08f65afe3e5b9454a9e036ac40eb5d40f | size: 4,301 | ext: py | lang: Python
repo (stars/issues/forks columns identical): relikd/botlib @ d0c5072d27db1aa3fad432457c90c9e3f23f22cc | path: botlib/tgclient.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
#!/usr/bin/env python3
import telebot # pip3 install pytelegrambotapi
from threading import Thread
from time import sleep
from typing import List, Optional, Any, Union, Iterable, Callable
from telebot.types import Message, Chat # typing
from .helper import Log
class Kill(Exception):
''' Used to intentionally kill the bot. '''
pass
class TGClient(telebot.TeleBot):
'''
Telegram client. Wrapper around telebot.TeleBot.
If `polling` if False, you can run the bot for a single send_message.
If `allowedUsers` is None, all users are allowed.
'''
def __init__(
self,
apiKey: str,
*, polling: bool,
allowedUsers: Optional[List[str]] = None,
**kwargs: Any
) -> None:
        ''' See the class docstring for the meaning of polling and allowedUsers. '''
super().__init__(apiKey, **kwargs)
self.users = allowedUsers
self.onKillCallback = None # type: Optional[Callable[[], None]]
if polling:
def _fn() -> None:
try:
Log.info('Ready')
self.polling(skip_pending=True) # none_stop=True
except Kill:
Log.info('Quit by /kill command.')
if self.onKillCallback:
self.onKillCallback()
return
except Exception as e:
Log.error(repr(e))
Log.info('Auto-restart in 15 sec ...')
sleep(15)
_fn()
Thread(target=_fn, name='Polling').start()
@self.message_handler(commands=['?'])
def _healthcheck(message: Message) -> None:
if self.allowed(message):
self.reply_to(message, 'yes')
@self.message_handler(commands=['kill'])
def _kill(message: Message) -> None:
if self.allowed(message):
self.reply_to(message, 'bye bye')
raise Kill()
def set_on_kill(self, callback: Optional[Callable[[], None]]) -> None:
''' Callback is executed when a Kill exception is raised. '''
self.onKillCallback = callback
@staticmethod
def listen_chat_info(api_key: str, user: str) -> 'TGClient':
''' Wait for a single /start command, print chat-id, then quit. '''
bot = TGClient(api_key, polling=True, allowedUsers=[user])
@bot.message_handler(commands=['start'])
def handle_start(message: Message) -> None:
bot.log_chat_info(message.chat)
raise Kill()
return bot
# Helper methods
def log_chat_info(self, chat: Chat) -> None:
''' Print current chat details (chat-id, title, etc.) to console. '''
Log.info('[INFO] chat-id: {} ({}, title: "{}")'.format(
chat.id, chat.type, chat.title or ''))
def allowed(self, src_msg: Message) -> bool:
        ''' Return True if the message comes from a previously allowed user. '''
return not self.users or src_msg.from_user.username in self.users
def send(self, chat_id: int, msg: str, **kwargs: Any) -> Optional[Message]:
''' Send a message to chat. '''
try:
return self.send_message(chat_id, msg, **kwargs)
except Exception as e:
Log.error(repr(e))
sleep(45)
return None
def send_buttons(
self,
chat_id: int,
msg: str,
options: Iterable[Union[str, int, float]]
) -> Message:
''' Send tiling keyboard with predefined options to user. '''
markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
markup.add(*(telebot.types.KeyboardButton(str(x)) for x in options))
return self.send_message(chat_id, msg, reply_markup=markup)
def send_abort_keyboard(self, src_msg: Message, reply_msg: str) -> Message:
''' Cancel previously sent keyboards. '''
return self.reply_to(src_msg, reply_msg,
reply_markup=telebot.types.ReplyKeyboardRemove())
def send_force_reply(self, chat_id: int, msg: str) -> Message:
''' Send a message which is automatically set to reply_to. '''
return self.send_message(chat_id, msg,
reply_markup=telebot.types.ForceReply())
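A hedged usage sketch built only from the methods defined above; the API key, username, and chat id are placeholders.

```python
# Illustrative only; API_KEY, 'some_user', and CHAT_ID are placeholders.
API_KEY = '123456:telegram-bot-token'
CHAT_ID = -1001234567890

bot = TGClient(API_KEY, polling=True, allowedUsers=['some_user'])
bot.set_on_kill(lambda: print('bot stopped via /kill'))
bot.send(CHAT_ID, 'Hello from botlib')
bot.send_buttons(CHAT_ID, 'Pick one:', ['yes', 'no', 42])
```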
avg_line_length: 36.449153 | max_line_length: 79 | alphanum_fraction: 0.574053

hexsha: c35295191bf8f93bbe3873394f2e6bd7e5feca8e | size: 283 | ext: py | lang: Python
repo (stars/issues/forks columns identical): nholeman/raster-vision @ f3e1e26c555feed6fa018183c3fa04d7858d91bd | path: src/rastervision/commands/eval.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
from rastervision.core.command import Command
class Eval(Command):
def __init__(self, scenes, ml_task, options):
self.scenes = scenes
self.ml_task = ml_task
self.options = options
def run(self):
self.ml_task.eval(self.scenes, self.options)
avg_line_length: 23.583333 | max_line_length: 52 | alphanum_fraction: 0.667845

hexsha: 7aa4a1bef0cf3bfdab8a6185967a3086c70f2a8f | size: 2,563 | ext: py | lang: Python
repo (stars/issues/forks columns identical): SINTEF/simapy @ 650b8c2f15503dad98e2bfc0d0788509593822c7 | path: src/sima/post/xyitem.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
# This an autogenerated file
#
# Generated with XYItem
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.xyitem import XYItemBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class XYItem(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
x : float
(default 0.0)
y : float
(default 0.0)
"""
def __init__(self , name="", description="", _id="", x=0.0, y=0.0, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.x = x
self.y = y
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return XYItemBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def x(self) -> float:
""""""
return self.__x
@x.setter
def x(self, value: float):
"""Set x"""
self.__x = float(value)
@property
def y(self) -> float:
""""""
return self.__y
@y.setter
def y(self, value: float):
"""Set y"""
self.__y = float(value)
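A minimal construction of the generated class above; the values are invented and only properties defined in this file are touched.

```python
# Illustrative only; values are arbitrary.
item = XYItem(name="sample point", description="demo", x=1.5, y=-2.0)
item.scriptableValues = []        # the setter requires a sequence
print(item.name, item.x, item.y)  # sample point 1.5 -2.0
print(item.blueprint)             # an XYItemBlueprint instance
```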
avg_line_length: 23.513761 | max_line_length: 81 | alphanum_fraction: 0.567694

hexsha: 02cf42ad46f13042670e2ea5e302a5215a925df3 | size: 1,461 | ext: py | lang: Python
repo (stars/issues/forks columns identical): CMargreitter/ChemCharts @ ec47b8f572f6b77518051aafc578557a5a10c2d0 | path: tests/test_functions/test_tanimoto_similarity.py | licenses: ["Apache-2.0"]
max_stars_count: 16 (2022-01-29T05:32:13.000Z to 2022-03-02T15:19:17.000Z) | max_issues_count: 7 (2022-02-01T22:34:57.000Z to 2022-03-11T23:02:27.000Z) | max_forks_count: 1 (2022-01-19T12:41:38.000Z to 2022-01-19T12:41:38.000Z)
import unittest
from rdkit import Chem
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.container.fingerprint import FingerprintContainer
from chemcharts.core.container.smiles import Smiles
from chemcharts.core.functions.tanimoto_similarity import TanimotoSimilarity
class TestTanimotoSimilarity(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
test_chemdata = ChemData(Smiles([""]), name="test_chemdata")
test_fingerprint = FingerprintContainer("test_fingerprint",
[Chem.RDKFingerprint(Chem.MolFromSmiles('CCOC')),
Chem.RDKFingerprint(Chem.MolFromSmiles('CCO')),
Chem.RDKFingerprint(Chem.MolFromSmiles('COC')),
Chem.RDKFingerprint(Chem.MolFromSmiles('COCC'))])
test_chemdata.set_fingerprints(test_fingerprint)
cls.test_chemdata = test_chemdata
def setUp(self) -> None:
pass
def test_tanimoto_similarity(self):
test_tan_sim = TanimotoSimilarity()
tan_sim_chemdata = test_tan_sim.simplify(self.test_chemdata)
tan_sim = tan_sim_chemdata.get_tanimoto_similarity()
self.assertListEqual([0.6, 0.4, 1.0], list(tan_sim[0][1:]))
self.assertListEqual([0.4], list(tan_sim[-2][3:]))
self.assertEqual(4, len(list(tan_sim[-1])))
avg_line_length: 45.65625 | max_line_length: 98 | alphanum_fraction: 0.644764

hexsha: 4e8641fa9a69985d033161a99747075655998ba4 | size: 22,001 | ext: py | lang: Python
repo (stars/issues/forks columns identical): qenops/dDisplay @ 3a7846378733d95c17b6274cc3ebe775bbd8f758 | path: varifocal/calibrate.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
#!/usr/bin/python
__author__ = ('David Dunn')
__date__ = '2016-09-05'
__version__ = '0.1'
import cv2, sys, time, os, fnmatch, ctypes
import numpy as np
from scipy.misc import imsave
from scipy.interpolate import interp1d
from scipy.signal import medfilt
#from main import runDisplay
#import expglobals, expgui, explogic
#from exptools import ImageTools
from multiprocessing import Process, Value, Array
import dDisplay as dd
import dCamera as dc
import dGraph as dg
import dGraph.ui as ui
import dGraph.textures as dgt
import dGraph.util as dgu
from optics_correction import mapping
WINDOWS = [
{
"name": 'HMD Left',
#"location": (0, 0),
"location": (3266, 1936), # px coordinates of the startup screen for window location
"size": (830, 800), # px size of the startup screen for centering
"center": (290,216), # center of the display
"refresh_rate": 60, # refreshrate of the display for precise time measuring
"px_size_mm": 0.09766, # px size of the display in mm
"distance_cm": 20, # distance from the viewer in cm,
#"is_hmd": False,
#"warp_path": 'data/calibration/newRight/',
},
]
DISPLAY = 0
CAMERA = (None)
RESOLUTION = (768,1024)
POINT = (384,512)
DEPTH = 20
#DEPTH = 100
SIDES = {0:'left_masked'}#,1:'newLeft'}
SIDE = SIDES[DISPLAY]
CORNERS = (10,7)
DIRECTORY = './dDisplay/depth/data'
CAMCALIB = os.path.join(DIRECTORY,'cameraCalibration')
GLWARP = False
#IMAGE = 'data/images/circleCheck.png'
#IMAGE = 'data/images/airplane.png'
#IMAGE = 'data/images/smallC.png'
#IMAGE = 'data/images/smallBox.png'
IMAGE = 'data/images/pattern1.png'
#IMAGE = 'data/images/rightRenders/20cm.png'
#IMAGE = 'data/images/rightRenders/201cm.png'
#IMAGE = 'data/images/leftRenders/20cm.png'
#IMAGE = 'data/images/leftRenders/201cm.png'
TRIGGER = False
def wait(window):
global TRIGGER
while not (TRIGGER and not ui.window_should_close(window)):
ui.swap_buffers(window)
ui.poll_events()
TRIGGER = False
def keyCallback(window, key, scancode, action, mods):
if action == ui.PRESS and key == ui.KEY_ESCAPE:
ui.set_window_should_close(window, True)
if action == ui.PRESS and key == ui.KEY_ENTER:
global TRIGGER
TRIGGER = True
print key
def initDisplay():
global DISPLAY
ui.init()
#open windows
windows = []
# Control window with informational display
windows.append(ui.open_window('Calibration',1500,50,640,480,None,keyCallback))
# HMD Window
window = WINDOWS[DISPLAY]
windowName = window["name"]
winLocation = window["location"]
winSize = window["size"]
windows.append(ui.open_window(windowName,winLocation[0],winLocation[1],winSize[0],winSize[1],windows[0],keyCallback))
ui.make_context_current(windows[1])
return windows
def initCamera():
global CAMERA, CAMCALIB
#init camera
ptg = dc.Camera(*CAMERA)
#ptg.claibrate(CORNERS)
#files = [f for f in os.listdir(CAMCALIB) if os.path.isfile(os.path.join(CAMCALIB, f))]
#images = [cv2.imread(os.path.join(CAMCALIB, f)) for f in files]
#ret, matrix, dist = dc.calibrate(images, CORNERS, **kwargs)
#print ret
#ptg.matrix = matrix
#ptg.dist = dist
ptg.matrix = np.array( [[ 2.88992746e+03, 0.00000000e+00, 1.98587472e+03],
[ 0.00000000e+00, 2.88814288e+03, 1.46326356e+03],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
ptg.distortion = np.array([[-0.04156773, -0.02731505, -0.00368037, -0.00125477, 0.02314031]])
return ptg
def captureWarpInteractive(windows):
#generate greycodes
winSize = ui.get_window_size(windows[1])
codes = dd.genGreyCodes(winSize)
texture = dgt.createTexture(codes[0][0])
texture2 = dgt.createTexture(np.zeros((480,640,3),dtype=np.float32))
# set the focus
dgt.updateTexture(texture,codes[1][5])
ui.make_context_current(windows[1])
dgu.drawQuad(texture)
ui.swap_buffers(windows[1])
dgu.drawQuad(texture)
ui.swap_buffers(windows[1])
wait(windows[1])
#display grey codes and allow for capture
for idx, sequence in enumerate(codes):
seq = []
for idx2, image in enumerate(sequence):
dgt.updateTexture(texture,image)
ui.make_context_current(windows[1])
dgu.drawQuad(texture)
ui.swap_buffers(windows[1])
wait(windows[1])
#capture image
def captureWarp(id,currentDepth,requestedDepth,stillRunning,*args):
global DEPTH, DISPLAY, SIDE, DIRECTORY
WINDOWS = initDisplay()
#generate greycodes
winSize = ui.get_window_size(WINDOWS[1])
codes = dd.genGreyCodes(winSize)
texture = dgt.createTexture(codes[0][0])
texture2 = dgt.createTexture(np.zeros((480,640,3),dtype=np.float32))
captures = []
#set display depth
requestedDepth[DISPLAY].value = DEPTH
ptg = initCamera()
expgui.updateTexture(texture,codes[1][5])
glfw.make_context_current(WINDOWS[1])
expgui.drawQuad(texture)
glfw.swap_buffers(WINDOWS[1])
expgui.drawQuad(texture)
glfw.swap_buffers(WINDOWS[1])
check = ptg.captureFrames()
check = ptg.undistort(check)
directory = '%s/%s/%04d'%(DIRECTORY,SIDE,DEPTH)
if not os.path.exists(directory):
os.makedirs(directory)
cv2.imwrite('%s/check.png'%directory, check[len(check)-1])
#display grey codes and allow for capture
for idx, sequence in enumerate(codes):
seq = []
for idx2, image in enumerate(sequence):
expgui.updateTexture(texture,image)
glfw.make_context_current(WINDOWS[1])
expgui.drawQuad(texture)
glfw.swap_buffers(WINDOWS[1])
#time.sleep(1)
#capture image
for i in range(10):
ret, frame = ptg.read()
expgui.updateTexture(texture2,frame)
glfw.make_context_current(WINDOWS[0])
expgui.drawQuad(texture2)
glfw.swap_buffers(WINDOWS[0])
frame = ptg.undistort([frame,])[0]
seq.append(frame)
cv2.imwrite('%s/capture_%02d_%02d.png'%(directory,idx,idx2),frame)
captures.append(ptg.undistort(seq))
#process captures
#cameraMas, mask = dd.evalGreyCodeCameraSpace(captures, winSize)
#displayMaps = dd.toDisplaySpace(cameraMaps, mask, winSize,angleMap=)
#cv2.imwrite('warp.png',map)
stillRunning.value = False
glfw.terminate()
def genAngleMap(depth, side,checkSize,checkPixels=[20,20],width=10,roi=[0,0,768,1024],write=True,k_harris=[2,3,0.04]):
global DIRECTORY
# Input and output locations of the calibration image.
fn_cal = '%s/%s/%04d/check.JPG'%(DIRECTORY,side,depth)
img = cv2.imread(fn_cal, cv2.CV_LOAD_IMAGE_GRAYSCALE)
fnmapimg = '%s/%s/%04d/angles.png'%(DIRECTORY,side,depth)
# Step sizes of the checkerboard patterns at both axes in mm.
step = [checkSize,checkSize]
# Distance threshold in pixels to detect the interesting points wrt to the center of the image.
target_thr = checkPixels
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Filter by Area.
params.filterByArea = True
params.minArea = 1
params.maxArea = 100
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = False
# Filter by Inertia
params.filterByInertia = False
# Degree of the polynomial that you want to generate.
degree = [1,1]
# Corner Harris parameters.
# Call the calibration
map = mapping.FindMap(degree,img,roi,depth,step,params,k_harris,target_thr,width,True)
if write:
# Save the output as image and a numpy array.
fnmaparr = fnmapimg.replace('.png','.npy')
np.save(fnmaparr,map)
imsave(fnmapimg,map)
# Prompt save files.
print('written: %s' % fnmapimg)
print('written: %s' % fnmaparr)
return map
'''
import dDisplay.depth.calibrate as cal
map = cal.genAngleMap(15,'right',10.6,[20,45],width=15,roi=[321,266,841,689])
map = cal.genAngleMap(20,'right',23.,[20,10])
map = cal.genAngleMap(25,'right',23.,[15,10])
map = cal.genAngleMap(33,'right',23.,[30,40])
map = cal.genAngleMap(50,'right',23.,[30,20],roi=[285,210,723,555])
map = cal.genAngleMap(100,'right',30.18,[20,20],roi=[382,209,703,456])
map = cal.genAngleMap(500,'right',120.4,[20,60],width=5,roi=[456,324,618,434])
map = cal.genAngleMap(1000,'right',120.4,[20,60],width=5,roi=[492,285,582,329])
cal.genDisplayMaps([15,20,25,33,50,100,1000],0)
map = cal.genAngleMap(20,'left_clean',23.,[200,200],width=20,roi=[850,630,3100,2260],k_harris=[3,11,0.04])
cal.genDisplayMaps([20],0,cameraMaps=False,roi=[1400,600,2900,2100])
map = cal.genAngleMap(20,'stereoLeftClean',10.6,[10,10],width=15)
map = cal.genAngleMap(25,'stereoLeftClean',10.6,[20,20],width=15)
map = cal.genAngleMap(33,'stereoLeftClean',23.,[20,15])
map = cal.genAngleMap(25,'stereoLeft',23.,[30,60])
map = cal.genAngleMap(50,'stereoLeft',23.,[30,30],roi=[512,240,750,524])
map = cal.genAngleMap(500,'stereoLeft',120.4,[10,10],roi=[450,324,620,460])
map = cal.genAngleMap(20,'stereoLeftClean',10.6,[20,30],roi=[266,258,632,555])
map = cal.genAngleMap(25,'stereoLeftClean',10.6,[20,20],roi=[318,275,615,510])
map = cal.genAngleMap(33,'stereoLeftClean',23.,[20,15])
map = cal.genAngleMap(50,'stereoLeftClean',23.,[30,15])
map = cal.genAngleMap(100,'stereoLeftClean',30.18,[30,20])
cal.genDisplayMaps([25,50,500],1)
'''
def genDisplayMaps(depths,display,cameraMaps=True,roi=None):
global DIRECTORY, SIDES, SIDE, WINDOWS
DISPLAY = display
SIDE = SIDES[DISPLAY]
print "Working in %s/%s"%(DIRECTORY,SIDE)
window = WINDOWS[DISPLAY]
winSize = window["size"]
displayMaps = []
for depth in depths:
directory = '%s/%s/%04d'%(DIRECTORY,SIDE,depth)
if cameraMaps:
captures = []
for file in sorted(os.listdir(directory)):
if fnmatch.fnmatch(file, 'capture*'):
seq = int(file.split('_')[1])
if not len(captures) > seq:
captures.append([])
captures[seq].append(cv2.imread('%s/%s'%(directory,file),cv2.CV_LOAD_IMAGE_GRAYSCALE))
angleMap = np.load('%s/angles.npy'%directory)
print "Generating camera maps for: %scm"%depth
cameraMaps, mask = dd.evalGreyCodeCameraSpace(captures, winSize)
np.save('%s/cameraMap.npy'%directory,cameraMaps)
np.save('%s/mask.npy'%directory,mask)
cameraMaps[0] = medfilt(cameraMaps[0],5)
cameraMaps[1] = medfilt(cameraMaps[1],5)
kernel = np.ones((5,5),np.float64)/25
#cameraMaps[0] = cv2.filter2D(cameraMaps[0],-1,kernel)
#cameraMaps[1] = cv2.filter2D(cameraMaps[1],-1,kernel)
imsave('%s/cameraMap.png'%directory,np.dstack((cameraMaps[0],cameraMaps[1],np.zeros_like(cameraMaps[0]))))
imsave('%s/mask.png'%directory,mask)
else:
angleMap = np.load('%s/angles.npy'%directory)
cameraMaps = np.load('%s/cameraMap.npy'%directory)
mask = np.load('%s/mask.npy'%directory)
kernel = np.ones((5,5),np.float64)/25
print "Generating display maps for: %scm"%depth
if roi is None:
displayMap = dd.toDisplaySpace(cameraMaps,mask,winSize,angleMap=angleMap,filter=5)
else:
displayMap = dd.toDisplaySpace(cameraMaps[:,roi[0]:roi[2],roi[1]:roi[3]],mask[roi[0]:roi[2],roi[1]:roi[3]],winSize,angleMap=angleMap[roi[0]:roi[2],roi[1]:roi[3]],filter=5)
np.save('%s/displayMap.npy'%directory,displayMap)
displayMap[0] = medfilt(displayMap[0],5)
displayMap[1] = medfilt(displayMap[1],5)
#displayMap[0] = medfilt(displayMap[0],5)
#displayMap[1] = medfilt(displayMap[1],5)
displayMap[0] = cv2.filter2D(displayMap[0],-1,kernel)
displayMap[1] = cv2.filter2D(displayMap[1],-1,kernel)
#displayMap = cv2.GaussianBlur(displayMap,(5,5),0)
displayMaps.append(displayMap)
img = np.zeros_like(displayMap)
img[~np.isnan(displayMap)] = displayMap[~np.isnan(displayMap)]
imsave('%s/displayMap.png'%directory,np.dstack((img[1],img[0],np.zeros_like(img[0]))))
print "Generating UV look up tables."
images, angleMap = dd.generateUVluts(displayMaps)
for idx, image in enumerate(images):
np.save('%s/%s/%04d.npy'%(DIRECTORY,SIDE,depths[idx]),image.astype(np.float64))
imsave('%s/%s/%04d.png'%(DIRECTORY,SIDE,depths[idx]),image)
np.save('%s/%s/angleMap.npy'%(DIRECTORY,SIDE),angleMap.astype(np.float64))
imsave('%s/%s/angleMap.png'%(DIRECTORY,SIDE),angleMap)
def calibratePoint(id,currentDepth,requestedDepth,stillRunning,sharedImage):
''' NOTE: curent method should only work for the point straight down the axis - otherwise we need to map angles'''
print 'Entering calibratePoint'
global DEPTH, DISPLAY, SIDE, DIRECTORY, IMAGE, POINT
requestedDepth[DISPLAY].value = DEPTH
WINDOWS = initDisplay()
winSize = glfw.get_window_size(WINDOWS[1])
img = np.zeros(winSize,dtype=np.uint8)
# Create a marker at POINT
img[POINT[0]-5:POINT[0]+5,POINT[1]-5:POINT[1]+5] = 255
img = np.fliplr(img)
imgTex = expgui.createTexture(img)
# load lut
tables, depths = explogic.ExperimentScreen.loadLuts('%s/%s'%(DIRECTORY,SIDE))
lutImg = tables[depths.index(DEPTH)]
lutTex = expgui.createTexture(lutImg)
glfw.make_context_current(WINDOWS[1])
frameTex, frameBuf = expgui.createWarp(tables[0].shape[1],tables[0].shape[0])
shader = expgui.lutShader()
# get camera image
image = toNumpyImage(sharedImage)
# setup blob detector
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 50
params.maxThreshold = 150
params.minArea=30
params.maxArea=100
params.filterByConvexity = False
params.filterByInertia = False
detector = cv2.SimpleBlobDetector(params)
count = 0
while not glfw.window_should_close(WINDOWS[0]) and not glfw.window_should_close(WINDOWS[1]):
#get the right luts
glfw.make_context_current(WINDOWS[1])
expgui.drawWarp(frameTex, frameBuf, lutTex, imgTex, shader)
#expgui.drawQuad(imgTex)
glfw.swap_buffers(WINDOWS[1])
glfw.poll_events()
count += 1
if count % 30:
with sharedImage.get_lock():
frame = np.copy(image)
print frame.shape
print frame.dtype
frame2 = 1.-frame
imsave('blobShared.png',frame2)
ret,thresh = cv2.threshold(frame2,params.maxThreshold,255,cv2.THRESH_TRUNC)
#blob detect
culled = []
coord = []
keypoints = detector.detect(thresh)
if len(keypoints):
for key in keypoints:
if bounds[0] < key.pt[0] < bounds[1] and bounds[2] < key.pt[1] < bounds[3]:
coord.append(key.pt)
culled.append(key)
#frame = cv2.drawKeypoints(frame, culled, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
print coord
#
stillRunning.value = False
glfw.terminate()
def streamCamera(stillRunning,sharedImage,currentDepth):
global DISPLAY
print 'Entering streamCamera'
cam = initCamera()
alpha = 0.
if not cam.open():
return None
image = toNumpyImage(sharedImage)
print 'Camera entering while loop'
while(True):
ret, frame = cam.read() # Capture the frame
size = frame.shape
newMtx,roi = cv2.getOptimalNewCameraMatrix(cam.matrix,cam.distortion,(size[1],size[0]),alpha)
map1, map2 = cv2.initUndistortRectifyMap(cam.matrix,cam.distortion,np.eye(3),newMtx,(size[1],size[0]),cv2.CV_16SC2)
frame = cv2.remap(frame,map1,map2,cv2.INTER_LINEAR)
display = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)
depth = currentDepth[0].value
diopter = 100./max(depth,.1)
height = int((.04727+diopter/7.333)*-display.shape[0])+display.shape[0]
point = (5,height)
height = int((.03+depth/1100.)*display.shape[0])
point2 = (100,height)
ImageTools.drawStr(display,point2,'-%.4gcm'%depth,scale=1.7,thick=2,color=(0,0,255),backCol=(255,255,255))
ImageTools.drawStr(display,point,'%1.2fd-'%diopter,scale=1.7,thick=2,color=(0,255,0),backCol=(255,255,255))
cv2.imshow('frame',display) # Display the frame
ch = cv2.waitKey(1) & 0xFF
if ch == 27: # escape
break
elif ch == 32: # space bar
imsave('blobOrig.png',frame)
with sharedImage.get_lock():
image[:,:] = frame[:,:]
cam.release() # release the capture
cv2.destroyAllWindows()
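# A minimal sketch (not part of the original module) of the depth-to-diopter
# conversion used for the overlay above: optical power in diopters is the
# reciprocal of the focal distance in metres, so for a depth in centimetres
# diopters = 100 / depth_cm. The helper name is ours, and the clamp mirrors the
# max(depth, .1) guard in streamCamera that avoids division by zero.
def _depth_cm_to_diopters(depth_cm, min_depth_cm=0.1):
    """Convert a depth in centimetres to optical power in diopters."""
    return 100. / max(depth_cm, min_depth_cm)
# e.g. _depth_cm_to_diopters(50.) == 2.0 (an object 0.5 m away needs 2 D of accommodation)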
def verifyCalibration(id,currentDepth,requestedDepth,stillRunning,*args):
print 'Entering verifyCalibration'
global DEPTH, DISPLAY, SIDE, DIRECTORY, IMAGE, GLWARP
WINDOWS = initDisplay()
speed = 20 #smaller is faster
img = cv2.imread(IMAGE,-1)
winSize = glfw.get_window_size(WINDOWS[1])
img = img[0:winSize[1],0:winSize[0]]
#pts1 = np.float32([[612,410],[792,366],[621,589],[808,543]])
#pts2 = np.float32([[612,410],[792,410],[612,589],[792,589]])
#M = cv2.getPerspectiveTransform(pts1,pts2)
#img = cv2.warpPerspective(img,M,(img.shape[1],img.shape[0]))
imgTex = expgui.createTexture(img)
tables, depths = explogic.ExperimentScreen.loadLuts('%s/%s'%(DIRECTORY,SIDE))
lutMap = interp1d(depths,range(len(tables)))
# double up on the ends so we can see it
depths.insert(0,depths[0])
depths.append(depths[-1])
depthMap = interp1d(np.linspace(0.,1.,len(depths)),depths)
glfw.make_context_current(WINDOWS[1])
frameTex, frameBuf = expgui.createWarp(tables[0].shape[1],tables[0].shape[0])
shader = expgui.lutMixShader()
lutTex = []
for table in tables:
#table = np.dstack((table[:,:,0],table[:,:,2],table[:,:,1]))
#table = np.flipud(table)
#table = np.fliplr(table)
lutTex.append(expgui.createTexture(table))
print 'Calibration entering while loop'
while not glfw.window_should_close(WINDOWS[0]) and not glfw.window_should_close(WINDOWS[1]):
#set depth
#triangle = lambda x: abs(abs(((x+1)%4)-2)-1)
#depth = depthMap(triangle(time.time()/speed))
depth = DEPTH
requestedDepth[DISPLAY].value = depth
#get the right luts
glfw.make_context_current(WINDOWS[1])
low, high, factor = explogic.ExperimentScreen.chooseLut(lutMap,depth)
if GLWARP:
expgui.drawMixWarp(frameTex, frameBuf, lutTex[low], lutTex[high], factor, imgTex, shader)
else:
maps = cv2.addWeighted(tables[low],factor,tables[high],1-factor,0)
result = cv2.remap(img,maps[:,:,0].astype(np.float32)*maps.shape[0],(1-maps[:,:,1].astype(np.float32))*maps.shape[1],cv2.INTER_LINEAR)
expgui.updateTexture(imgTex, result)
expgui.drawQuad(imgTex)
glfw.swap_buffers(WINDOWS[1])
glfw.poll_events()
stillRunning.value = False
glfw.terminate()
def verifyDepth(id,currentDepth,requestedDepth,stillRunning):
global DEPTH, DISPLAY, SIDE, DIRECTORY, WINDOWS
pass
def toNumpyImage(mp_arr):
global RESOLUTION
array = np.frombuffer(mp_arr.get_obj())
array.shape = RESOLUTION
return array
def main(function=captureWarp,display=None,camera=None,depth=None,directory=None,image=None,resolution=None,point=None):
global DISPLAY,SIDES,SIDE,CAMERA,DEPTH,DIRECTORY,IMAGE,RESOLUTION,POINT
DISPLAY = display if display is not None else DISPLAY
SIDE = SIDES[DISPLAY]
CAMERA = camera if camera is not None else CAMERA
DEPTH = depth if depth is not None else DEPTH
DIRECTORY = directory if directory is not None else DIRECTORY
IMAGE = image if image is not None else IMAGE
RESOLUTION = resolution if resolution is not None else RESOLUTION
POINT = point if point is not None else POINT
currentDepth = []
requestedDepth = []
for i in range(expglobals.NUM_DEPTH_DISPLAYS):
currentDepth.append(Value('d', 0.))
requestedDepth.append(Value('d',0.))
stillRunning = Value('b', True)
sharedImage = Array(ctypes.c_double, RESOLUTION[0]*RESOLUTION[1])
processes = []
processes.append(Process(target=function, args=(DISPLAY,currentDepth,requestedDepth,stillRunning,sharedImage)))
#if function != captureWarp:
# processes.append(Process(target=streamCamera, args=(stillRunning,sharedImage,currentDepth)))
processes.append(Process(target=runDisplay, args=(sys.argv,DISPLAY,currentDepth,requestedDepth,stillRunning)))
for p in processes:
p.start()
for p in processes:
p.join()
def interactiveMain():
windows = initDisplay()
# capture the images by hand
captureWarpInteractive(windows)
# transfer the images to some directory
wait(windows[1])
    ui.terminate()
if __name__ == "__main__":
#main(calibratePoint)
#main(verifyCalibration)
#main(captureWarp)
interactiveMain()
'''
captures = []
for i in range(3):
seq = []
for j in range(9):
seq.append(cv2.imread('../AccomodationExperiment/data/calibration/capture_%02d_%02d.png'%(i,j),-1))
captures.append(seq)
import ctypes, cv2
from multiprocessing import Array
import numpy as np
a = Array(ctypes.c_double, 786432)
image = tonumpyarray(a)
image.shape = (768,1024)
cv2.imshow('temp',image)
cv2.waitKey()
'''
| 40.667283
| 183
| 0.64888
|
ea37ac0e52f132168518ec4a9b6bdf0f5b34531a
| 921
|
py
|
Python
|
pack/nn/attentionreadout.py
|
yippp/SY-GNN
|
65a1e2566dce549724cef080dfac7efc00fbe91b
|
[
"MIT"
] | 6
|
2020-05-04T03:37:24.000Z
|
2021-04-21T15:15:54.000Z
|
pack/nn/attentionreadout.py
|
yippp/SY-GNN
|
65a1e2566dce549724cef080dfac7efc00fbe91b
|
[
"MIT"
] | null | null | null |
pack/nn/attentionreadout.py
|
yippp/SY-GNN
|
65a1e2566dce549724cef080dfac7efc00fbe91b
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn as nn
import torch.nn.functional as F
__all__ = [
'AttentionReadout'
]
class AttentionReadout(nn.Module):
def __init__(self, f_in, f_attn, f_out):
super(AttentionReadout, self).__init__()
self.in_features = f_in
self.out_features = f_out
self.W1 = nn.Parameter(torch.zeros(size=(f_attn, f_in)))
nn.init.xavier_uniform_(self.W1.data, gain=1.414)
self.W2 = nn.Parameter(torch.zeros(size=(f_out, f_attn)))
nn.init.xavier_uniform_(self.W2.data, gain=1.414)
def forward(self, H: torch.Tensor):
x = self.W1 @ H.permute(0, 2, 1) # f_attn x atoms = (f_attn x f_in) @ (f_in x atoms)
x = self.W2 @ torch.tanh(x) # f_out x atoms = (f_out x f_attn) @ (f_attn x atoms)
x = F.softmax(x, dim=2)
y = x @ H # f_out x f_in = (f_out x atoms) @ (atoms x f_in)
return y.view(y.shape[0], -1)
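# A minimal usage sketch (not part of the original module): AttentionReadout
# pools per-atom features H of shape (batch, atoms, f_in) into one flat vector
# of length f_out * f_in per molecule. All sizes below are arbitrary examples.
if __name__ == '__main__':
    batch, atoms, f_in, f_attn, f_out = 4, 17, 32, 16, 8
    readout = AttentionReadout(f_in, f_attn, f_out)
    H = torch.randn(batch, atoms, f_in)
    pooled = readout(H)
    assert pooled.shape == (batch, f_out * f_in)  # (4, 256)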
| 32.892857
| 93
| 0.618893
|
682773002935b27fc4b6c471581cae95d1f5ecab
| 1,885
|
py
|
Python
|
utils.py
|
Mohamed501258/flask-fileexplorer
|
e3131a759b7a3803eaf0a848852385ad048fc24e
|
[
"MIT"
] | null | null | null |
utils.py
|
Mohamed501258/flask-fileexplorer
|
e3131a759b7a3803eaf0a848852385ad048fc24e
|
[
"MIT"
] | null | null | null |
utils.py
|
Mohamed501258/flask-fileexplorer
|
e3131a759b7a3803eaf0a848852385ad048fc24e
|
[
"MIT"
] | null | null | null |
from setup import app
from flask import request, Response
import os
import re
video_types = ['mp4', "webm", "opgg"]
audio_types = ['mp3', "wav", "ogg", "mpeg", "aac", "3gpp", "3gpp2", "aiff", "x-aiff", "amr", "mpga"]
@app.after_request
def after_request(response):
response.headers.add('Accept-Ranges', 'bytes')
return response
def get_chunk(start_byte=None, end_byte=None, full_path=None):
file_size = os.stat(full_path).st_size
if end_byte:
length = end_byte + 1 - start_byte
else:
length = file_size - start_byte
with open(full_path, 'rb') as f:
f.seek(start_byte)
chunk = f.read(length)
return chunk, start_byte, length, file_size
def get_file(file_path, mimetype):
range_header = request.headers.get('Range', None)
start_byte, end_byte = 0, None
if range_header:
match = re.search(r'(\d+)-(\d*)', range_header)
groups = match.groups()
if groups[0]:
start_byte = int(groups[0])
if groups[1]:
end_byte = int(groups[1])
chunk, start, length, file_size = get_chunk(start_byte, end_byte, file_path)
resp = Response(chunk, 206, mimetype=f'video/{mimetype}',
content_type=mimetype, direct_passthrough=True)
print(length)
resp.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(start, start + length - 1, file_size))
return resp
def is_media(filepath):
    found_media = re.search(r"\.mp4$|\.mp3$", filepath, re.IGNORECASE)
    if found_media:
        extension = found_media[0].lower()[1:]
        if extension in video_types:
            return f"video/{extension}"
        return f"audio/{extension}"
    return False
def get_file_extension(fname):
    found_extension = re.search(r"\.[A-Za-z0-9]*$", fname, re.IGNORECASE)
if found_extension:
return found_extension[0][1:].lower()
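# A minimal sketch (not part of the original module) of how a Range header maps
# onto get_chunk above: for "Range: bytes=1000-1999" the regex in get_file
# yields start_byte=1000 and end_byte=1999, so get_chunk reads
# end_byte + 1 - start_byte = 1000 bytes starting at offset 1000 and the 206
# response advertises "Content-Range: bytes 1000-1999/<file_size>".
if __name__ == '__main__':
    match = re.search(r'(\d+)-(\d*)', 'bytes=1000-1999')
    start_byte, end_byte = int(match.group(1)), int(match.group(2))
    print(start_byte, end_byte, end_byte + 1 - start_byte)  # 1000 1999 1000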
| 31.949153
| 103
| 0.635544
|
3f837c753c3cf6651e9b866c343ca8b8367fa0a7
| 1,300
|
py
|
Python
|
main.py
|
arathee2/what-movie
|
122c7a3527209d1ff9c2809fceceabb0bf542d97
|
[
"MIT"
] | null | null | null |
main.py
|
arathee2/what-movie
|
122c7a3527209d1ff9c2809fceceabb0bf542d97
|
[
"MIT"
] | null | null | null |
main.py
|
arathee2/what-movie
|
122c7a3527209d1ff9c2809fceceabb0bf542d97
|
[
"MIT"
] | null | null | null |
import argparse
from what_movie.model.imdb import IMDbDF
from what_movie.utils.constants import OUTPUT_COLS, LANGUAGE_FILTER, \
GENRE_FILTER, COLS_TO_NAMES
def main(n, year_from, year_to):
data = IMDbDF()
data.filter_data(LANGUAGE_FILTER,
GENRE_FILTER,
year_from=year_from,
year_to=year_to)
movies = data.pick_top_movies(n=n)
for i in range(n):
heading = f'Movie #{i + 1}'
hyphens = '-' * len(heading)
print(f'{hyphens}\n{heading}\n{hyphens}')
for col in OUTPUT_COLS:
print(f'{COLS_TO_NAMES[col]}: {movies[i][col]}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--num_movies", type=int, default=3,
help="Number of of movies to recommend.")
parser.add_argument("-f", "--year_from", type=int, default=1800,
help="Consider movies released this "
"or after this year.")
parser.add_argument("-t", "--year_to", type=int, default=2020,
help="Consider movies released this "
"or before this year.")
args = parser.parse_args()
main(args.num_movies, args.year_from, args.year_to)
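# Hypothetical command-line usage (the script name is assumed; the flags mirror
# the argparse setup above):
#   python main.py -n 5 -f 1990 -t 2010
# which would print the top 5 recommended movies released between 1990 and 2010.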
| 37.142857
| 70
| 0.582308
|
2788455967ebe512a9cddb3a35c51907a2af817c
| 8,792
|
py
|
Python
|
bayesian_cut/cuts/spectral_models.py
|
DTUComputeCognitiveSystems/bayesian_cut
|
89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680
|
[
"BSD-3-Clause"
] | 7
|
2019-03-01T13:58:59.000Z
|
2021-12-16T00:41:26.000Z
|
bayesian_cut/cuts/spectral_models.py
|
DTUComputeCognitiveSystems/bayesian_cut
|
89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680
|
[
"BSD-3-Clause"
] | 3
|
2019-02-28T17:38:18.000Z
|
2019-03-01T13:55:30.000Z
|
bayesian_cut/cuts/spectral_models.py
|
DTUComputeCognitiveSystems/bayesian_cut
|
89dc3e5a2f4b9d4ab7bd7aadbc7d0f7314ffc680
|
[
"BSD-3-Clause"
] | 1
|
2019-02-28T17:55:24.000Z
|
2019-02-28T17:55:24.000Z
|
#!/usr/bin/env python3
#
# -*- coding: utf-8 -*-
#
# Author: Laurent Vermue <lauve@dtu.dk>
# Maciej Korzepa <mjko@dtu.dk>
# Petr Taborsky <ptab@dtu.dk>
#
# License: 3-clause BSD
import scipy as sc
from sklearn.cluster import KMeans
from numpy.linalg import eigh
import numpy as np
class base_class(object):
def __init__(self, X):
self.X = X.toarray()
self.z_ = None
class RatioCut(base_class):
"""
cuts a graph with the RatioCut method using its adjacency matrix X of size nxn
Model settings
--------------
X : sparse scipy matrix, shape(n, n)
Adjacency matrix
Model attributes after running
------------------------------
z_ : numpy array, shape(n,)
Group assignment vector
Reference:
Von Luxburg, Ulrike. "A tutorial on spectral clustering." Statistics and computing 17.4 (2007): 395-416.
"""
def run(self, add_noiselinks=False):
""" Perform given cut method on the adjacency matrix X
Parameters
----------
add_noiselinks : float
Percentage of all links to be altered. Example: The adjacency matrix contains 100 links and add_noiselinks
is set to 0.1. In this case 10 links are altered, i.e. existing links can disappear or new links appear.
"""
X = self.X.copy()
if add_noiselinks > 0:
add_noiselinks = int(add_noiselinks * X.sum() / 2)
indices = np.triu_indices(X.shape[0], 1)
choices = np.random.choice(indices[0].shape[0], add_noiselinks, replace=False)
indices = np.array(indices)
indices = indices[:, choices]
indices = (indices[0], indices[1])
X[indices] = 1 - X[indices]
lower_indices = np.tril_indices(X.shape[0], 0)
X[lower_indices] = 0
X = X + X.T
D = np.diagflat(np.sum(X, axis=0)) # degree matrix nxn
L = D - X # unnormalized Laplacian
k = 2 # simplified problem with k set to 2
        (eigvals, eigvects) = np.linalg.eigh(L)  # eigenvectors and corresponding eigenvalues of L
U = eigvects[:, eigvals.argsort()][:, 0:k]
RR = KMeans(n_clusters=k).fit(U) # clustering rows of U (nxk matrix) having eigenvectors in columns
self.z_ = RR.predict(U)
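# A minimal usage sketch of RatioCut (toy data, not part of the original
# module): two triangles joined by a single edge should end up in separate
# clusters. scipy.sparse is used because the constructor calls X.toarray().
#
#   import scipy.sparse as sp
#   A = np.zeros((6, 6))
#   for i, j in [(0, 1), (0, 2), (1, 2), (3, 4), (3, 5), (4, 5), (2, 3)]:
#       A[i, j] = A[j, i] = 1
#   model = RatioCut(sp.csr_matrix(A))
#   model.run()
#   print(model.z_)  # e.g. [0 0 0 1 1 1] (cluster labels may be swapped)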
class NormCutSM(base_class):
"""
cuts a graph with the NormCut method by Shi and Malik method using its adjacency matrix X of size nxn
Model settings
--------------
X : sparse scipy matrix, shape(n, n)
Adjacency matrix
Model attributes after running
------------------------------
z_ : numpy array, shape(n,)
Group assignment vector
Reference:
Von Luxburg, Ulrike. "A tutorial on spectral clustering." Statistics and computing 17.4 (2007): 395-416.
"""
def run(self, add_noiselinks=False):
""" Perform given cut method on the adjacency matrix X
Parameters
----------
add_noiselinks : float
Percentage of all links to be altered. Example: The adjacency matrix contains 100 links and add_noiselinks
is set to 0.1. In this case 10 links are altered, i.e. existing links can disappear or new links appear.
"""
X = self.X.copy()
if add_noiselinks > 0:
add_noiselinks = int(add_noiselinks * X.sum() / 2)
indices = np.triu_indices(X.shape[0], 1)
choices = np.random.choice(indices[0].shape[0], add_noiselinks, replace=False)
indices = np.array(indices)
indices = indices[:, choices]
indices = (indices[0], indices[1])
X[indices] = 1 - X[indices]
lower_indices = np.tril_indices(X.shape[0], 0)
X[lower_indices] = 0
X = X + X.T
D = np.diagflat(np.sum(X, axis=1)) # degree matrix nxn
L = D - X # unnormalized Laplacian
k = 2 # simplified problem with k set to 2
(eigvals, eigvects) = sc.linalg.eigh(a=L, b=D)
        # eigenvectors and corresponding eigenvalues of the generalized eigenproblem Lu = lambda*Du
U = eigvects[:, eigvals.argsort()][:, 0:k]
RR = KMeans(n_clusters=k).fit(U) # clustering rows of U (nxk matrix) having eigenvectors in columns
self.z_ = RR.predict(U)
class NormCutNJW(base_class):
"""
cuts a graph with the NormCut method by NG, Jordan and Weiss using its adjacency matrix X of size nxn
Model settings
--------------
X : sparse scipy matrix, shape(n, n)
Adjacency matrix
Model attributes after running
------------------------------
z_ : numpy array, shape(n,)
Group assignment vector
Reference:
Von Luxburg, Ulrike. "A tutorial on spectral clustering." Statistics and computing 17.4 (2007): 395-416.
"""
def run(self, add_noiselinks=False):
""" Perform given cut method on the adjacency matrix X
Parameters
----------
add_noiselinks : float
Percentage of all links to be altered. Example: The adjacency matrix contains 100 links and add_noiselinks
is set to 0.1. In this case 10 links are altered, i.e. existing links can disappear or new links appear.
"""
X = self.X.copy()
if add_noiselinks > 0:
add_noiselinks = int(add_noiselinks * X.sum() / 2)
indices = np.triu_indices(X.shape[0], 1)
choices = np.random.choice(indices[0].shape[0], add_noiselinks, replace=False)
indices = np.array(indices)
indices = indices[:, choices]
indices = (indices[0], indices[1])
X[indices] = 1 - X[indices]
lower_indices = np.tril_indices(X.shape[0], 0)
X[lower_indices] = 0
X = X + X.T
D = np.diagflat(np.sum(X, axis=1)) # degree matrix nxn
D1_2 = np.linalg.cholesky(np.linalg.inv(D))
        L = np.eye(X.shape[0]) - np.dot(np.dot(D1_2, X), D1_2)  # symmetric Laplacian
k = 2 # simplified problem with k set to 2
        (eigvals, eigvects) = sc.linalg.eigh(L)  # eigenvectors and corresponding eigenvalues
U = eigvects[:, eigvals.argsort()][:, 0:k]
Uu = np.matrix(np.sqrt(np.sum(np.power(U, 2), axis=1)))
T = np.multiply(U, (1 / Uu).T)
# check of the norm equals one: np.sqrt(np.sum(np.power(T,2),axis=1))
RR = KMeans(n_clusters=k).fit(T) # clustering rows of U (nxk matrix) having eigenvectors in columns
self.z_ = RR.predict(T)
class NewmanModularityCut(base_class):
"""
cuts a graph with the spectral modularity optimization method by Newman using its adjacency matrix X of size nxn
Model settings
--------------
X : sparse scipy matrix, shape(n, n)
Adjacency matrix
Model attributes after running
------------------------------
z_ : numpy array, shape(n,)
Group assignment vector
Reference:
Newman, M. E. J. “Modularity and Community Structure in Networks.” Proceedings of the National Academy of
Sciences of the United States of America 103.23 (2006): 8577–8582. PMC. Web. 2 Oct. 2018.
"""
def run(self, add_noiselinks=False):
""" Perform given cut method on the adjacency matrix X
Parameters
----------
add_noiselinks : float
Percentage of all links to be altered. Example: The adjacency matrix contains 100 links and add_noiselinks
is set to 0.1. In this case 10 links are altered, i.e. existing links can disappear or new links appear.
"""
A = self.X.copy()
np.fill_diagonal(A, 0)
if add_noiselinks > 0:
add_noiselinks = int(add_noiselinks * A.sum() / 2)
indices = np.triu_indices(A.shape[0], 1)
choices = np.random.choice(indices[0].shape[0], add_noiselinks, replace=False)
indices = np.array(indices)
indices = indices[:, choices]
indices = (indices[0], indices[1])
A[indices] = 1 - A[indices]
lower_indices = np.tril_indices(A.shape[0], 0)
A[lower_indices] = 0
A = A + A.T
degree_vec = np.sum(A, axis=1)
m = np.sum(degree_vec) / 2
deduct_matrix = np.outer(degree_vec, degree_vec) / (2 * m)
B = A - deduct_matrix
w, v = eigh(B)
leading_eig_value = np.argmax(w)
z_ = np.squeeze(np.array(v[:, leading_eig_value]))
self.z_ = np.zeros((A.shape[0]))
self.z_[z_ >= 0] = 1
self.z_[z_ < 0] = -1
modularity = (1/(4*m)) * self.z_.T.dot(B).dot(self.z_)
# print("Modularity of method Newman Cut: {:.4f}".format(modularity))
self.z_[z_ < 0] = 0
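# The spectral step above is a restatement of Newman (2006), summarised here
# for reference only:
#     B_ij = A_ij - k_i * k_j / (2m)      (modularity matrix, k_i = degree of node i)
#     Q    = (1 / 4m) * s^T B s           (modularity of a split s in {-1, +1}^n)
# run() takes s as the sign pattern of the leading eigenvector of B (the
# rank-one relaxation of maximising Q) and finally relabels -1 as 0 so that z_
# follows the {0, 1} convention used by the other cut models in this module.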
| 36.941176
| 126
| 0.586215
|
0986a5b3af838b1abe308e3cd8ee5e386c11a842
| 416
|
py
|
Python
|
dem fractal boiis/bifur.py
|
WalrusArtist/FractalBoiis
|
35b22ac45ad6baa4ad00805ac1bcb314d2ce4b99
|
[
"MIT"
] | null | null | null |
dem fractal boiis/bifur.py
|
WalrusArtist/FractalBoiis
|
35b22ac45ad6baa4ad00805ac1bcb314d2ce4b99
|
[
"MIT"
] | null | null | null |
dem fractal boiis/bifur.py
|
WalrusArtist/FractalBoiis
|
35b22ac45ad6baa4ad00805ac1bcb314d2ce4b99
|
[
"MIT"
] | null | null | null |
import math
from PIL import Image
imgx = 10000
imgy = 5000
image = Image.new("RGB", (imgx, imgy))
xa = 3.2388663968
xb = 4.0
maxit = 1000
for i in range(imgx):
r = xa + (xb - xa) * float(i) / (imgx - 1)
x = 0.00003160718
for j in range(maxit):
x = r * x * (1 - x)
if j > maxit / 2:
image.putpixel((i, int(x * imgy)), (255, 255, 255))
image.save("Bifurcation1.png", "PNG")
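# The loop above iterates the logistic map x_{n+1} = r * x_n * (1 - x_n) for one
# value of r per pixel column, discards the first maxit/2 iterates as transient,
# and plots the remaining orbit, giving the classic bifurcation diagram. A tiny
# text-mode sketch of the same idea (names are ours, not part of this script):
#
#   def orbit(r, x0=0.5, warmup=500, keep=100):
#       x = x0
#       for _ in range(warmup):
#           x = r * x * (1 - x)
#       out = []
#       for _ in range(keep):
#           x = r * x * (1 - x)
#           out.append(x)
#       return out
#
#   print(sorted(set(round(v, 3) for v in orbit(3.2))))  # a stable period-2 cycle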
| 19.809524
| 63
| 0.550481
|
560c13946ed1357daf9e8fda4678f474ed9ad394
| 13,154
|
py
|
Python
|
tools_hu/rule/rule_ddj_total.py
|
hukefei/chongqing_contest
|
c38ae3e6f25230282c65cdd568de93f28e88c6d6
|
[
"Apache-2.0"
] | 1
|
2021-04-12T13:29:54.000Z
|
2021-04-12T13:29:54.000Z
|
tools_hu/rule/rule_ddj_total.py
|
hukefei/chongqing_contest
|
c38ae3e6f25230282c65cdd568de93f28e88c6d6
|
[
"Apache-2.0"
] | 1
|
2021-04-12T13:31:27.000Z
|
2021-04-12T13:33:10.000Z
|
tools_hu/rule/rule_ddj_total.py
|
hukefei/chongqing_contest
|
c38ae3e6f25230282c65cdd568de93f28e88c6d6
|
[
"Apache-2.0"
] | 1
|
2021-04-21T10:14:15.000Z
|
2021-04-21T10:14:15.000Z
|
import numpy as np
import os
import itertools
import pandas as pd
import json
import pickle
import cv2
def bboxes_iou(boxes1, boxes2):
"""
boxes: [xmin, ymin, xmax, ymax, score, class] format coordinates.
"""
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:4], boxes2[..., 2:4])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, 0.0)
return ious
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
best_bboxes_idx = []
while len(bboxes) > 0:
max_ind = np.argmax(bboxes[:, 4])
best_bbox = bboxes[max_ind]
best_bboxes_idx.append(max_ind)
bboxes = np.concatenate([bboxes[: max_ind], bboxes[max_ind + 1:]])
iou = bboxes_iou(best_bbox[np.newaxis, :4], bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
bboxes[:, 4] = bboxes[:, 4] * weight
score_mask = bboxes[:, 4] > 0.
bboxes = bboxes[score_mask]
return best_bboxes_idx
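# A minimal usage sketch of the two helpers above (toy coordinates and scores,
# not from the original project): the first two boxes overlap heavily, so plain
# NMS keeps only the higher-scoring one plus the distant third box. Note that
# the returned indices are relative to the shrinking array inside the loop.
#
#   boxes = np.array([[10, 10, 50, 50, 0.90, 1],
#                     [12, 12, 52, 52, 0.80, 1],
#                     [100, 100, 140, 140, 0.70, 2]])
#   keep_idx = nms(boxes.copy(), iou_threshold=0.5, method='nms')  # -> [0, 0]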
# def check_in(boxes1, boxes2, thr=0.9):
# """
# boxes: [xmin, ymin, xmax, ymax, score, class] format coordinates.
# check if boxes2 in boxes1 using threshold thr.
# """
# boxes1 = np.array(boxes1)
# boxes2 = np.array(boxes2)
#
# boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
# boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
#
# left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
# right_down = np.minimum(boxes1[..., 2:4], boxes2[..., 2:4])
#
# inter_section = np.maximum(right_down - left_up, 0.0)
# inter_area = inter_section[..., 0] * inter_section[..., 1]
# if inter_area / boxes2_area >= thr:
# return 2
# elif inter_area / boxes1_area >= thr:
# return 1
# else:
# return 0
#
#
# def check_in_filter(original_df, df, thr=0.9):
# """
# filter result df using check in function.
# :param df:
# :param thr: threshold
# :return: filtered df
# """
# delect_bbox = []
# for i in itertools.combinations(df.index, 2):
# bbox1 = df.loc[i[0], 'bbox']
# bbox2 = df.loc[i[1], 'bbox']
# check = check_in(bbox1, bbox2, thr)
# if check:
# delect_bbox.append(i[check - 1])
# delect_bbox = set(delect_bbox)
# for bbox_idx in delect_bbox:
# original_df.drop(index=bbox_idx, inplace=True)
#
# return original_df
def check_concat(boxes1, boxes2, thr=0):
"""
boxes: [xmin, ymin, xmax, ymax] format coordinates.
    check if boxes1 overlaps boxes2 (intersection area greater than thr)
"""
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:4], boxes2[..., 2:4])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
if inter_area > thr:
return True
else:
return False
def show_and_save_images(img_path, img_name, bboxes, codes, out_dir=None):
img = cv2.imread(os.path.join(img_path, img_name))
for i, bbox in enumerate(bboxes):
bbox = np.array(bbox)
bbox_int = bbox[:4].astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
code = codes[i]
label_txt = code + ': ' + str(round(bbox[4], 2))
cv2.rectangle(img, left_top, right_bottom, (0, 0, 255), 1)
cv2.putText(img, label_txt, (bbox_int[0], max(bbox_int[1] - 2, 0)),
cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
if out_dir is not None:
cv2.imwrite(os.path.join(out_dir, img_name), img)
return img
def prio_check(prio_lst, code_lst):
idx_lst = []
for code in code_lst:
assert code in prio_lst, '{} should be in priority file'.format(code)
idx = prio_lst.index(code)
idx_lst.append(idx)
final_code = prio_lst[min(idx_lst)]
return final_code
def filter_code(df, code, thr, replace=None):
check_code = df[(df['category'] == code) & (df['score'] < thr)]
if replace is None:
df = df.drop(index=check_code.index)
else:
df.loc[check_code.index, 'category'] = replace
return df
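# A minimal sketch of filter_code above (toy values, not from the original
# project): rows of the given category scoring below the threshold are either
# dropped or relabelled; everything else is left untouched.
#
#   df = pd.DataFrame({'category': ['V06-R', 'V06-R', 'L01'],
#                      'score':    [0.50,    0.90,    0.60]})
#   filter_code(df.copy(), 'V06-R', 0.7)                 # drops the 0.50 row
#   filter_code(df.copy(), 'V06-R', 0.7, replace='V99')  # relabels it instead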
def model_test(result,
img_name,
codes,
score_thr=0.05):
output_bboxes = []
json_dict = []
pattern = img_name.split('_')[1][0]
total_bbox = []
for id, boxes in enumerate(result): # loop for categories
category_id = id + 1
if len(boxes) != 0:
for box in boxes: # loop for bbox
conf = box[4]
if conf > score_thr:
total_bbox.append(list(box) + [category_id])
bboxes = np.array(total_bbox)
best_bboxes = bboxes
output_bboxes.append(best_bboxes)
for bbox in best_bboxes:
coord = [round(i, 2) for i in bbox[:4]]
conf, category = bbox[4], codes[int(bbox[5]) - 1]
json_dict.append({'name': img_name, 'category': category, 'bbox': coord, 'score': conf, 'bbox_score': bbox[:5], 'pattern': pattern})
return json_dict
def default_rule(result_lst, img_path_lst, img_name_lst, config, codes, draw_img=False, **kwargs):
"""
:param det_lst: list, in order B, G, R, W
:param img_path: list,
:param img_name: list,
:param size: float, size from .gls file
:param json_config_file: str, config parameters in json format
:param code_file: str, code file in txt format
:param draw_img: Boolean,
:return: main code, bbox, score, image
"""
# get product id
product = kwargs.get('product', None)
df_lst = []
for i in range(4):
det_lst = result_lst[i]
img_path = img_path_lst[i]
img_name = img_name_lst[i]
# convert list result to dict
json_dict = model_test(det_lst, img_name, codes)
det_df = pd.DataFrame(json_dict, columns=['name', 'category', 'bbox', 'score', 'bbox_score', 'pattern'])
# prio parameters
prio_weight = config['prio_weight']
prio_lst = config['prio_order']
if config['false_name'] not in prio_lst:
prio_lst.append(config['false_name'])
if config['other_name'] not in prio_lst:
prio_lst.append(config['other_name'])
# change other name using threshold
det_df.loc[det_df['score'] < config['other_thr'], 'category'] = config['other_name']
# filter pattern for r, g, b
pattern = img_name.split('_')[1][0]
if pattern == 'R':
det_df = filter_code(det_df, 'V06-R', 0.7)
det_df = filter_code(det_df, 'V06-G', 1.1)
det_df = filter_code(det_df, 'V06-B', 1.1)
det_df = filter_code(det_df, 'E07-R', 0.9)
det_df = filter_code(det_df, 'E07-G', 1.1)
det_df = filter_code(det_df, 'E07-B', 1.1)
det_df = filter_code(det_df, 'E02-R', 0.8)
det_df = filter_code(det_df, 'E02-G', 1.1)
det_df = filter_code(det_df, 'E02-B', 1.1)
if pattern == 'G':
det_df = filter_code(det_df, 'V06-R', 1.1)
det_df = filter_code(det_df, 'V06-G', 0.7)
det_df = filter_code(det_df, 'V06-B', 1.1)
det_df = filter_code(det_df, 'E07-R', 1.1)
det_df = filter_code(det_df, 'E07-G', 0.9)
det_df = filter_code(det_df, 'E07-B', 1.1)
det_df = filter_code(det_df, 'E02-R', 1.1)
det_df = filter_code(det_df, 'E02-G', 0.8)
det_df = filter_code(det_df, 'E02-B', 1.1)
if pattern == 'B':
det_df = filter_code(det_df, 'V06-R', 1.1)
det_df = filter_code(det_df, 'V06-G', 1.1)
det_df = filter_code(det_df, 'V06-B', 0.7)
det_df = filter_code(det_df, 'E07-R', 1.1)
det_df = filter_code(det_df, 'E07-G', 1.1)
det_df = filter_code(det_df, 'E07-B', 0.9)
det_df = filter_code(det_df, 'E02-R', 1.1)
det_df = filter_code(det_df, 'E02-G', 1.1)
det_df = filter_code(det_df, 'E02-B', 0.8)
if pattern == 'W':
det_df = filter_code(det_df, 'V06-R', 0.3)
det_df = filter_code(det_df, 'V06-G', 0.3)
det_df = filter_code(det_df, 'V06-B', 0.3)
det_df = filter_code(det_df, 'E07-R', 0.9)
det_df = filter_code(det_df, 'E07-G', 0.9)
det_df = filter_code(det_df, 'E07-B', 0.9)
det_df = filter_code(det_df, 'E02-R', 0.8)
det_df = filter_code(det_df, 'E02-G', 0.8)
det_df = filter_code(det_df, 'E02-B', 0.8)
# filtering
det_df = filter_code(det_df, 'notch', 0.6)
det_df = filter_code(det_df, 'L01', 0.5)
det_df = filter_code(det_df, 'L02', 0.5)
det_df = filter_code(det_df, 'L09', 0.5)
det_df = filter_code(det_df, 'L10', 0.5)
det_df = filter_code(det_df, 'V04', 0.8)
det_df = filter_code(det_df, 'V01', 0.9)
det_df = filter_code(det_df, 'V03', 0.5)
det_df = filter_code(det_df, 'M07', 0.7)
det_df = filter_code(det_df, 'M07-64', 0.6)
det_df = filter_code(det_df, 'V99', 0.3)
det_df = filter_code(det_df, 'M97', 0.3)
# filter M97
chip = img_name.split('_')[0]
position = chip[-4:]
if (position[:2] not in ('01', '05')) and (position[2:] not in ('01', '18')):
det_df = filter_code(det_df, 'M97', 1.1)
# filter V04
        if 'V04' in det_df['category'].values:
det_df = filter_code(det_df, 'V01', 1.1)
# judge C08
if product == '639' and ('notch' in det_df['category'].values):
notch_bbox = det_df.loc[det_df['category'] == 'notch', 'bbox'].values[0]
for idx in det_df.index:
cate = det_df.loc[idx, 'category']
if cate in ('L01', 'L02', 'L09', 'L10'):
bbox = det_df.loc[idx, 'bbox']
if check_concat(bbox, notch_bbox):
det_df.loc[idx, 'category'] = 'C08'
# judge L17
cates = det_df['category'].values
idx_lst = []
if ('L01' in cates or 'L02' in cates) and ('L09' in cates or 'L10' in cates):
for idx1 in det_df[(det_df['category'] == 'L01') | (det_df['category'] == 'L02')].index:
for idx2 in det_df[(det_df['category'] == 'L09') | (det_df['category'] == 'L10')].index:
bbox1 = det_df.loc[idx1, 'bbox']
bbox2 = det_df.loc[idx2, 'bbox']
if check_concat(bbox1, bbox2):
idx_lst.append(idx1)
idx_lst.append(idx2)
idx_lst = list(set(idx_lst))
det_df.loc[idx_lst, 'category'] = 'L17'
        # delete notch
det_df = filter_code(det_df, 'notch', 1.1)
df_lst.append(det_df)
final_det_df = pd.concat(df_lst)
final_det_df.reset_index(inplace=True)
# ET judge
final_code = list(det_df['category'].values)
best_bbox = list(det_df['bbox'].values)
best_score = list(det_df['score'].values)
# draw images
if draw_img:
img = show_and_save_images(img_path,
img_name,
det_df.bbox_score.values,
det_df.category.values)
else:
img = None
return final_code, best_bbox, best_score, img
if __name__ == '__main__':
import matplotlib.pyplot as plt
config_file = r'C:\Users\huker\Desktop\G6_21101-V1.0\G6_21101-V1.0.json'
code_file = r'C:\Users\huker\Desktop\G6_21101-V1.0\G6_21101-V1.0.txt'
img_path = r'C:\Users\huker\Desktop\G6_21101-V1.0'
img_name = r'w97kp1222a0216_-142801_-511998_before.jpg'
result_pkl = r'C:\Users\huker\Desktop\G6_21101-V1.0\21101_testimg.pkl'
with open(result_pkl, 'rb') as f:
result_lst = pickle.load(f)
main_code, bbox, score, img = default_rule(result_lst, img_path, img_name, config_file, code_file)
print(main_code, bbox, score)
b, g, r = cv2.split(img)
img2 = cv2.merge([r, g, b])
plt.imshow(img2)
plt.show()
| 36.337017
| 140
| 0.564695
|
aab6054b3354ae3f704edbadfa8f223d8ec0ee26
| 1,257
|
py
|
Python
|
src/space/space.py
|
lepisma/zsn
|
6f2cc759b9261a1f922c86153acad262cb8095fc
|
[
"MIT"
] | null | null | null |
src/space/space.py
|
lepisma/zsn
|
6f2cc759b9261a1f922c86153acad262cb8095fc
|
[
"MIT"
] | null | null | null |
src/space/space.py
|
lepisma/zsn
|
6f2cc759b9261a1f922c86153acad262cb8095fc
|
[
"MIT"
] | null | null | null |
"""
Word vector embedding space
"""
import numpy as np
class Space(object):
"""
A wrapper for trained word vector model
"""
def __init__(self, data_file):
"""
Read output from C version of glove
"""
self.words = []
self.data = []
f = open(data_file, "r")
for line in f:
items = line.split()
self.words.append(items[0])
self.data.append([float(item) for item in items[1:]])
f.close()
self.words = np.array(self.words)
self.data = np.array(self.data)
# Precomputing vector norms
self.norm = np.linalg.norm(self.data, axis=1)
def get_nearest_words(self, vector, count):
"""
Return the nearest 'count' words with confidence for the given vector
"""
similarity = np.divide(np.dot(self.data, vector),
(self.norm * np.linalg.norm(vector)))
indices = np.argsort(similarity)
return self.words[indices[-count:]][::-1]
def get_vector(self, word):
"""
Return vector of given word
"""
return self.data[np.where(self.words == word)[0][0]]
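# A minimal usage sketch (file name and query word are made up, not part of the
# original module). get_nearest_words ranks words by cosine similarity,
# cos(u, v) = (u . v) / (|u| * |v|), reusing the norms precomputed in __init__.
#
#   space = Space("vectors.txt")          # one "word v1 v2 ... vd" line per word
#   v = space.get_vector("king")
#   print(space.get_nearest_words(v, 5))  # the 5 most similar words, best first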
| 24.173077
| 78
| 0.521082
|
592eb2ab1dcb3d7f94aabb81df7679a16fed8e31
| 669
|
py
|
Python
|
ruuvi_gateway.py
|
RanzQ/ruuvi-gateway
|
4f3523efdcd3a6536d2c9a19846bd8e8f4b0640c
|
[
"MIT"
] | null | null | null |
ruuvi_gateway.py
|
RanzQ/ruuvi-gateway
|
4f3523efdcd3a6536d2c9a19846bd8e8f4b0640c
|
[
"MIT"
] | null | null | null |
ruuvi_gateway.py
|
RanzQ/ruuvi-gateway
|
4f3523efdcd3a6536d2c9a19846bd8e8f4b0640c
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from ruuvitag_sensor.ruuvi import RuuviTagSensor
from google.cloud import firestore
SCAN_TIMEOUT = 4
db = firestore.Client()
measurements_ref = db.collection(u'measurements')
timestamp = datetime.now()
def measurement_to_firestore(mac, payload):
result = payload.copy()
result["mac"] = mac
result["timestamp"] = timestamp
return result
datas = RuuviTagSensor.get_data_for_sensors(None, SCAN_TIMEOUT)
for mac, payload in datas.items():
value = measurement_to_firestore(mac, payload)
measurements_ref.add(value)
print('[{0}] Measurement added for {1}'.format(timestamp.strftime('%Y-%m-%d %H:%M:%S'), mac))
| 26.76
| 97
| 0.738416
|
b0787414236d83ea250ed2ba4ac5a340a6ff50d4
| 13,568
|
py
|
Python
|
warrior/Framework/ClassUtils/netconf_utils_class.py
|
YutakaMizugaki/warriorframework
|
685761cf044182ec88ce86a942d4be1e150a1256
|
[
"Apache-2.0"
] | 24
|
2017-06-06T15:48:08.000Z
|
2021-03-17T07:52:52.000Z
|
warrior/Framework/ClassUtils/netconf_utils_class.py
|
YutakaMizugaki/warriorframework
|
685761cf044182ec88ce86a942d4be1e150a1256
|
[
"Apache-2.0"
] | 272
|
2017-05-19T20:39:12.000Z
|
2021-12-13T19:34:51.000Z
|
warrior/Framework/ClassUtils/netconf_utils_class.py
|
pavithra-gowda/warrior
|
19b153310552b986b86b5470fcfea9547a74c3a9
|
[
"Apache-2.0"
] | 37
|
2017-05-17T21:24:37.000Z
|
2021-07-24T18:09:22.000Z
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""API for operations related to NetConf Interfaces
Packages used = Requests (documentation available at http://docs.python-requests.org/)
modified by ymizugaki 2017/07/11
"""
from ast import literal_eval
import traceback
import netconf
class WNetConf(object):
"""WNetConf class has methods required to interact with NetConf interfaces"""
def __init__(self):
'''Constructor for WNetConf'''
self.nc_manager = None
def open(self, session_kwds):
"""
Opens a SSH connection to a Netconf system
:Arguments:
The following keywords are allowed in session_kwds:
<ip> = IP address of the system (Required)
<nc_port> = use this tag to provide ssh port to connect to (default = 830)
<username> = username for the ssh session (default = None)
<password> = password for the ssh session (default = None)
<hostkey_verify> = enables hostkey verification from ~/.ssh/known_hosts (default = True)
            The following keywords are accepted but not used:
<timeout> = use if you want to set timeout while connecting (default = None)
<allow_agent> = enables querying SSH agent (default = True)
<look_for_keys> = enables looking in the usual locations for ssh keys (default = True)
<unknown_host_cb> = called when the server host key is not recognized (default = None)
<key_filename> = where the private key can be found (default = None)
<ssh_config> = enables parsing of OpenSSH configuration file (default = None)
<device_params> = ncclient device name (default = 'default')
:Returns:
1. connected(bool)= True / False
"""
nc_session = {'host': session_kwds.get('ip'),
'port': int(session_kwds.get('nc_port', 830)),
'username': session_kwds.get('username', None),
'password': session_kwds.get('password', None),
'hostkey_verify': literal_eval(session_kwds.get('hostkey_verify')),
'protocol_version': session_kwds.get('protocol_version', 'None')
}
try:
self.nc_manager = netconf.connect(nc_session['host'],
nc_session['port'],
nc_session['username'],
nc_session['password'],
nc_session['hostkey_verify'],
nc_session['protocol_version'])
# If connection fails return False
if self.nc_manager is None:
return False
except:
traceback.print_exc()
return False
return self.nc_manager.isOpen
def close(self):
'''Closes Netconf SSH session
:Arguments:
None
:Returns:
rpc_reply
'''
return self.nc_manager.close_session()
def request_rpc(self, request):
'''Send RPC command
:Arguments:
1. request = command to be sent as xml string
:Returns:
rpc_reply
'''
return self.nc_manager.rpc(request)
def get_config(self, datastore, filter_string=None, filter_type='subtree'):
'''Get configuration data from datastore
:Arguments:
1. datastore = name of datastore being queried
2. filter_string = portion of the configuration to retrieve. None = Everything
:Returns:
rpc_reply
'''
return self.nc_manager.get_config(datastore, filter_string, filter_type)
def edit_config(self, datastore, config,
default_operation=None, test_option=None, error_option=None):
'''Load config to the datastore
:Arguments:
1. datastore = Name of datastore being edited
2. config = The configuration data.
3. default_operation = [merger | replace | none (default)]
4. test_option = [test_then_set | set | none (default)]
5. error_option = [stop-on-error | continue-on-error
| rollback-on-error | none (default)]
rollback-on-error depends on :rollback-on-error capability
:Returns:
rpc_reply
'''
return self.nc_manager.edit_config(target=datastore,
config_string=config,
default_operation=default_operation,
test_option=test_option,
error_option=error_option)
def copy_config(self, source, target):
"""Create or replace an entire configuration datastore
with the contents of another complete configuration datastore
:Arguments:
1. source = name of the configuration datastore to use as the source of the copy
operation or config element containing the configuration subtree to copy.
2. target = name of the configuration datastore to use
as the destination of the copy operation
:Returns:
rpc_reply
"""
return self.nc_manager.copy_config(target, source)
def delete_config(self, datastore):
"""Delete a configuration datastore
:Arguments:
1. datastore = name of the configuration datastore to be deleted
:Returns:
rpc_reply
"""
return self.nc_manager.delete_config(datastore)
def commit(self, confirmed=False, timeout=None, persist=None, persist_id=None):
"""Commit the candidate datastore as the device's new current configuration
:Arguments:
1. confirmed(bool) = Commit is reverted if there is no followup commit
within the timeout interval.
2. timeout(int seconds) = The confirm timeout (Default=600 seconds)
            3. persist = persist string (persistence token for a confirmed commit)
            4. persist_id = persist string given in a previous confirmed commit
:Returns:
rpc_reply
"""
return self.nc_manager.commit(confirmed, timeout, persist, persist_id)
def lock(self, datastore):
"""Lock the configuration system
:Arguments:
1. datastore = name of the configuration datastore to be locked
:Returns:
rpc_reply
"""
return self.nc_manager.lock(datastore)
def unlock(self, datastore):
"""Release the configuration lock
:Arguments:
1. datastore = name of the configuration datastore to be unlocked
:Returns:
rpc_reply
"""
return self.nc_manager.unlock(datastore)
def get(self, filter_string=None, filter_type=None):
"""Retrieve running configuration and device state information.
:Arguments:
1. filter = specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)
xpath string or xml string
:Returns:
rpc_reply
"""
return self.nc_manager.get(filter_string, filter_type)
def kill_session(self, session_id):
"""Force the termination of a NETCONF session (not the current one!)
:Arguments:
1. session_id = is the session identifier of the NETCONF session
to be terminated as a string
:Returns:
rpc_reply
"""
return self.nc_manager.kill_session(session_id)
def validate(self, datastore):
"""Validate the contents of the specified configuration.
:Arguments:
1. datastore = the name of the configuration datastore being validated
:Returns:
rpc_reply
"""
return self.nc_manager.validate(datastore)
def cancel_commit(self, persist_id=None):
'''
cancel commit
:Arguments:
            1. persist_id = the persist-id that was specified in the confirmed commit operation
:Returns:
rpc_reply
'''
return self.nc_manager.cancel_commit(persist_id)
def discard_changes(self):
'''
        discard current modifications to the candidate datastore
:Arguments:
None
:Returns:
rpc_reply
'''
return self.nc_manager.discard_changes()
def create_subscription(self,
stream_from="NETCONF",
filter_type=None,
filter_string=None,
start_time=None,
stop_time=None):
'''
create subscription to receive event notification
:Arguments:
1. stream_from = NETCONF/SNMP/syslog etc.
2. filter_type = filter type xpath or subtree
3. filter_string = filter string, xml string or xpath string
4. start_time = start time
5. stop_time = stop time
:Returns:
rpc_reply
'''
return self.nc_manager.create_subscription(stream_from,
filter_type,
filter_string,
start_time,
stop_time)
def waitfor_subscription(self, wait_string, timeout=600):
'''
wait for specified notification event
:Arguments:
            1. wait_string(tuple) = tuple of an xpath string and a namespace
                                    dict mapping prefix to namespace string.
e.g.
wait_string = ("//ns:event[./ns:eventClass/text()='fault']",
{'ns':'urn:ietf:params:xml:ns:netconf:notification:1.0'})
*xpath string must include namespace prefix
2. timeout(integer) = timeout value in second
:Returns:
result(bool)
'''
return self.nc_manager.waitfor_subscription(wait_string, timeout)
def clear_notification_buffer(self):
'''
clear notification buffer
:Arguments:
None
:Returns:
always true
'''
return self.nc_manager.clear_notification_buffer()
def get_schema(self, identifier, version_number=None, format_type=None):
'''
get-schema rpc
:Arguments:
1. identifier(string) = schema id (name of yang module)
2. version_number(string) = schema version (e.g. 1.0)
3. format_type(string) = format name (e.g. yang)
:Returns:
rpc reply
'''
return self.nc_manager.get_schema(identifier, version_number, format_type)
def get_notification_buffer(self, notification_type=None):
"""get specified notification type from buffer
notification_type = Event | Alarm | DB-Change |
any product specific notification type
och-notif,
dhcpv6-client-event etc..
"""
templist = []
if notification_type is not None and \
not notification_type.lower() == "all":
if notification_type.lower() == "event":
notification_type = "event-notification"
elif notification_type.lower() == "alarm":
notification_type = "alarm-notification"
elif notification_type.lower() == "db-change":
notification_type = "netconf-config-change"
for notif in self.notification_data:
if notification_type in notif:
templist.append(notif)
else:
templist = self.notification_data
return templist
def clear_notification_buffer_for_print(self):
"""clear the notification print buffer
"""
return self.nc_manager.clear_notification_print_buffer()
@property
def session_id(self):
'''
netconf session-id which is in hello message
'''
return self.nc_manager.session_id
@property
def isCOMPLD(self):
'''
indicates whether rpc-reply = ok (True/False)
'''
return self.nc_manager.isCOMPLD
@property
def ErrorMessage(self):
'''
error message when rpc command gets rpc-error
'''
return self.nc_manager.error_message
@property
def send_data(self):
'''
previous send data
'''
return self.nc_manager.send_data
@property
def response_data(self):
'''
rpc-reply data
'''
return self.nc_manager.response_data
@property
def notification_data(self):
'''
received event notification data
'''
return self.nc_manager.notification_data
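# A minimal usage sketch (host, credentials and datastore are placeholders, not
# part of the original module), assuming the underlying `netconf` helper
# behaves as described in the docstrings above:
#
#   nc = WNetConf()
#   connected = nc.open({'ip': '192.0.2.10', 'nc_port': '830',
#                        'username': 'admin', 'password': 'admin',
#                        'hostkey_verify': 'False'})
#   if connected:
#       reply = nc.get_config('running')
#       print(nc.isCOMPLD, nc.response_data)
#       nc.close()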
| 37.274725
| 96
| 0.579673
|
1b13dee9ce346e60c9f46dd6df4eb756189fa818
| 478
|
py
|
Python
|
data/scripts/templates/object/tangible/wearables/shirt/shared_shirt_s34.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/wearables/shirt/shared_shirt_s34.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/wearables/shirt/shared_shirt_s34.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/shirt/shared_shirt_s34.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","shirt_s34")
#### BEGIN MODIFICATIONS ####
result.max_condition = 1000
#### END MODIFICATIONS ####
return result
| 25.157895
| 73
| 0.740586
|
e53baa3723123d61b304d5262b9c2ce8f624ea63
| 4,162
|
py
|
Python
|
src/python/pants/backend/codegen/tasks/ragel_gen.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/tasks/ragel_gen.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/tasks/ragel_gen.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import subprocess
from pants.backend.codegen.targets.java_ragel_library import JavaRagelLibrary
from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.binaries.binary_util import BinaryUtil
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_property
class RagelGen(SimpleCodegenTask):
@classmethod
def global_subsystems(cls):
return super(RagelGen, cls).global_subsystems() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(RagelGen, cls).register_options(register)
register('--supportdir', default='bin/ragel', advanced=True,
help='The path to find the ragel binary. Used as part of the path to lookup the'
'tool with --pants-support-baseurls and --pants_bootstrapdir.')
# We take the cautious approach here and assume a version bump will always correspond to
# changes in ragel codegen products.
register('--version', default='6.9', advanced=True, fingerprint=True,
help='The version of ragel to use. Used as part of the path to lookup the'
'tool with --pants-support-baseurls and --pants-bootstrapdir')
def __init__(self, *args, **kwargs):
super(RagelGen, self).__init__(*args, **kwargs)
self._java_out = os.path.join(self.workdir, 'gen-java')
@memoized_property
def ragel_binary(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_binary(self.get_options().supportdir,
self.get_options().version,
'ragel')
@property
def synthetic_target_type(self):
return JavaLibrary
def is_gentarget(self, target):
return isinstance(target, JavaRagelLibrary)
def execute_codegen(self, invalid_targets):
for target in invalid_targets:
output_dir = self.codegen_workdir(target)
for source in target.sources_relative_to_buildroot():
abs_source = os.path.join(get_buildroot(), source)
output_file = os.path.join(output_dir, calculate_genfile(abs_source))
safe_mkdir_for(output_file)
args = [self.ragel_binary, '-J', '-o', output_file, abs_source]
self.context.log.debug('Executing: {args}'.format(args=' '.join(args)))
process = subprocess.Popen(args)
result = process.wait()
if result != 0:
raise TaskError('{binary} ... exited non-zero ({result})'
.format(binary=self.ragel_binary, result=result))
def calculate_class_and_package(path):
package, classname = None, None
with open(path, 'r') as ragel:
for line in ragel.readlines():
line = line.strip()
package_match = re.match(r'^package ([.a-zA-Z0-9]+);', line)
if package_match:
if package:
raise TaskError('Multiple package declarations in {path}'.format(path=path))
package = package_match.group(1)
class_match = re.match(r'^public class ([A-Za-z0-9_]+).*', line)
if class_match:
if classname:
raise TaskError('Multiple class declarations in {path}'.format(path=path))
classname = class_match.group(1)
if not package:
raise TaskError('Missing package declaration in {path}'.format(path=path))
if not classname:
raise TaskError('Missing class declaration in {path}'.format(path=path))
return package, classname
def get_filename(package, classname):
return "{package}/{cls}.java".format(package=package.replace(".", os.path.sep), cls=classname)
def calculate_genfile(path):
package, classname = calculate_class_and_package(path)
return get_filename(package, classname)
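# A worked example of the helpers above (made-up .rl content, not from the
# repository): a Ragel source containing the lines
#     package com.example.lexer;
#     public class UrlLexer { ... }
# gives calculate_class_and_package(path) == ('com.example.lexer', 'UrlLexer'),
# and get_filename('com.example.lexer', 'UrlLexer') returns
# 'com/example/lexer/UrlLexer.java' (with the platform path separator), which is
# where calculate_genfile places the generated Java source.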
| 38.897196
| 96
| 0.699664
|
46277b38cf9820eaaf3114e04320d6e7822a17e8
| 3,903
|
py
|
Python
|
examples/test_adapter_examples.py
|
HimashiRathnayake/emea
|
5e3ddd8393082b7bc7551f6ad7b136ab08ec08f9
|
[
"Apache-2.0"
] | 12
|
2021-09-13T18:31:09.000Z
|
2022-03-31T12:10:28.000Z
|
examples/test_adapter_examples.py
|
HimashiRathnayake/emea
|
5e3ddd8393082b7bc7551f6ad7b136ab08ec08f9
|
[
"Apache-2.0"
] | 5
|
2021-12-01T04:34:07.000Z
|
2022-01-28T08:28:18.000Z
|
examples/test_adapter_examples.py
|
HimashiRathnayake/emea
|
5e3ddd8393082b7bc7551f6ad7b136ab08ec08f9
|
[
"Apache-2.0"
] | 3
|
2022-01-18T10:56:05.000Z
|
2022-01-28T01:46:43.000Z
|
# coding=utf-8
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_non_multigpu_but_fix_me
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-generation",
"text-classification",
"token-classification",
"language-modeling",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_fusion_glue
import run_glue_alt
import run_squad
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
class AdapterExamplesTests(TestCasePlus):
@require_torch_non_multigpu_but_fix_me
def test_run_glue_adapters(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = """
run_glue_alt.py
--model_name_or_path bert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
--train_adapter
--adapter_config=houlsby
--load_adapter=qqp@ukp
""".split()
with patch.object(sys, "argv", testargs):
result = run_glue_alt.main()
del result["eval_loss"]
for value in result.values():
self.assertGreaterEqual(value, 0.75)
@require_torch_non_multigpu_but_fix_me
def test_run_fusion_glue(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = """
run_fusion_glue.py
--model_name_or_path bert-base-uncased
--data_dir ./tests/fixtures/tests_samples/MRPC/
--task_name mrpc
--do_train
--do_eval
--output_dir ./tests/fixtures/tests_samples/temp_dir
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=5e-5
--max_steps=20
--warmup_steps=2
--overwrite_output_dir
--seed=42
--max_seq_length=128
""".split()
with patch.object(sys, "argv", testargs):
result = run_fusion_glue.main()
del result["eval_loss"]
for value in result.values():
self.assertGreaterEqual(value, 0.5)
@require_torch_non_multigpu_but_fix_me
def test_run_squad_adapters(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = """
run_squad.py
--model_type=bert
--model_name_or_path=bert-base-uncased
--data_dir=./tests/fixtures/tests_samples/SQUAD
--model_name=bert-base-uncased
--output_dir=./tests/fixtures/tests_samples/temp_dir
--max_steps=20
--warmup_steps=2
--do_train
--do_eval
--version_2_with_negative
--learning_rate=2e-4
--per_gpu_train_batch_size=2
--per_gpu_eval_batch_size=1
--overwrite_output_dir
--seed=42
--train_adapter
--adapter_config=houlsby
--adapter_reduction_factor=8
""".split()
with patch.object(sys, "argv", testargs):
result = run_squad.main()
self.assertGreaterEqual(result["f1"], 30)
self.assertGreaterEqual(result["exact"], 30)
| 31.224
| 90
| 0.600051
|
08025cf24f61c11ed7f3ccc628e83cc9bfff6a40
| 1,137
|
py
|
Python
|
Rubik's Cube/cube_models.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
Rubik's Cube/cube_models.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
Rubik's Cube/cube_models.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
from rubik import Face, Rubiks_Cube
stencil = Rubiks_Cube(faces=[
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","x","x"])]
)
solved = Rubiks_Cube(faces=[
Face(['Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y', 'Y']),
Face(['R', 'R', 'R', 'R', 'R', 'R', 'R', 'R', 'R']),
Face(['G', 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G']),
Face(['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']),
Face(['W', 'W', 'W', 'W', 'W', 'W', 'W', 'W', 'W']),
Face(['B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B'])]
)
white_cross = Rubiks_Cube(faces=[
Face(["x","x","x","x","x","x","x","x","x"]),
Face(["x","x","x","x","x","x","x","R","x"]),
Face(["x","x","x","x","x","x","x","G","x"]),
Face(["x","x","x","x","x","x","x","O","x"]),
Face(["W","W","W","W","W","W","W","W","W"]),
Face(["x","B","x","x","x","x","x","x","x"])]
)
models = {
'solved':solved,
'white cross':white_cross,
}
| 34.454545
| 56
| 0.329815
|
ec24cd911f19f080f5a1671a53bedfe28f24d50e
| 22,962
|
py
|
Python
|
tests/test_batch/test_batch.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 2
|
2019-07-10T14:44:12.000Z
|
2020-06-08T17:26:29.000Z
|
tests/test_batch/test_batch.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 5
|
2018-04-25T21:04:20.000Z
|
2018-11-02T19:59:27.000Z
|
tests/test_batch/test_batch.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 2
|
2020-07-24T18:14:07.000Z
|
2020-12-10T10:55:26.000Z
|
from __future__ import unicode_literals
import time
import datetime
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs
import functools
import nose
def expected_failure(test):
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except Exception as err:
raise nose.SkipTest
return inner
DEFAULT_REGION = 'eu-central-1'
def _get_clients():
return boto3.client('ec2', region_name=DEFAULT_REGION), \
boto3.client('iam', region_name=DEFAULT_REGION), \
boto3.client('ecs', region_name=DEFAULT_REGION), \
boto3.client('logs', region_name=DEFAULT_REGION), \
boto3.client('batch', region_name=DEFAULT_REGION)
def _setup(ec2_client, iam_client):
"""
Do prerequisite setup
:return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
:rtype: tuple
"""
resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24')
vpc_id = resp['Vpc']['VpcId']
resp = ec2_client.create_subnet(
AvailabilityZone='eu-central-1a',
CidrBlock='172.30.0.0/25',
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = ec2_client.create_security_group(
Description='test_sg_desc',
GroupName='test_sg',
VpcId=vpc_id
)
sg_id = resp['GroupId']
resp = iam_client.create_role(
RoleName='TestRole',
AssumeRolePolicyDocument='some_policy'
)
iam_arn = resp['Role']['Arn']
return vpc_id, subnet_id, sg_id, iam_arn
# Yes, yes it talks to all the things
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
# Given a t2.medium is 2 vcpu and t2.small is 1, therefore 2 mediums and 1 small should be created
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
# Its unmanaged so no instances should be created
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(0)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
# TODO create 1000s of tests to test complex option combinations of create environment
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['computeEnvironmentName'].should.equal(compute_name)
# Test filtering
resp = batch_client.describe_compute_environments(
computeEnvironments=['test1']
)
len(resp['computeEnvironments']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
for reservation in resp['Reservations']:
reservation['Instances'][0]['State']['Name'].should.equal('terminated')
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_unmanaged_compute_environment_state():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.update_compute_environment(
computeEnvironment=compute_name,
state='DISABLED'
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['state'].should.equal('DISABLED')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
resp.should.contain('jobQueueArn')
resp.should.contain('jobQueueName')
queue_arn = resp['jobQueueArn']
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn)
resp = batch_client.describe_job_queues(jobQueues=['test_invalid_queue'])
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_job_queue_bad_arn():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
try:
batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn + 'LALALA'
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.update_job_queue(
jobQueue=queue_arn,
priority=5
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.delete_job_queue(
jobQueue=queue_arn
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_register_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp.should.contain('jobDefinitionArn')
resp.should.contain('jobDefinitionName')
resp.should.contain('revision')
assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision']))
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_reregister_task_definition():
# Reregistering task with the same name bumps the revision number
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp1 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp1.should.contain('jobDefinitionArn')
resp1.should.contain('jobDefinitionName')
resp1.should.contain('revision')
assert resp1['jobDefinitionArn'].endswith('{0}:{1}'.format(resp1['jobDefinitionName'], resp1['revision']))
resp1['revision'].should.equal(1)
resp2 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 68,
'command': ['sleep', '10']
}
)
resp2['revision'].should.equal(2)
resp2['jobDefinitionArn'].should_not.equal(resp1['jobDefinitionArn'])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.deregister_job_definition(jobDefinition=resp['jobDefinitionArn'])
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='test1',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
resp = batch_client.describe_job_definitions(
jobDefinitionName='sleep10'
)
len(resp['jobDefinitions']).should.equal(2)
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(3)
resp = batch_client.describe_job_definitions(
jobDefinitions=['sleep10', 'test1']
)
len(resp['jobDefinitions']).should.equal(3)
# SLOW TESTS
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_submit_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id])
print("{0}:{1} {2}".format(resp['jobs'][0]['jobName'], resp['jobs'][0]['jobId'], resp['jobs'][0]['status']))
if resp['jobs'][0]['status'] == 'FAILED':
raise RuntimeError('Batch job failed')
if resp['jobs'][0]['status'] == 'SUCCEEDED':
break
time.sleep(0.5)
else:
raise RuntimeError('Batch job timed out')
resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job')
len(resp['logStreams']).should.equal(1)
ls_name = resp['logStreams'][0]['logStreamName']
resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name)
len(resp['events']).should.be.greater_than(5)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_list_jobs():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id1 = resp['jobId']
resp = batch_client.submit_job(
jobName='test2',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id2 = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
resp_finished_jobs = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
# Wait only as long as it takes to run the jobs
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id1, job_id2])
any_failed_jobs = any([job['status'] == 'FAILED' for job in resp['jobs']])
succeeded_jobs = all([job['status'] == 'SUCCEEDED' for job in resp['jobs']])
if any_failed_jobs:
raise RuntimeError('A Batch job failed')
if succeeded_jobs:
break
time.sleep(0.5)
else:
raise RuntimeError('Batch jobs timed out')
resp_finished_jobs2 = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
len(resp_finished_jobs['jobSummaryList']).should.equal(0)
len(resp_finished_jobs2['jobSummaryList']).should.equal(2)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_terminate_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
time.sleep(2)
batch_client.terminate_job(jobId=job_id, reason='test_terminate')
time.sleep(1)
resp = batch_client.describe_jobs(jobs=[job_id])
resp['jobs'][0]['jobName'].should.equal('test1')
resp['jobs'][0]['status'].should.equal('FAILED')
resp['jobs'][0]['statusReason'].should.equal('test_terminate')
| 28.348148
| 116
| 0.635398
|
99a5dda6284f8282bad4158408b4b765e0081e37
| 1,082
|
py
|
Python
|
Photo_Uploder/accounts/urls.py
|
GeneraalMaritz/Photo-Uploader
|
626fc30db052edbac3464e6029a37247fc8a3fbd
|
[
"MIT"
] | null | null | null |
Photo_Uploder/accounts/urls.py
|
GeneraalMaritz/Photo-Uploader
|
626fc30db052edbac3464e6029a37247fc8a3fbd
|
[
"MIT"
] | null | null | null |
Photo_Uploder/accounts/urls.py
|
GeneraalMaritz/Photo-Uploader
|
626fc30db052edbac3464e6029a37247fc8a3fbd
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('register/', views.registerPage, name="register"),
path('login/', views.loginPage, name="login"),
path('logout/', views.logoutUser, name="logout"),
path('', views.home, name="home"),
path('user/<str:pk>/', views.user, name="user"),
path('picture/', views.picture, name="pictures"),
path('create_upload/<str:pk>/', views.createUpload, name="create_upload"),
path('update_upload/<str:pk>/', views.updateUpload, name="update_upload"),
path('delete_upload/<str:pk>/', views.deleteUpload, name="delete_upload"),
path('add_picture/', views.addPicture, name="add_picture"),
path('view_picture/<str:pk>', views.viewPicture, name="view_picture"),
path('gallery', views.gallery, name="gallery")
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 43.28
| 78
| 0.716266
|
d7866f4f0054eb8c2d4fbed79db81d64c40a227b
| 37,441
|
py
|
Python
|
ckan/lib/create_test_data.py
|
irev/ckan
|
00947b6dd9611c88ca74500258cbf4e3c5a82c2f
|
[
"BSD-3-Clause"
] | null | null | null |
ckan/lib/create_test_data.py
|
irev/ckan
|
00947b6dd9611c88ca74500258cbf4e3c5a82c2f
|
[
"BSD-3-Clause"
] | 2
|
2020-05-20T10:10:18.000Z
|
2020-09-21T12:22:42.000Z
|
ckan/lib/create_test_data.py
|
irev/ckan
|
00947b6dd9611c88ca74500258cbf4e3c5a82c2f
|
[
"BSD-3-Clause"
] | 2
|
2020-05-13T14:01:14.000Z
|
2021-04-10T20:22:54.000Z
|
# encoding: utf-8
import logging
import datetime
import ckan.model as model
log = logging.getLogger(__name__)
class CreateTestData(object):
    # keep track of the objects created by this class so that
    # tests can easily call the delete() method to delete them all again.
pkg_names = []
tag_names = []
group_names = set()
user_refs = []
author = u'tester'
pkg_core_fields = ['name', 'title', 'version', 'url', 'notes',
'author', 'author_email',
'maintainer', 'maintainer_email',
'private',
]
@classmethod
def create_basic_test_data(cls):
cls.create()
@classmethod
def create_search_test_data(cls):
cls.create_arbitrary(search_items)
@classmethod
def create_gov_test_data(cls, extra_users=[]):
cls.create_arbitrary(gov_items, extra_user_names=extra_users)
@classmethod
def create_family_test_data(cls, extra_users=[]):
cls.create_arbitrary(family_items,
relationships=family_relationships,
extra_user_names=extra_users)
@classmethod
def create_group_hierarchy_test_data(cls, extra_users=[]):
cls.create_users(group_hierarchy_users)
cls.create_groups(group_hierarchy_groups)
cls.create_arbitrary(group_hierarchy_datasets)
@classmethod
def create_test_user(cls):
tester = model.User.by_name(u'tester')
if tester is None:
tester = model.User(name=u'tester', apikey=u'tester',
password=u'tester')
model.Session.add(tester)
model.Session.commit()
model.Session.remove()
cls.user_refs.append(u'tester')
@classmethod
def create_translations_test_data(cls):
import ckan.model
CreateTestData.create()
sysadmin_user = ckan.model.User.get('testsysadmin')
package = ckan.model.Package.get('annakarenina')
# Add some new tags to the package.
# These tags are codes that are meant to be always translated before
# display, if not into the user's current language then into the
# fallback language.
package.add_tags([ckan.model.Tag('123'), ckan.model.Tag('456'),
ckan.model.Tag('789')])
# Add the above translations to CKAN.
for (lang_code, translations) in (('de', german_translations),
('fr', french_translations), ('en', english_translations)):
for term in terms:
if term in translations:
data_dict = {
'term': term,
'term_translation': translations[term],
'lang_code': lang_code,
}
context = {
'model': ckan.model,
'session': ckan.model.Session,
'user': sysadmin_user.name,
}
ckan.logic.action.update.term_translation_update(context,
data_dict)
ckan.model.Session.commit()
@classmethod
def create_vocabs_test_data(cls):
import ckan.model
CreateTestData.create()
sysadmin_user = ckan.model.User.get('testsysadmin')
annakarenina = ckan.model.Package.get('annakarenina')
warandpeace = ckan.model.Package.get('warandpeace')
# Create a couple of vocabularies.
context = {
'model': ckan.model,
'session': ckan.model.Session,
'user': sysadmin_user.name
}
data_dict = {
'name': 'Genre',
'tags': [{'name': 'Drama'}, {'name': 'Sci-Fi'},
{'name': 'Mystery'}],
}
ckan.logic.action.create.vocabulary_create(context, data_dict)
data_dict = {
'name': 'Actors',
'tags': [{'name': 'keira-knightley'}, {'name': 'jude-law'},
{'name': 'alessio-boni'}],
}
ckan.logic.action.create.vocabulary_create(context, data_dict)
# Add some vocab tags to some packages.
genre_vocab = ckan.model.Vocabulary.get('Genre')
actors_vocab = ckan.model.Vocabulary.get('Actors')
annakarenina.add_tag_by_name('Drama', vocab=genre_vocab)
annakarenina.add_tag_by_name('keira-knightley', vocab=actors_vocab)
annakarenina.add_tag_by_name('jude-law', vocab=actors_vocab)
warandpeace.add_tag_by_name('Drama', vocab=genre_vocab)
warandpeace.add_tag_by_name('alessio-boni', vocab=actors_vocab)
@classmethod
def create_arbitrary(cls, package_dicts, relationships=[],
extra_user_names=[], extra_group_names=[]):
        '''Creates packages and a few extra objects as well at the
        same time if required.
        @param package_dicts - a list of dictionaries with the package
                               properties. Extra keys such as 'tags',
                               'groups', 'resources', 'extras', 'license'
                               and 'download_url' are also understood.
        @param relationships - a list of (subject_name, relationship,
                               object_name) tuples to create between the
                               packages.
        @param extra_user_names - a list of user names to create. No
                               properties get set though.
        @param extra_group_names - a list of group names to create. No
                               properties get set though.
        '''
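        # Example call (hypothetical values) illustrating the expected shapes:
        #   CreateTestData.create_arbitrary(
        #       [{'name': 'my-dataset', 'title': 'My Dataset',
        #         'tags': 'economy data', 'groups': 'david',
        #         'extras': {'date_released': '2010'}}],
        #       relationships=[('my-dataset', 'depends_on', 'annakarenina')],
        #       extra_user_names=['newuser'])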
assert isinstance(relationships, (list, tuple))
assert isinstance(extra_user_names, (list, tuple))
assert isinstance(extra_group_names, (list, tuple))
model.Session.remove()
new_user_names = extra_user_names
new_group_names = set()
new_groups = {}
if package_dicts:
if isinstance(package_dicts, dict):
package_dicts = [package_dicts]
for item in package_dicts:
pkg_dict = {}
for field in cls.pkg_core_fields:
if field in item:
pkg_dict[field] = str(item[field])
if model.Package.by_name(pkg_dict['name']):
log.warning('Cannot create package "%s" as it already exists.' % \
(pkg_dict['name']))
continue
pkg = model.Package(**pkg_dict)
model.Session.add(pkg)
for attr, val in item.items():
if isinstance(val, str):
val = str(val)
if attr=='name':
continue
if attr in cls.pkg_core_fields:
pass
elif attr == 'download_url':
pkg.add_resource(str(val))
elif attr == 'resources':
assert isinstance(val, (list, tuple))
for res_dict in val:
non_extras = {}
for k, v in res_dict.items():
if k != 'extras':
if not isinstance(v, datetime.datetime):
v = str(v)
non_extras[str(k)] = v
extras = {str(k): str(v) for k, v in res_dict.get('extras', {}).items()}
pkg.add_resource(extras=extras, **non_extras)
elif attr == 'tags':
if isinstance(val, str):
tags = val.split()
elif isinstance(val, list):
tags = val
else:
raise NotImplementedError
for tag_name in tags:
tag_name = str(tag_name)
tag = model.Tag.by_name(tag_name)
if not tag:
tag = model.Tag(name=tag_name)
cls.tag_names.append(tag_name)
model.Session.add(tag)
pkg.add_tag(tag)
model.Session.flush()
elif attr == 'groups':
model.Session.flush()
if isinstance(val, str):
group_names = val.split()
elif isinstance(val, list):
group_names = val
else:
raise NotImplementedError
for group_name in group_names:
group = model.Group.by_name(str(group_name))
if not group:
if not group_name in new_groups:
group = model.Group(name=
str(group_name))
model.Session.add(group)
new_group_names.add(group_name)
new_groups[group_name] = group
else:
# If adding multiple packages with the same
# group name, model.Group.by_name will not
# find the group as the session has not yet
# been committed at this point. Fetch from
# the new_groups dict instead.
group = new_groups[group_name]
capacity = 'organization' if group.is_organization\
else 'public'
member = model.Member(group=group, table_id=pkg.id,
table_name='package',
capacity=capacity)
model.Session.add(member)
if group.is_organization:
pkg.owner_org = group.id
elif attr == 'license':
pkg.license_id = val
elif attr == 'license_id':
pkg.license_id = val
elif attr == 'extras':
pkg.extras = val
elif attr == 'admins':
assert 0, 'Deprecated param "admins"'
else:
raise NotImplementedError(attr)
cls.pkg_names.append(item['name'])
model.repo.commit_and_remove()
needs_commit = False
for group_name in extra_group_names:
group = model.Group(name=str(group_name))
model.Session.add(group)
new_group_names.add(group_name)
needs_commit = True
if needs_commit:
model.repo.commit_and_remove()
needs_commit = False
# create users that have been identified as being needed
for user_name in new_user_names:
if not model.User.by_name(str(user_name)):
user = model.User(name=str(user_name))
model.Session.add(user)
cls.user_refs.append(user_name)
needs_commit = True
if needs_commit:
model.repo.commit_and_remove()
needs_commit = False
# setup authz for groups just created
for group_name in new_group_names:
group = model.Group.by_name(str(group_name))
cls.group_names.add(group_name)
needs_commit = True
if needs_commit:
model.repo.commit_and_remove()
needs_commit = False
if relationships:
def pkg(pkg_name):
return model.Package.by_name(str(pkg_name))
for subject_name, relationship, object_name in relationships:
pkg(subject_name).add_relationship(
str(relationship), pkg(object_name))
needs_commit = True
model.repo.commit_and_remove()
@classmethod
def create_groups(cls, group_dicts, admin_user_name=None, auth_profile=""):
'''A more featured interface for creating groups.
All group fields can be filled, packages added, can have
an admin user and be a member of other groups.'''
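        # Example group dict (hypothetical values) this method accepts:
        #   CreateTestData.create_groups([
        #       {'name': 'my-group', 'title': 'My Group',
        #        'packages': ['annakarenina'],
        #        'admins': ['tester'], 'editors': ['annafan'],
        #        'parent': 'david'}],
        #       admin_user_name='testsysadmin')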
if admin_user_name:
admin_users = [model.User.by_name(admin_user_name)]
else:
admin_users = []
assert isinstance(group_dicts, (list, tuple))
group_attributes = set(('name', 'title', 'description', 'parent_id',
'type', 'is_organization'))
for group_dict in group_dicts:
if model.Group.by_name(str(group_dict['name'])):
log.warning('Cannot create group "%s" as it already exists.' %
group_dict['name'])
continue
pkg_names = group_dict.pop('packages', [])
group = model.Group(name=str(group_dict['name']))
group.type = auth_profile or 'group'
for key in group_dict:
if key in group_attributes:
setattr(group, key, group_dict[key])
elif key not in ('admins', 'editors', 'parent'):
group.extras[key] = group_dict[key]
assert isinstance(pkg_names, (list, tuple))
for pkg_name in pkg_names:
pkg = model.Package.by_name(str(pkg_name))
assert pkg, pkg_name
member = model.Member(group=group, table_id=pkg.id,
table_name='package')
model.Session.add(member)
model.Session.add(group)
admins = [model.User.by_name(user_name)
for user_name in group_dict.get('admins', [])] + \
admin_users
for admin in admins:
member = model.Member(group=group, table_id=admin.id,
table_name='user', capacity='admin')
model.Session.add(member)
editors = [model.User.by_name(user_name)
for user_name in group_dict.get('editors', [])]
for editor in editors:
member = model.Member(group=group, table_id=editor.id,
table_name='user', capacity='editor')
model.Session.add(member)
# Need to commit the current Group for two reasons:
# 1. It might have a parent, and the Member will need the Group.id
# value allocated on commit.
# 2. The next Group created may have this Group as a parent so
# creation of the Member needs to refer to this one.
model.Session.commit()
# add it to a parent's group
if 'parent' in group_dict:
parent = model.Group.by_name(str(group_dict['parent']))
assert parent, group_dict['parent']
member = model.Member(group=group, table_id=parent.id,
table_name='group', capacity='parent')
model.Session.add(member)
cls.group_names.add(group_dict['name'])
model.repo.commit_and_remove()
@classmethod
def create(cls, auth_profile="", package_type=None):
model.Session.remove()
if auth_profile == "publisher":
organization_group = model.Group(name=u"organization_group",
type="organization")
cls.pkg_names = [u'annakarenina', u'warandpeace']
pkg1 = model.Package(name=cls.pkg_names[0], type=package_type)
if auth_profile == "publisher":
pkg1.group = organization_group
model.Session.add(pkg1)
pkg1.title = u'A Novel By Tolstoy'
pkg1.version = u'0.7a'
pkg1.url = u'http://datahub.io'
# put an & in the url string to test escaping
if 'alt_url' in model.Resource.get_extra_columns():
configured_extras = ({'alt_url': u'alt123'},
{'alt_url': u'alt345'})
else:
configured_extras = ({}, {})
pr1 = model.Resource(
url=u'http://datahub.io/download/x=1&y=2',
format=u'plain text',
description=u'Full text. Needs escaping: " Umlaut: \xfc',
hash=u'abc123',
extras={'size_extra': u'123'},
**configured_extras[0]
)
pr2 = model.Resource(
url=u'http://datahub.io/index.json',
format=u'JSON',
description=u'Index of the novel',
hash=u'def456',
extras={'size_extra': u'345'},
**configured_extras[1]
)
model.Session.add(pr1)
model.Session.add(pr2)
pkg1.resources_all.append(pr1)
pkg1.resources_all.append(pr2)
pkg1.notes = u'''Some test notes
### A 3rd level heading
**Some bolded text.**
*Some italicized text.*
Foreign characters:
u with umlaut \xfc
66-style quote \u201c
foreign word: th\xfcmb
Needs escaping:
left arrow <
<http://ckan.net/>
'''
pkg2 = model.Package(name=cls.pkg_names[1], type=package_type)
tag1 = model.Tag(name=u'russian')
tag2 = model.Tag(name=u'tolstoy')
if auth_profile == "publisher":
pkg2.group = organization_group
# Flexible tag, allows spaces, upper-case,
# and all punctuation except commas
tag3 = model.Tag(name=u'Flexible \u30a1')
for obj in [pkg2, tag1, tag2, tag3]:
model.Session.add(obj)
pkg1.add_tags([tag1, tag2, tag3])
pkg2.add_tags([ tag1, tag3 ])
cls.tag_names = [ t.name for t in (tag1, tag2, tag3) ]
pkg1.license_id = u'other-open'
pkg2.license_id = u'cc-nc' # closed license
pkg2.title = u'A Wonderful Story'
pkg1.extras = {u'genre':'romantic novel',
u'original media':'book'}
# group
david = model.Group(name=u'david',
title=u'Dave\'s books',
description=u'These are books that David likes.',
type=auth_profile or 'group')
roger = model.Group(name=u'roger',
title=u'Roger\'s books',
description=u'Roger likes these books.',
type=auth_profile or 'group')
for obj in [david, roger]:
model.Session.add(obj)
cls.group_names.add(u'david')
cls.group_names.add(u'roger')
model.Session.flush()
model.Session.add(model.Member(table_id=pkg1.id, table_name='package', group=david))
model.Session.add(model.Member(table_id=pkg2.id, table_name='package', group=david))
model.Session.add(model.Member(table_id=pkg1.id, table_name='package', group=roger))
# authz
sysadmin = model.User(name=u'testsysadmin', password=u'testsysadmin')
sysadmin.sysadmin = True
model.Session.add_all([
model.User(name=u'tester', apikey=u'tester', password=u'tester'),
model.User(name=u'joeadmin', password=u'joeadmin'),
model.User(name=u'annafan', about=u'I love reading Annakarenina. My site: http://datahub.io', password=u'annafan'),
model.User(name=u'russianfan', password=u'russianfan'),
sysadmin,
])
cls.user_refs.extend([u'tester', u'joeadmin', u'annafan', u'russianfan', u'testsysadmin'])
# Create activities for packages
for item in [pkg1, pkg2]:
activity = item.activity_stream_item('new', 'not logged in')
model.Session.add(activity)
model.repo.commit_and_remove()
# method used in DGU and all good tests elsewhere
@classmethod
def create_users(cls, user_dicts):
needs_commit = False
for user_dict in user_dicts:
user = cls._create_user_without_commit(**user_dict)
if user:
needs_commit = True
if needs_commit:
model.repo.commit_and_remove()
@classmethod
def _create_user_without_commit(cls, name='', **user_dict):
if model.User.by_name(name):
            log.warning('Cannot create user "%s" as it already exists.' %
                        (name or user_dict.get('name')))
return
# User objects are not revisioned so no need to create a revision
user_ref = name
assert user_ref
        for k, v in user_dict.items():
            if v:
                # avoid unicode warnings
                user_dict[k] = str(v)
user = model.User(name=str(name), **user_dict)
model.Session.add(user)
cls.user_refs.append(user_ref)
return user
@classmethod
def create_user(cls, name='', **kwargs):
user = cls._create_user_without_commit(name, **kwargs)
model.Session.commit()
return user
@classmethod
def flag_for_deletion(cls, pkg_names=[], tag_names=[], group_names=[],
user_names=[]):
'''If you create a domain object manually in your test then you
can name it here (flag it up) and it will be deleted when you next
call CreateTestData.delete().'''
if isinstance(pkg_names, str):
pkg_names = [pkg_names]
cls.pkg_names.extend(pkg_names)
cls.tag_names.extend(tag_names)
cls.group_names = cls.group_names.union(set(group_names))
cls.user_refs.extend(user_names)
@classmethod
def delete(cls):
'''Purges packages etc. that were created by this class.'''
for pkg_name in cls.pkg_names:
model.Session().autoflush = False
pkg = model.Package.by_name(str(pkg_name))
if pkg:
pkg.purge()
for tag_name in cls.tag_names:
tag = model.Tag.by_name(str(tag_name))
if tag:
tag.purge()
for group_name in cls.group_names:
group = model.Group.by_name(str(group_name))
if group:
model.Session.delete(group)
revs = model.Session.query(model.Revision).filter_by(author=cls.author)
for rev in revs:
for pkg in rev.packages:
pkg.purge()
for grp in rev.groups:
grp.purge()
model.Session.commit()
model.Session.delete(rev)
for user_name in cls.user_refs:
user = model.User.get(str(user_name))
if user:
user.purge()
model.Session.commit()
model.Session.remove()
cls.reset()
@classmethod
def reset(cls):
cls.pkg_names = []
cls.group_names = set()
cls.tag_names = []
cls.user_refs = []
@classmethod
def get_all_data(cls):
return cls.pkg_names + list(cls.group_names) + cls.tag_names + cls.user_refs
@classmethod
def make_some_vocab_tags(cls):
# Create a couple of vocabularies.
genre_vocab = model.Vocabulary(u'genre')
model.Session.add(genre_vocab)
composers_vocab = model.Vocabulary(u'composers')
model.Session.add(composers_vocab)
# Create some additional free tags for tag search tests.
tolkien_tag = model.Tag(name="tolkien")
model.Session.add(tolkien_tag)
toledo_tag = model.Tag(name="toledo")
model.Session.add(toledo_tag)
tolerance_tag = model.Tag(name="tolerance")
model.Session.add(tolerance_tag)
tollbooth_tag = model.Tag(name="tollbooth")
model.Session.add(tollbooth_tag)
# We have to add free tags to a package or they won't show up in tag results.
model.Package.get('warandpeace').add_tags((tolkien_tag, toledo_tag,
tolerance_tag, tollbooth_tag))
# Create some tags that belong to vocabularies.
sonata_tag = model.Tag(name=u'sonata', vocabulary_id=genre_vocab.id)
model.Session.add(sonata_tag)
bach_tag = model.Tag(name=u'Bach', vocabulary_id=composers_vocab.id)
model.Session.add(bach_tag)
neoclassical_tag = model.Tag(name='neoclassical',
vocabulary_id=genre_vocab.id)
model.Session.add(neoclassical_tag)
neofolk_tag = model.Tag(name='neofolk', vocabulary_id=genre_vocab.id)
model.Session.add(neofolk_tag)
neomedieval_tag = model.Tag(name='neomedieval',
vocabulary_id=genre_vocab.id)
model.Session.add(neomedieval_tag)
neoprog_tag = model.Tag(name='neoprog',
vocabulary_id=genre_vocab.id)
model.Session.add(neoprog_tag)
neopsychedelia_tag = model.Tag(name='neopsychedelia',
vocabulary_id=genre_vocab.id)
model.Session.add(neopsychedelia_tag)
neosoul_tag = model.Tag(name='neosoul', vocabulary_id=genre_vocab.id)
model.Session.add(neosoul_tag)
nerdcore_tag = model.Tag(name='nerdcore', vocabulary_id=genre_vocab.id)
model.Session.add(nerdcore_tag)
model.Package.get('warandpeace').add_tag(bach_tag)
model.Package.get('annakarenina').add_tag(sonata_tag)
model.Session.commit()
search_items = [{'name':'gils',
'title':'Government Information Locator Service',
'url':'',
'tags':'registry,country-usa,government,federal,gov,workshop-20081101,penguin'.split(','),
'resources':[{'url':'http://www.dcsf.gov.uk/rsgateway/DB/SFR/s000859/SFR17_2009_tables.xls',
'format':'XLS',
'last_modified': datetime.datetime(2005, 10, 1),
'description':'December 2009 | http://www.statistics.gov.uk/hub/id/119-36345'},
{'url':'http://www.dcsf.gov.uk/rsgateway/DB/SFR/s000860/SFR17_2009_key.doc',
'format':'DOC',
'description':'http://www.statistics.gov.uk/hub/id/119-34565'}],
'groups':'ukgov test1 test2 penguin',
'license':'odc-by',
'notes':u'''From <http://www.gpoaccess.gov/gils/about.html>
> The Government Information Locator Service (GILS) is an effort to identify, locate, and describe publicly available Federal
> Because this collection is decentralized, the GPO
Foreign word:
u with umlaut th\xfcmb
''',
'extras':{'date_released':'2008'},
},
{'name':'us-gov-images',
'title':'U.S. Government Photos and Graphics',
'url':'http://www.usa.gov/Topics/Graphics.shtml',
'download_url':'http://www.usa.gov/Topics/Graphics.shtml',
'tags':'images,graphics,photographs,photos,pictures,us,usa,america,history,wildlife,nature,war,military,todo split,gov,penguin'.split(','),
'groups':'ukgov test1 penguin',
'license':'other-open',
'notes':'''## About
Collection of links to different US image collections in the public domain.
## Openness
> Most of these images and graphics are available for use in the public domain, and''',
'extras':{'date_released':'2009'},
},
{'name':'usa-courts-gov',
'title':'Text of US Federal Cases',
'url':'http://bulk.resource.org/courts.gov/',
'download_url':'http://bulk.resource.org/courts.gov/',
'tags':'us,courts,case-law,us,courts,case-law,gov,legal,law,access-bulk,penguins,penguin'.split(','),
'groups':'ukgov test2 penguin',
'license':'cc-zero',
'notes':'''### Description
1.8 million pages of U.S. case law available with no restrictions. From the [README](http://bulk.resource.org/courts.gov/0_README.html):
> This file is http://bulk.resource.org/courts.gov/0_README.html and was last revised.
penguin
''',
'extras':{'date_released':'2007-06'},
},
{'name':'uk-government-expenditure',
'title':'UK Government Expenditure',
'tags':'workshop-20081101,uk,gov,expenditure,finance,public,funding,penguin'.split(','),
'groups':'ukgov penguin',
'notes':'''Discussed at [Workshop on Public Information, 2008-11-02](http://okfn.org/wiki/PublicInformation).
Overview is available in Red Book, or Financial Statement and Budget Report (FSBR), [published by the Treasury](http://www.hm-treasury.gov.uk/budget.htm).''',
'extras':{'date_released':'2007-10'},
},
{'name':'se-publications',
'title':'Sweden - Government Offices of Sweden - Publications',
'url':'http://www.sweden.gov.se/sb/d/574',
'groups':'penguin',
'tags':u'country-sweden,format-pdf,access-www,documents,publications,government,eutransparency,penguin,CAPITALS,surprise.,greek omega \u03a9,japanese katakana \u30a1'.split(','),
'license':'',
'notes':'''### About
Official documents including "government bills and reports, information material and other publications".
### Reuse
Not clear.''',
'extras':{'date_released':'2009-10-27'},
},
{'name':'se-opengov',
'title':'Opengov.se',
'groups':'penguin',
'url':'http://www.opengov.se/',
'download_url':'http://www.opengov.se/data/open/',
'tags':'country-sweden,government,data,penguin'.split(','),
'license':'cc-by-sa',
'notes':'''### About
From [website](http://www.opengov.se/sidor/english/):
> Opengov.se is an initiative to highlight available public datasets in Sweden. It contains a commentable catalog of government datasets, their formats and usage restrictions.
> The goal is to highlight the benefits of open access to government data and explain how this is done in practice.
### Openness
It appears that the website is under a CC-BY-SA license. Legal status of the data varies. Data that is fully open can be viewed at:
* <http://www.opengov.se/data/open/>'''
},
]
family_items = [{'name':u'abraham', 'title':u'Abraham'},
{'name':u'homer', 'title':u'Homer'},
{'name':u'homer_derived', 'title':u'Homer Derived'},
{'name':u'beer', 'title':u'Beer'},
{'name':u'bart', 'title':u'Bart'},
{'name':u'lisa', 'title':u'Lisa'},
{'name':u'marge', 'title':u'Marge'},
]
family_relationships = [('abraham', 'parent_of', 'homer'),
('homer', 'parent_of', 'bart'),
('homer', 'parent_of', 'lisa'),
('marge', 'parent_of', 'lisa'),
('marge', 'parent_of', 'bart'),
('homer_derived', 'derives_from', 'homer'),
('homer', 'depends_on', 'beer'),
]
gov_items = [
{'name':'private-fostering-england-2009',
'title':'Private Fostering',
'notes':'Figures on children cared for and accommodated in private fostering arrangements, England, Year ending 31 March 2009',
'resources':[{'url':'http://www.dcsf.gov.uk/rsgateway/DB/SFR/s000859/SFR17_2009_tables.xls',
'format':'XLS',
'description':'December 2009 | http://www.statistics.gov.uk/hub/id/119-36345'},
{'url':'http://www.dcsf.gov.uk/rsgateway/DB/SFR/s000860/SFR17_2009_key.doc',
'format':'DOC',
'description':'http://www.statistics.gov.uk/hub/id/119-34565'}],
'url':'http://www.dcsf.gov.uk/rsgateway/DB/SFR/s000859/index.shtml',
'author':'DCSF Data Services Group',
'author_email':'statistics@dcsf.gsi.gov.uk',
'license':'ukcrown',
'tags':'children fostering',
'extras':{
'external_reference':'DCSF-DCSF-0024',
'date_released':'2009-07-30',
'date_updated':'2009-07-30',
'update_frequency':'annually',
'geographic_granularity':'regional',
'geographic_coverage':'100000: England',
'department':'Department for Education',
'published_by':'Department for Education [3]',
'published_via':'',
'temporal_granularity':'years',
'temporal_coverage-from':'2008-6',
'temporal_coverage-to':'2009-6',
'mandate':'',
'national_statistic':'yes',
'precision':'Numbers to nearest 10, percentage to nearest whole number',
'taxonomy_url':'',
'agency':'',
'import_source':'ONS-Jan-09',
}
},
{'name':'weekly-fuel-prices',
'title':'Weekly fuel prices',
'notes':'Latest price as at start of week of unleaded petrol and diesel.',
'resources':[{'url':'http://www.decc.gov.uk/assets/decc/statistics/source/prices/qep211.xls', 'format':'XLS', 'description':'Quarterly 23/2/12'}],
'url':'http://www.decc.gov.uk/en/content/cms/statistics/source/prices/prices.aspx',
'author':'DECC Energy Statistics Team',
'author_email':'energy.stats@decc.gsi.gov.uk',
'license':'ukcrown',
'tags':'fuel prices',
'extras':{
'external_reference':'DECC-DECC-0001',
'date_released':'2009-11-24',
'date_updated':'2009-11-24',
'update_frequency':'weekly',
'geographic_granularity':'national',
'geographic_coverage':'111100: United Kingdom (England, Scotland, Wales, Northern Ireland)',
'department':'Department of Energy and Climate Change',
'published_by':'Department of Energy and Climate Change [4]',
'published_via':'',
'mandate':'',
'temporal_granularity':'weeks',
'temporal_coverage-from':'2008-11-24',
'temporal_coverage-to':'2009-11-24',
'national_statistic':'no',
'import_source':'DECC-Jan-09',
}
}
]
group_hierarchy_groups = [
{'name': 'department-of-health',
'title': 'Department of Health',
'contact-email': 'contact@doh.gov.uk',
'type': 'organization',
'is_organization': True
},
{'name': 'food-standards-agency',
'title': 'Food Standards Agency',
'contact-email': 'contact@fsa.gov.uk',
'parent': 'department-of-health',
'type': 'organization',
'is_organization': True},
{'name': 'national-health-service',
'title': 'National Health Service',
'contact-email': 'contact@nhs.gov.uk',
'parent': 'department-of-health',
'type': 'organization',
'is_organization': True,
'editors': ['nhseditor'],
'admins': ['nhsadmin']},
{'name': 'nhs-wirral-ccg',
'title': 'NHS Wirral CCG',
'contact-email': 'contact@wirral.nhs.gov.uk',
'parent': 'national-health-service',
'type': 'organization',
'is_organization': True,
'editors': ['wirraleditor'],
'admins': ['wirraladmin']},
{'name': 'nhs-southwark-ccg',
'title': 'NHS Southwark CCG',
'contact-email': 'contact@southwark.nhs.gov.uk',
'parent': 'national-health-service',
'type': 'organization',
'is_organization': True},
{'name': 'cabinet-office',
'title': 'Cabinet Office',
'contact-email': 'contact@cabinet-office.gov.uk',
'type': 'organization',
'is_organization': True},
]
group_hierarchy_datasets = [
{'name': 'doh-spend', 'title': 'Department of Health Spend Data',
'groups': ['department-of-health']},
{'name': 'nhs-spend', 'title': 'NHS Spend Data',
'groups': ['national-health-service']},
{'name': 'wirral-spend', 'title': 'Wirral Spend Data',
'groups': ['nhs-wirral-ccg']},
{'name': 'southwark-spend', 'title': 'Southwark Spend Data',
'groups': ['nhs-southwark-ccg']},
]
group_hierarchy_users = [{'name': 'nhsadmin', 'password': 'pass'},
{'name': 'nhseditor', 'password': 'pass'},
{'name': 'wirraladmin', 'password': 'pass'},
{'name': 'wirraleditor', 'password': 'pass'},
]
# Some test terms and translations.
terms = ('A Novel By Tolstoy',
'Index of the novel',
'russian',
'tolstoy',
"Dave's books",
"Roger's books",
'romantic novel',
'book',
'123',
'456',
'789',
'plain text',
'Roger likes these books.',
)
english_translations = {
'123': 'jealousy',
'456': 'realism',
'789': 'hypocrisy',
}
german_translations = {
'A Novel By Tolstoy': 'Roman von Tolstoi',
'Index of the novel': 'Index des Romans',
'russian': 'Russisch',
'tolstoy': 'Tolstoi',
"Dave's books": 'Daves Bucher',
"Roger's books": 'Rogers Bucher',
'romantic novel': 'Liebesroman',
'book': 'Buch',
'456': 'Realismus',
'789': 'Heuchelei',
'plain text': 'Klartext',
'Roger likes these books.': 'Roger mag diese Bucher.'
}
french_translations = {
'A Novel By Tolstoy': 'A Novel par Tolstoi',
'Index of the novel': 'Indice du roman',
'russian': 'russe',
'romantic novel': 'roman romantique',
'book': 'livre',
'123': 'jalousie',
'789': 'hypocrisie',
}
| 40.785403
| 192
| 0.549772
|
1154696c8df7ab0863a592e82cb6edd194e8fa87
| 3,267
|
py
|
Python
|
kw_tests/common_class.py
|
alex-kalanis/kw_input
|
ac7beddadc5e766d7b4921352a472abcea6e16cf
|
[
"BSD-3-Clause"
] | null | null | null |
kw_tests/common_class.py
|
alex-kalanis/kw_input
|
ac7beddadc5e766d7b4921352a472abcea6e16cf
|
[
"BSD-3-Clause"
] | null | null | null |
kw_tests/common_class.py
|
alex-kalanis/kw_input
|
ac7beddadc5e766d7b4921352a472abcea6e16cf
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
class CommonTestClass(unittest.TestCase):
"""
    Used format is a list of tuples where the first item is a string key and the second is a mixed value.
    Total madness in the typing, but something tells me it's the correct pythonic way.
"""
def entry_dataset(self):
return [
('foo', 'val1'),
('bar', ['bal1', 'bal2']),
('baz', True),
('aff', 42),
]
def strange_entry_dataset(self):
return [
('foo ', ' val1 '),
('ba' + chr(0) + 'r', ["<script>alert('XSS!!!')</script>", 'bal2']),
('b<a>z', False),
('a**ff', '<?php echo "ded!";'),
]
def file_dataset(self):
return [
('files', [ # simple upload
('name', 'facepalm.jpg'),
('type', 'image/jpeg'),
('tmp_name', '/tmp/php3zU3t5'),
('error', 0),
('size', 591387),
]),
('download', [ # multiple upload
('name', [
('file1', 'MyFile.txt'),
('file2', 'MyFile.jpg'),
]),
('type', [
('file1', 'text/plain'),
('file2', 'image/jpeg'),
]),
('tmp_name', [
('file1', '/tmp/php/phpgj46fg'),
('file2', '/tmp/php/php7s4ag4'),
]),
('error', [
('file1', 7),
('file2', 3),
]),
('size', [
('file1', 816),
('file2', 3075),
]),
]),
]
def strange_file_dataset(self):
return [
('fi' + chr(0) + 'les', [ # simple upload
('name', 'face' + chr(0) + 'palm.jpg'),
('type', 'image<?= \'/\'; ?>jpeg'),
('tmp_name', '/tmp/php3zU3t5'),
('error', 0),
('size', '591387'),
]),
('download', [ # multiple upload
('name', [
                    ('file1', 'C:\\System\\MyFile.txt'),
                    ('file2', 'A:\\MyFile.jpg'),
]),
('type', [
('file1', 'text/plain'),
('file2', 'image/jpeg'),
]),
('tmp_name', [
('file1', '/tmp/php/phpgj46fg'),
('file2', '/tmp/php/php7s4ag4'),
]),
('error', [
('file1', 7),
('file2', 3),
]),
('size', [
('file1', 816),
('file2', 6874),
]),
]),
]
def cli_dataset(self):
return [
'--testing=foo',
'--bar=baz',
'--file1=./data/tester.gif',
'--file2=data/testing.1.txt',
'--file3=./data/testing.2.txt',
'-abc',
'known',
'what',
]
def strange_cli_dataset(self):
return [
'--tes' + chr(0) + 'ting=f<o>o',
'---bar=b**a**z',
'-a-*c',
]
| 29.7
| 90
| 0.328436
|
6b7cec4a7b8e0f4180b80e1abbf3877045839a83
| 978
|
py
|
Python
|
kubernetes/test/test_v1_cluster_role_binding.py
|
fooka03/python
|
073cf4d89e532f92b57e8955b4efc3d5d5eb80cf
|
[
"Apache-2.0"
] | 2
|
2020-07-02T05:47:41.000Z
|
2020-07-02T05:50:34.000Z
|
kubernetes/test/test_v1_cluster_role_binding.py
|
fooka03/python
|
073cf4d89e532f92b57e8955b4efc3d5d5eb80cf
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:44:49.000Z
|
2021-03-25T23:44:49.000Z
|
k8sdeployment/k8sstat/python/kubernetes/test/test_v1_cluster_role_binding.py
|
JeffYFHuang/gpuaccounting
|
afa934350ebbd0634beb60b9df4a147426ea0006
|
[
"MIT"
] | 1
|
2021-10-13T17:45:37.000Z
|
2021-10-13T17:45:37.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_cluster_role_binding import V1ClusterRoleBinding # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1ClusterRoleBinding(unittest.TestCase):
"""V1ClusterRoleBinding unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ClusterRoleBinding(self):
"""Test V1ClusterRoleBinding"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_cluster_role_binding.V1ClusterRoleBinding() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.45
| 124
| 0.729039
|
7bbfd95e0d48bf38ab2aeb03f5ec8c2ae159fb73
| 273
|
py
|
Python
|
anonymization/anonymizers/__init__.py
|
alterway/anonymization
|
57e6c20f8c97e902f3513b5adfdbc211791aaef0
|
[
"MIT"
] | 15
|
2020-06-18T12:29:55.000Z
|
2021-12-14T16:31:26.000Z
|
anonymization/anonymizers/__init__.py
|
alterway/anonymization
|
57e6c20f8c97e902f3513b5adfdbc211791aaef0
|
[
"MIT"
] | 3
|
2021-03-20T17:47:03.000Z
|
2021-09-01T14:36:03.000Z
|
anonymization/anonymizers/__init__.py
|
alterway/anonymization
|
57e6c20f8c97e902f3513b5adfdbc211791aaef0
|
[
"MIT"
] | 2
|
2021-09-24T14:28:10.000Z
|
2022-03-07T19:54:48.000Z
|
from .fileAnonymizers import *
from .internetAnonymizers import *
from .spacyAnonymizers import *
from .phoneNumberAnonymizers import *
from .dictionaryAnonymizers import *
from .bankingAnonymizers import *
from .dateAnonymizers import *
from .signatureAnonymizers import *
| 34.125
| 37
| 0.827839
|
790d2055df1fbb87c99f2e8e1b90cb03dfa54398
| 1,279
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/tf/lrn_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 1
|
2019-09-22T01:05:07.000Z
|
2019-09-22T01:05:07.000Z
|
tools/mo/openvino/tools/mo/front/tf/lrn_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 58
|
2020-11-06T12:13:45.000Z
|
2022-03-28T13:20:11.000Z
|
tools/mo/openvino/tools/mo/front/tf/lrn_ext.py
|
pazamelin/openvino
|
b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48
|
[
"Apache-2.0"
] | 2
|
2021-07-14T07:40:50.000Z
|
2021-07-27T01:40:03.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.lrn import AttributedLRN
class LRNExtractor(FrontExtractorOp):
"""
    TF and IE (Caffe) parameters in LRN differ in several places :
    region (IE) : in TF there is no such parameter, they just use the last dimension (the feature dimension in case of NHWC)
    local-size (IE) : the size of the 1D window in Caffe. TF has 'depth_radius' instead, where
    local-size = '(depth_radius * 2) + 1'
    alpha (IE) : in Caffe 'alpha' is divided by local-size, so the TF alpha should be multiplied by local-size
Caffe ref : http://caffe.berkeleyvision.org/tutorial/layers/lrn.html
TF ref : https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization
"""
op = 'LRN'
enabled = True
@classmethod
def extract(cls, node):
pb = node.pb
AttributedLRN.update_node_stat(node, {
'alpha': pb.attr['alpha'].f * (2. * pb.attr['depth_radius'].i + 1.),
'beta': pb.attr['beta'].f,
'bias': pb.attr['bias'].f,
'local_size': (2 * pb.attr['depth_radius'].i + 1),
})
return cls.enabled
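# Worked example of the conversion above (hypothetical TF attribute values):
#   depth_radius = 2, alpha = 0.0001, beta = 0.75, bias = 1.0
#   local_size = 2 * 2 + 1 = 5
#   IE alpha   = 0.0001 * 5 = 0.0005
# so AttributedLRN receives {'alpha': 0.0005, 'beta': 0.75, 'bias': 1.0, 'local_size': 5}.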
| 39.96875
| 124
| 0.630962
|
fb4db9d3aabf69433d15bdf35c9ffe5ca17af852
| 3,311
|
py
|
Python
|
gitlab_stats/utils.py
|
anderslindho/gitlab_stats
|
464d5e22aa3bc67eaaabd9b20b30c33f19236622
|
[
"MIT"
] | 8
|
2018-10-19T10:17:20.000Z
|
2022-03-21T06:07:02.000Z
|
gitlab_stats/utils.py
|
anderslindho/gitlab_stats
|
464d5e22aa3bc67eaaabd9b20b30c33f19236622
|
[
"MIT"
] | 10
|
2019-01-28T19:53:26.000Z
|
2021-11-30T15:32:35.000Z
|
gitlab_stats/utils.py
|
Sylhare/gitlab_stats
|
157de3b703031ef0de39e187314133624f1ac1bb
|
[
"MIT"
] | 6
|
2019-01-28T19:07:21.000Z
|
2021-11-29T21:05:13.000Z
|
import csv
import datetime
import os
import gitlab_stats
GITLAB_TOKEN_ENV = 'GITLAB_TOKEN'
def check_token(token):
if token is None:
try:
token = os.environ[GITLAB_TOKEN_ENV]
except KeyError:
print("\nEnvironment variable containing your gitlab token could not be found"
"\nSet it using `export GITLAB_TOKEN=<your gitlab token>")
return token
def format_proxy(url):
return {'http': url, 'https': url}
def get_name_and_id(project_dict):
project_info = [
{'id': elem['id'], 'name': elem['name']} for elem in project_dict
]
return project_info
def get_pipelines_id(pipeline_dict):
pipelines = [elem['id'] for elem in pipeline_dict]
return pipelines
def get_pipeline_info(elem):
pipeline_info = {'id': elem['id'],
'status': elem['status'],
'duration': elem['duration'],
'date': str(elem['finished_at'])[:10]}
return pipeline_info
def seconds_to_min(seconds):
mins, sec = divmod(round(seconds), 60)
return "{} min {}s".format(round(mins), sec)
def get_duration_moy(project_info):
duration = (pipeline['duration'] for pipeline in project_info['pipelines'])
duration = list(filter(lambda x: x is not None, duration))
return round(sum(duration) / len(duration), 1) if len(duration) else None
def get_success_percentage(project_info):
success = [pipeline['status'] for pipeline in project_info['pipelines']]
return round(success.count('success') * 100 / len(success)) if len(success) else None
def get_pipeline_info_from(project_info, days=15):
date = datetime.datetime.now() - datetime.timedelta(days=days)
pipelines = []
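    # Assumes project_info['pipelines'] is ordered newest-first (the GitLab API
    # returns pipelines in descending order by default), so iteration can stop
    # at the first pipeline older than the cutoff date.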
for pipeline in project_info['pipelines']:
if datetime.datetime.strptime(pipeline['date'], "%Y-%m-%d") > date:
pipelines.append(pipeline)
else:
break
project_info['pipelines'] = pipelines
return project_info
def enhance_project_info(project_info):
    project_info.update({'duration_moy': get_duration_moy(project_info)})
    duration_moy = project_info['duration_moy']
    # Guard against projects with no usable pipeline durations.
    project_info.update({'duration_in_minutes':
                         seconds_to_min(duration_moy) if duration_moy is not None else None})
    project_info.update({'success_percentage': get_success_percentage(project_info)})
    project_info.pop('pipelines', None)
    return project_info
def print_cli_report(project_info):
print(gitlab_stats.CLI_REPORT.format(project_info['name'],
project_info['id'],
datetime.date.today(),
project_info['duration_in_minutes'],
project_info['success_percentage']))
def generate_report(project_info, path='output.csv'):
if os.path.isfile(path):
write_dict_to_csv(project_info, path)
else:
create_dict_to_csv(project_info, path)
def write_dict_to_csv(project_info, path):
    with open(path, 'a', newline='') as f:  # append a row to the existing report
w = csv.DictWriter(f, project_info.keys())
w.writerow(project_info)
def create_dict_to_csv(project_info, path):
    with open(path, 'w', newline='') as f:
w = csv.DictWriter(f, project_info.keys())
w.writeheader()
w.writerow(project_info)
| 29.04386
| 94
| 0.643008
|
308823f6b12bbd2a3773043b94646afdf604ec53
| 1,317
|
py
|
Python
|
baidu_code/soap_mockserver/lib/settings.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | null | null | null |
baidu_code/soap_mockserver/lib/settings.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | null | null | null |
baidu_code/soap_mockserver/lib/settings.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | 3
|
2016-10-08T15:01:49.000Z
|
2018-05-24T03:14:24.000Z
|
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(created)s - %(name)s - %(levelname)s - %(module)s- %(process)d -%(thread)d - %(filename)s - %(funcName)s - %(lineno)d - %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': './debug.log',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'mockserver': {
'handlers': ['console', 'mail_admins','file'],
'level': 'INFO',
}
}
}
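# A minimal sketch of how this LOGGING dict is consumed. Assumption: this module is a
# Django settings file, so Django itself feeds LOGGING to logging.config.dictConfig()
# at startup (the 'mail_admins' handler only works inside a configured Django project).
# Application code then just asks for the configured logger by name, e.g.:
#
#     import logging
#     logger = logging.getLogger('mockserver')  # routed to the console, mail_admins and file handlers
#     logger.info('mock server started')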
| 26.877551
| 157
| 0.410023
|
e50ec456f5aede70968f3188e873b9d232f7efa9
| 844
|
py
|
Python
|
deprecated/dataloaders/deprecated_examples/multimedia/avmnist_low_rank_tensor.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 148
|
2021-03-06T06:54:13.000Z
|
2022-03-29T19:27:21.000Z
|
deprecated/dataloaders/deprecated_examples/multimedia/avmnist_low_rank_tensor.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 10
|
2021-07-19T22:57:49.000Z
|
2022-02-04T03:12:29.000Z
|
deprecated/dataloaders/deprecated_examples/multimedia/avmnist_low_rank_tensor.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 18
|
2021-07-22T07:17:27.000Z
|
2022-03-27T16:11:40.000Z
|
import sys
import os
# Put the repository root on sys.path before importing the MultiBench packages below,
# otherwise those imports fail when the script is launched from the repository root.
sys.path.append(os.getcwd())
from unimodals.common_models import LeNet, MLP, Constant
import torch
from torch import nn
from datasets.avmnist.get_data import get_dataloader
from fusions.common_fusions import LowRankTensorFusion
from training_structures.Simple_Late_Fusion import train, test
filename = 'lowrank.pt'
traindata, validdata, testdata = get_dataloader(
'/data/yiwei/avmnist/_MFAS/avmnist')
channels = 6
encoders = [LeNet(1, channels, 3).cuda(), LeNet(1, channels, 5).cuda()]
head = MLP(channels*20, 100, 10).cuda()
fusion = LowRankTensorFusion([channels*8, channels*32], channels*20, 40).cuda()
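# The two LeNet encoders are presumed to emit channels*8 and channels*32 features
# respectively (hence the input dims passed to LowRankTensorFusion), which the fusion
# combines into a channels*20-dimensional vector via a rank-40 low-rank factorisation;
# the MLP head above then maps those 120 fused features to the 10 digit classes.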
train(encoders, fusion, head, traindata, validdata, 30,
optimtype=torch.optim.SGD, lr=0.05, weight_decay=0.0002, save=filename)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata)
| 33.76
| 79
| 0.766588
|
0bb916b6c6a28509906763d761a638c27b0fd154
| 93,949
|
py
|
Python
|
dnacentersdk/api/v2_2_1/event_management.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 32
|
2019-09-05T05:16:56.000Z
|
2022-03-22T09:50:38.000Z
|
dnacentersdk/api/v2_2_1/event_management.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 35
|
2019-09-07T18:58:54.000Z
|
2022-03-24T19:29:36.000Z
|
dnacentersdk/api/v2_2_1/event_management.py
|
oboehmer/dnacentersdk
|
25c4e99900640deee91a56aa886874d9cb0ca960
|
[
"MIT"
] | 18
|
2019-09-09T11:07:21.000Z
|
2022-03-25T08:49:59.000Z
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center Event Management API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class EventManagement(object):
"""Cisco DNA Center Event Management API (version: 2.2.1).
Wraps the DNA Center Event Management
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new EventManagement
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(EventManagement, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
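    # Hypothetical usage sketch: this wrapper is normally reached through the top-level SDK
    # object rather than instantiated directly; the object below and its credentials are
    # assumptions for illustration only.
    #
    #     from dnacentersdk import DNACenterAPI
    #     api = DNACenterAPI(username='admin', password='***',
    #                        base_url='https://dnac.example.com')
    #     count = api.event_management.count_of_event_subscriptions(
    #         event_ids='event-id-1,event-id-2')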
def count_of_event_subscriptions(self,
event_ids,
headers=None,
**request_parameters):
"""Returns the Count of EventSubscriptions.
Args:
event_ids(basestring): eventIds query parameter. List of subscriptions related to the respective
eventIds.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/count')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c538dc50a4555b5fba17b672a89ee1b8_v2_2_1', json_data)
def count_of_notifications(self,
category=None,
domain=None,
end_time=None,
event_ids=None,
severity=None,
source=None,
start_time=None,
sub_domain=None,
type=None,
headers=None,
**request_parameters):
"""Get the Count of Published Notifications.
Args:
event_ids(basestring): eventIds query parameter. The registered EventId should be provided.
start_time(int): startTime query parameter. Start Time in milliseconds.
end_time(int): endTime query parameter. End Time in milliseconds.
category(basestring): category query parameter.
type(basestring): type query parameter.
severity(basestring): severity query parameter.
domain(basestring): domain query parameter.
sub_domain(basestring): subDomain query parameter. Sub Domain.
source(basestring): source query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(start_time, int)
check_type(end_time, int)
check_type(category, basestring)
check_type(type, basestring)
check_type(severity, basestring)
check_type(domain, basestring)
check_type(sub_domain, basestring)
check_type(source, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'startTime':
start_time,
'endTime':
end_time,
'category':
category,
'type':
type,
'severity':
severity,
'domain':
domain,
'subDomain':
sub_domain,
'source':
source,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/event-series/count')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_fd269fe156e4b5ad3f4210b7b168_v2_2_1', json_data)
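    # Hypothetical call sketch (reusing the 'api' handle assumed above): counting the
    # notifications published inside an explicit time window, filtered by severity.
    #
    #     counts = api.event_management.count_of_notifications(
    #         start_time=1597950637211, end_time=1597961437211, severity='1')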
def get_syslog_subscription_details(self,
connector_type,
instance_id=None,
name=None,
headers=None,
**request_parameters):
"""Gets the list of subscription details for specified connectorType.
Args:
connector_type(basestring): connectorType query parameter. Connector Type [SYSLOG].
name(basestring): name query parameter. Name of the specific configuration.
instance_id(basestring): instanceId query parameter. Instance Id of the specific configuration.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(connector_type, basestring,
may_be_none=False)
check_type(name, basestring)
check_type(instance_id, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'connectorType':
connector_type,
'name':
name,
'instanceId':
instance_id,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription-details/syslog')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c0dcb335458a58fa8bc5a485b174427d_v2_2_1', json_data)
def get_email_subscription_details(self,
connector_type,
instance_id=None,
name=None,
headers=None,
**request_parameters):
"""Gets the list of subscription details for specified connectorType.
Args:
connector_type(basestring): connectorType query parameter. Connector Type [EMAIL].
name(basestring): name query parameter. Name of the specific configuration.
instance_id(basestring): instanceId query parameter. Instance Id of the specific configuration.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(connector_type, basestring,
may_be_none=False)
check_type(name, basestring)
check_type(instance_id, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'connectorType':
connector_type,
'name':
name,
'instanceId':
instance_id,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription-details/email')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d420225889bb16f99ec7ba099a_v2_2_1', json_data)
def get_email_event_subscriptions(self,
event_ids=None,
limit=None,
offset=None,
order=None,
sort_by=None,
headers=None,
**request_parameters):
"""Gets the list of email Subscriptions's based on provided offset and limit.
Args:
event_ids(basestring): eventIds query parameter. List of email subscriptions related to the respective
eventIds (Comma separated event ids).
            offset(int): offset query parameter. The number of subscriptions to skip in the result set;
                defaults to 0.
            limit(int): limit query parameter. The maximum number of subscriptions to return per page;
                defaults to 10.
sort_by(basestring): sortBy query parameter. SortBy field name.
order(basestring): order query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/email')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_bc212b5ee1f252479f35e8dd58319f17_v2_2_1', json_data)
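    # Hypothetical pagination sketch: offset and limit default to 0 and 10 per the docstring,
    # so the second page of email subscriptions could be fetched like this (api as above):
    #
    #     page_two = api.event_management.get_email_event_subscriptions(
    #         offset=10, limit=10, sort_by='name', order='asc')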
def update_email_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update Email Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_f8b4842604b65658afb34b4f124db469_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/email')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_f8b4842604b65658afb34b4f124db469_v2_2_1', json_data)
def create_email_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Create Email Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_e69d02d71905aecbd10b782469efbda_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/email')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_e69d02d71905aecbd10b782469efbda_v2_2_1', json_data)
def get_events(self,
tags,
event_id=None,
limit=None,
offset=None,
order=None,
sort_by=None,
headers=None,
**request_parameters):
"""Gets the list of registered Events with provided eventIds or tags as mandatory.
Args:
event_id(basestring): eventId query parameter. The registered EventId should be provided.
tags(basestring): tags query parameter. The registered Tags should be provided.
            offset(int): offset query parameter. The number of registered events to skip in the result set;
                defaults to 0.
            limit(int): limit query parameter. The maximum number of registered events to return per page;
                defaults to 10.
sort_by(basestring): sortBy query parameter. SortBy field name.
order(basestring): order query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_id, basestring)
check_type(tags, basestring,
may_be_none=False)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventId':
event_id,
'tags':
tags,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/events')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_bf36f1819e61575189c0709efab6e48a_v2_2_1', json_data)
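    # Hypothetical call sketch: 'tags' is mandatory here (checked with may_be_none=False),
    # while event_id and the paging parameters are optional; the tag value is a placeholder.
    #
    #     events = api.event_management.get_events(tags='SYSTEM-HEALTH', limit=10)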
def get_auditlog_summary(self,
category=None,
context=None,
description=None,
device_id=None,
domain=None,
end_time=None,
event_hierarchy=None,
event_id=None,
instance_id=None,
is_parent_only=None,
is_system_events=None,
name=None,
parent_instance_id=None,
severity=None,
site_id=None,
source=None,
start_time=None,
sub_domain=None,
user_id=None,
headers=None,
**request_parameters):
"""Get Audit Log Summary from the Event-Hub.
Args:
parent_instance_id(basestring): parentInstanceId query parameter. Parent Audit Log record's instanceID.
is_parent_only(bool): isParentOnly query parameter. Parameter to filter parent only audit-logs.
instance_id(basestring): instanceId query parameter. InstanceID of the Audit Log.
name(basestring): name query parameter. Audit Log notification event name.
event_id(basestring): eventId query parameter. Audit Log notification's event ID. .
category(basestring): category query parameter. Audit Log notification's event category. Supported
values: INFO, WARN, ERROR, ALERT, TASK_PROGRESS, TASK_FAILURE, TASK_COMPLETE, COMMAND,
QUERY, CONVERSATION.
severity(basestring): severity query parameter. Audit Log notification's event severity. Supported
values: 1, 2, 3, 4, 5.
domain(basestring): domain query parameter. Audit Log notification's event domain.
sub_domain(basestring): subDomain query parameter. Audit Log notification's event sub-domain.
source(basestring): source query parameter. Audit Log notification's event source.
user_id(basestring): userId query parameter. Audit Log notification's event userId.
context(basestring): context query parameter. Audit Log notification's event correlationId.
event_hierarchy(basestring): eventHierarchy query parameter. Audit Log notification's event
eventHierarchy. Example: "US.CA.San Jose" OR "US.CA" OR "CA.San Jose" Delimiter for
hierarchy separation is ".".
site_id(basestring): siteId query parameter. Audit Log notification's siteId.
device_id(basestring): deviceId query parameter. Audit Log notification's deviceId.
is_system_events(bool): isSystemEvents query parameter. Parameter to filter system generated audit-logs.
description(basestring): description query parameter. String full/partial search (Provided input string
is case insensitively matched for records).
start_time(int): startTime query parameter. Start Time in milliseconds since Epoch Eg. 1597950637211
(when provided endTime is mandatory).
end_time(int): endTime query parameter. End Time in milliseconds since Epoch Eg. 1597961437211 (when
provided startTime is mandatory).
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(parent_instance_id, basestring)
check_type(is_parent_only, bool)
check_type(instance_id, basestring)
check_type(name, basestring)
check_type(event_id, basestring)
check_type(category, basestring)
check_type(severity, basestring)
check_type(domain, basestring)
check_type(sub_domain, basestring)
check_type(source, basestring)
check_type(user_id, basestring)
check_type(context, basestring)
check_type(event_hierarchy, basestring)
check_type(site_id, basestring)
check_type(device_id, basestring)
check_type(is_system_events, bool)
check_type(description, basestring)
check_type(start_time, int)
check_type(end_time, int)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'parentInstanceId':
parent_instance_id,
'isParentOnly':
is_parent_only,
'instanceId':
instance_id,
'name':
name,
'eventId':
event_id,
'category':
category,
'severity':
severity,
'domain':
domain,
'subDomain':
sub_domain,
'source':
source,
'userId':
user_id,
'context':
context,
'eventHierarchy':
event_hierarchy,
'siteId':
site_id,
'deviceId':
device_id,
'isSystemEvents':
is_system_events,
'description':
description,
'startTime':
start_time,
'endTime':
end_time,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/data/api/v1/event/event-series/audit-log/summary')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_ea7c0220d55ae9e1a51d6823ce862_v2_2_1', json_data)
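    # Hypothetical call sketch: per the docstring, startTime and endTime must be supplied
    # together (epoch milliseconds); the values below are the examples quoted above.
    #
    #     summary = api.event_management.get_auditlog_summary(
    #         is_parent_only=True, start_time=1597950637211, end_time=1597961437211)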
def get_event_subscriptions(self,
event_ids=None,
limit=None,
offset=None,
order=None,
sort_by=None,
headers=None,
**request_parameters):
"""Gets the list of Subscriptions's based on provided offset and limit.
Args:
event_ids(basestring): eventIds query parameter. List of subscriptions related to the respective
eventIds.
            offset(int): offset query parameter. The number of subscriptions to skip in the result set;
                defaults to 0.
            limit(int): limit query parameter. The maximum number of subscriptions to return per page;
                defaults to 10.
sort_by(basestring): sortBy query parameter. SortBy field name.
order(basestring): order query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d7d4e55d6bbb21c34ce863a131_v2_2_1', json_data)
def update_event_subscriptions(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update SubscriptionEndpoint to list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_dfda5beca4cc5437876bff366493ebf0_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_dfda5beca4cc5437876bff366493ebf0_v2_2_1', json_data)
def create_event_subscriptions(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Subscribe SubscriptionEndpoint to list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_fcc151af7615a84adf48b714d146192_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_fcc151af7615a84adf48b714d146192_v2_2_1', json_data)
def delete_event_subscriptions(self,
subscriptions,
headers=None,
**request_parameters):
"""Delete EventSubscriptions.
Args:
subscriptions(basestring): subscriptions query parameter. List of EventSubscriptionId's for removal.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(subscriptions, basestring,
may_be_none=False)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'subscriptions':
subscriptions,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_a0e0b1772dfc5a02a96a9f6ee6e2579b_v2_2_1', json_data)
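    # Hypothetical call sketch: 'subscriptions' is a mandatory comma-separated string of
    # subscription ids to remove (the ids below are placeholders).
    #
    #     result = api.event_management.delete_event_subscriptions(
    #         subscriptions='sub-id-1,sub-id-2')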
def get_syslog_event_subscriptions(self,
event_ids=None,
limit=None,
offset=None,
order=None,
sort_by=None,
headers=None,
**request_parameters):
"""Gets the list of Syslog Subscriptions's based on provided offset and limit.
Args:
event_ids(basestring): eventIds query parameter. List of subscriptions related to the respective
eventIds (Comma separated event ids).
            offset(int): offset query parameter. The number of subscriptions to skip in the result set;
                defaults to 0.
            limit(int): limit query parameter. The maximum number of subscriptions to return per page;
                defaults to 10.
sort_by(basestring): sortBy query parameter. SortBy field name.
order(basestring): order query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/syslog')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c7bed4b4148753e6bc9912e3be135217_v2_2_1', json_data)
def update_syslog_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update Syslog Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_d8fc92ddeab597ebb50ea003a6d46bd_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/syslog')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_d8fc92ddeab597ebb50ea003a6d46bd_v2_2_1', json_data)
def create_syslog_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Create Syslog Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_fb5a8c0075563491622171958074bf_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/syslog')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_fb5a8c0075563491622171958074bf_v2_2_1', json_data)
def count_of_events(self,
tags,
event_id=None,
headers=None,
**request_parameters):
"""Get the count of registered events with provided eventIds or tags as mandatory.
Args:
event_id(basestring): eventId query parameter. The registered EventId should be provided.
tags(basestring): tags query parameter. The registered Tags should be provided.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_id, basestring)
check_type(tags, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventId':
event_id,
'tags':
tags,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/events/count')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b21d2947d715c198f5e62ba3149839a_v2_2_1', json_data)
def get_eventartifacts(self,
event_ids=None,
limit=None,
offset=None,
order=None,
search=None,
sort_by=None,
tags=None,
headers=None,
**request_parameters):
"""Gets the list of artifacts based on provided offset and limit.
Args:
event_ids(basestring): eventIds query parameter. List of eventIds.
tags(basestring): tags query parameter. Tags defined.
offset(int): offset query parameter. Record start offset.
limit(int): limit query parameter. # of records to return in result set.
sort_by(basestring): sortBy query parameter. Sort by field.
order(basestring): order query parameter. sorting order (asc/desc).
            search(basestring): search query parameter. Finds matches in name, description, eventId, type, category.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(tags, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
check_type(search, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'tags':
tags,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
'search':
search,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/system/api/v1/event/artifact')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c0e0d76b2561b8f2efd0220f02267_v2_2_1', json_data)
def get_notifications(self,
category=None,
domain=None,
end_time=None,
event_ids=None,
limit=None,
offset=None,
order=None,
severity=None,
sort_by=None,
source=None,
start_time=None,
sub_domain=None,
type=None,
headers=None,
**request_parameters):
"""Get the list of Published Notifications.
Args:
event_ids(basestring): eventIds query parameter. The registered EventId should be provided.
start_time(int): startTime query parameter. Start Time in milliseconds.
end_time(int): endTime query parameter. End Time in milliseconds.
category(basestring): category query parameter.
type(basestring): type query parameter.
severity(basestring): severity query parameter.
domain(basestring): domain query parameter.
sub_domain(basestring): subDomain query parameter. Sub Domain.
source(basestring): source query parameter.
offset(int): offset query parameter. Start Offset.
limit(int): limit query parameter. # of records.
sort_by(basestring): sortBy query parameter. Sort By column.
order(basestring): order query parameter. Ascending/Descending order [asc/desc].
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(start_time, int)
check_type(end_time, int)
check_type(category, basestring)
check_type(type, basestring)
check_type(severity, basestring)
check_type(domain, basestring)
check_type(sub_domain, basestring)
check_type(source, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'startTime':
start_time,
'endTime':
end_time,
'category':
category,
'type':
type,
'severity':
severity,
'domain':
domain,
'subDomain':
sub_domain,
'source':
source,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/event-series')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c641f481dd285301861010da8d6fbf9f_v2_2_1', json_data)
def get_auditlog_records(self,
category=None,
context=None,
description=None,
device_id=None,
domain=None,
end_time=None,
event_hierarchy=None,
event_id=None,
instance_id=None,
is_system_events=None,
limit=None,
name=None,
offset=None,
order=None,
parent_instance_id=None,
severity=None,
site_id=None,
sort_by=None,
source=None,
start_time=None,
sub_domain=None,
user_id=None,
headers=None,
**request_parameters):
"""Get Audit Log Event instances from the Event-Hub .
Args:
parent_instance_id(basestring): parentInstanceId query parameter. Parent Audit Log record's instanceID.
instance_id(basestring): instanceId query parameter. InstanceID of the Audit Log.
name(basestring): name query parameter. Audit Log notification event name.
event_id(basestring): eventId query parameter. Audit Log notification's event ID. .
category(basestring): category query parameter. Audit Log notification's event category. Supported
values: INFO, WARN, ERROR, ALERT, TASK_PROGRESS, TASK_FAILURE, TASK_COMPLETE, COMMAND,
QUERY, CONVERSATION.
severity(basestring): severity query parameter. Audit Log notification's event severity. Supported
values: 1, 2, 3, 4, 5.
domain(basestring): domain query parameter. Audit Log notification's event domain.
sub_domain(basestring): subDomain query parameter. Audit Log notification's event sub-domain.
source(basestring): source query parameter. Audit Log notification's event source.
user_id(basestring): userId query parameter. Audit Log notification's event userId.
context(basestring): context query parameter. Audit Log notification's event correlationId.
event_hierarchy(basestring): eventHierarchy query parameter. Audit Log notification's event
eventHierarchy. Example: "US.CA.San Jose" OR "US.CA" OR "CA.San Jose" Delimiter for
hierarchy separation is ".".
site_id(basestring): siteId query parameter. Audit Log notification's siteId.
device_id(basestring): deviceId query parameter. Audit Log notification's deviceId.
is_system_events(bool): isSystemEvents query parameter. Parameter to filter system generated audit-logs.
description(basestring): description query parameter. String full/partial search (Provided input string
is case insensitively matched for records).
offset(int): offset query parameter. Position of a particular Audit Log record in the data. .
limit(int): limit query parameter. Number of Audit Log records to be returned per page.
start_time(int): startTime query parameter. Start Time in milliseconds since Epoch Eg. 1597950637211
(when provided endTime is mandatory).
end_time(int): endTime query parameter. End Time in milliseconds since Epoch Eg. 1597961437211 (when
provided startTime is mandatory).
sort_by(basestring): sortBy query parameter. Sort the Audit Logs by certain fields. Supported values are
event notification header attributes.
order(basestring): order query parameter. Order of the sorted Audit Log records. Default value is desc
by timestamp. Supported values: asc, desc.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(parent_instance_id, basestring)
check_type(instance_id, basestring)
check_type(name, basestring)
check_type(event_id, basestring)
check_type(category, basestring)
check_type(severity, basestring)
check_type(domain, basestring)
check_type(sub_domain, basestring)
check_type(source, basestring)
check_type(user_id, basestring)
check_type(context, basestring)
check_type(event_hierarchy, basestring)
check_type(site_id, basestring)
check_type(device_id, basestring)
check_type(is_system_events, bool)
check_type(description, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(start_time, int)
check_type(end_time, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'parentInstanceId':
parent_instance_id,
'instanceId':
instance_id,
'name':
name,
'eventId':
event_id,
'category':
category,
'severity':
severity,
'domain':
domain,
'subDomain':
sub_domain,
'source':
source,
'userId':
user_id,
'context':
context,
'eventHierarchy':
event_hierarchy,
'siteId':
site_id,
'deviceId':
device_id,
'isSystemEvents':
is_system_events,
'description':
description,
'offset':
offset,
'limit':
limit,
'startTime':
start_time,
'endTime':
end_time,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/data/api/v1/event/event-series/audit-logs')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b0aa5a61f64a5da997dfe05bc8a4a64f_v2_2_1', json_data)
def get_rest_webhook_event_subscriptions(self,
event_ids=None,
limit=None,
offset=None,
order=None,
sort_by=None,
headers=None,
**request_parameters):
"""Gets the list of Rest/Webhook Subscriptions's based on provided offset and limit.
Args:
event_ids(basestring): eventIds query parameter. List of subscriptions related to the respective
eventIds (Comma separated event ids).
            offset(int): offset query parameter. The number of subscriptions to skip in the result set;
                defaults to 0.
            limit(int): limit query parameter. The maximum number of subscriptions to return per page;
                defaults to 10.
sort_by(basestring): sortBy query parameter. SortBy field name.
order(basestring): order query parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(event_ids, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'eventIds':
event_ids,
'offset':
offset,
'limit':
limit,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/rest')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_ee2008494d158e7bff7f106519a64c5_v2_2_1', json_data)
def update_rest_webhook_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update Rest/Webhook Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_b6581534bb321eaea272365b7_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/rest')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.put(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_b6581534bb321eaea272365b7_v2_2_1', json_data)
def create_rest_webhook_event_subscription(self,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Create Rest/Webhook Subscription Endpoint for list of registered events.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_f41eb48a0da56949cfaddeecb51ab66_v2_2_1')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription/rest')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=_params,
json=_payload)
return self._object_factory('bpm_f41eb48a0da56949cfaddeecb51ab66_v2_2_1', json_data)
def get_auditlog_parent_records(self,
category=None,
context=None,
description=None,
device_id=None,
domain=None,
end_time=None,
event_hierarchy=None,
event_id=None,
instance_id=None,
is_system_events=None,
limit=None,
name=None,
offset=None,
order=None,
severity=None,
site_id=None,
sort_by=None,
source=None,
start_time=None,
sub_domain=None,
user_id=None,
headers=None,
**request_parameters):
"""Get Parent Audit Log Event instances from the Event-Hub .
Args:
instance_id(basestring): instanceId query parameter. InstanceID of the Audit Log.
name(basestring): name query parameter. Audit Log notification event name.
event_id(basestring): eventId query parameter. Audit Log notification's event ID. .
category(basestring): category query parameter. Audit Log notification's event category. Supported
values: INFO, WARN, ERROR, ALERT, TASK_PROGRESS, TASK_FAILURE, TASK_COMPLETE, COMMAND,
QUERY, CONVERSATION.
severity(basestring): severity query parameter. Audit Log notification's event severity. Supported
values: 1, 2, 3, 4, 5.
domain(basestring): domain query parameter. Audit Log notification's event domain.
sub_domain(basestring): subDomain query parameter. Audit Log notification's event sub-domain.
source(basestring): source query parameter. Audit Log notification's event source.
user_id(basestring): userId query parameter. Audit Log notification's event userId.
context(basestring): context query parameter. Audit Log notification's event correlationId.
event_hierarchy(basestring): eventHierarchy query parameter. Audit Log notification's event
eventHierarchy. Example: "US.CA.San Jose" OR "US.CA" OR "CA.San Jose" Delimiter for
hierarchy separation is ".".
site_id(basestring): siteId query parameter. Audit Log notification's siteId.
device_id(basestring): deviceId query parameter. Audit Log notification's deviceId.
is_system_events(bool): isSystemEvents query parameter. Parameter to filter system generated audit-logs.
description(basestring): description query parameter. String full/partial search (Provided input string
is case insensitively matched for records).
offset(int): offset query parameter. Position of a particular Audit Log record in the data. .
limit(int): limit query parameter. Number of Audit Log records to be returned per page.
start_time(int): startTime query parameter. Start Time in milliseconds since Epoch Eg. 1597950637211
(when provided endTime is mandatory).
end_time(int): endTime query parameter. End Time in milliseconds since Epoch Eg. 1597961437211 (when
provided startTime is mandatory).
sort_by(basestring): sortBy query parameter. Sort the Audit Logs by certain fields. Supported values are
event notification header attributes.
order(basestring): order query parameter. Order of the sorted Audit Log records. Default value is desc
by timestamp. Supported values: asc, desc.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(instance_id, basestring)
check_type(name, basestring)
check_type(event_id, basestring)
check_type(category, basestring)
check_type(severity, basestring)
check_type(domain, basestring)
check_type(sub_domain, basestring)
check_type(source, basestring)
check_type(user_id, basestring)
check_type(context, basestring)
check_type(event_hierarchy, basestring)
check_type(site_id, basestring)
check_type(device_id, basestring)
check_type(is_system_events, bool)
check_type(description, basestring)
check_type(offset, int)
check_type(limit, int)
check_type(start_time, int)
check_type(end_time, int)
check_type(sort_by, basestring)
check_type(order, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'instanceId':
instance_id,
'name':
name,
'eventId':
event_id,
'category':
category,
'severity':
severity,
'domain':
domain,
'subDomain':
sub_domain,
'source':
source,
'userId':
user_id,
'context':
context,
'eventHierarchy':
event_hierarchy,
'siteId':
site_id,
'deviceId':
device_id,
'isSystemEvents':
is_system_events,
'description':
description,
'offset':
offset,
'limit':
limit,
'startTime':
start_time,
'endTime':
end_time,
'sortBy':
sort_by,
'order':
order,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/data/api/v1/event/event-series/audit-log/parent-'
+ 'records')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_f8e3a0674c15fd58cd78f42dca37c7c_v2_2_1', json_data)
def eventartifact_count(self,
headers=None,
**request_parameters):
"""Get the count of registered event artifacts with provided eventIds or tags as mandatory.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/system/api/v1/event/artifact/count')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a137e0b583c85ffe80fbbd85b480bf15_v2_2_1', json_data)
def get_rest_webhook_subscription_details(self,
connector_type,
instance_id=None,
name=None,
headers=None,
**request_parameters):
"""Gets the list of subscription details for specified connectorType.
Args:
connector_type(basestring): connectorType query parameter. Connector Type [REST].
name(basestring): name query parameter. Name of the specific configuration.
instance_id(basestring): instanceId query parameter. Instance Id of the specific configuration.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(connector_type, basestring,
may_be_none=False)
check_type(name, basestring)
check_type(instance_id, basestring)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'connectorType':
connector_type,
'name':
name,
'instanceId':
instance_id,
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/subscription-details/rest')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_f278c72555e9a56f554b2a21c85_v2_2_1', json_data)
def get_status_api_for_events(self,
execution_id,
headers=None,
**request_parameters):
"""Get the Status of events API calls with provided executionId as mandatory path parameter.
Args:
execution_id(basestring): executionId path parameter. Execution ID.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(execution_id, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'executionId': execution_id,
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/event/api-status/{executionId}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e1bd67a1a0225713ab23f0d0d3ceb4f6_v2_2_1', json_data)
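# ---------------------------------------------------------------------------
# Hedged usage sketch for the event wrappers above. Everything outside this
# file is an assumption: it presumes a DNACenterAPI client exposing these
# methods under an `event_management` attribute, a reachable DNA Center
# instance, valid credentials, and a made-up execution id.
# ---------------------------------------------------------------------------
def _example_event_api_usage():
    from dnacentersdk import DNACenterAPI  # assumed top-level SDK entry point

    api = DNACenterAPI(base_url="https://dnac.example.com",
                       username="admin", password="secret", verify=False)
    # List REST/webhook subscriptions for registered events (paged).
    subscriptions = api.event_management.get_rest_webhook_event_subscription(
        limit=10, offset=1)
    # Fetch parent audit-log records for a window of time.
    audit = api.event_management.get_auditlog_parent_records(
        start_time=1597950637211, end_time=1597961437211, limit=5)
    # Poll the status of a previously issued events API call (id is made up).
    status = api.event_management.get_status_api_for_events(
        execution_id="2f4bc1a0-0000-0000-0000-000000000000")
    return subscriptions, audit, status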
| 41.314424
| 116
| 0.565073
|
fea40b45e0642db0f2715c651a05e9f16cbfe8c0
| 1,486
|
py
|
Python
|
code/apps/testsuite/testsuites/core.py
|
FHMS-ITS/CORSICA
|
2bbbf5c0d3b8ec0db756b79c11fa5727f6990683
|
[
"MIT"
] | null | null | null |
code/apps/testsuite/testsuites/core.py
|
FHMS-ITS/CORSICA
|
2bbbf5c0d3b8ec0db756b79c11fa5727f6990683
|
[
"MIT"
] | null | null | null |
code/apps/testsuite/testsuites/core.py
|
FHMS-ITS/CORSICA
|
2bbbf5c0d3b8ec0db756b79c11fa5727f6990683
|
[
"MIT"
] | null | null | null |
import json
from sqlalchemy.orm import sessionmaker
from database.models.crawler import TestDevice
from database.models.generator.memory import MemWebRoots
import pymysql
import warnings
from utils.log import _info
from utils.utils import jsv_save_javascript_value
warnings.filterwarnings('ignore', category=pymysql.Warning)
def create(config, db_engine, args, result_path):
db_session = sessionmaker(bind=db_engine)()
test_devices = db_session.query(TestDevice).all()
res_list = {}
for device in test_devices:
str_dev = "{dev.scheme}{dev.address}:{dev.port}".format(dev=device)
if device.fw_id in res_list:
res_list[device.fw_id].append(str_dev)
else:
res_list[device.fw_id] = [str_dev]
firm_web_roots = {}
web_roots = db_session.query(MemWebRoots).all()
for web_root in web_roots:
if web_root.firmware in firm_web_roots:
firm_web_roots[web_root.firmware].append(web_root.id)
else:
firm_web_roots[web_root.firmware] = [web_root.id]
jsv_save_javascript_value(db_session, 'devices', json.dumps(res_list))
jsv_save_javascript_value(db_session, 'firm_web_roots', json.dumps(firm_web_roots))
f = open("{}/devices.js".format(result_path), "w")
f.write("devices = {}\n\n".format(json.dumps(res_list)))
f.write("firm_web_roots = {}\n\n".format(json.dumps(firm_web_roots)))
f.close()
db_session.close()
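# Hedged illustration of the artefact create() writes: a devices.js file with
# two global JavaScript variables. The firmware id, addresses and web-root ids
# below are invented sample values; real runs derive them from the TestDevice
# and MemWebRoots tables.
def _example_devices_js(result_path="."):
    sample_devices = {"42": ["http://10.0.0.5:8080", "http://10.0.0.6:8080"]}
    sample_web_roots = {"42": [1, 7]}
    with open("{}/devices.js".format(result_path), "w") as fh:
        fh.write("devices = {}\n\n".format(json.dumps(sample_devices)))
        fh.write("firm_web_roots = {}\n\n".format(json.dumps(sample_web_roots)))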
| 36.243902
| 88
| 0.693136
|
d40390a529208ca3e70ff2c0ce57eec575b25d31
| 1,893
|
py
|
Python
|
msmart/client.py
|
phillcz/midea-msmart
|
d6e82e527b076a93e6c419817a415cc8d1314e82
|
[
"MIT"
] | 2
|
2021-05-31T15:56:34.000Z
|
2021-06-01T16:04:09.000Z
|
msmart/client.py
|
phillcz/midea-msmart
|
d6e82e527b076a93e6c419817a415cc8d1314e82
|
[
"MIT"
] | null | null | null |
msmart/client.py
|
phillcz/midea-msmart
|
d6e82e527b076a93e6c419817a415cc8d1314e82
|
[
"MIT"
] | null | null | null |
# This library is part of an effort to get Midea air conditioning devices to work with Home Assistant
# This library is based off the work by Yitsushi. The original work was a ruby based commandline utility.
# The original Ruby version can be found here: https://github.com/yitsushi/midea-air-condition
# License MIT - Use as you please and at your own risk
from typing import Dict, List
from msmart.lan import lan
from msmart.device import air_conditioning_device
from msmart.device import dehumidifier_device
from msmart.device import unknown_device
VERSION = '0.1.15'
DEVICE_TYPES = {
0xAC: air_conditioning_device,
0x00: dehumidifier_device
}
def build_device(cloud_service, device_detail: dict):
device_type = int(device_detail['type'], 0)
device_constructor = DEVICE_TYPES.get(device_type, None)
if device_constructor is not None:
device = device_constructor(cloud_service)
else:
device = unknown_device(cloud_service)
device.set_device_detail(device_detail)
return device
class client:
def __init__(self, device_ip: str, device_id: str):
        self._lan = lan(device_ip, device_id)
self._devices = {} # type: Dict[str, device]
def setup(self):
self._lan.login()
def devices(self):
self.setup()
        device_status_list = self._lan.list()
for device_status in device_status_list:
current_device_id = device_status['id']
current_device = self._devices.setdefault(current_device_id, None)
if current_device is None:
                current_device = build_device(self._lan, device_status)
self._devices[current_device_id] = current_device
else:
current_device.set_device_detail(device_status)
return list(self._devices.values())
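# Hedged usage sketch (assumptions: a compatible Midea appliance is reachable
# on the LAN at the given address, the device id is known, and msmart.lan.lan
# exposes the login()/list() calls used by client above; the address and id
# below are made up).
if __name__ == "__main__":
    c = client("192.168.1.50", "21354812312345")
    for dev in c.devices():
        print(type(dev).__name__, dev)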
| 34.418182
| 106
| 0.689382
|
05034eb27325773e47de244069a7be32c66ff147
| 500
|
py
|
Python
|
lizard_ext/__init__.py
|
BjrnJhsn/lizard
|
5c3f02b67f72f70f4dbdbd2e97249e0ec20d40fa
|
[
"MIT"
] | 1,255
|
2015-01-07T20:24:45.000Z
|
2022-03-31T02:39:50.000Z
|
lizard_ext/__init__.py
|
BjrnJhsn/lizard
|
5c3f02b67f72f70f4dbdbd2e97249e0ec20d40fa
|
[
"MIT"
] | 293
|
2015-01-05T14:31:16.000Z
|
2022-03-24T18:12:16.000Z
|
lizard_ext/__init__.py
|
sider/lizard
|
61ad3c1f9989280dfd4157c337e70e08174f7c34
|
[
"MIT"
] | 217
|
2015-01-07T20:24:49.000Z
|
2022-03-30T19:20:21.000Z
|
""" extensions of lizard """
from __future__ import print_function
from .version import version
from .htmloutput import html_output
from .csvoutput import csv_output
from .xmloutput import xml_output
from .auto_open import auto_open, auto_read
def print_xml(results, options, _, total_factory):
print(xml_output(total_factory(list(results)), options.verbose))
return 0
def print_csv(results, options, _, total_factory):
csv_output(total_factory(list(results)), options)
return 0
| 26.315789
| 68
| 0.78
|
9464612c97a6eb858a89bd35875b63a0321dd073
| 4,259
|
py
|
Python
|
source/tests/configuration/setbuilder/test_weekday_setbuilder.py
|
dgomez407/aws-instance-scheduler
|
b61839c8b91ed5e4966f590d614d61bd7da35771
|
[
"MIT"
] | 1
|
2021-05-20T22:49:05.000Z
|
2021-05-20T22:49:05.000Z
|
source/tests/configuration/setbuilder/test_weekday_setbuilder.py
|
dgomez407/aws-instance-scheduler
|
b61839c8b91ed5e4966f590d614d61bd7da35771
|
[
"MIT"
] | null | null | null |
source/tests/configuration/setbuilder/test_weekday_setbuilder.py
|
dgomez407/aws-instance-scheduler
|
b61839c8b91ed5e4966f590d614d61bd7da35771
|
[
"MIT"
] | 1
|
2019-03-23T15:19:29.000Z
|
2019-03-23T15:19:29.000Z
|
import calendar
import unittest
from configuration.setbuilders.weekday_setbuilder import WeekdaySetBuilder
class TestWeekdaySetBuilder(unittest.TestCase):
def test_name(self):
for i, day_name in enumerate(calendar.day_abbr):
self.assertEqual(WeekdaySetBuilder().build(day_name), {i})
for i, day_name in enumerate(calendar.day_name):
self.assertEqual(WeekdaySetBuilder().build(day_name), {i})
def test_value(self):
for i in range(0, len(calendar.day_abbr) - 1):
self.assertEqual(WeekdaySetBuilder().build(str(i)), {i})
def test_L_wildcard(self):
for year in [2016, 2017]:
for month in range(1, 13):
weekday, days_in_month = calendar.monthrange(year, month)
for tested_on_day in range(1, days_in_month + 1):
builder = WeekdaySetBuilder(year=year, month=month, day=tested_on_day)
# test by name of weekday
day_num_l = calendar.day_abbr[weekday] + "L"
tested_by_name = builder.build(day_num_l)
# test by number of weekday
day_value_l = str(weekday) + "L"
tested_by_value = builder.build(day_value_l)
# everything before last week should be empty set
if tested_on_day <= (days_in_month - 7):
self.assertEquals(tested_by_name, set())
self.assertEquals(tested_by_value, set())
else:
# in last week the set should contain the day
self.assertEquals(tested_by_name, {weekday})
self.assertEquals(tested_by_value, {weekday})
                    # test if other weekdays on that day return an empty set
for d in range(0, 6):
if d != weekday:
day_num_l = calendar.day_abbr[d] + "L"
day_value_l = str(d) + "L"
self.assertEquals(builder.build(day_num_l), set())
self.assertEqual(builder.build(day_value_l), set())
weekday = (weekday + 1) % 7
def test_weekday_numbered(self):
for year in [2016, 2017]:
for month in range(1, 13):
weekday, days_in_month = calendar.monthrange(year, month)
for day in range(1, days_in_month + 1):
num = int((day - 1) / 7) + 1
builder = WeekdaySetBuilder(year=year, month=month, day=day)
tested_by_name = builder.build(calendar.day_abbr[weekday] + "#" + str(num))
self.assertEquals(tested_by_name, {weekday})
tested_by_value = builder.build(str(weekday) + "#" + str(num))
self.assertEquals(tested_by_value, {weekday})
for other_weekday in range(0, 7):
if other_weekday != weekday:
tested_by_name = builder.build(calendar.day_abbr[other_weekday] + "#" + str(num))
self.assertEquals(tested_by_name, set())
tested_by_value = builder.build(str(other_weekday) + "#" + str(num))
self.assertEquals(tested_by_value, set())
for other_num in range(1, 6):
if num != other_num:
tested_by_name = builder.build(calendar.day_abbr[weekday] + "#" + str(other_num))
self.assertEquals(tested_by_name, set())
tested_by_value = builder.build(str(weekday) + "#" + str(other_num))
self.assertEquals(tested_by_value, set())
weekday = (weekday + 1) % 7
def test_exceptions(self):
        # L needs year, month and day params
self.assertRaises(ValueError, WeekdaySetBuilder().build, "1L")
self.assertRaises(ValueError, WeekdaySetBuilder(year=2016, month=10, day=4).build, "0#6")
self.assertRaises(ValueError, WeekdaySetBuilder(year=2016, month=10, day=4).build, "0#0")
| 47.322222
| 109
| 0.540502
|
88f83b414bf61dd7eee5656d897dfce01ee20b34
| 8,288
|
py
|
Python
|
dtaidistance/util.py
|
yasirroni/dtaidistance
|
d67d7ccd5e67ecc99af389e788f979ee1b424162
|
[
"Apache-2.0"
] | 711
|
2017-02-07T07:24:58.000Z
|
2022-03-31T07:46:47.000Z
|
dtaidistance/util.py
|
yasirroni/dtaidistance
|
d67d7ccd5e67ecc99af389e788f979ee1b424162
|
[
"Apache-2.0"
] | 142
|
2018-04-09T10:36:11.000Z
|
2022-03-31T11:30:26.000Z
|
dtaidistance/util.py
|
yasirroni/dtaidistance
|
d67d7ccd5e67ecc99af389e788f979ee1b424162
|
[
"Apache-2.0"
] | 155
|
2017-06-01T08:37:45.000Z
|
2022-03-23T08:50:13.000Z
|
# -*- coding: UTF-8 -*-
"""
dtaidistance.util
~~~~~~~~~~~~~~~~~
Utility functions for DTAIDistance.
:author: Wannes Meert
:copyright: Copyright 2017-2018 KU Leuven, DTAI Research Group.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import os
import sys
import csv
import logging
from array import array
from pathlib import Path
import tempfile
try:
import numpy as np
except ImportError:
np = None
try:
from . import dtw_cc
except ImportError:
dtw_cc = None
try:
from . import dtw_cc_omp
except ImportError:
dtw_cc_omp = None
try:
from . import dtw_cc_numpy
except ImportError:
dtw_cc_numpy = None
logger = logging.getLogger("be.kuleuven.dtai.distance")
dtaidistance_dir = os.path.abspath(os.path.dirname(__file__))
def prepare_directory(directory=None):
"""Prepare the given directory, create it if necessary.
If no directory is given, a new directory will be created in the system's temp directory.
"""
if directory is not None:
directory = Path(directory)
if not directory.exists():
directory.mkdir(parents=True)
logger.debug("Using directory: {}".format(directory))
return Path(directory)
directory = tempfile.mkdtemp(prefix="dtaidistance_")
logger.debug("Using directory: {}".format(directory))
return Path(directory)
def read_substitution_matrix(file):
"""Read substitution matrix from file.
Comments starting with # and newlines are allowed anywhere
in the file.
:return: A dictionary mapping tuples of symbols to their weight.
"""
def strip_comments(reader):
for line in reader:
if not line.rstrip() or line[0] == '#':
continue
yield line.rstrip()
matrix = dict()
with open(file) as f:
reader = csv.reader(strip_comments(f), delimiter=" ", skipinitialspace=True)
line = next(reader)
idx = {i: symbol for i, symbol in enumerate(line)}
for line in reader:
symbol = line[0]
for j, value in enumerate(line[1:]):
matrix[(idx[j], symbol)] = float(value)
return matrix
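# Hedged, self-contained illustration of the format parsed above: the first
# non-comment row lists the column symbols, each following row starts with a
# row symbol, and every cell becomes matrix[(column, row)] -> float. The
# two-symbol alphabet is invented for the example.
def _example_substitution_matrix():
    content = (
        "# toy substitution matrix\n"
        "A B\n"
        "A 1 -1\n"
        "B -1 1\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
        fh.write(content)
    return read_substitution_matrix(fh.name)
    # -> {('A', 'A'): 1.0, ('B', 'A'): -1.0, ('A', 'B'): -1.0, ('B', 'B'): 1.0}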
class SeriesContainer:
def __init__(self, series, support_ndim=True):
"""Container for a list of series.
This wrapper class knows how to deal with multiple types of datastructures to represent
a list of sequences:
- List[array.array]
- List[numpy.array]
- List[List]
- numpy.array
- numpy.matrix
When using the C-based extensions, the data is automatically verified and converted.
"""
self.support_ndim = support_ndim
self.detected_ndim = False
if isinstance(series, SeriesContainer):
self.series = series.series
elif np is not None and isinstance(series, np.ndarray):
            # A matrix always returns a 2D array, even if you select one row (to be
            # consistent and always be a matrix datastructure). The methods in this
            # toolbox expect a 1D array, so we need to convert to a 1D or 2D array;
            # this is taken care of by asarray.
self.series = np.asarray(series, order="C")
if self.series.ndim > 2:
if not self.support_ndim:
raise Exception(f'N-dimensional series are not supported '
f'(series.ndim = {self.series.ndim})')
else:
self.detected_ndim = True
elif type(series) == set or type(series) == tuple or type(series) == list:
self.series = list(series)
if np is not None and isinstance(self.series[0], np.ndarray):
if self.series[0].ndim > 1:
if not self.support_ndim:
raise Exception(f'N-dimensional series are not supported '
f'(series[0].ndim = {self.series[0].ndim})')
else:
self.detected_ndim = True
else:
self.series = series
def c_data_compat(self):
"""Return a datastructure that the C-component knows how to handle.
The method tries to avoid copying or reallocating memory.
:return: Either a list of buffers or a two-dimensional buffer. The
buffers are guaranteed to be C-contiguous and can thus be used
as regular pointer-based arrays in C.
"""
if dtw_cc is None:
raise Exception('C library not loaded')
if type(self.series) == list:
for i in range(len(self.series)):
serie = self.series[i]
if np is not None and isinstance(serie, np.ndarray):
if not self.support_ndim and serie.ndim != 1:
raise Exception(f'N-dimensional arrays are not supported (serie.ndim = {serie.ndim})')
if not serie.flags.c_contiguous:
serie = np.asarray(serie, order="C")
self.series[i] = serie
elif isinstance(serie, array):
pass
else:
raise Exception(
"Type of series not supported, "
"expected numpy.array or array.array but got {}".format(
type(serie)
)
)
return dtw_cc.dtw_series_from_data(self.series)
elif np is not None and isinstance(self.series, np.ndarray):
if not self.series.flags.c_contiguous:
logger.warning("Numpy array not C contiguous, copying data.")
self.series = self.series.copy(order="C")
if not self.support_ndim and self.series.ndim > 2:
raise Exception(f'N-dimensional series are not supported (series.ndim = {self.series.ndim})')
if dtw_cc_numpy is None:
logger.warning("DTAIDistance C-extension for Numpy is not available. Proceeding anyway.")
return dtw_cc.dtw_series_from_data(self.series)
elif len(self.series.shape) == 3:
return dtw_cc_numpy.dtw_series_from_numpy_ndim(self.series)
else:
return dtw_cc_numpy.dtw_series_from_numpy(self.series)
return dtw_cc.dtw_series_from_data(self.series)
def get_max_y(self):
max_y = 0
if isinstance(self.series, np.ndarray) and len(self.series.shape) == 2:
max_y = max(np.max(self.series), abs(np.min(self.series)))
else:
for serie in self.series:
max_y = max(max_y, np.max(serie), abs(np.min(serie)))
return max_y
def get_max_length(self):
max_length = 0
if isinstance(self.series, np.ndarray) and len(self.series.shape) == 2:
max_length = self.series.shape[1]
else:
for serie in self.series:
max_length = max(max_length, len(serie))
return max_length
def get_avg_length(self):
max_length = 0
if isinstance(self.series, np.ndarray) and len(self.series.shape) == 2:
max_length = self.series.shape[1]
else:
for serie in self.series:
max_length += len(serie)
max_length /= len(self.series)
return max_length
def __getitem__(self, item):
return self.series[item]
def __len__(self):
return len(self.series)
def __str__(self):
return "SeriesContainer:\n{}".format(self.series)
@staticmethod
def wrap(series, support_ndim=True):
if isinstance(series, SeriesContainer):
series.support_ndim = support_ndim
return series
return SeriesContainer(series, support_ndim=support_ndim)
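# Hedged illustration of the container above (assumes numpy is installed; the
# two short series are invented):
def _example_series_container():
    import numpy as np
    series = SeriesContainer.wrap([np.array([0.0, 1.0, 2.0]),
                                   np.array([0.0, 2.0, 4.0, 6.0])])
    print(len(series))               # 2
    print(series.get_max_length())   # 4
    print(series[1])                 # [0. 2. 4. 6.]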
def recompile():
import subprocess as sp
sp.run([sys.executable, "setup.py", "build_ext", "--inplace"], cwd=dtaidistance_dir)
def argmin(a):
imin, vmin = 0, float("inf")
for i, v in enumerate(a):
if v < vmin:
imin, vmin = i, v
return imin
def argmax(a):
imax, vmax = 0, float("-inf")
for i, v in enumerate(a):
if v > vmax:
imax, vmax = i, v
return imax
| 34.247934
| 110
| 0.591578
|
3e778ae26944186922ff3f88bd9663efe1eccfc2
| 3,025
|
py
|
Python
|
tests/test_to_cupyd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | 3
|
2020-06-22T20:59:14.000Z
|
2021-04-09T21:24:45.000Z
|
tests/test_to_cupyd.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_to_cupyd.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | 1
|
2020-06-22T19:22:59.000Z
|
2020-06-22T19:22:59.000Z
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import skipUnless
import numpy as np
import torch
from monai.transforms import ToCupyd
from monai.utils import optional_import
from tests.utils import HAS_CUPY, skip_if_no_cuda
cp, _ = optional_import("cupy")
@skipUnless(HAS_CUPY, "CuPy is required.")
class TestToCupyd(unittest.TestCase):
def test_cupy_input(self):
test_data = cp.array([[1, 2], [3, 4]])
test_data = cp.rot90(test_data)
self.assertFalse(test_data.flags["C_CONTIGUOUS"])
result = ToCupyd(keys="img")({"img": test_data})["img"]
self.assertTrue(isinstance(result, cp.ndarray))
self.assertTrue(result.flags["C_CONTIGUOUS"])
cp.testing.assert_allclose(result, test_data)
def test_numpy_input(self):
test_data = np.array([[1, 2], [3, 4]])
test_data = np.rot90(test_data)
self.assertFalse(test_data.flags["C_CONTIGUOUS"])
result = ToCupyd(keys="img")({"img": test_data})["img"]
self.assertTrue(isinstance(result, cp.ndarray))
self.assertTrue(result.flags["C_CONTIGUOUS"])
cp.testing.assert_allclose(result, test_data)
def test_tensor_input(self):
test_data = torch.tensor([[1, 2], [3, 4]])
test_data = test_data.rot90()
self.assertFalse(test_data.is_contiguous())
result = ToCupyd(keys="img")({"img": test_data})["img"]
self.assertTrue(isinstance(result, cp.ndarray))
self.assertTrue(result.flags["C_CONTIGUOUS"])
cp.testing.assert_allclose(result, test_data.numpy())
@skip_if_no_cuda
def test_tensor_cuda_input(self):
test_data = torch.tensor([[1, 2], [3, 4]]).cuda()
test_data = test_data.rot90()
self.assertFalse(test_data.is_contiguous())
result = ToCupyd(keys="img")({"img": test_data})["img"]
self.assertTrue(isinstance(result, cp.ndarray))
self.assertTrue(result.flags["C_CONTIGUOUS"])
cp.testing.assert_allclose(result, test_data.cpu().numpy())
def test_list_tuple(self):
test_data = [[1, 2], [3, 4]]
result = ToCupyd(keys="img", wrap_sequence=True)({"img": test_data})["img"]
cp.testing.assert_allclose(result, cp.asarray(test_data))
test_data = ((1, 2), (3, 4))
result = ToCupyd(keys="img", wrap_sequence=True)({"img": test_data})["img"]
cp.testing.assert_allclose(result, cp.asarray(test_data))
if __name__ == "__main__":
unittest.main()
| 40.333333
| 83
| 0.678678
|
27e00f521117dce9e1db61fc2c25ee3530f61b88
| 12,947
|
py
|
Python
|
lib/doekbase/data_api/interactive/display.py
|
scanon/data_api2
|
f453a8e544cb4052feb56f4cf77ba79122aeedf8
|
[
"MIT"
] | null | null | null |
lib/doekbase/data_api/interactive/display.py
|
scanon/data_api2
|
f453a8e544cb4052feb56f4cf77ba79122aeedf8
|
[
"MIT"
] | null | null | null |
lib/doekbase/data_api/interactive/display.py
|
scanon/data_api2
|
f453a8e544cb4052feb56f4cf77ba79122aeedf8
|
[
"MIT"
] | null | null | null |
"""
Objects for displaying the results in the IPython notebook.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/1/15'
# Stdlib
import logging
# Third-party
from IPython.display import display
import matplotlib.pyplot as plt
import pandas as pd
# Qgrid table display
try:
import qgrid
qgrid.nbinstall()
except ImportError:
qgrid = None
from jinja2 import Template
# Seaborn graphing
try:
import seaborn as sns
sns.set_style("whitegrid")
except ImportError:
sns = None
# Local
from doekbase.data_api.util import get_logger, log_start, log_end
_log = get_logger('doekbase.data_api.display')
_nbviewer = False
def nbviewer_mode(value=None):
"""Get/set the global nbviewer-friendly mode.
This is currently used to tell qgrid where to get
its JavaScript from (local or a CDN).
"""
global _nbviewer
if value is not None:
_nbviewer = bool(value)
return _nbviewer
class Table(pd.DataFrame):
"""Create a Table from the input data.
This is a thin wrapper around the Pandas DataFrame object.
"""
    def _ipython_display_(self):
        if qgrid:
            return qgrid.show_grid(self, remote_js=nbviewer_mode())
        else:
            return display(self)
class Contigs(Table):
def __init__(self, contigs):
"""Create contigset from list of strings.
Args:
contigs: List of strings for contigs
"""
Table.__init__(self, {'ids': contigs})
class TemplateMixin(object):
template = ''
def __init__(self):
self._template = Template(self.template)
def render(self, *args, **kwargs):
return self._template.render(*args, **kwargs)
class Classification(TemplateMixin):
"""Taxonomic classification.
Attributes:
taxon (TaxonAPI): base taxon
name (str): Scientific name
children (list of TaxonAPI): List of TaxonAPI objects
        parents (list of TaxonAPI): list of parent TaxonAPI objects
"""
template = '''{% for name in classification %}
<span style="margin-left: {{ loop.index0 * 10 }}px">
<span style="font-size: 50%">></span> {{ name }}
</span><br>{% endfor %}'''
def __init__(self, obj):
"""Create from a taxon.
Args:
obj: TaxonAPI object or object with `get_taxon`.
"""
TemplateMixin.__init__(self)
self.taxon = obj.get_taxon() if hasattr(obj, 'get_taxon') else obj
self.classification = self.taxon.get_scientific_lineage().split(';')
self.name = self.taxon.get_scientific_name()
# self.children = self.taxon.get_children() or []
# tx, self.parents = self.taxon, []
# while tx:
# tx = tx.get_parent()
# if tx:
# self.parents.insert(tx.get_scientific_name(), 0)
# self.classification = self.parents + [self.name] + [
# child.get_scientific_name() for child in self.children]
def _repr_html_(self):
return self.render(classification=self.classification)
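# Hedged illustration of Classification with a minimal stand-in object (a real
# TaxonAPI would normally be passed in; the lineage below is invented):
def _example_classification():
    class FakeTaxon(object):
        def get_scientific_lineage(self):
            return "Bacteria;Proteobacteria;Escherichia"

        def get_scientific_name(self):
            return "Escherichia coli"

    return Classification(FakeTaxon()).classification
    # -> ['Bacteria', 'Proteobacteria', 'Escherichia']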
class Organism(TemplateMixin):
"""Summary of an organism as per ENSEMBL page, from
a TaxonAPI.
Attributes:
taxon (TaxonAPI): Taxon with info for organism
"""
template = '''<b>Taxonomy ID</b>: {{taxon.get_taxonomic_id()}}<br>
<b>Name</b>: {{taxon.get_scientific_name()}}<br>
<b>Aliases</b>:<br>
{% for a in taxon.get_aliases() %}
{{ a }}<br>
{% endfor %}
<b>Classification</b>:<br>''' + \
Classification.template
def __init__(self, obj):
"""Create from an API object.
Args:
obj: TaxonAPI object or object with `get_taxon`.
"""
TemplateMixin.__init__(self)
self.taxon = obj.get_taxon() if hasattr(obj, 'get_taxon') else obj
def _repr_html_(self):
if self.taxon is None:
return None
classf = Classification(self.taxon).classification
return self.render(classification=classf, taxon=self.taxon)
class AssemblyInfo(TemplateMixin):
"""Get information about assembly.
Attributes:
stats (dict): Statistics from `AssemblyAPI.get_stats()`
"""
template = '''<b>GC content</b>: {{gc_content}}<br>
<b>Total DNA sequence length</b>:{{dna_size}}<br>
<b>Number of contigs</b>:{{num_contigs}}'''
def __init__(self, obj):
"""Create assembly info.
Args:
obj: AssemblyAPI or object with `get_assembly` method.
"""
TemplateMixin.__init__(self)
if hasattr(obj, 'get_assembly'):
self.assembly = obj.get_assembly()
else:
self.assembly = obj
self.stats = self.assembly.get_stats()
def _repr_html_(self):
return self.render(self.stats)
class FeatureStats(Table):
"""Feature information for genomes
"""
def __init__(self, ga):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
data = []
for feature in ga.get_feature_types(): # all feature types
count = 0
# get lists of positions for each feature_id
feature_id_lists = ga.get_feature_ids_by_type([feature])
for fi, values in feature_id_lists.items():
count += len(values)
data.append((feature, count))
Table.__init__(self, data, columns=('feature_type', 'count'))
class FeaturePositions(Table):
"""The position (and ID and type) of features in the genome.
"""
def __init__(self, ga):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
data = self._get_features(ga)
Table.__init__(self, data, columns=('type', 'id', 'start', 'len', 'dir'))
def _get_features(self, ga):
"This should probably move into genome_annotation module"
from doekbase.data_api.object import ObjectAPI
fcr = 'feature_container_references'
refs = ga.get_data_subset(path_list=[fcr])[fcr]
result = []
for ref in refs.values():
obj = ObjectAPI(ga.services, ref) # fetch data
features = obj.get_data()['features']
for feat_id in features.keys(): # iterate features
ftype = features[feat_id]['type']
for loc in features[feat_id]['locations']:
                    # build an output row and add it to the result
row = (ftype, feat_id, loc[1], loc[3], loc[2])
result.append(row)
return result
def stripplot(self):
"""Make a 'stripplot' of all feature positions.
Requires the 'seaborn' library
"""
if sns is None:
raise NotImplementedError('Requires the "seaborn" library. See: '
'https://github.com/mwaskom/seaborn')
ax = sns.stripplot(x='start', y='type', data=self)
# get rid of spurious negative tick
ax.set_xlim(0, ax.get_xlim()[1])
return ax
class ProteinStats(Table):
"""Various statistics for proteins.
"""
STATS_LENGTH = 'length'
def __init__(self, ga, stats=[STATS_LENGTH]):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
proteins = ga.get_proteins()
data = {}
if self.STATS_LENGTH in stats:
data[self.STATS_LENGTH] = [
len(v['amino_acid_sequence'])
for v in proteins.values()]
Table.__init__(self, data)
def plot_protein_lengths(self):
return self.plot(x=self.STATS_LENGTH, kind='hist')
class GenomeSummary(TemplateMixin):
"""Summary of a GenomeAnnotation.
Attributes:
taxon (dict): Information about the taxonomic type
        assembly (dict): Information about the contigs in the assembly
annotation (dict): Information about the assembly
data (dict): All the information as a single dict with the attributes
listed above as top-level keys.
"""
template = '<h3>Genome Summary</h3>'+ Organism.template
def __init__(self, ga, taxons=True, assembly=True, annotation=True):
"""Create new summary from GenomeAnnotation.
Args:
ga (GenomeAnnotation): input object
taxons: If False, do not retrieve taxons
assembly: If False, do not retrieve assembly
annotation: If False, do not retrieve annotation
"""
if not hasattr(ga, 'get_taxon') or not hasattr(ga, 'get_assembly'):
raise TypeError('{} is not a recognized GenomeAnnotation type.'
.format(type(ga)))
self.data = { 'taxon': {}, 'assembly': {}, 'annotation': {}}
if taxons:
self.data['taxon'] = self._get_taxon(ga)
if assembly:
self.data['assembly'] = self._get_assembly(ga)
if annotation:
self.data['annotation'] = self._get_annotation(ga)
self.ga = ga
self._set_attrs()
def _set_attrs(self):
"""Set attributes for top-level keys"""
for key, value in self.data.items():
setattr(self, key, value)
TemplateMixin.__init__(self)
@staticmethod
def _get_taxon(ga):
t0 = log_start(_log, 'get_taxon')
try:
taxon = ga.get_taxon()
except Exception as err:
raise RuntimeError('Cannot get taxon: {}'.format(err))
txn = { k: getattr(taxon, 'get_' + k)()
for k in ('taxonomic_id', 'kingdom', 'domain',
'genetic_code', 'scientific_name', 'aliases',
'scientific_lineage')}
txn['lineage_list'] = txn['scientific_lineage'].split(';')
log_end(_log, t0, 'get_taxon')
return txn
@staticmethod
def _get_assembly(ga):
t0 = log_start(_log, 'get_assembly')
try:
assembly = ga.get_assembly()
except Exception as err:
raise RuntimeError('Cannot get assembly: {}'.format(err))
asy = {
k1: getattr(assembly, 'get_' + k2)()
for k1, k2 in (
('number_of_contigs', 'number_contigs'),
('total_length', 'dna_size'),
('total_gc_content', 'gc_content'),
('contig_length', 'contig_lengths'),
('contig_gc_content', 'contig_gc_content')
)}
log_end(_log, t0, 'get_assembly')
return asy
@staticmethod
def _get_annotation(ga):
t0 = log_start(_log, 'get_annotation')
try:
feature_types = ga.get_feature_types()
except Exception as err:
raise RuntimeError('Cannot get feature_types: {}'.format(err))
ann = { 'feature_' + k: getattr(ga, 'get_feature_' + k)(feature_types)
for k in ('type_descriptions', 'type_counts')}
ann['feature_types'] = feature_types
log_end(_log, t0, 'get_annotation')
return ann
def summary_plots(self):
"""Show some plots summarizing the information in the Genome.
"""
# First plot: feature types and counts
        # the number of populated sections determines how many subplots we need
        n = sum(map(bool, self.data.values()))
i = 1
plt.close()
if self.annotation:
plt.subplot(n, 1, i)
self._plot_feature_counts()
i += 1
# Second plot
if self.assembly:
plt.subplot(n, 1, i)
self._plot_contig_lengths()
i += 1
# Third plot
if self.assembly:
plt.subplot(n, 1, i)
self._plot_contig_gc()
i += 1
plt.show()
def _plot_feature_counts(self):
d = pd.DataFrame({'Feature Type': self.annotation['feature_types'],
'Count': self.annotation['feature_type_counts']})
ax = sns.barplot(x='Count', y='Feature Type', orient='h', data=d)
        ax.set_title('Feature type counts from {} to {}'.format(min(d['Count']),
max(d['Count'])))
def _plot_contig_lengths(self):
vals = pd.Series(self.assembly['contig_length'].values(),
name='Sequence length (bp)')
ax = sns.distplot(vals)
ax.set_title('Contig lengths from {} to {}'.format(
vals.min(), vals.max()))
def _plot_contig_gc(self):
gc = self.assembly['contig_gc_content'].values()
gcp, ctg = 'GC percent', 'Contigs'
d = pd.DataFrame({gcp: [x*100.0 for x in sorted(gc)],
ctg: range(1, len(gc) + 1)})
ax = sns.factorplot(x=gcp, y=ctg, data=d)
#ax.set_title("Contig {} from {.2f} to {.2f}"
# .format(gcp, min(gc), max(gc)))
return ax
def _repr_html_(self):
self.summary_plots()
classf = Classification(self.taxon).classification
return self.render(classification=classf, taxon=self.taxon)
| 33.028061
| 81
| 0.582529
|
853c2a04ad17a6f3cf9ac093302a8fa7f10cfc17
| 23,673
|
py
|
Python
|
test/selenium_tests/framework.py
|
ashvark/galaxy
|
71d315cf3692b4a42fb684395a519d53f360ec92
|
[
"CC-BY-3.0"
] | 1
|
2019-07-27T19:30:55.000Z
|
2019-07-27T19:30:55.000Z
|
test/selenium_tests/framework.py
|
ashvark/galaxy
|
71d315cf3692b4a42fb684395a519d53f360ec92
|
[
"CC-BY-3.0"
] | 4
|
2021-02-08T20:28:34.000Z
|
2022-03-02T02:52:55.000Z
|
test/selenium_tests/framework.py
|
ashvark/galaxy
|
71d315cf3692b4a42fb684395a519d53f360ec92
|
[
"CC-BY-3.0"
] | 1
|
2018-05-30T07:38:54.000Z
|
2018-05-30T07:38:54.000Z
|
"""Basis for Selenium test framework."""
from __future__ import absolute_import
from __future__ import print_function
import datetime
import json
import os
import traceback
import unittest
from functools import partial, wraps
import requests
from gxformat2 import (
convert_and_import_workflow,
ImporterGalaxyInterface,
)
try:
from pyvirtualdisplay import Display
except ImportError:
Display = None
from six.moves.urllib.parse import urljoin
from base import populators # noqa: I100,I202
from base.api import UsesApiTestCaseMixin # noqa: I100
from base.driver_util import classproperty, DEFAULT_WEB_HOST, get_ip_address # noqa: I100
from base.testcase import FunctionalTestCase # noqa: I100
from galaxy_selenium import ( # noqa: I100,I201
driver_factory,
)
from galaxy_selenium.navigates_galaxy import ( # noqa: I100
NavigatesGalaxy,
retry_during_transitions
)
from galaxy.util import asbool # noqa: I201
DEFAULT_TIMEOUT_MULTIPLIER = 1
DEFAULT_TEST_ERRORS_DIRECTORY = os.path.abspath("database/test_errors")
DEFAULT_SELENIUM_BROWSER = "auto"
DEFAULT_SELENIUM_REMOTE = False
DEFAULT_SELENIUM_REMOTE_PORT = "4444"
DEFAULT_SELENIUM_REMOTE_HOST = "127.0.0.1"
DEFAULT_SELENIUM_HEADLESS = "auto"
DEFAULT_ADMIN_USER = "test@bx.psu.edu"
DEFAULT_ADMIN_PASSWORD = "testpass"
TIMEOUT_MULTIPLIER = float(os.environ.get("GALAXY_TEST_TIMEOUT_MULTIPLIER", DEFAULT_TIMEOUT_MULTIPLIER))
GALAXY_TEST_ERRORS_DIRECTORY = os.environ.get("GALAXY_TEST_ERRORS_DIRECTORY", DEFAULT_TEST_ERRORS_DIRECTORY)
GALAXY_TEST_SCREENSHOTS_DIRECTORY = os.environ.get("GALAXY_TEST_SCREENSHOTS_DIRECTORY", None)
# Test browser can be ["CHROME", "FIREFOX", "OPERA", "PHANTOMJS"]
GALAXY_TEST_SELENIUM_BROWSER = os.environ.get("GALAXY_TEST_SELENIUM_BROWSER", DEFAULT_SELENIUM_BROWSER)
GALAXY_TEST_SELENIUM_REMOTE = os.environ.get("GALAXY_TEST_SELENIUM_REMOTE", DEFAULT_SELENIUM_REMOTE)
GALAXY_TEST_SELENIUM_REMOTE_PORT = os.environ.get("GALAXY_TEST_SELENIUM_REMOTE_PORT", DEFAULT_SELENIUM_REMOTE_PORT)
GALAXY_TEST_SELENIUM_REMOTE_HOST = os.environ.get("GALAXY_TEST_SELENIUM_REMOTE_HOST", DEFAULT_SELENIUM_REMOTE_HOST)
GALAXY_TEST_SELENIUM_HEADLESS = os.environ.get("GALAXY_TEST_SELENIUM_HEADLESS", DEFAULT_SELENIUM_HEADLESS)
GALAXY_TEST_EXTERNAL_FROM_SELENIUM = os.environ.get("GALAXY_TEST_EXTERNAL_FROM_SELENIUM", None)
# Auto-retry selenium tests this many times.
GALAXY_TEST_SELENIUM_RETRIES = int(os.environ.get("GALAXY_TEST_SELENIUM_RETRIES", "0"))
GALAXY_TEST_SELENIUM_USER_EMAIL = os.environ.get("GALAXY_TEST_SELENIUM_USER_EMAIL", None)
GALAXY_TEST_SELENIUM_USER_PASSWORD = os.environ.get("GALAXY_TEST_SELENIUM_USER_PASSWORD", None)
GALAXY_TEST_SELENIUM_ADMIN_USER_EMAIL = os.environ.get("GALAXY_TEST_SELENIUM_ADMIN_USER_EMAIL", DEFAULT_ADMIN_USER)
GALAXY_TEST_SELENIUM_ADMIN_USER_PASSWORD = os.environ.get("GALAXY_TEST_SELENIUM_ADMIN_USER_PASSWORD", DEFAULT_ADMIN_PASSWORD)
# JS code to execute in Galaxy JS console to setup localStorage of session for logging and
# logging "flatten" messages because it seems Selenium (with Chrome at least) only grabs
# the first argument to console.XXX when recovering the browser log.
SETUP_LOGGING_JS = '''
window.localStorage && window.localStorage.setItem("galaxy:debug", true);
window.localStorage && window.localStorage.setItem("galaxy:debug:flatten", true);
'''
try:
from nose.tools import nottest
except ImportError:
def nottest(x):
return x
def managed_history(f):
"""Ensure a Selenium test has a distinct, named history.
Cleanup the history after the job is complete as well unless
GALAXY_TEST_NO_CLEANUP is set in the environment.
"""
@wraps(f)
def func_wrapper(self, *args, **kwds):
self.home()
history_name = f.__name__ + datetime.datetime.now().strftime("%Y%m%d%H%M%s")
self.history_panel_create_new_with_name(history_name)
try:
f(self, *args, **kwds)
finally:
if "GALAXY_TEST_NO_CLEANUP" not in os.environ:
try:
current_history_id = self.current_history_id()
self.dataset_populator.cancel_history_jobs(current_history_id)
self.api_delete("histories/%s" % current_history_id)
except Exception:
print("Faild to cleanup managed history, selenium connection corrupted somehow?")
return func_wrapper
def dump_test_information(self, name_prefix):
if GALAXY_TEST_ERRORS_DIRECTORY and GALAXY_TEST_ERRORS_DIRECTORY != "0":
if not os.path.exists(GALAXY_TEST_ERRORS_DIRECTORY):
os.makedirs(GALAXY_TEST_ERRORS_DIRECTORY)
result_name = name_prefix + datetime.datetime.now().strftime("%Y%m%d%H%M%s")
target_directory = os.path.join(GALAXY_TEST_ERRORS_DIRECTORY, result_name)
def write_file(name, content, raw=False):
with open(os.path.join(target_directory, name), "wb") as buf:
buf.write(content.encode("utf-8") if not raw else content)
os.makedirs(target_directory)
write_file("stacktrace.txt", traceback.format_exc())
for snapshot in getattr(self, "snapshots", []):
snapshot.write_to_error_directory(write_file)
# Try to use the Selenium driver to recover more debug information, but don't
# throw an exception if the connection is broken in some way.
try:
self.driver.save_screenshot(os.path.join(target_directory, "last.png"))
write_file("page_source.txt", self.driver.page_source)
write_file("DOM.txt", self.driver.execute_script("return document.documentElement.outerHTML"))
except Exception:
print("Failed to use test driver to recover debug information from Selenium.")
write_file("selenium_exception.txt", traceback.format_exc())
for log_type in ["browser", "driver"]:
try:
full_log = self.driver.get_log(log_type)
trimmed_log = [l for l in full_log if l["level"] not in ["DEBUG", "INFO"]]
write_file("%s.log.json" % log_type, json.dumps(trimmed_log, indent=True))
write_file("%s.log.verbose.json" % log_type, json.dumps(full_log, indent=True))
except Exception:
continue
@nottest
def selenium_test(f):
test_name = f.__name__
@wraps(f)
def func_wrapper(self, *args, **kwds):
retry_attempts = 0
while True:
if retry_attempts > 0:
self.reset_driver_and_session()
try:
return f(self, *args, **kwds)
except unittest.SkipTest:
dump_test_information(self, test_name)
# Don't retry if we have purposely decided to skip the test.
raise
except Exception:
dump_test_information(self, test_name)
if retry_attempts < GALAXY_TEST_SELENIUM_RETRIES:
retry_attempts += 1
print("Test function [%s] threw an exception, retrying. Failed attempts - %s." % (test_name, retry_attempts))
else:
raise
return func_wrapper
retry_assertion_during_transitions = partial(retry_during_transitions, exception_check=lambda e: isinstance(e, AssertionError))
class TestSnapshot(object):
def __init__(self, driver, index, description):
self.screenshot_binary = driver.get_screenshot_as_png()
self.description = description
self.index = index
self.exc = traceback.format_exc()
self.stack = traceback.format_stack()
def write_to_error_directory(self, write_file_func):
prefix = "%d-%s" % (self.index, self.description)
write_file_func("%s-screenshot.png" % prefix, self.screenshot_binary, raw=True)
write_file_func("%s-traceback.txt" % prefix, self.exc)
write_file_func("%s-stack.txt" % prefix, str(self.stack))
class SeleniumTestCase(FunctionalTestCase, NavigatesGalaxy, UsesApiTestCaseMixin):
# If run one-off via nosetests, the next line ensures test
# tools and datatypes are used instead of configured tools.
framework_tool_and_types = True
# Override this in subclasses to ensure a user is logged in
# before each test. If GALAXY_TEST_SELENIUM_USER_EMAIL and
# GALAXY_TEST_SELENIUM_USER_PASSWORD are set these values
# will be used to login.
ensure_registered = False
requires_admin = False
def setUp(self):
super(SeleniumTestCase, self).setUp()
# Deal with the case when Galaxy has a different URL when being accessed by Selenium
# then when being accessed by local API calls.
if GALAXY_TEST_EXTERNAL_FROM_SELENIUM is not None:
self.target_url_from_selenium = GALAXY_TEST_EXTERNAL_FROM_SELENIUM
else:
self.target_url_from_selenium = self.url
self.snapshots = []
self.setup_driver_and_session()
if self.requires_admin and GALAXY_TEST_SELENIUM_ADMIN_USER_EMAIL == DEFAULT_ADMIN_USER:
self._setup_interactor()
self._setup_user(GALAXY_TEST_SELENIUM_ADMIN_USER_EMAIL)
self._try_setup_with_driver()
def _try_setup_with_driver(self):
try:
self.setup_with_driver()
except Exception:
dump_test_information(self, self.__class__.__name__ + "_setup")
raise
def setup_with_driver(self):
"""Override point that allows setting up data using self.driver and Selenium connection.
Overriding this instead of setUp will ensure debug data such as screenshots and stack traces
        are dumped if there are problems with the setup and it will be re-run on test retries.
"""
def tearDown(self):
exception = None
try:
super(SeleniumTestCase, self).tearDown()
except Exception as e:
exception = e
try:
self.tear_down_driver()
except Exception as e:
exception = e
if exception is not None:
raise exception
def snapshot(self, description):
"""Create a debug snapshot (DOM, screenshot, etc...) that is written out on tool failure.
This information will be automatically written to a per-test directory created for all
failed tests.
"""
self.snapshots.append(TestSnapshot(self.driver, len(self.snapshots), description))
def screenshot(self, label):
"""If GALAXY_TEST_SCREENSHOTS_DIRECTORY is set create a screenshot there named <label>.png.
Unlike the above "snapshot" feature, this will be written out regardless and not in a per-test
directory. The above method is used for debugging failures within a specific test. This method
        is more for creating a set of images to augment automated testing with manual human inspection
after a test or test suite has executed.
"""
target = self._screenshot_path(label)
if target is None:
return
self.driver.save_screenshot(target)
def write_screenshot_directory_file(self, label, content):
target = self._screenshot_path(label, ".txt")
if target is None:
return
with open(target, "w") as f:
f.write(content)
def _screenshot_path(self, label, extension=".png"):
if GALAXY_TEST_SCREENSHOTS_DIRECTORY is None:
return
if not os.path.exists(GALAXY_TEST_SCREENSHOTS_DIRECTORY):
os.makedirs(GALAXY_TEST_SCREENSHOTS_DIRECTORY)
target = os.path.join(GALAXY_TEST_SCREENSHOTS_DIRECTORY, label + extension)
copy = 1
while os.path.exists(target):
# Maybe previously a test re-run - keep the original.
target = os.path.join(GALAXY_TEST_SCREENSHOTS_DIRECTORY, "%s-%d%s" % (label, copy, extension))
copy += 1
return target
def reset_driver_and_session(self):
self.tear_down_driver()
self.setup_driver_and_session()
self._try_setup_with_driver()
def setup_driver_and_session(self):
self.display = driver_factory.virtual_display_if_enabled(use_virtual_display())
self.driver = get_driver()
# New workflow index page does not degrade well to smaller sizes, needed
# to increase this.
# Needed to up the height for paired list creator being taller in BS4 branch.
self.driver.set_window_size(1280, 1000)
self._setup_galaxy_logging()
if self.ensure_registered:
self.login()
def _setup_galaxy_logging(self):
self.home()
self.driver.execute_script(SETUP_LOGGING_JS)
def login(self):
if GALAXY_TEST_SELENIUM_USER_EMAIL:
assert GALAXY_TEST_SELENIUM_USER_PASSWORD, "If GALAXY_TEST_SELENIUM_USER_EMAIL is set, a password must be set also with GALAXY_TEST_SELENIUM_USER_PASSWORD"
self.home()
self.submit_login(
email=GALAXY_TEST_SELENIUM_USER_EMAIL,
password=GALAXY_TEST_SELENIUM_USER_PASSWORD,
assert_valid=True,
)
else:
self.register()
def tear_down_driver(self):
exception = None
try:
self.driver.close()
except Exception as e:
if "cannot kill Chrome" in str(e):
print("Ignoring likely harmless error in Selenium shutdown %s" % e)
else:
exception = e
try:
self.display.stop()
except Exception as e:
exception = e
if exception is not None:
raise exception
@classproperty
def default_web_host(cls):
return default_web_host_for_selenium_tests()
@property
def timeout_multiplier(self):
return TIMEOUT_MULTIPLIER
def build_url(self, url, for_selenium=True):
if for_selenium:
base = self.target_url_from_selenium
else:
base = self.url
return urljoin(base, url)
def assert_initial_history_panel_state_correct(self):
# Move into a TestsHistoryPanel mixin
unnamed_name = self.components.history_panel.new_name.text
name_element = self.history_panel_name_element()
assert name_element.is_displayed()
assert unnamed_name in name_element.text
initial_size_str = self.components.history_panel.new_size.text
size_selector = self.components.history_panel.size
size_text = size_selector.wait_for_text()
assert initial_size_str in size_text, "%s not in %s" % (initial_size_str, size_text)
self.components.history_panel.empty_message.wait_for_visible()
def admin_login(self):
self.home()
self.submit_login(
GALAXY_TEST_SELENIUM_ADMIN_USER_EMAIL,
GALAXY_TEST_SELENIUM_ADMIN_USER_PASSWORD
)
with self.main_panel():
self.assert_no_error_message()
@property
def dataset_populator(self):
return SeleniumSessionDatasetPopulator(self)
@property
def dataset_collection_populator(self):
return SeleniumSessionDatasetCollectionPopulator(self)
@property
def workflow_populator(self):
return SeleniumSessionWorkflowPopulator(self)
def workflow_upload_yaml_with_random_name(self, content, **kwds):
workflow_populator = self.workflow_populator
name = self._get_random_name()
workflow_populator.upload_yaml_workflow(content, name=name, **kwds)
return name
def ensure_visualization_available(self, hid, visualization_name):
"""Skip or fail a test if visualization for file doesn't appear.
Precondition: viz menu has been opened with history_panel_item_click_visualization_menu.
"""
visualization_names = self.history_panel_item_available_visualizations(hid)
if visualization_name not in visualization_names:
raise unittest.SkipTest("Skipping test, visualization [%s] doesn't appear to be configured." % visualization_name)
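# Hedged sketch of how the pieces above combine in a concrete test case. The
# helper calls in the body (perform_upload, get_filename,
# history_panel_wait_for_hid_ok) are assumptions based on NavigatesGalaxy
# conventions rather than guarantees of this module; __test__ = False keeps
# the example from being collected as a real test.
class _ExampleManagedHistoryTest(SeleniumTestCase):
    __test__ = False
    ensure_registered = True

    @selenium_test
    @managed_history
    def test_example_upload(self):
        self.perform_upload(self.get_filename("1.txt"))
        self.history_panel_wait_for_hid_ok(1)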
class SharedStateSeleniumTestCase(SeleniumTestCase):
"""This describes a class Selenium tests that setup class state for all tests.
This is a bit hacky because we are simulating class level initialization
with instance level methods. The problem is that super.setUp() works at
instance level. It might be worth considering having two variants of
SeleniumTestCase - one that initializes with the class and the other that
initializes with the instance but all the helpers are instance helpers.
"""
shared_state_initialized = False
shared_state_in_error = False
def setup_with_driver(self):
if not self.__class__.shared_state_initialized:
try:
self.setup_shared_state()
self.logout_if_needed()
except Exception:
self.__class__.shared_state_in_error = True
raise
finally:
self.__class__.shared_state_initialized = True
else:
if self.__class__.shared_state_in_error:
raise unittest.SkipTest("Skipping test, failed to initialize state previously.")
def setup_shared_state(self):
"""Override this to setup shared data for tests that gets initialized only once."""
class UsesHistoryItemAssertions(object):
def assert_item_peek_includes(self, hid, expected):
item_body = self.history_panel_item_component(hid=hid)
peek_text = item_body.peek.wait_for_text()
assert expected in peek_text
def assert_item_info_includes(self, hid, expected):
item_body = self.history_panel_item_component(hid=hid)
info_text = item_body.info.wait_for_text()
assert expected in info_text, "Failed to find expected info text [%s] in info [%s]" % (expected, info_text)
def assert_item_dbkey_displayed_as(self, hid, dbkey):
item_body = self.history_panel_item_component(hid=hid)
dbkey_text = item_body.dbkey.wait_for_text()
assert dbkey in dbkey_text
def assert_item_summary_includes(self, hid, expected_text):
item_body = self.history_panel_item_component(hid=hid)
summary_text = item_body.summary.wait_for_text()
assert expected_text in summary_text, "Expected summary [%s] not found in [%s]." % (expected_text, summary_text)
def assert_item_name(self, hid, expected_name):
item_body = self.history_panel_item_component(hid=hid)
name = item_body.name.wait_for_text()
assert name == expected_name, name
def assert_item_hid_text(self, hid):
# Check the text HID matches HID returned from API.
item_body = self.history_panel_item_component(hid=hid)
hid_text = item_body.hid.wait_for_text()
assert hid_text == str(hid), hid_text
def default_web_host_for_selenium_tests():
if asbool(GALAXY_TEST_SELENIUM_REMOTE):
try:
dev_ip = get_ip_address('docker0')
return dev_ip
except IOError:
return DEFAULT_WEB_HOST
else:
return DEFAULT_WEB_HOST
def get_driver():
if asbool(GALAXY_TEST_SELENIUM_REMOTE):
return get_remote_driver()
else:
return get_local_driver()
def headless_selenium():
if asbool(GALAXY_TEST_SELENIUM_REMOTE):
return False
if GALAXY_TEST_SELENIUM_HEADLESS == "auto":
if driver_factory.is_virtual_display_available() or driver_factory.get_local_browser(GALAXY_TEST_SELENIUM_BROWSER) == "CHROME":
return True
else:
return False
else:
return asbool(GALAXY_TEST_SELENIUM_HEADLESS)
def use_virtual_display():
if asbool(GALAXY_TEST_SELENIUM_REMOTE):
return False
if GALAXY_TEST_SELENIUM_HEADLESS == "auto":
if driver_factory.is_virtual_display_available() and not driver_factory.get_local_browser(GALAXY_TEST_SELENIUM_BROWSER) == "CHROME":
return True
else:
return False
else:
return asbool(GALAXY_TEST_SELENIUM_HEADLESS)
def get_local_driver():
return driver_factory.get_local_driver(
GALAXY_TEST_SELENIUM_BROWSER,
headless_selenium()
)
def get_remote_driver():
return driver_factory.get_remote_driver(
host=GALAXY_TEST_SELENIUM_REMOTE_HOST,
port=GALAXY_TEST_SELENIUM_REMOTE_PORT,
browser=GALAXY_TEST_SELENIUM_BROWSER,
)
class SeleniumSessionGetPostMixin(object):
"""Mixin for adapting Galaxy testing populators helpers to Selenium session backed bioblend."""
def _get(self, route, data={}):
full_url = self.selenium_test_case.build_url("api/" + route, for_selenium=False)
response = requests.get(full_url, data=data, cookies=self.selenium_test_case.selenium_to_requests_cookies())
return response
def _post(self, route, data=None, files=None):
full_url = self.selenium_test_case.build_url("api/" + route, for_selenium=False)
if data is None:
data = {}
if files is None:
files = data.get("__files", None)
if files is not None:
del data["__files"]
response = requests.post(full_url, data=data, cookies=self.selenium_test_case.selenium_to_requests_cookies(), files=files)
return response
def _delete(self, route, data={}):
full_url = self.selenium_test_case.build_url("api/" + route, for_selenium=False)
response = requests.delete(full_url, data=data, cookies=self.selenium_test_case.selenium_to_requests_cookies())
return response
def __url(self, route):
return self._gi.url + "/" + route
class SeleniumSessionDatasetPopulator(populators.BaseDatasetPopulator, SeleniumSessionGetPostMixin):
"""Implementation of BaseDatasetPopulator backed by bioblend."""
def __init__(self, selenium_test_case):
"""Construct a dataset populator from a bioblend GalaxyInstance."""
self.selenium_test_case = selenium_test_case
class SeleniumSessionDatasetCollectionPopulator(populators.BaseDatasetCollectionPopulator, SeleniumSessionGetPostMixin):
"""Implementation of BaseDatasetCollectionPopulator backed by bioblend."""
def __init__(self, selenium_test_case):
"""Construct a dataset collection populator from a bioblend GalaxyInstance."""
self.selenium_test_case = selenium_test_case
self.dataset_populator = SeleniumSessionDatasetPopulator(selenium_test_case)
def _create_collection(self, payload):
create_response = self._post("dataset_collections", data=payload)
return create_response
class SeleniumSessionWorkflowPopulator(populators.BaseWorkflowPopulator, SeleniumSessionGetPostMixin, ImporterGalaxyInterface):
"""Implementation of BaseWorkflowPopulator backed by bioblend."""
def __init__(self, selenium_test_case):
"""Construct a workflow populator from a bioblend GalaxyInstance."""
self.selenium_test_case = selenium_test_case
self.dataset_populator = SeleniumSessionDatasetPopulator(selenium_test_case)
def import_workflow(self, workflow, **kwds):
workflow_str = json.dumps(workflow, indent=4)
data = {
'workflow': workflow_str,
}
data.update(**kwds)
upload_response = self._post("workflows", data=data)
assert upload_response.status_code == 200
return upload_response.json()
def upload_yaml_workflow(self, has_yaml, **kwds):
workflow = convert_and_import_workflow(has_yaml, galaxy_interface=self, **kwds)
return workflow["id"]
| 39.193709
| 167
| 0.698348
|
aa173890ecc41148016348a9fd3e24588ab5b9ec
| 296
|
py
|
Python
|
Python/code case/code case 130.py
|
amazing-2020/pdf
|
8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d
|
[
"Apache-2.0"
] | 3
|
2021-01-01T13:08:24.000Z
|
2021-02-03T09:27:56.000Z
|
Python/code case/code case 130.py
|
amazing-2020/pdf
|
8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d
|
[
"Apache-2.0"
] | null | null | null |
Python/code case/code case 130.py
|
amazing-2020/pdf
|
8cd3f5f510a1c1ed89b51b1354f4f8c000c5b24d
|
[
"Apache-2.0"
] | null | null | null |
def countX(lst, x):
    """Count occurrences of x in lst by explicit iteration."""
    count = 0
    for ele in lst:
        if ele == x:
            count += 1
    return count
def countX2(lst, x):
    """Count occurrences of x using the built-in list.count."""
    return lst.count(x)
lst = [1, 21, 2, 12, 21, 21, 21, 21, 1, 1, 21, 545, 44, 45, 78, 9, 89, 89, 8]
x = 89
print(countX(lst, x))
print(countX2(lst, x))
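# Editorial check (not part of the original snippet): both approaches agree; for the sample
# list above, 89 appears twice, so both calls print 2.
assert countX([1, 2, 2, 3], 2) == countX2([1, 2, 2, 3], 2) == 2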
| 17.411765
| 77
| 0.513514
|
21184477d9c003062ef4f6c1a54f9b980eb4cdeb
| 14,157
|
py
|
Python
|
resolwe/storage/connectors/transfer.py
|
OtonicarJan/resolwe
|
5ff4a4b8f87606ad6bf9df9e5c51ba283bd885c9
|
[
"Apache-2.0"
] | 27
|
2015-12-07T18:29:12.000Z
|
2022-03-16T08:01:47.000Z
|
resolwe/storage/connectors/transfer.py
|
OtonicarJan/resolwe
|
5ff4a4b8f87606ad6bf9df9e5c51ba283bd885c9
|
[
"Apache-2.0"
] | 681
|
2015-12-01T11:52:24.000Z
|
2022-03-21T07:43:37.000Z
|
resolwe/storage/connectors/transfer.py
|
OtonicarJan/resolwe
|
5ff4a4b8f87606ad6bf9df9e5c51ba283bd885c9
|
[
"Apache-2.0"
] | 28
|
2015-12-01T08:32:57.000Z
|
2021-12-14T00:04:16.000Z
|
"""Data transfer between connectors."""
import concurrent.futures
import logging
from contextlib import suppress
from functools import partial
from pathlib import Path
from time import sleep
from typing import TYPE_CHECKING, Iterable, List, Union
import wrapt
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
from urllib3.exceptions import ProtocolError
from .baseconnector import BaseStorageConnector
from .circular_buffer import CircularBuffer
from .exceptions import DataTransferError
from .utils import paralelize
if TYPE_CHECKING:
from os import PathLike
try:
from google.api_core.exceptions import ServiceUnavailable
from google.resumable_media.common import DataCorruption
gcs_exceptions = [DataCorruption, ServiceUnavailable]
except ModuleNotFoundError:
gcs_exceptions = []
try:
from botocore.exceptions import ClientError
boto_exceptions = [ClientError]
except ModuleNotFoundError:
boto_exceptions = []
logger = logging.getLogger(__name__)
ERROR_MAX_RETRIES = 3
ERROR_TIMEOUT = 5 # In seconds.
transfer_exceptions = tuple(
boto_exceptions
+ gcs_exceptions
+ [RequestsConnectionError, ReadTimeout, ConnectionResetError, ProtocolError]
+ [DataTransferError]
)
@wrapt.decorator
def retry_on_transfer_error(wrapped, instance, args, kwargs):
"""Retry on tranfser error."""
for retry in range(1, ERROR_MAX_RETRIES + 1):
try:
return wrapped(*args, **kwargs)
except transfer_exceptions:
# Log the exception on retry for inspection.
if retry != ERROR_MAX_RETRIES:
logger.exception(
"Retry %d/%d got exception, will retry in %d seconds.",
retry,
ERROR_MAX_RETRIES,
ERROR_TIMEOUT,
)
sleep(ERROR_TIMEOUT)
# Raise exception when max retries are exceeded.
else:
logger.exception("Final retry got exception, re-raising it.")
raise
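# Editorial usage sketch (not part of the original module): the decorator can wrap any callable
# that may raise one of `transfer_exceptions`; the helper below is hypothetical.
@retry_on_transfer_error
def _example_get_with_retries(connector, url, stream):
    """Read `url` from `connector` into `stream`, retried on transient transfer errors."""
    return connector.get(url, stream)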
class Transfer:
"""Transfer data between two storage connectors using in-memory buffer."""
def __init__(
self,
from_connector: "BaseStorageConnector",
to_connector: "BaseStorageConnector",
):
"""Initialize transfer object."""
self.from_connector = from_connector
self.to_connector = to_connector
def pre_processing(self, url: Union[str, Path], objects: List[dict]):
"""Notify connectors that transfer is about to start.
        The connector is allowed to change names of the objects that are to be
        transferred. This allows us to do some pre-processing, like zipping all
        files into one and transferring that one.
        :param url: base url for file transfer.
        :param objects: list of objects to be transferred; their paths are
        relative with respect to the url.
"""
objects_to_transfer = self.from_connector.before_get(objects, url)
self.to_connector.before_push(objects_to_transfer, url)
return objects_to_transfer
def post_processing(self, url: Union[str, Path], objects: List[dict]):
"""Notify connectors that transfer is complete.
:param url: base url for file transfer.
        :param objects: the list of objects that were actually transferred. The
paths are relative with respect to the url.
"""
self.from_connector.after_get(objects, url)
objects_stored = self.to_connector.after_push(objects, url)
return objects_stored
def transfer_objects(
self, url: Union[str, Path], objects: List[dict], max_threads: int = 10
) -> List[dict]:
"""Transfer objects under the given URL.
Objects are read from from_connector and copied to to_connector.
:param url: the given URL to transfer from/to.
:param objects: the list of objects to transfer. Each object is
represented with the dictionary containing at least keys "path",
"size", "md5", "crc32c", "awss3etag", "chunk_size".
All values for key "path" must be relative with respect to the
argument url.
        :returns: the list of objects that were stored in the to_connector if
        it is different from the argument objects, or None.
"""
# Pre-processing.
try:
objects_to_transfer = self.pre_processing(url, objects)
except Exception:
logger.exception(
"Error in pre-processing while transfering data from url {}".format(url)
)
raise DataTransferError()
url = Path(url)
futures = paralelize(
objects=objects_to_transfer,
worker=partial(self.transfer_chunk, url),
max_threads=max_threads,
)
        # Check future results. This will re-raise any exception raised in
        # transfer_chunk.
if not all(future.result() for future in futures):
raise DataTransferError()
# Post-processing.
try:
objects_stored = self.post_processing(url, objects_to_transfer)
except Exception:
logger.exception(
"Error in post-processing while transfering data from url {}".format(
url
)
)
raise DataTransferError()
return objects_stored
def transfer_chunk(self, url: Path, objects: Iterable[dict]) -> bool:
"""Transfer a single chunk of objects.
When objects have properties `from_base_url` and `to_base_url` they
override the `url` argument.
:raises DataTransferError: on failure.
:returns: True on success.
"""
to_connector = self.to_connector.duplicate()
from_connector = self.from_connector.duplicate()
for entry in objects:
# Do not transfer directories.
if not entry["path"].endswith("/"):
if not self.transfer(
entry.get("from_base_url", url),
entry,
entry.get("to_base_url", url),
Path(entry["path"]),
from_connector,
to_connector,
):
raise DataTransferError()
return True
@retry_on_transfer_error
def transfer(
self,
from_base_url: Union[str, Path],
object_: dict,
to_base_url: Union[str, Path],
to_url: "PathLike[str]",
from_connector: "BaseStorageConnector" = None,
to_connector: "BaseStorageConnector" = None,
) -> bool:
"""Transfer single object between two storage connectors.
:param from_base_url: base url on from_connector.
:param object_: object to transfer. It must be a dictionary containing
at least keys "path", "md5", "crc32c", "size" and "awss3etag". It
can also contain key "chunk_size" that specifies a custom
chunk_size to use for upload / download.
:param to_base_url: base url on to_connector.
:param to_url: where to copy object. It is relative with respect to the
argument to_base_url.
:param from_connector: from connector, defaults to None. If None
duplicate of from_connector from the Transfer class instance is
used.
:param to_connector: to connector, defaults to None. If None
duplicate of to_connector from the Transfer class instance is
used.
:raises DataTransferError: on failure.
:returns: True on success.
"""
to_base_url = Path(to_base_url)
chunk_size = object_.get("chunk_size", BaseStorageConnector.CHUNK_SIZE)
# Duplicate connectors for thread safety.
to_connector = to_connector or self.to_connector.duplicate()
from_connector = from_connector or self.from_connector.duplicate()
from_url = Path(from_base_url) / object_["path"]
hashes = {type_: object_[type_] for type_ in ["md5", "crc32c", "awss3etag"]}
skip_final_hash_check = (
from_connector.get_ensures_data_integrity
and to_connector.put_ensures_data_integrity
)
if skip_final_hash_check:
            # When the final check is skipped, make sure that the input connector
            # hash equals the hash given by object_ (usually read from
            # the database). This ensures that the data was not modified.
hash_to_check = next(
hash for hash in from_connector.supported_hash if hash in hashes.keys()
)
from_connector_hash = from_connector.get_hash(from_url, hash_to_check)
expected_hash = object_[hash_to_check]
if expected_hash != from_connector_hash:
raise DataTransferError(
f"Connector {from_connector} has {from_connector_hash} stored "
f"as {from_connector_hash} hash for object "
f"{from_url}, expected {expected_hash}."
)
common_hash_type = next(
e for e in to_connector.supported_hash if e in hashes.keys()
)
from_hash = hashes[common_hash_type]
        # Check if a file with the correct hash already exists in to_connector.
to_hash = to_connector.get_hash(to_base_url / to_url, common_hash_type)
if from_hash == to_hash:
logger.debug(
"From: {}:{}".format(from_connector.name, from_url)
+ " to: {}:{}".format(to_connector.name, to_base_url / to_url)
+ " object exists with right hash, skipping."
)
return True
        # We have three possible ways of transferring the data:
        # - if from_connector supports streams then we open the stream and
        # transfer the data.
        # - if to_connector supports streams then we transfer the data from
        # from_connector directly to the opened stream.
        # - if neither supports streams then we use a buffer to transfer the data
        # from from_connector to to_connector.
if from_connector.can_open_stream:
stream = from_connector.open_stream(from_url, "rb")
to_connector.push(
stream, to_base_url / to_url, chunk_size=chunk_size, hashes=hashes
)
stream.close()
elif to_connector.can_open_stream:
stream = to_connector.open_stream(to_base_url / to_url, "wb")
from_connector.get(from_url, stream, chunk_size=chunk_size)
stream.close()
to_connector.set_hashes(to_base_url / to_url, hashes)
        # Otherwise create our own stream and use threads to transfer data.
else:
def future_done(stream_to_close, future):
stream_to_close.close()
if future.exception() is not None:
executor.shutdown(wait=False)
# The constant 1 is added to the object size to make sure the
# buffer_size is at least 1. Otherwise uploading files of size 0
# will cause a deadlock.
data_stream = CircularBuffer(
buffer_size=min(200 * 1024 * 1024, object_["size"] + 1)
)
with concurrent.futures.ThreadPoolExecutor() as executor:
download_task = executor.submit(
from_connector.get,
from_url,
data_stream,
chunk_size=chunk_size,
)
upload_task = executor.submit(
to_connector.push,
data_stream,
to_base_url / to_url,
chunk_size=chunk_size,
hashes=hashes,
)
download_task.add_done_callback(partial(future_done, data_stream))
futures = (download_task, upload_task)
                # If any of the transfer futures has raised an exception the
                # upload must be aborted and DataTransferError raised.
                #
                # When the transfer was a success we have to copy the server-side
                # computed hashes into the hashes given on the object. They may differ
                # from the locally computed ones since server-side encryption can be used,
                # which changes hashes (at least this happens with awss3etag when
                # SSE-KMS encryption is used on an S3 bucket).
if any(f.exception() is not None for f in futures):
# Log exceptions in threads to preserve original stack trace.
for f in futures:
try:
f.result()
except Exception:
logger.exception("Exception occured while transfering data")
                # Delete transferred data.
with suppress(Exception):
to_connector.delete(to_base_url, [to_url])
# Re-raise exception.
ex = [f.exception() for f in futures if f.exception() is not None]
messages = [str(e) for e in ex]
raise DataTransferError("\n\n".join(messages))
# Check hash of the uploaded object.
if not skip_final_hash_check:
to_hash = to_connector.get_hash(to_base_url / to_url, common_hash_type)
if from_hash != to_hash:
with suppress(Exception):
to_connector.delete(to_base_url, [to_url])
raise DataTransferError(
f"Hash {common_hash_type} does not match while transfering "
f"{from_url} -> {to_base_url/to_url}: using hash type "
f"{common_hash_type}: expected {from_hash}, got {to_hash}."
)
for hash_type in to_connector.refresh_hash_after_transfer:
hash = to_connector.get_hash(to_base_url / to_url, hash_type)
object_[hash_type] = hash
return True
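# Editorial usage sketch (not part of the original module): it assumes two already-configured
# connector instances and shows the shape of the object dictionaries transfer_objects expects;
# the path, digests and url below are hypothetical placeholders.
def _example_transfer(from_connector, to_connector):
    """Copy one hypothetical object between two connectors."""
    transfer = Transfer(from_connector, to_connector)
    objects = [
        {
            "path": "data/example.txt",  # relative to the url passed below
            "size": 11,
            "md5": "<md5 hex digest>",
            "crc32c": "<crc32c digest>",
            "awss3etag": "<etag>",
            "chunk_size": BaseStorageConnector.CHUNK_SIZE,
        }
    ]
    return transfer.transfer_objects("example-url", objects)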
| 39
| 88
| 0.614678
|
242c69eee02d2f1f2769571ea77c3953229c16bb
| 149
|
py
|
Python
|
scrapers/WIL-wiltshire/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 4
|
2018-10-17T13:30:08.000Z
|
2021-06-22T13:29:43.000Z
|
scrapers/WIL-wiltshire/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 46
|
2018-10-15T13:47:48.000Z
|
2022-03-23T10:26:18.000Z
|
scrapers/WIL-wiltshire/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 1
|
2018-10-15T13:36:03.000Z
|
2018-10-15T13:36:03.000Z
|
from lgsf.councillors.scrapers import ModGovCouncillorScraper
class Scraper(ModGovCouncillorScraper):
base_url = "http://cms.wiltshire.gov.uk"
| 24.833333
| 61
| 0.805369
|
f4f1f491f8d938cd6d1f3f733fbeb6cd5410d975
| 11,858
|
py
|
Python
|
scripts/transfer.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 2
|
2016-02-23T00:09:14.000Z
|
2019-02-11T07:48:44.000Z
|
scripts/transfer.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
scripts/transfer.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 6
|
2015-05-27T13:09:50.000Z
|
2019-02-11T07:48:46.000Z
|
#!/usr/bin/env python
"""
Downloads files to temp locations. This script is invoked by the Transfer
Manager (galaxy.jobs.transfer_manager) and should not normally be invoked by
hand.
"""
import os, sys, optparse, ConfigParser, socket, SocketServer, threading, logging, random, urllib2, tempfile, time
galaxy_root = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..' ) )
sys.path.insert( 0, os.path.abspath( os.path.join( galaxy_root, 'lib' ) ) )
from galaxy import eggs
import pkg_resources
pkg_resources.require( "pexpect" )
import pexpect
eggs.require( "SQLAlchemy >= 0.4" )
from sqlalchemy import *
from sqlalchemy.orm import *
from galaxy.model.mapping import load_egg_for_url
import galaxy.model
from galaxy.util import json, bunch
eggs.require( 'python_daemon' )
from daemon import DaemonContext
log = logging.getLogger( __name__ )
log.setLevel( logging.DEBUG )
handler = logging.StreamHandler( sys.stdout )
log.addHandler( handler )
debug = False
slow = False
class ArgHandler( object ):
"""
Collect command line flags.
"""
def __init__( self ):
self.parser = optparse.OptionParser()
self.parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)',
default=os.path.abspath( os.path.join( galaxy_root, 'universe_wsgi.ini' ) ) )
self.parser.add_option( '-d', '--debug', action='store_true', dest='debug', help="Debug (don't detach)" )
self.parser.add_option( '-s', '--slow', action='store_true', dest='slow', help="Transfer slowly (for debugging)" )
self.opts = None
def parse( self ):
self.opts, args = self.parser.parse_args()
if len( args ) != 1:
log.error( 'usage: transfer.py <transfer job id>' )
sys.exit( 1 )
try:
self.transfer_job_id = int( args[0] )
except TypeError:
log.error( 'The provided transfer job ID is not an integer: %s' % args[0] )
sys.exit( 1 )
if self.opts.debug:
global debug
debug = True
log.setLevel( logging.DEBUG )
if self.opts.slow:
global slow
slow = True
class GalaxyApp( object ):
"""
A shell Galaxy App to provide access to the Galaxy configuration and
model/database.
"""
def __init__( self, config_file ):
self.config = ConfigParser.ConfigParser( dict( database_file = 'database/universe.sqlite',
file_path = 'database/files',
transfer_worker_port_range = '12275-12675',
transfer_worker_log = None ) )
self.config.read( config_file )
self.model = bunch.Bunch()
self.connect_database()
def connect_database( self ):
# Avoid loading the entire model since doing so is exceptionally slow
default_dburl = 'sqlite:///%s?isolation_level=IMMEDIATE' % self.config.get( 'app:main', 'database_file' )
try:
dburl = self.config.get( 'app:main', 'database_connection' )
except ConfigParser.NoOptionError:
dburl = default_dburl
load_egg_for_url( dburl )
engine = create_engine( dburl )
metadata = MetaData( engine )
self.sa_session = scoped_session( sessionmaker( bind=engine, autoflush=False, autocommit=True ) )
self.model.TransferJob = galaxy.model.TransferJob
self.model.TransferJob.table = Table( "transfer_job", metadata, autoload=True )
def get_transfer_job( self, id ):
return self.sa_session.query( self.model.TransferJob ).get( int( id ) )
class ListenerServer( SocketServer.ThreadingTCPServer ):
"""
The listener will accept state requests and new transfers for as long as
the manager is running.
"""
def __init__( self, port_range, RequestHandlerClass, app, transfer_job, state_result ):
self.state_result = state_result
# Try random ports until a free one is found
while True:
random_port = random.choice( port_range )
try:
SocketServer.ThreadingTCPServer.__init__( self, ( 'localhost', random_port ), RequestHandlerClass )
log.info( 'Listening on port %s' % random_port )
break
except Exception, e:
log.warning( 'Tried binding port %s: %s' % ( random_port, str( e ) ) )
transfer_job.socket = random_port
app.sa_session.add( transfer_job )
app.sa_session.flush()
class ListenerRequestHandler( SocketServer.BaseRequestHandler ):
"""
Handle state or transfer requests received on the socket.
"""
def handle( self ):
request = self.request.recv( 8192 )
response = {}
valid, request, response = json.validate_jsonrpc_request( request, ( 'get_state', ), () )
if valid:
self.request.send( json.to_json_string( json.jsonrpc_response( request=request, result=self.server.state_result.result ) ) )
else:
error_msg = 'Unable to serve request: %s' % response['error']['message']
if 'data' in response['error']:
error_msg += ': %s' % response['error']['data']
log.error( error_msg )
log.debug( 'Original request was: %s' % request )
class StateResult( object ):
"""
A mutable container for the 'result' portion of JSON-RPC responses to state requests.
"""
def __init__( self, result=None ):
self.result = result
def transfer( app, transfer_job_id ):
transfer_job = app.get_transfer_job( transfer_job_id )
if transfer_job is None:
log.error( 'Invalid transfer job ID: %s' % transfer_job_id )
return False
port_range = app.config.get( 'app:main', 'transfer_worker_port_range' )
try:
port_range = [ int( p ) for p in port_range.split( '-' ) ]
except Exception, e:
log.error( 'Invalid port range set in transfer_worker_port_range: %s: %s' % ( port_range, str( e ) ) )
return False
protocol = transfer_job.params[ 'protocol' ]
if protocol not in ( 'http', 'https', 'scp' ):
log.error( 'Unsupported protocol: %s' % protocol )
return False
state_result = StateResult( result = dict( state = transfer_job.states.RUNNING, info='Transfer process starting up.' ) )
listener_server = ListenerServer( range( port_range[0], port_range[1] + 1 ), ListenerRequestHandler, app, transfer_job, state_result )
# daemonize here (if desired)
if not debug:
daemon_context = DaemonContext( files_preserve=[ listener_server.fileno() ], working_directory=os.getcwd() )
daemon_context.open()
# If this fails, it'll never be detected. Hopefully it won't fail since it succeeded once.
app.connect_database() # daemon closed the database fd
transfer_job = app.get_transfer_job( transfer_job_id )
listener_thread = threading.Thread( target=listener_server.serve_forever )
listener_thread.setDaemon( True )
listener_thread.start()
# Store this process' pid so unhandled deaths can be handled by the restarter
transfer_job.pid = os.getpid()
app.sa_session.add( transfer_job )
app.sa_session.flush()
terminal_state = None
if protocol in [ 'http', 'https' ]:
for transfer_result_dict in http_transfer( transfer_job ):
state_result.result = transfer_result_dict
if transfer_result_dict[ 'state' ] in transfer_job.terminal_states:
terminal_state = transfer_result_dict
elif protocol in [ 'scp' ]:
# Transfer the file using scp
transfer_result_dict = scp_transfer( transfer_job )
# Handle the state of the transfer
state = transfer_result_dict[ 'state' ]
state_result.result = transfer_result_dict
if state in transfer_job.terminal_states:
terminal_state = transfer_result_dict
if terminal_state is not None:
transfer_job.state = terminal_state[ 'state' ]
for name in [ 'info', 'path' ]:
if name in terminal_state:
transfer_job.__setattr__( name, terminal_state[ name ] )
else:
transfer_job.state = transfer_job.states.ERROR
transfer_job.info = 'Unknown error encountered by transfer worker.'
app.sa_session.add( transfer_job )
app.sa_session.flush()
return True
def http_transfer( transfer_job ):
"""Plugin" for handling http(s) transfers."""
url = transfer_job.params['url']
try:
f = urllib2.urlopen( url )
except urllib2.URLError, e:
yield dict( state = transfer_job.states.ERROR, info = 'Unable to open URL: %s' % str( e ) )
return
size = f.info().getheader( 'Content-Length' )
if size is not None:
size = int( size )
chunksize = 1024 * 1024
if slow:
chunksize = 1024
read = 0
last = 0
try:
fh, fn = tempfile.mkstemp()
except Exception, e:
yield dict( state = transfer_job.states.ERROR, info = 'Unable to create temporary file for transfer: %s' % str( e ) )
return
log.debug( 'Writing %s to %s, size is %s' % ( url, fn, size or 'unknown' ) )
try:
while True:
chunk = f.read( chunksize )
if not chunk:
break
os.write( fh, chunk )
read += chunksize
if size is not None and read < size:
percent = int( float( read ) / size * 100 )
if percent != last:
yield dict( state = transfer_job.states.PROGRESS, read = read, percent = '%s' % percent )
last = percent
elif size is None:
yield dict( state = transfer_job.states.PROGRESS, read = read )
if slow:
time.sleep( 1 )
os.close( fh )
yield dict( state = transfer_job.states.DONE, path = fn )
except Exception, e:
yield dict( state = transfer_job.states.ERROR, info = 'Error during file transfer: %s' % str( e ) )
return
return
def scp_transfer( transfer_job ):
"""Plugin" for handling scp transfers using pexpect"""
def print_ticks( d ):
pass
host = transfer_job.params[ 'host' ]
user_name = transfer_job.params[ 'user_name' ]
password = transfer_job.params[ 'password' ]
file_path = transfer_job.params[ 'file_path' ]
try:
fh, fn = tempfile.mkstemp()
except Exception, e:
return dict( state = transfer_job.states.ERROR, info = 'Unable to create temporary file for transfer: %s' % str( e ) )
try:
# TODO: add the ability to determine progress of the copy here like we do in the http_transfer above.
cmd = "scp %s@%s:'%s' '%s'" % ( user_name,
host,
file_path.replace( ' ', '\ ' ),
fn )
output = pexpect.run( cmd,
events={ '.ssword:*': password + '\r\n',
pexpect.TIMEOUT: print_ticks },
timeout=10 )
return dict( state = transfer_job.states.DONE, path = fn )
except Exception, e:
return dict( state = transfer_job.states.ERROR, info = 'Error during file transfer: %s' % str( e ) )
if __name__ == '__main__':
arg_handler = ArgHandler()
arg_handler.parse()
app = GalaxyApp( arg_handler.opts.config )
log.debug( 'Initiating transfer...' )
if transfer( app, arg_handler.transfer_job_id ):
log.debug( 'Finished' )
else:
log.error( 'Error in transfer process...' )
sys.exit( 1 )
sys.exit( 0 )
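# Editorial note (not part of the original script): based on ArgHandler above, a typical
# invocation looks like
#   python scripts/transfer.py -c universe_wsgi.ini --debug 42
# where 42 is a hypothetical transfer job id. Without --debug the worker daemonizes and
# reports progress over the JSON-RPC socket that ListenerServer binds from
# transfer_worker_port_range.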
| 42.35
| 138
| 0.612414
|
b24816e6797fdd991f5327ff68efce4cf0143ebd
| 4,049
|
py
|
Python
|
sudoku/graphics.py
|
kyrellosNasr/sudoku
|
e6f389b8d16cc662311cf1d5fec7c95e36133d2c
|
[
"MIT"
] | null | null | null |
sudoku/graphics.py
|
kyrellosNasr/sudoku
|
e6f389b8d16cc662311cf1d5fec7c95e36133d2c
|
[
"MIT"
] | null | null | null |
sudoku/graphics.py
|
kyrellosNasr/sudoku
|
e6f389b8d16cc662311cf1d5fec7c95e36133d2c
|
[
"MIT"
] | null | null | null |
from tkinter import Tk , Frame , Label , LabelFrame , Entry , Radiobutton , BooleanVar , Button , END
from initiate import Sudoku , bcolors
class MainWin(Tk , Sudoku) :
def __init__(self , title , *args , **kwargs) :
Tk.__init__(self,*args,**kwargs);
self.title(title);
self.resizable(False , False);
self.drawBoxes();
self.mode = BooleanVar();
self.mode.set(True);
self.drawChoice()
self.doBtn = Button(self,text=" Try ",bg="#452",fg="#FFF",command=self._onClick)
self.doBtn.grid(row=4,column=0,pady=10,padx=5,ipady=2,sticky="we")
self.bind("<Return>",self._onClick)
def drawBoxes(self) :
self._boxes = [];
frame = Frame(self,bg="#000")
frame.grid(row=0,column=0,padx=5,pady=5)
for i in range(9) :
for j in range(9) :
temp = Entry(frame,borderwidth=1,width=4,font=("Consolas",12),justify="center",relief="ridge")
temp.grid(row=i,column=j,ipady=3)
if j in [3,6] : temp.grid(row=i,column=j,ipady=3,padx=(2,0))
if i in [3,6] : temp.grid(row=i,column=j,ipady=3,pady=(2,0))
temp.grid(row=i,column=j,ipady=3);
self._boxes.append((temp , i , j));
def drawChoice(self) :
frame = LabelFrame(self,text="Mode")
frame.grid(row=1,column=0,sticky="we",padx=5,pady=5);
Radiobutton(frame,text="solve",variable=self.mode , value=True).grid(row=0,column=1,pady=(0,8))
Radiobutton(frame,text="check",variable=self.mode , value=False).grid(row=0,column=3,pady=(0,8))
frame2 = LabelFrame(self,text="Console")
frame2.grid(row=2,column=0,sticky="we",padx=5,pady=5);
self.debug = Label(frame2,text="\n\n")
self.debug.grid(row=0,column=0,ipadx=5,pady=(8,0))
rstLbl = Label(self,text="reset ?",fg="blue",cursor="hand2")
rstLbl.grid(row=3,column=0,sticky="w",padx=5,pady=0);
rstLbl.bind("<Button-1>",self._resetPuzzle);
def getData(self) :
self.data = [[]];
col = 0;
for point in self._boxes :
value = point[0].get().replace(" ","");
if value == "" and not self.mode.get() : self.data = [[]];return -1;
if value == "" :
self.data[-1].append(-1);
col += 1;
if col % 9 == 0 : self.data.append([]);
continue;
if value.isdigit() and int(value) >= 1 and int(value) <= 9 :
self.data[-1].append(int(value));
else :
self.data = [[]];
#print("error 2");
return -2;
col += 1;
if col % 9 == 0 : self.data.append([]);
self.data.pop();
#print(self.data)
return True;
def _onClick(self,event=None) :
        if self.getData() in [-1,-2] : self._report("puzzle entries are not numbers from 1-9 or not compatible with\nmode\n" , "f");return;
if self.mode.get() :
#if mode is solving
if self.checkPreSol() :
try :
self.getAns();
except IndexError : self._report("puzzle combinations are wrong\n","f");return;
if self.checkSol() :
#print(self.data);
self._report("solution listed above\n\n","s")
self._putData()
else :
self._report("something went wrong\n\n","f");
else :
self._report("puzzle data are repeated\n\n","f");
else :
#if mode is checking
if self.checkSol() :
self._report("solution is right\n\n","s");
else :
self._report("solution is wrong\n\n","f");
def _report(self , text , label) :
if label == "s" :
self.debug.configure(text=text,fg="green");
elif label == "f" :
self.debug.configure(text=text,fg="red");
def _putData(self) :
for row in range(9) :
for col in range(9) :
order = ((row * 9) - (9 - col)) + 9
entry = self._boxes[order][0];
if entry.get().replace(" ","") == "" : entry.configure(fg="#FFF",bg="blue");entry.insert(0,self.data[row][col]);
else : entry.configure(fg="#000",bg="#FFF");
def _resetPuzzle(self,event) :
for entry in self._boxes :
entry[0].configure(bg="#FFF",fg="#000");
entry[0].delete(0,END)
entry[0].insert(0,"");
def run(self) :
self.mainloop();
MainWin("jesus christ").run()
"""
row = 8;
col = 8;
print( ((row * 9) - (9 - col)) + 9 )
"""
| 32.134921
| 126
| 0.596938
|
fb800d941ef4bd73be5ddd2c288e4274ac7a13ad
| 2,646
|
py
|
Python
|
lib/scripts/experiment_instance.py
|
technoligest/kmeansII
|
d5326421786990f29fbd1ec18b3bccf4207fd2c0
|
[
"MIT"
] | 1
|
2019-11-15T14:36:27.000Z
|
2019-11-15T14:36:27.000Z
|
lib/scripts/experiment_instance.py
|
technoligest/kmeansII
|
d5326421786990f29fbd1ec18b3bccf4207fd2c0
|
[
"MIT"
] | null | null | null |
lib/scripts/experiment_instance.py
|
technoligest/kmeansII
|
d5326421786990f29fbd1ec18b3bccf4207fd2c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
clusters
centres
averages(distance/area)[]
distance
ExperimentInstance is the class to help us extract information from a series of experiment runs.
"""
from lib.scripts.experiment_cluster import *
from lib.scripts.experiment_utils import distance
import sys
class ExperimentInstance:
"""
returns an array of arrays corresponding to the point positions of every centre
    in the experiment run. These point positions are disjoint and are calculated based
on the closest points to each centre.
"""
def calcPointPositions(self):
result = [[] for _ in range(len(self.centres))]
for instId in range(len(ExperimentInstance.dataset)):
currCandidateCentreId = 0
minDistance = sys.float_info.max
for centreId in range(len(self.centres)):
currDistance = distance(ExperimentInstance.dataset[instId], self.centres[centreId])
if minDistance > currDistance:
minDistance = currDistance
currCandidateCentreId = centreId
result[currCandidateCentreId].append(instId)
return result
def __init__(self, centres, algorithm, totalDistanceToCentres, seedPickingTime, numIterations, iterationsRuntime):
assert hasattr(ExperimentInstance, 'dataset')
self.centres = centres
self.algorithm = algorithm
self.totalDistanceToCentres = totalDistanceToCentres
self.seedPickingTime = seedPickingTime
self.numIterations = numIterations
self.iterationsRuntime = iterationsRuntime
self.clusters = []
pointPositions = self.calcPointPositions()
for centre, p in zip(self.centres, pointPositions):
self.clusters.append(Cluster(centre, p))
self.averageDistancesOverArea_ = None
@property
def averageDistancesOverArea(self):
        if self.averageDistancesOverArea_ is None:
self.averageDistancesOverArea_ = [c.totalDistance / c.area for c in self.clusters]
return self.averageDistancesOverArea_
def centresToString(self):
result = ""
for inst in self.centres:
result += str(inst[0]) + " " + str(inst[1]) + "\n"
return result
def __str__(self):
return (">>>Start Experiment\n" +
"algorithm:" + self.algorithm + "\n" +
"Sum of distance squared to centre:" + str(self.totalDistanceToCentres) + "\n" +
"Time to pick the seeds:" + str(self.seedPickingTime) + "\n" +
"Number of iterations run:" + str(self.numIterations) + "\n" +
"Time to run the iterations:" + str(self.iterationsRuntime) + "\n" +
"Start Centres:\n" + self.centresToString() +
"End Centres:\n" +
"End Experiment:"
)
| 36.246575
| 116
| 0.6935
|
572c80f10032da9d12fc77fc7d0b82e2503a992c
| 608
|
py
|
Python
|
[1] BEGINNER/1003 - Soma Simples.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | 1
|
2022-03-15T03:03:26.000Z
|
2022-03-15T03:03:26.000Z
|
[1] BEGINNER/1003 - Soma Simples.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
[1] BEGINNER/1003 - Soma Simples.py
|
tiago040/URI-SOLUTIONS
|
519d3950252a6002e8926416b2f8217ba08fe721
|
[
"MIT"
] | null | null | null |
'''
Read two integer values into the variables A and B. Then compute the sum of the two and assign it to the variable SOMA. Finally, print the value of this variable.
Input
The input file contains 2 integer values.
Output
Print the variable SOMA in all uppercase letters, with one blank space before and after the equals sign, followed by the value corresponding to the sum of A and B. As in all problems, don't forget to print the end of line after the result, otherwise you will receive "Presentation Error".
'''
A = int(input())
B = int(input())
SOMA = A + B
print('SOMA = {}'.format(SOMA))
| 40.533333
| 292
| 0.751645
|
af42483ab1204c7cdfc7643bbde49711a37f444b
| 948
|
py
|
Python
|
setup.py
|
umangtank/oneNeuron_pypi
|
c975170f4ce5814e6cf6e44519da288a7edecef9
|
[
"MIT"
] | null | null | null |
setup.py
|
umangtank/oneNeuron_pypi
|
c975170f4ce5814e6cf6e44519da288a7edecef9
|
[
"MIT"
] | null | null | null |
setup.py
|
umangtank/oneNeuron_pypi
|
c975170f4ce5814e6cf6e44519da288a7edecef9
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
PROJECT_NAME = "oneNeuron"
USER_NAME = "umangtank"
setuptools.setup(
name= f"{PROJECT_NAME}-{USER_NAME}",
version="0.0.2",
author=USER_NAME,
author_email="umangtank08@gmail.com",
description="It's an Implimentation of Perceptron",
long_description=long_description,
long_description_content_type="text/markdown",
url= f"https://github.com/{USER_NAME}/{PROJECT_NAME}",
project_urls={
"Bug Tracker": f"https://github.com/{USER_NAME}/{PROJECT_NAME}",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.7",
install_requires=[
"numpy",
"tqdm",
]
)
| 27.882353
| 72
| 0.640295
|
a3aae4bc091b083a04833e581d3684d4d1444bf3
| 7,363
|
py
|
Python
|
pygluu/kubernetes/terminal/prompt.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 23
|
2020-04-18T14:51:41.000Z
|
2022-03-31T19:59:40.000Z
|
pygluu/kubernetes/terminal/prompt.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 236
|
2020-04-22T08:59:27.000Z
|
2022-03-31T07:21:12.000Z
|
pygluu/kubernetes/terminal/prompt.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 23
|
2020-04-19T15:25:59.000Z
|
2022-03-16T17:17:36.000Z
|
"""
pygluu.kubernetes.terminal.prompt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains helpers to initialize all terminal prompts to
interact with user's inputs for terminal installations.
License terms and conditions for Gluu Cloud Native Edition:
https://www.apache.org/licenses/LICENSE-2.0
"""
from pygluu.kubernetes.settings import SettingsHandler
from pygluu.kubernetes.terminal.confirmsettings import PromptConfirmSettings
from pygluu.kubernetes.terminal.volumes import PromptVolumes
from pygluu.kubernetes.terminal.gke import PromptGke
from pygluu.kubernetes.terminal.configuration import PromptConfiguration
from pygluu.kubernetes.terminal.jackrabbit import PromptJackrabbit
from pygluu.kubernetes.terminal.istio import PromptIstio
from pygluu.kubernetes.terminal.replicas import PromptReplicas
from pygluu.kubernetes.terminal.couchbase import PromptCouchbase
from pygluu.kubernetes.terminal.architecture import PromptArch
from pygluu.kubernetes.terminal.namespace import PromptNamespace
from pygluu.kubernetes.terminal.optionalservices import PromptOptionalServices
from pygluu.kubernetes.terminal.testenv import PromptTestEnvironment
from pygluu.kubernetes.terminal.aws import PromptAws
from pygluu.kubernetes.terminal.helpers import gather_ip
from pygluu.kubernetes.terminal.persistencebackend import PromptPersistenceBackend
from pygluu.kubernetes.terminal.ldap import PromptLdap
from pygluu.kubernetes.terminal.images import PromptImages
from pygluu.kubernetes.terminal.cache import PromptCache
from pygluu.kubernetes.terminal.backup import PromptBackup
from pygluu.kubernetes.terminal.license import PromptLicense
from pygluu.kubernetes.terminal.version import PromptVersion
from pygluu.kubernetes.terminal.sql import PromptSQL
from pygluu.kubernetes.terminal.google import PromptGoogle
class Prompt:
"""Prompt is used for prompting users for input used in deploying Gluu.
"""
def __init__(self):
self.settings = SettingsHandler()
def load_settings(self):
self.settings = SettingsHandler()
def license(self):
self.load_settings()
PromptLicense(self.settings)
def versions(self):
self.load_settings()
PromptVersion(self.settings)
def arch(self):
self.load_settings()
arch = PromptArch(self.settings)
arch.prompt_arch()
def namespace(self):
self.load_settings()
namespace = PromptNamespace(self.settings)
namespace.prompt_gluu_namespace()
def optional_services(self):
self.load_settings()
optional_services = PromptOptionalServices(self.settings)
optional_services.prompt_optional_services()
def jackrabbit(self):
self.load_settings()
jackrabbit = PromptJackrabbit(self.settings)
jackrabbit.prompt_jackrabbit()
def istio(self):
self.load_settings()
istio = PromptIstio(self.settings)
istio.prompt_istio()
def test_enviornment(self):
self.load_settings()
test_environment = PromptTestEnvironment(self.settings)
if not self.settings.get("TEST_ENVIRONMENT") and \
self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s", "minikube"):
test_environment.prompt_test_environment()
if self.settings.get("DEPLOYMENT_ARCH") in ("eks", "gke", "do", "local", "aks"):
if not self.settings.get("NODE_SSH_KEY"):
test_environment.prompt_ssh_key()
def network(self):
if not self.settings.get("HOST_EXT_IP"):
ip = gather_ip()
self.load_settings()
self.settings.set("HOST_EXT_IP", ip)
if self.settings.get("DEPLOYMENT_ARCH") == "eks" and self.settings.get("USE_ISTIO_INGRESS") != "Y":
aws = PromptAws(self.settings)
aws.prompt_aws_lb()
def gke(self):
self.load_settings()
if self.settings.get("DEPLOYMENT_ARCH") == "gke":
gke = PromptGke(self.settings)
gke.prompt_gke()
def persistence_backend(self):
self.load_settings()
persistence_backend = PromptPersistenceBackend(self.settings)
persistence_backend.prompt_persistence_backend()
def ldap(self):
self.load_settings()
if self.settings.get("PERSISTENCE_BACKEND") == "hybrid":
ldap = PromptLdap(self.settings)
ldap.prompt_hybrid_ldap_held_data()
def volumes(self):
self.load_settings()
volumes = PromptVolumes(self.settings)
if self.settings.get("PERSISTENCE_BACKEND") in ("hybrid", "ldap") or \
self.settings.get("INSTALL_JACKRABBIT") == "Y":
volumes.prompt_volumes()
volumes.prompt_storage()
def couchbase(self):
self.load_settings()
couchbase = PromptCouchbase(self.settings)
if not self.settings.get("DEPLOY_MULTI_CLUSTER") and self.settings.get("PERSISTENCE_BACKEND") in (
"hybrid", "couchbase") and self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s", "minikube"):
couchbase.prompt_couchbase_multi_cluster()
if self.settings.get("PERSISTENCE_BACKEND") in ("hybrid", "couchbase"):
couchbase.prompt_couchbase()
def cache(self):
self.load_settings()
cache = PromptCache(self.settings)
cache.prompt_cache_type()
def backup(self):
self.load_settings()
if self.settings.get("DEPLOYMENT_ARCH") not in ("microk8s", "minikube"):
backup = PromptBackup(self.settings)
backup.prompt_backup()
def configuration(self):
self.load_settings()
configuration = PromptConfiguration(self.settings)
configuration.prompt_config()
def images(self):
self.load_settings()
images = PromptImages(self.settings)
images.prompt_image_name_tag()
def replicas(self):
self.load_settings()
replicas = PromptReplicas(self.settings)
replicas.prompt_replicas()
def sql(self):
self.load_settings()
if self.settings.get("PERSISTENCE_BACKEND") == "sql":
spanner = PromptSQL(self.settings)
spanner.prompt_sql()
def google(self):
self.load_settings()
if self.settings.get("PERSISTENCE_BACKEND") == "spanner":
spanner = PromptGoogle(self.settings)
spanner.prompt_google()
def confirm_settings(self):
self.load_settings()
if self.settings.get("CONFIRM_PARAMS") != "Y":
confirm_settings = PromptConfirmSettings(self.settings)
confirm_settings.confirm_params()
def prompt(self):
"""Main property: called to setup all prompts and returns prompts in settings file.
:return:
"""
self.license()
self.versions()
self.arch()
self.namespace()
self.optional_services()
self.jackrabbit()
self.istio()
self.test_enviornment()
self.network()
self.gke()
self.persistence_backend()
self.ldap()
self.volumes()
self.sql()
self.google()
self.couchbase()
self.cache()
self.backup()
self.configuration()
self.images()
self.replicas()
self.volumes()
self.confirm_settings()
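# Editorial usage sketch (not part of the original module): the installer is expected to
# instantiate Prompt and call prompt(), which walks every prompt above and persists the
# answers through SettingsHandler.
def _example_run_all_prompts():
    Prompt().prompt()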
| 35.570048
| 112
| 0.677034
|
9cf35993c912fa3d328f666b5c66bdb35e741110
| 6,089
|
py
|
Python
|
fhirclient/models/guidanceresponse.py
|
carolinarsm/client-py
|
db1b6e3e28036dee11da75412003c7d90e591c6d
|
[
"Apache-2.0"
] | 418
|
2015-07-01T08:23:16.000Z
|
2022-03-31T14:02:30.000Z
|
fhirclient/models/guidanceresponse.py
|
carolinarsm/client-py
|
db1b6e3e28036dee11da75412003c7d90e591c6d
|
[
"Apache-2.0"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
fhirclient/models/guidanceresponse.py
|
carolinarsm/client-py
|
db1b6e3e28036dee11da75412003c7d90e591c6d
|
[
"Apache-2.0"
] | 185
|
2015-03-30T20:23:16.000Z
|
2022-03-30T14:39:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/GuidanceResponse) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class GuidanceResponse(domainresource.DomainResource):
""" The formal response to a guidance request.
A guidance response is the formal response to a guidance request, including
any output parameters returned by the evaluation, as well as the
description of any proposed actions to be taken.
"""
resource_type = "GuidanceResponse"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dataRequirement = None
""" Additional required data.
List of `DataRequirement` items (represented as `dict` in JSON). """
self.encounter = None
""" Encounter during which the response was returned.
Type `FHIRReference` (represented as `dict` in JSON). """
self.evaluationMessage = None
""" Messages resulting from the evaluation of the artifact or artifacts.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.identifier = None
""" Business identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.moduleCanonical = None
""" What guidance was requested.
Type `str`. """
self.moduleCodeableConcept = None
""" What guidance was requested.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.moduleUri = None
""" What guidance was requested.
Type `str`. """
self.note = None
""" Additional notes about the response.
List of `Annotation` items (represented as `dict` in JSON). """
self.occurrenceDateTime = None
""" When the guidance response was processed.
Type `FHIRDate` (represented as `str` in JSON). """
self.outputParameters = None
""" The output parameters of the evaluation, if any.
Type `FHIRReference` (represented as `dict` in JSON). """
self.performer = None
""" Device returning the guidance.
Type `FHIRReference` (represented as `dict` in JSON). """
self.reasonCode = None
""" Why guidance is needed.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
""" Why guidance is needed.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.requestIdentifier = None
""" The identifier of the request associated with this response, if any.
Type `Identifier` (represented as `dict` in JSON). """
self.result = None
""" Proposed actions, if any.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
""" success | data-requested | data-required | in-progress | failure |
entered-in-error.
Type `str`. """
self.subject = None
""" Patient the request was performed for.
Type `FHIRReference` (represented as `dict` in JSON). """
super(GuidanceResponse, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(GuidanceResponse, self).elementProperties()
js.extend([
("dataRequirement", "dataRequirement", datarequirement.DataRequirement, True, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("evaluationMessage", "evaluationMessage", fhirreference.FHIRReference, True, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("moduleCanonical", "moduleCanonical", str, False, "module", True),
("moduleCodeableConcept", "moduleCodeableConcept", codeableconcept.CodeableConcept, False, "module", True),
("moduleUri", "moduleUri", str, False, "module", True),
("note", "note", annotation.Annotation, True, None, False),
("occurrenceDateTime", "occurrenceDateTime", fhirdate.FHIRDate, False, None, False),
("outputParameters", "outputParameters", fhirreference.FHIRReference, False, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("requestIdentifier", "requestIdentifier", identifier.Identifier, False, None, False),
("result", "result", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import datarequirement
except ImportError:
datarequirement = sys.modules[__package__ + '.datarequirement']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
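# Editorial construction sketch (not part of the generated module): the payload below is a
# hypothetical, minimal JSON dictionary; strict=False relaxes validation for illustration only.
def _example_guidance_response():
    return GuidanceResponse(
        {
            "resourceType": "GuidanceResponse",
            "status": "success",
            "moduleUri": "http://example.org/guidance-module",
        },
        strict=False,
    )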
| 41.141892
| 119
| 0.632124
|
36d2a21840117e8a872c6d49636dc7a71cee45fd
| 1,837
|
py
|
Python
|
dynamic_programming/word_break.py
|
t-gibson/algorithms
|
d207e9e72b5f04d47508194e66d4603b27668805
|
[
"MIT"
] | null | null | null |
dynamic_programming/word_break.py
|
t-gibson/algorithms
|
d207e9e72b5f04d47508194e66d4603b27668805
|
[
"MIT"
] | null | null | null |
dynamic_programming/word_break.py
|
t-gibson/algorithms
|
d207e9e72b5f04d47508194e66d4603b27668805
|
[
"MIT"
] | null | null | null |
"""Given a non-empty string s and a dictionary wordDict containing a list of
non-empty words, determine if s can be segmented into a space-separated
sequence of one or more dictionary words.
Note:
- The same word in the dictionary may be reused multiple times in the
segmentation.
- You may assume the dictionary does not contain duplicate words.
Example 1:
Input: s = "leetcode", wordDict = ["leet", "code"]
Output: true
Explanation: Return true because "leetcode" can be segmented as "leet
code".
Example 2:
Input: s = "applepenapple", wordDict = ["apple", "pen"]
Output: true
Explanation: Return true because "applepenapple" can be segmented as
"apple pen apple".
Note that you are allowed to reuse a dictionary word.
Example 3:
Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
Output: false
Example 4:
Input: s = "catsandog", wordDict = ["cats", "sand", "cat", "og"]
Note: it's not just about matching greedily
"""
from functools import lru_cache
from typing import Tuple
@lru_cache(maxsize=None)
def word_break(s: str, word_dict: Tuple[str]) -> bool:
"""
Explanation:
Have gone with the top-down approach with memoization.
Time complexity: O(n^2)
Space complexity: O(n)
"""
n = len(s)
min_word_length = min(len(word) for word in word_dict)
# base case
if n == 0:
return True
elif n < min_word_length:
return False
# recurrence
    # An empty generator (no dictionary word is a prefix of s) makes any() return False,
    # so unmatched suffixes simply fail the check rather than raising.
    return any(
        word_break(s[len(word):], word_dict)
        for word in word_dict
        if s.startswith(word)
    )
if __name__ == "__main__":
assert word_break("leetcode", ("leet", "code"))
assert word_break("applepenapple", ("apple", "pen"))
assert not word_break("catsandog", ("cats", "dog", "sand", "and", "cat"))
| 30.114754
| 77
| 0.652695
|
146ab0cc5bad64bdf8a60d73d6f5dd9abbaa6928
| 5,861
|
py
|
Python
|
app.py
|
amcrisan/interactive-model-cards
|
081ab2e9bddb17f9407e8f8e666633d53ae2b5f9
|
[
"MIT"
] | null | null | null |
app.py
|
amcrisan/interactive-model-cards
|
081ab2e9bddb17f9407e8f8e666633d53ae2b5f9
|
[
"MIT"
] | null | null | null |
app.py
|
amcrisan/interactive-model-cards
|
081ab2e9bddb17f9407e8f8e666633d53ae2b5f9
|
[
"MIT"
] | null | null | null |
### LIBRARIES ###
# # Data
import numpy as np
import pandas as pd
import json
from math import floor
# Robustness Gym and Analysis
import robustnessgym as rg
from gensim.models.doc2vec import Doc2Vec
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import nltk
nltk.download('punkt') #make sure that punkt is downloaded
# App & Visualization
import streamlit as st
import altair as alt
# utils
from interactive_model_cards import utils as ut
from interactive_model_cards import app_layout as al
from random import sample
from PIL import Image
### LOADING DATA ###
# model card data
@st.experimental_memo
def load_model_card():
with open("./assets/data/text_explainer/model_card.json") as f:
mc_text = json.load(f)
return mc_text
# pre-computed Robustness Gym dev bench
# @st.experimental_singleton
@st.cache(allow_output_mutation=True)
def load_data():
# load dev bench
devBench = rg.DevBench.load("./assets/data/rg/sst_db.devbench")
return devBench
# load model
@st.experimental_singleton
def load_model():
model = rg.HuggingfaceModel(
"distilbert-base-uncased-finetuned-sst-2-english", is_classifier=True
)
return model
#load pre-computed embedding
def load_embedding():
embedding = pd.read_pickle("./assets/models/sst_vectors.pkl")
return embedding
#load doc2vec model
@st.experimental_singleton
def load_doc2vec():
doc2vec = Doc2Vec.load("./assets/models/sst_train.doc2vec")
return(doc2vec)
# @st.experimental_memo
def load_examples():
with open("./assets/data/user_data/example_sentence.json") as f:
examples = json.load(f)
return examples
# loading the dataset
def load_basic():
# load data
devBench = load_data()
# load model
model = load_model()
#protected_classes
protected_classes = json.load(open("./assets/data/protected_terms.json"))
return devBench, model, protected_classes
@st.experimental_singleton
def load_title():
img = Image.open("./assets/img/title.png")
return(img)
if __name__ == "__main__":
    ### STREAMLIT APP CONFIG ###
st.set_page_config(layout="wide", page_title="Interactive Model Card")
# import custom styling
ut.init_style()
### LOAD DATA AND SESSION VARIABLES ###
    # ******* loading the model and the data
with st.spinner():
sst_db, model,protected_classes = load_basic()
embedding = load_embedding()
doc2vec = load_doc2vec()
# load example sentences
sentence_examples = load_examples()
# ******* session state variables
if "user_data" not in st.session_state:
st.session_state["user_data"] = pd.DataFrame()
if "example_sent" not in st.session_state:
st.session_state["example_sent"] = "I like you. I love you"
if "quant_ex" not in st.session_state:
st.session_state["quant_ex"] = {"Overall Performance": sst_db.metrics["model"]}
if "selected_slice" not in st.session_state:
st.session_state["selected_slice"] = None
if "slice_terms" not in st.session_state:
st.session_state["slice_terms"] = {}
if "embedding" not in st.session_state:
st.session_state["embedding"] = embedding
if "protected_class" not in st.session_state:
st.session_state["protected_class"] = protected_classes
### STREAMLIT APP LAYOUT###
# ******* MODEL CARD PANEL *******
#st.sidebar.title("Interactive Model Card")
img = load_title()
st.sidebar.image(img,width=400)
st.sidebar.warning("Data is not permanently collected or stored from your interactions, but is temporarily cached during usage.")
# load model card data
errors = st.sidebar.checkbox("Show Warnings", value=True)
model_card = load_model_card()
al.model_card_panel(model_card,errors)
lcol, rcol = st.columns([4, 8])
# ******* USER EXAMPLE DATA PANEL *******
st.markdown("---")
with lcol:
        # Choose what to show for the quantitative analysis.
st.write("""<h1 style="font-size:20px;padding-top:0px;"> Quantitative Analysis</h1>""",
unsafe_allow_html=True)
st.markdown("View the model's performance or visually explore the model's training and testing dataset")
data_view = st.selectbox("Show:",
["Model Performance Metrics","Data Subpopulation Comparison Visualization"])
st.markdown("Any groups you define via the *analysis actions* will be automatically added to the view")
st.markdown("---")
# Additional Analysis Actions
st.write(
"""<h1 style="font-size:18px;padding-top:5px;"> Analysis Actions</h1>""",
unsafe_allow_html=True,
)
al.example_panel(sentence_examples, model, sst_db,doc2vec)
# ****** GUIDANCE PANEL *****
with st.expander("Guidance"):
st.markdown(
"Need help understanding what you're seeing in this model card?"
)
st.markdown(
" * **[Understanding Metrics](https://stanford.edu/~shervine/teaching/cs-229/cheatsheet-machine-learning-tips-and-tricks)**: A cheatsheet of model metrics"
)
st.markdown(
" * **[Understanding Sentiment Models](https://www.semanticscholar.org/topic/Sentiment-analysis/6011)**: An overview of sentiment analysis"
)
st.markdown(
"* **[Next Steps](https://docs.google.com/document/d/1r9J1NQ7eTibpXkCpcucDEPhASGbOQAMhRTBvosGu4Pk/edit?usp=sharin)**: Suggestions for follow-on actions"
)
st.markdown("Feel free to submit feedback via our [online form](https://sfdc.co/imc_feedback)")
# ******* QUANTITATIVE DATA PANEL *******
al.quant_panel(sst_db, st.session_state["embedding"], rcol,data_view)
| 32.027322
| 171
| 0.669169
|
011607cc0ea94e958ce9dece67bd5a87d694afb0
| 1,791
|
py
|
Python
|
lenet.py
|
maxwell-aladago/slot-machines
|
a490464933e8dd3279bfa0d1b6937a90ebba5573
|
[
"MIT"
] | 1
|
2021-11-04T17:53:11.000Z
|
2021-11-04T17:53:11.000Z
|
lenet.py
|
maxwell-aladago/slot-machines
|
a490464933e8dd3279bfa0d1b6937a90ebba5573
|
[
"MIT"
] | null | null | null |
lenet.py
|
maxwell-aladago/slot-machines
|
a490464933e8dd3279bfa0d1b6937a90ebba5573
|
[
"MIT"
] | 1
|
2021-11-04T17:58:49.000Z
|
2021-11-04T17:58:49.000Z
|
from model_construction import construct_classifier
from torch import nn
class LenetBase(nn.Module):
def __init__(self, classifier):
super(LenetBase, self).__init__()
self.classifier = classifier
self._initialize_weights()
def forward(self, x):
x = x.view(x.size(0), -1)
return self.classifier(x)
def _initialize_weights(self):
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
class Lenet300100:
def __init__(self,
num_classes=10, greedy_selection=True, batch_norm=False):
self._classifier_cfg = [300, "relu", 100, "relu", num_classes]
self._classifier_names = ["fc1", "relu_1", "fc2", "relu_2", "fc3"]
self._in_features = 784
self._greedy_selection = greedy_selection
self._batch_norm = batch_norm
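    # Two factory methods: weight_updates() returns a conventional classifier
    # trained by updating its weights, while weight_selections() requests the
    # slot-machine variant from construct_classifier (slot_machine=True),
    # presumably allocating k candidate values per connection and letting
    # training select among them (greedily when greedy_selection=True).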
def weight_updates(self):
classifier = construct_classifier(self._classifier_cfg,
self._classifier_names,
self._in_features
)
return LenetBase(classifier)
def weight_selections(self, k=8):
classifier = construct_classifier(self._classifier_cfg,
self._classifier_names,
self._in_features,
slot_machine=True,
k=k,
greedy_selection=self._greedy_selection
)
return LenetBase(classifier)
| 35.82
| 81
| 0.523171
|
c769553e32461ce82e2db5c8821544ac91c97f02
| 88
|
py
|
Python
|
Condor/python/Lib/site-packages/comtypes/gen/_C866CA3A_32F7_11D2_9602_00C04F8EE628_0_5_4.py
|
OriolOriolOriol/Condor
|
5b855ff7170e43149f9e9f81a97b6b88282915c5
|
[
"MIT"
] | null | null | null |
Condor/python/Lib/site-packages/comtypes/gen/_C866CA3A_32F7_11D2_9602_00C04F8EE628_0_5_4.py
|
OriolOriolOriol/Condor
|
5b855ff7170e43149f9e9f81a97b6b88282915c5
|
[
"MIT"
] | null | null | null |
Condor/python/Lib/site-packages/comtypes/gen/_C866CA3A_32F7_11D2_9602_00C04F8EE628_0_5_4.py
|
OriolOriolOriol/Condor
|
5b855ff7170e43149f9e9f81a97b6b88282915c5
|
[
"MIT"
] | 1
|
2020-11-04T08:32:26.000Z
|
2020-11-04T08:32:26.000Z
|
# -*- coding: mbcs -*-
typelib_path = 'C:\\WINDOWS\\System32\\Speech\\Common\\sapi.dll'
| 29.333333
| 64
| 0.647727
|
84fc8a2b0ff658aed821cc11c3eb55cc5ada2c04
| 13,115
|
py
|
Python
|
corehq/apps/linked_domain/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/linked_domain/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/linked_domain/tasks.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
from django.conf import settings
from django.template.defaultfilters import linebreaksbr
from django.urls import reverse
from django.utils.translation import ugettext as _
from celery import chord
from celery.task import task
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import get_url_base
from corehq import toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.dbaccessors import get_apps_in_domain
from corehq.apps.app_manager.util import is_linked_app
from corehq.apps.app_manager.views.utils import update_linked_app
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.linked_domain.const import (
FEATURE_FLAG_DATA_MODEL_TOGGLES,
MODEL_APP,
MODEL_KEYWORD,
MODEL_REPORT,
)
from corehq.apps.linked_domain.dbaccessors import get_upstream_domain_link
from corehq.apps.linked_domain.exceptions import DomainLinkError
from corehq.apps.linked_domain.keywords import (
create_linked_keyword,
update_keyword,
)
from corehq.apps.linked_domain.models import (
KeywordLinkDetail,
ReportLinkDetail,
)
from corehq.apps.linked_domain.ucr import (
create_linked_ucr,
get_downstream_report,
update_linked_ucr,
)
from corehq.apps.linked_domain.updates import update_model_type
from corehq.apps.linked_domain.util import (
pull_missing_multimedia_for_app_and_notify,
)
from corehq.apps.reminders.views import KeywordsListView
from corehq.apps.sms.models import Keyword
from corehq.apps.userreports.models import ReportConfiguration
from corehq.apps.users.models import CouchUser
from corehq.privileges import RELEASE_MANAGEMENT
@task(queue='linked_domain_queue')
def pull_missing_multimedia_for_app_and_notify_task(domain, app_id, email=None, force=False):
pull_missing_multimedia_for_app_and_notify(domain, app_id, email, force)
@task(queue='linked_domain_queue')
def push_models(upstream_domain, models, downstream_domains, build_apps, username):
ReleaseManager(upstream_domain, username).release(models, downstream_domains, build_apps)
class ReleaseManager:
def __init__(self, upstream_domain, username):
self.upstream_domain = upstream_domain
self.user = CouchUser.get_by_username(username)
self._reset()
def _reset(self):
self.errors_by_domain = {'html': defaultdict(list), 'text': defaultdict(list)}
self.successes_by_domain = {'html': defaultdict(list), 'text': defaultdict(list)}
def results(self):
return self.successes_by_domain, self.errors_by_domain
def add_error(self, domain, html, text=None):
text = text or html
self.errors_by_domain['html'][domain].append(html)
self.errors_by_domain['text'][domain].append(text)
def add_success(self, domain, html, text=None):
text = text or html
self.successes_by_domain['html'][domain].append(html)
self.successes_by_domain['text'][domain].append(text)
def update_successes(self, successes):
self._update_messages(self.successes_by_domain, successes)
def update_errors(self, errors):
self._update_messages(self.errors_by_domain, errors)
def _update_messages(self, attr, messages):
for fmt in ('html', 'text'):
for domain, msgs in messages[fmt].items():
attr[fmt][domain].extend(msgs)
def get_error_domain_count(self):
return len(self.errors_by_domain['html'])
def get_success_domain_count(self):
return len(self.successes_by_domain['html'])
def _get_errors(self, domain, html=True):
return self.errors_by_domain['html' if html else 'text'][domain]
def _get_successes(self, domain, html=True):
return self.successes_by_domain['html' if html else 'text'][domain]
def release(self, models, downstream_domains, build_apps=False):
self._reset()
header = [
release_domain.si(self.upstream_domain, downstream_domain, self.user.username, models, build_apps)
for downstream_domain in downstream_domains
]
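        # Fan-out/fan-in via a celery chord: one release_domain task runs per
        # downstream domain, and the email callback fires once all of them
        # have completed, receiving their collected (successes, errors) results.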
callback = send_linked_domain_release_email.s(self.upstream_domain, self.user.username,
models, downstream_domains)
chord(header)(callback)
def get_email_message(self, models, linked_domains, html=True):
error_domain_count = self.get_error_domain_count()
separator = "\n"
message = _("""
Release complete. {} project(s) succeeded. {}
The following content was released:
{}
The following linked project spaces received content:
""").format(
self.get_success_domain_count(),
_("{} project(s) encountered errors.").format(error_domain_count) if error_domain_count else "",
separator.join(["- {}".format(m['name']) for m in models])
).strip()
for linked_domain in sorted(linked_domains):
if not self._get_errors(linked_domain, html):
message += _("{}- {} updated successfully").format(separator, linked_domain)
else:
message += _("{}- {} encountered errors:").format(separator, linked_domain)
for msg in self._get_errors(linked_domain, html) + self._get_successes(linked_domain, html):
message += separator + " - " + msg
return linebreaksbr(message) if html else message
def _release_app(self, domain_link, model, user, build_and_release=False):
if toggles.MULTI_MASTER_LINKED_DOMAINS.enabled(domain_link.linked_domain):
return self._error_tuple(_("Multi master flag is in use"))
app_id = model['detail']['app_id']
found = False
error_prefix = ""
try:
for linked_app in get_apps_in_domain(domain_link.linked_domain, include_remote=False):
if is_linked_app(linked_app) and linked_app.family_id == app_id:
found = True
app = update_linked_app(linked_app, app_id, user.user_id)
if not found:
return self._error_tuple(_("Could not find app"))
if build_and_release:
error_prefix = _("Updated app but did not build or release: ")
build = app.make_build()
build.is_released = True
build.save(increment_version=False)
except Exception as e: # intentionally broad
return self._error_tuple(error_prefix + str(e))
def _release_report(self, domain_link, model, user_id):
report_id = model['detail']['report_id']
linked_report = get_downstream_report(domain_link.linked_domain, report_id)
if not linked_report:
if domain_has_privilege(self.upstream_domain, RELEASE_MANAGEMENT):
try:
linked_report_info = create_linked_ucr(domain_link, report_id)
linked_report = linked_report_info.report
except DomainLinkError as e:
return self._error_tuple(str(e))
else:
report = ReportConfiguration.get(report_id)
if report.report_meta.created_by_builder:
view = 'edit_report_in_builder'
else:
view = 'edit_configurable_report'
url = get_url_base() + reverse(view, args=[domain_link.master_domain, report_id])
return self._error_tuple(
_('Could not find report. <a href="{}">Click here</a> and click "Link Report" to link this '
+ 'report.').format(url),
text=_('Could not find report. Please check that the report has been linked.'),
)
        # have not hit an error case, so update the ucr
update_linked_ucr(domain_link, linked_report.get_id)
domain_link.update_last_pull(
MODEL_REPORT,
user_id,
model_detail=ReportLinkDetail(report_id=linked_report.get_id).to_json(),
)
def _release_flag_dependent_model(self, domain_link, model, user, feature_flag):
if not feature_flag.enabled(domain_link.linked_domain):
return self._error_tuple(_("Feature flag for {} is not enabled").format(model['name']))
return self._release_model(domain_link, model, user)
def _release_keyword(self, domain_link, model, user_id):
upstream_id = model['detail']['keyword_id']
try:
linked_keyword_id = (Keyword.objects.values_list('id', flat=True)
.get(domain=domain_link.linked_domain, upstream_id=upstream_id))
except Keyword.DoesNotExist:
if domain_has_privilege(self.upstream_domain, RELEASE_MANAGEMENT):
linked_keyword_id = create_linked_keyword(domain_link, upstream_id)
else:
return self._error_tuple(
_('Could not find linked keyword in {domain}. '
'Please check that the keyword has been linked from the '
'<a href="{keyword_url}">Keyword Page</a>.').format(
domain=domain_link.linked_domain,
keyword_url=(
get_url_base() + reverse(
KeywordsListView.urlname, args=[domain_link.master_domain]
))
),
_('Could not find linked keyword. Please check the keyword has been linked.'),
)
update_keyword(domain_link, linked_keyword_id)
domain_link.update_last_pull(
MODEL_KEYWORD,
user_id,
model_detail=KeywordLinkDetail(keyword_id=str(linked_keyword_id)).to_json(),
)
def _release_model(self, domain_link, model, user):
update_model_type(domain_link, model['type'], model_detail=model['detail'])
domain_link.update_last_pull(model['type'], user._id, model_detail=model['detail'])
def _error_tuple(self, html, text=None):
text = text or html
return (html, text)
@task(queue='linked_domain_queue')
def release_domain(upstream_domain, downstream_domain, username, models, build_apps=False):
manager = ReleaseManager(upstream_domain, username)
domain_link = get_upstream_domain_link(downstream_domain)
if not domain_link or domain_link.master_domain != upstream_domain:
        manager.add_error(downstream_domain, _("Project space {} is no longer linked to {}. No content "
                                               "was released to it.").format(downstream_domain, upstream_domain))
return manager.results()
for model in models:
errors = None
try:
if model['type'] == MODEL_APP:
errors = manager._release_app(domain_link, model, manager.user, build_apps)
elif model['type'] == MODEL_REPORT:
errors = manager._release_report(domain_link, model, manager.user._id)
elif model['type'] in FEATURE_FLAG_DATA_MODEL_TOGGLES:
errors = manager._release_flag_dependent_model(domain_link, model, manager.user,
FEATURE_FLAG_DATA_MODEL_TOGGLES[model['type']])
elif model['type'] == MODEL_KEYWORD:
errors = manager._release_keyword(domain_link, model, manager.user._id)
else:
manager._release_model(domain_link, model, manager.user)
except Exception as e: # intentionally broad
errors = [str(e), str(e)]
notify_exception(None, "Exception pushing linked domains: {}".format(e))
if errors:
manager.add_error(
domain_link.linked_domain,
_("Could not update {}: {}").format(model['name'], errors[0]),
text=_("Could not update {}: {}").format(model['name'], errors[1]))
else:
manager.add_success(domain_link.linked_domain, _("Updated {} successfully").format(model['name']))
return manager.results()
@task(queue='linked_domain_queue')
def send_linked_domain_release_email(results, upstream_domain, username, models, downstream_domains):
manager = ReleaseManager(upstream_domain, username)
# chord sends a list of results only if there were multiple tasks
if len(downstream_domains) == 1:
results = [results]
for result in results:
(successes, errors) = result
manager.update_successes(successes)
manager.update_errors(errors)
subject = _("Linked project release complete.")
if manager.get_error_domain_count():
subject += _(" Errors occurred.")
email = manager.user.email or manager.user.username
send_html_email_async(
subject,
email,
manager.get_email_message(models, downstream_domains, html=True),
text_content=manager.get_email_message(models, downstream_domains, html=False),
email_from=settings.DEFAULT_FROM_EMAIL
)
| 42.859477
| 112
| 0.66077
|
58bebd846bd6fa648cfab6ab1056ad10d8415453
| 664
|
py
|
Python
|
charmhelpers/contrib/hardening/ssh/__init__.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 19
|
2016-04-22T10:00:05.000Z
|
2022-02-09T07:34:12.000Z
|
charmhelpers/contrib/hardening/ssh/__init__.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 313
|
2017-09-15T13:22:58.000Z
|
2022-02-25T17:55:01.000Z
|
charmhelpers/contrib/hardening/ssh/__init__.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 136
|
2017-09-19T13:37:33.000Z
|
2022-03-29T11:08:00.000Z
|
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
| 36.888889
| 74
| 0.768072
|
ecfc0f424841bb63d0e0d3bfa57f6fe66e971d71
| 36,574
|
py
|
Python
|
code/pngsuite.py
|
topin89/pypng
|
dc5df017245b2366133d863e37e9429f80af5a6c
|
[
"MIT"
] | null | null | null |
code/pngsuite.py
|
topin89/pypng
|
dc5df017245b2366133d863e37e9429f80af5a6c
|
[
"MIT"
] | null | null | null |
code/pngsuite.py
|
topin89/pypng
|
dc5df017245b2366133d863e37e9429f80af5a6c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# pngsuite.py
# PngSuite Test PNGs.
# https://docs.python.org/3.2/library/argparse.html
import argparse
import sys
"""
After you import this module with "import pngsuite" use
``pngsuite.basi0g01`` to get the bytes for a particular PNG image, or
use ``pngsuite.png`` to get a dict() of them all.
Also a delicious command line tool.
"""
def _dehex(s):
"""Liberally convert from hex string to binary string."""
import re
import binascii
# Remove all non-hexadecimal digits
s = re.sub(br'[^a-fA-F\d]', b'', s)
return binascii.unhexlify(s)
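# For example, _dehex(b"89 50\n4e 47") == b'\x89PNG': anything that is not a
# hex digit (spaces, newlines, stray punctuation) is stripped before decoding.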
# Copies of PngSuite test files taken
# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
# on 2009-02-19 by drj and converted to hex.
# Some of these are not actually in PngSuite (but maybe they should
# be?), they use the same naming scheme, but start with a capital
# letter.
png = {
'basi0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
a18c7ffd0094e3511d661822f20000000049454e44ae426082
"""),
'basi0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
"""),
'basi0g04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
13811f2781eba9d34d07160000000049454e44ae426082
"""),
'basi0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
"""),
'basi0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
00000049454e44ae426082
"""),
'basi2c08': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
db803146578337df4d0a3121fc3d330000000049454e44ae426082
"""),
'basi2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
14deb3df1344f70000000049454e44ae426082
"""),
'basi3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
02f80f73fefe1072afc1e50000000049454e44ae426082
"""),
'basi6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
0049454e44ae426082
"""),
'basn0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
ae426082
"""),
'basn0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
49454e44ae426082
"""),
# A version of basn0g04 dithered down to 3 bits.
'Basn0g03': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
426082
"""),
'basn0g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000004849444154789c6360601014
545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
01eaff41fa0000000049454e44ae426082
"""),
'basn0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000000467414d41000186a031e8965f0000004149444154789c6364602400
1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
000049454e44ae426082
"""),
'basn0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
454e44ae426082
"""),
'basn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
5e0f40cf4b0000000049454e44ae426082
"""),
'basn2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
c4ec0000000049454e44ae426082
"""),
'basn3p04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200403000000815467
c70000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000002d504c54452200ff00ffff8800ff22ff000099ffff6600dd00ff77ff00
ff000000ff99ddff00ff00bbffbb000044ff00ff44d2b049bd00000047494441
54789c63e8e8080d3d7366d5aaf27263e377ef66ce64204300952b28488e002a
d7c5851c0154eeddbbe408a07119c81140e52a29912380ca4d4b23470095bb7b
37190200e0c4ead10f82057d0000000049454e44ae426082
"""),
'basn4a16': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020100400000089e36e
3c0000000467414d41000186a031e8965f0000085549444154789cc5975f685b
e719c67f968fa4a363ebf84875524754ae9d283885121aba42ba2d17b1bd8e50
d22e253412bbc8e4d042694b618977119d8b5d48be98938bd0f4a2c9901658b0
1a028366258524a68cd27a84d2e2956da169ea4aade219574791ed63fd399677
f17e19a174d73518994fc7d2fb3eeff33ecff30160656158873da760d48217ce
c2b10138fe47c80ec1d93fc3c55df0de65f8e809f8e75fe1ee5e58bf2ebf77f7
cad9474fc8331777c9ff6487e4338e0dc8678e5af21dc3ba7c27806665a1665b
b9ae19f015a1bb025a1102bb217008f42684de86e6756817c1d36063043acf02
6fc887749272e669d05e90679b29589f04f710ac5d825503ea15a8a7a056805a
0aac6c2dd335ac43ad60e59c54241b75e121171e5aff3faf3f7006f09d01df85
bef7fa4367eab56a4064c6b1ad742da35959e9bccb85aa61657d27a13b03fed3
10c8807e124219e8c9403303ed0c7827a19381cd8c4220075e0eda53d0cc4123
076e0ed672b03205f51cd472e0e4a03a0551b76647526066418b6405769f0bbe
93b03c15c9fae6401b03ff97a05f84d022f48c41c383d687e09d868dc3b0f988
14b07158ce5a1fca33ee53b0f63aacdc807bc7c0d902d583b03c0bfd271d3be2
42df0c9831c501ad08da2fa473df1c2ccd5a59dfa3a0ed83600cf4bd601c8170
1a1a67a13d011bfdb0f91355c03cb4af40230febbf83d502d4d7a0f62fa8b660
f9362ccdc2d6d19a1dcd805505f35de8bd8f406037f87f26b06b63e07b14160b
91acef0cf83f80e00a1825089f80f53a34df026f0536af4a01de889cadfb61f5
04d44be0bc00cb4761c984c5020ca41dbb3f01910c98af40b8083df30a81c021
089465e6fe2fa573df19a89856568b4370108c41080f8235088d4168ef81cea0
14d02e41a3046b25a8ff1d9c122c97e03f25a8942156afd95b3f836800fa7640
f85be89901e32f0a01bd09fa1e219c7e5160f77f005a1c4ae54856d340d7a1b7
172c0b5c175a2de874a480564bceea75a8566169092a1528956130eed80fd7a1
7f02ac0a847f0d3d69308a109a560884de86d02e617b6851661e5c91ce350dee
7c6565fdfbc1380ad6046c39068d51e8fc460a68e4616516aa0558cc43390f77
6ec0f6e19a1d8b41ff0a44d260cec936195f42a808c1fb1c685e07e35379b367
4c08679404765d07ff7eb8958f64838f415f0db66c037714bc5352803b0ad549
b85b83858fe1561e46261c3bfe356cdd0a913a9813d0db034606f42404672038
ae106817a115973d6f78c2f6f00999796faf741e7c0ce627adac5186fe323c6a
43fb7329a06643250e5f7c02f371d83d5db3879e86810b108d82b902bd6908fd
01f46720f80f0814c17f1f014f83f66b2232ad0f65d5d6eb4238cb12d8fb6a60
94612e1ec94612309c8046420a58bc06ffbe0d73b7616fd9b1773e09db2c88a6
c134a1a70ea134e86310f839f89f077f11344f21b031021bd744e1bcd3b2e7cd
b784edae2b33dfb24d3a8f24e0ea6d2b1bdf0f3f3d2a057c7e0eaebe0f071235
7b571962a7207a17c2436018a07f07c157c17f10b4e3a0dd84ee19e8bea510e8
3c0b1d43e475e3b0888cb722abd66a09e1dc51817d3801f1fd70ee7c243b3e2e
059c3b0f2fbfe4d88f9761a00cd63418b3a02f402000fe05d0d2d0bd5b89dd2f
45fe7def290478033693a2ed9b8f88c26d5e953def7484edde29997923219d8f
8fc38b47c4542fbd53b3b76f87be0ba07f03fe53a04d80ef4fe0f381af0e5d13
d0d5075d23d0f537e82a0267c0c78ffca3d56cf1f38e21aeb67158b4dd1b1185
6bb564cfdd5161fbe23599f9b9f3d239c08b47e0e597e0f1320cec03eb841ac1
1d350213b4bc1ac165358224f86cd01cfb0112e61409af28129684842bb3b2e7
95b8b0fdeafb32f3eddba58b975f92820e2460571c629310cd3f40c230040b8a
843945c2e7a07b8f42e07f6b38a5d6302fc6b25652f25a1091f9e21359b50389
9afd7859660ed2f981045cbd0d4e1c76feea7b6bb80d4279d05f834053ad614a
ada1634b8c6a855498f094a59e1063a956455e173e1691d95b76ec5d8aedfa37
52c0c03ee9dc89c35c1cdc69b8f7a0108d40ef2908dd005d53429404ff9042a0
791d9a9faa24f394f2f392b8dad29268fbadbc28dcce2765cfad69613bc8cc63
93d2b93b0df393d09c00f76b585d854818cc02f4be03c64d25c54925c58ead02
e4ef558c7a5dc284f382586aa522c63232e1d8434f2b68ef0ac9b40929c09895
996fb3a4f3e68414dc1e8646035c13dcbc32a3379519a520682b04d627c10da9
0c774392ccf251f1f352595c2dfeb582342de4d21764cf41d81e1e92f7062e48
e7ed61b8f315781e34c3d02c40a302e19cb2e32484ee6f817b08d6ca1220ef1d
9318b5644a98188c3b762c26ae168d0aa90c43d6cba75424109033d394675657
a573cf93063c13da796806a0b1031adf422309465021b0760956de94f4ea6c91
0cb7589024f3705df9795d5cada72edaee5f108503d9733d2c6c374764e6ae29
9d7b26544ce8a4c14b28e77d055a2968cd2b04560da81b129dab0725400ea41d
7beb6792642269e5e76971b5e0aba2ed5d8a035a5ef63c9417b69b059979b320
9d77d2d2506714bc2bd0ae423b09ade71402f50adc7325b72fabf4da9f900c67
55843cbd3dcacfc74450ba778bb683fced3f287b1eba216c37e764e6cd8074de
1995c63a39d8f82d7849f0620a817a0a9c19b934f49f74ec6846d26bdf0e95e1
322ac93c237eae1d1757eb1a51055c16850b3465cf8d9bc2f6704e66de2e4ae7
9d1c2c4f41c7864e0a366cf8f1af668e2d17c5c88c634752eac6f2aecaed332a
bd1625c3058a9264bad545b6ab2805f892a2edfe94285c30297b6e2485edad94
ccdc4b4ae79b33e0a46033ab3860656b192b2d7735332637969e79c9eda16949
afc17195e13c4932bef78033aa005b198b27f21a1c179109d9b26aad79219c17
13d83b69f9f29a0dff052002c70fc3e1ac750000000049454e44ae426082
"""),
'basn6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
011bf00052201a9c160fb84c0000000049454e44ae426082
"""),
'cs3n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f0000000373424954030303a392a042
00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
426082
"""),
'f02n0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000012a49444154789c85d12f4b83511805f0c3f938168b2088200882410c
03834dd807182c588749300c5604c30b0b03c360e14d826012c162b1182c8241
100441f47dee5fc3a6f7b9efc2bdf9c7e59cf370703a3caf26d3faeae6f6fee1
f1e9f9e5f5edfde3f3ebbb31d6f910227f1a6944448c31d65aebac77de7b1f42
883146444a41b029084a41500a825210340541d1e2607f777b733d13344a7401
00c8046d127da09a4ceb5cd024010c45446a40e5a04d029827055452da247ac7
f32e80ea42a7c4a20ba0dad22e892ea0f6a06b8b3e50a9c5e85ae264d1e54fd0
e762040cb2d5e93331067af95de8b4980147adcb3128710d74dab7a54fe20ec0
ec727c313a53822109fc3ff50743122bab6b1b5b3b7b9d439d834189e5d54518
0b82b120180b82b1208882200ae217e9e497bfbfccebfd0000000049454e44ae
426082
"""),
's09n3p02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
437f230000000049454e44ae426082
"""),
'tbgn3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
abab110000222200737300999999510d00444400959500959595e6e600919191
8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
49454e44ae426082
"""),
'Tp2n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
f874524e53000000000000000008080808080808081010101010101010181818
1818181818202020202020202029292929292929293131313131313131393939
393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
44ae426082
"""),
'tbbn1g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
426082
"""),
'tbrn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
426082
"""),
'basn6a16': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
00000049454e44ae426082
"""),
}
# Make each of the dict entries also be a module entry.
sys.modules[__name__].__dict__.update(png)
def binary_stdout():
"""
A sys.stdout that accepts bytes.
"""
stdout = sys.stdout.buffer
# On Windows the C runtime file orientation needs changing.
if sys.platform == "win32":
import msvcrt
import os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return stdout
def main(argv=None):
parser = argparse.ArgumentParser(
description="Output a PNG file from the PNG suite")
either = parser.add_mutually_exclusive_group(required=True)
either.add_argument('--list', action='store_true')
either.add_argument('image', nargs='?')
    args = parser.parse_args(argv)
if args.list:
for name in sorted(png):
print(name)
return 0
if args.image not in png:
raise ValueError("cannot find PNG suite image " + args.image)
binary_stdout().write(png[args.image])
if __name__ == '__main__':
sys.exit(main())
| 54.506706
| 69
| 0.953464
|
50838ee5f1c9a477e977f0b9a082f3e9f2023439
| 724
|
py
|
Python
|
test/conftest.py
|
kavandev/hk1980
|
0a22fca303bcc1e5cf1eedd45dc92f9854c7466e
|
[
"MIT"
] | 1
|
2022-01-30T08:11:43.000Z
|
2022-01-30T08:11:43.000Z
|
test/conftest.py
|
kavandev/hk1980
|
0a22fca303bcc1e5cf1eedd45dc92f9854c7466e
|
[
"MIT"
] | 1
|
2022-01-30T10:50:01.000Z
|
2022-01-30T10:50:01.000Z
|
test/conftest.py
|
kavandev/hk1980
|
0a22fca303bcc1e5cf1eedd45dc92f9854c7466e
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture
def valid_wgs84_point():
return (22.2580467, 114.00876443) # NOTE: same location as valid_hk80_point
@pytest.fixture
def valid_hk80_point():
return (813259.700, 818940.160) # NOTE: same location as valid_wgs84_point
@pytest.fixture
def invalid_wgs84_point():
return (122.302711, 1114.177216)
@pytest.fixture
def invalid_hk80_point():
return (1793259.700, 1848940.160)
@pytest.fixture
def min_hk80_point():
return (799500, 799000)
@pytest.fixture
def max_hk80_point():
return (867500, 848000)
@pytest.fixture
def min_wgs84_point():
return (22.074428895, 113.491375844)
@pytest.fixture
def max_wgs84_point():
return (22.341764663, 114.285001156)
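# Illustrative only -- a hypothetical test showing how pytest injects these
# fixtures by name (it deliberately avoids assuming anything about the hk1980 API):
#
#   def test_valid_points_are_in_hong_kong(valid_wgs84_point, valid_hk80_point):
#       lat, lon = valid_wgs84_point          # WGS84 (latitude, longitude)
#       easting, northing = valid_hk80_point  # HK80 grid (easting, northing)
#       assert 22.0 < lat < 22.6 and 113.8 < lon < 114.5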
| 17.238095
| 80
| 0.734807
|
ca7133b4aedef6a4ef81897b4fdd362ba341da5f
| 716
|
py
|
Python
|
utils/average_meter.py
|
awwong1/pytorch-model-compression
|
739355f3f6874fc0d480f479d358fa2a5b54ad61
|
[
"MIT"
] | 4
|
2019-05-29T23:02:52.000Z
|
2021-01-22T10:07:13.000Z
|
utils/average_meter.py
|
awwong1/pytorch-model-compression
|
739355f3f6874fc0d480f479d358fa2a5b54ad61
|
[
"MIT"
] | 7
|
2020-03-24T17:07:23.000Z
|
2022-01-13T01:14:59.000Z
|
utils/average_meter.py
|
awwong1/pytorch-model-compression
|
739355f3f6874fc0d480f479d358fa2a5b54ad61
|
[
"MIT"
] | 1
|
2021-01-22T10:07:16.000Z
|
2021-01-22T10:07:16.000Z
|
class AverageMeter(object):
"""
Computes and stores the average and current value
https://github.com/pytorch/examples/blob/5b1f45057dc14a5e2132b45233c258a1dc2a0aab/imagenet/main.py#L351
"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
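# Minimal usage sketch (hypothetical values, not from the original repo):
#
#   meter = AverageMeter('loss', fmt=':.4f')
#   meter.update(0.75, n=32)   # batch of 32 samples with mean loss 0.75
#   meter.update(0.50, n=32)
#   print(meter)               # -> "loss 0.5000 (0.6250)"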
| 25.571429
| 107
| 0.561453
|
25fa776eac1943e203444531df4f85250503f1d4
| 3,123
|
py
|
Python
|
tests/test_qm.py
|
bjrnfrdnnd/panel-test
|
4609a259e749825b2a2012d8a7e48ed8e8a78deb
|
[
"MIT"
] | null | null | null |
tests/test_qm.py
|
bjrnfrdnnd/panel-test
|
4609a259e749825b2a2012d8a7e48ed8e8a78deb
|
[
"MIT"
] | 1
|
2019-07-26T22:12:19.000Z
|
2019-10-31T17:48:51.000Z
|
tests/test_qm.py
|
bjrnfrdnnd/panel-test
|
4609a259e749825b2a2012d8a7e48ed8e8a78deb
|
[
"MIT"
] | 1
|
2019-09-19T11:54:45.000Z
|
2019-09-19T11:54:45.000Z
|
import pathlib
import numpy as np
from nmrtools.qm import (cache_tm, hamiltonian_dense, hamiltonian_sparse,
nspinspec_dense, nspinspec_sparse, so_sparse,
spectrum)
from tests.accepted_data import HAMILTONIAN_RIOUX, SPECTRUM_RIOUX
from tests.simulation_data import rioux
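# In the tests below, `fs` is the pyfakefs pytest fixture: it swaps the real
# filesystem for an in-memory fake, so the cached .npz files are created in
# (and asserted against) a fake directory tree instead of on disk.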
def test_so_sparse_creates_files(fs):
test_bin = (pathlib.Path(__file__)
.resolve()
.parent.parent
.joinpath('nmrtools', 'bin'))
fs.create_dir(test_bin)
expected_Lz = test_bin.joinpath('Lz3.npz')
expected_Lproduct = test_bin.joinpath('Lproduct3.npz')
assert not expected_Lz.exists()
assert not expected_Lproduct.exists()
Lz, Lproduct = so_sparse(3) # noqa
assert expected_Lz.exists()
assert expected_Lproduct.exists()
def test_cache_tm_creates_file(fs):
test_bin = (pathlib.Path(__file__)
.resolve()
.parent.parent
.joinpath('nmrtools', 'bin'))
fs.create_dir(test_bin)
expected_T = test_bin.joinpath('T3.npz')
assert not expected_T.exists()
T = cache_tm(3)
assert T
assert expected_T.exists()
def test_hamiltonian_dense():
# GIVEN v and J inputs for the Rioux 3-spin system
v, J = rioux()
# WHEN hamiltonian_dense is used to calculate the Hamiltonian
H_dense = hamiltonian_dense(v, J)
# THEN it matches the Hamiltonian result using the old accepted algorithm
assert np.array_equal(H_dense, HAMILTONIAN_RIOUX)
def test_hamiltonian_sparse():
# GIVEN v and J inputs for the Rioux 3-spin system
v, J = rioux()
    # WHEN hamiltonian_sparse is used to calculate the Hamiltonian
H_sparse = hamiltonian_sparse(v, J)
# THEN it matches the Hamiltonian result using the old accepted algorithm
assert np.array_equal(H_sparse.todense(), HAMILTONIAN_RIOUX) # noqa
def test_nspinspec_dense():
# GIVEN v and J inputs for the Rioux 3-spin system
v, J = rioux()
# WHEN nspinspec_dense is called with those inputs
result = nspinspec_dense(v, J)
# THEN the resulting spectrum matches that using the old algorithm
assert np.allclose(result, SPECTRUM_RIOUX)
def test_nspinspec_sparse():
# GIVEN v and J inputs for the Rioux 3-spin system
v, J = rioux()
# WHEN nspinspec_sparse is called with those inputs
result = nspinspec_sparse(v, J)
# THEN the resulting spectrum matches that using the old algorithm
assert np.allclose(result, SPECTRUM_RIOUX)
def test_nspinspec_spectrum():
# GIVEN v and J inputs
v, J = rioux()
# WHEN spectrum is called with v and J and all possible cache/sparse
spectrum_TT = spectrum(v, J, cache=True, sparse=True, normalize=True)
spectrum_FT = spectrum(v, J, cache=False, sparse=True, normalize=True)
spectrum_TF = spectrum(v, J, cache=True, sparse=False, normalize=True)
spectrum_FF = spectrum(v, J, cache=False, sparse=False, normalize=True)
# THEN they all match the expected result
for s in [spectrum_TT, spectrum_FT, spectrum_TF, spectrum_FF]:
assert np.allclose(s, SPECTRUM_RIOUX)
| 36.313953
| 77
| 0.697086
|
98941e686c40030f5c715ac302417af6892068bf
| 2,893
|
py
|
Python
|
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateOTADynamicUpgradeJobRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateOTADynamicUpgradeJobRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateOTADynamicUpgradeJobRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTADynamicUpgradeJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTADynamicUpgradeJob','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RetryCount(self):
return self.get_query_params().get('RetryCount')
def set_RetryCount(self,RetryCount):
self.add_query_param('RetryCount',RetryCount)
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_FirmwareId(self):
return self.get_query_params().get('FirmwareId')
def set_FirmwareId(self,FirmwareId):
self.add_query_param('FirmwareId',FirmwareId)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_RetryInterval(self):
return self.get_query_params().get('RetryInterval')
def set_RetryInterval(self,RetryInterval):
self.add_query_param('RetryInterval',RetryInterval)
def get_SrcVersions(self):
return self.get_query_params().get('SrcVersions')
def set_SrcVersions(self, SrcVersions):
for depth1 in range(len(SrcVersions)):
if SrcVersions[depth1] is not None:
self.add_query_param('SrcVersion.' + str(depth1 + 1) , SrcVersions[depth1])
def get_MaximumPerMinute(self):
return self.get_query_params().get('MaximumPerMinute')
def set_MaximumPerMinute(self,MaximumPerMinute):
self.add_query_param('MaximumPerMinute',MaximumPerMinute)
| 35.280488
| 85
| 0.769098
|
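A hedged usage sketch following the usual aliyun-python-sdk-core calling pattern; the credentials, region and identifiers below are placeholders rather than real values, and the response format depends on the IoT service.
from aliyunsdkcore.client import AcsClient
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
request = CreateOTADynamicUpgradeJobRequest()
request.set_ProductKey('<product-key>')
request.set_FirmwareId('<firmware-id>')
request.set_SrcVersions(['1.0.0', '1.1.0'])  # expands to SrcVersion.1, SrcVersion.2 query params
response = client.do_action_with_exception(request)
print(response)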
ce92fd71c540ea505e18744ad94199651d40fb05
| 1,484
|
py
|
Python
|
codev/template-library/regex/ip-address/re-ipv6.py
|
creative-sensor/echo-home
|
14ed0da26e455ff19425ea9fc2a5e81bba8ca2c2
|
[
"MIT"
] | null | null | null |
codev/template-library/regex/ip-address/re-ipv6.py
|
creative-sensor/echo-home
|
14ed0da26e455ff19425ea9fc2a5e81bba8ca2c2
|
[
"MIT"
] | 2
|
2021-05-11T10:33:39.000Z
|
2022-01-22T11:35:20.000Z
|
codev/template-library/regex/ip-address/re-ipv6.py
|
creative-sensor/echo-home
|
14ed0da26e455ff19425ea9fc2a5e81bba8ca2c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------||| IPv6 REGEX PANTHEON |||---------------------------
FULL = '([A-Fa-f0-9]{1,4}):(([A-Fa-f0-9]{1,4}):){6}([A-Fa-f0-9]{1,4})'
MIDDLE_ZERO_61 = '(([A-Fa-f0-9]{1,4}:){6}(:[A-Fa-f0-9]{1,4}){1})'
MIDDLE_ZERO_52 = '(([A-Fa-f0-9]{1,4}:){5}(:[A-Fa-f0-9]{1,4}){1,2})'
MIDDLE_ZERO_43 = '(([A-Fa-f0-9]{1,4}:){4}(:[A-Fa-f0-9]{1,4}){1,3})'
MIDDLE_ZERO_34 = '(([A-Fa-f0-9]{1,4}:){3}(:[A-Fa-f0-9]{1,4}){1,4})'
MIDDLE_ZERO_25 = '(([A-Fa-f0-9]{1,4}:){2}(:[A-Fa-f0-9]{1,4}){1,5})'
MIDDLE_ZERO_16 = '([A-Fa-f0-9]{1,4}:(:[A-Fa-f0-9]{1,4}){1,6})'
TAIL_ZERO = '(([A-Fa-f0-9]{1,4}:){1,6}:)'
LEADING_ZERO = '(:(:[A-Fa-f0-9]{1,4}){1,6})'
ALL_ZERO = '(::)'
LONGEST_MATCH_PRIORITY = [
FULL,
MIDDLE_ZERO_61,
MIDDLE_ZERO_52,
MIDDLE_ZERO_43,
MIDDLE_ZERO_34,
MIDDLE_ZERO_25,
MIDDLE_ZERO_16,
TAIL_ZERO,
LEADING_ZERO,
ALL_ZERO
]
PATTERN = "|".join(LONGEST_MATCH_PRIORITY)
#-------------------------------------------------------------------------------
import re
#print("PATTERN: " + PATTERN)
regex = re.compile(PATTERN)
with open("./test-cases.ipv6",'r') as f:
for line in f:
if len(line) > 1 : prefix = line.split()[0]
#result = regex.findall(line)
result = regex.search(line)
#if result : print(prefix + " " + str(result))
if result : print(prefix + " " + result.group())
| 30.285714
| 80
| 0.463612
|
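A quick, illustrative sanity check of the combined PATTERN above (assuming the constants from that file are in scope); Python's alternation takes the first alternative that matches at a position, which is why the uncompressed FULL form heads the priority list.
import re
ipv6 = re.compile(PATTERN)
print(ipv6.search('2001:db8:0:0:0:0:2:1').group())  # full 8-group form, matched by FULL
print(ipv6.search('fe80::1').group())                # compressed '::' form, matched by a MIDDLE_ZERO alternative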
994afe8b757117755549cae77b8238fb5b0a184c
| 1,039
|
py
|
Python
|
src/wanderbot/red_light_green_light.py
|
plskeggs/ros_wanderbot
|
697860a3ce7ba6d311f1386ff3c708661d53549a
|
[
"Apache-2.0"
] | null | null | null |
src/wanderbot/red_light_green_light.py
|
plskeggs/ros_wanderbot
|
697860a3ce7ba6d311f1386ff3c708661d53549a
|
[
"Apache-2.0"
] | null | null | null |
src/wanderbot/red_light_green_light.py
|
plskeggs/ros_wanderbot
|
697860a3ce7ba6d311f1386ff3c708661d53549a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.init_node('red_light_green_light')
red_light_twist = Twist()
green_light_twist = Twist()
green_light_twist.linear.x = 0.5
driving_forward = False
while rospy.get_time() == 0:
if rospy.is_shutdown():
break
print 'Now:', rospy.Time.now()
light_change_time = rospy.Time.now() + rospy.Duration.from_sec(3)
rate = rospy.Rate(10)
print 'red light green light starting...'
print 'stopped'
while not rospy.is_shutdown():
if driving_forward:
cmd_vel_pub.publish(green_light_twist)
else:
cmd_vel_pub.publish(red_light_twist)
    if light_change_time < rospy.Time.now():
driving_forward = not driving_forward
if driving_forward:
print 'forward'
else:
print 'stopped'
light_change_time = rospy.Time.now() + rospy.Duration.from_sec(3)
rate.sleep()
print 'Now:', rospy.Time.now(), ' Next:', light_change_time
| 28.081081
| 73
| 0.694899
|
7a2add1f7deb4034fd413e72e4fb68733912dea6
| 252
|
py
|
Python
|
Algorithmic Toolbox/Dynamic Programming/Primitive Calculator/primitive_calculator.py
|
ganeshbhandarkar/Python-Projects
|
a4df933122a6694d249c69d1e8e95b592cf036a0
|
[
"MIT"
] | 9
|
2020-07-02T06:06:17.000Z
|
2022-02-26T11:08:09.000Z
|
Algorithmic Toolbox/Dynamic Programming/Primitive Calculator/primitive_calculator.py
|
ganeshbhandarkar/Python-Projects
|
a4df933122a6694d249c69d1e8e95b592cf036a0
|
[
"MIT"
] | 1
|
2021-11-04T17:26:36.000Z
|
2021-11-04T17:26:36.000Z
|
Algorithmic Toolbox/Dynamic Programming/Primitive Calculator/primitive_calculator.py
|
ganeshbhandarkar/Python-Projects
|
a4df933122a6694d249c69d1e8e95b592cf036a0
|
[
"MIT"
] | 8
|
2021-01-31T10:31:12.000Z
|
2022-03-13T09:15:55.000Z
|
# python3
def compute_operations(n):
assert 1 <= n <= 10 ** 6
    # allowed operations are +1, x2 and x3; ops[i] = fewest operations to build i from 1,
    # parent[i] = the value i was reached from on one optimal path
    ops = [0] * (n + 1)
    parent = [0] * (n + 1)
    for i in range(2, n + 1):
        choices = [i - 1] + [i // d for d in (2, 3) if i % d == 0]
        parent[i] = min(choices, key=lambda j: ops[j])
        ops[i] = ops[parent[i]] + 1
    sequence = []
    while n:
        sequence.append(n)
        n = parent[n]
    return sequence[::-1]
if __name__ == '__main__':
input_n = int(input())
output_sequence = compute_operations(input_n)
print(len(output_sequence) - 1)
print(*output_sequence)
| 18
| 49
| 0.654762
|
5227032e7056551172b4de15ef1fd442e55a0e0f
| 929
|
py
|
Python
|
demos/rotating_bar.py
|
Sh0cktr4p/PhiFlow
|
cc87c5887bc3abfa1ef3c03252122a06e9fd2c18
|
[
"MIT"
] | null | null | null |
demos/rotating_bar.py
|
Sh0cktr4p/PhiFlow
|
cc87c5887bc3abfa1ef3c03252122a06e9fd2c18
|
[
"MIT"
] | null | null | null |
demos/rotating_bar.py
|
Sh0cktr4p/PhiFlow
|
cc87c5887bc3abfa1ef3c03252122a06e9fd2c18
|
[
"MIT"
] | 1
|
2021-09-15T11:14:42.000Z
|
2021-09-15T11:14:42.000Z
|
""" Rotating Bar
This demo shows how to simulate fluid flow with moving or rotating obstacles.
"""
from phi.flow import *
DOMAIN = Domain(x=100, y=100, boundaries=OPEN, bounds=Box[0:100, 0:100])
DT = 1.0
obstacle = Obstacle(Box[47:53, 20:70], angular_velocity=0.05)
obstacle_mask = DOMAIN.scalar_grid(obstacle.geometry) # to show in user interface
velocity = DOMAIN.staggered_grid((1, 0))
for frame in ModuleViewer(framerate=10, display=('velocity', 'obstacle_mask'), autorun=True).range():
obstacle = obstacle.copied_with(geometry=obstacle.geometry.rotated(-obstacle.angular_velocity * DT)) # rotate bar
velocity = advect.mac_cormack(velocity, velocity, DT)
velocity, pressure, _iter, _ = fluid.make_incompressible(velocity, DOMAIN, (obstacle,), solve_params=math.LinearSolve(absolute_tolerance=1e-2, max_iterations=1e5))
print(f"{frame}: {_iter}")
obstacle_mask = DOMAIN.scalar_grid(obstacle.geometry)
| 48.894737
| 167
| 0.751346
|
20dafde7408e1bfe04014ba0110a7201db492a90
| 140
|
py
|
Python
|
mysite/mysite/views.py
|
manavpradhan/E-Commerce-website
|
92120663b2f1c48bb4940e7459f4f69974af6503
|
[
"MIT"
] | null | null | null |
mysite/mysite/views.py
|
manavpradhan/E-Commerce-website
|
92120663b2f1c48bb4940e7459f4f69974af6503
|
[
"MIT"
] | null | null | null |
mysite/mysite/views.py
|
manavpradhan/E-Commerce-website
|
92120663b2f1c48bb4940e7459f4f69974af6503
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse as hr
from django.shortcuts import render
def start(request):
return render(request, 'start.html')
| 28
| 42
| 0.785714
|
b26d9e0ee78bdac35f1e787c42033c3b3c06e8b5
| 3,399
|
py
|
Python
|
data-hub-api/apps/companieshouse/tests/sources/test_matcher.py
|
uktrade/data-hub-api-old
|
5ecf093d88692870982a638ced45de6a82d55672
|
[
"MIT"
] | null | null | null |
data-hub-api/apps/companieshouse/tests/sources/test_matcher.py
|
uktrade/data-hub-api-old
|
5ecf093d88692870982a638ced45de6a82d55672
|
[
"MIT"
] | 18
|
2016-04-04T12:42:45.000Z
|
2016-09-01T07:21:05.000Z
|
data-hub-api/apps/companieshouse/tests/sources/test_matcher.py
|
uktrade/data-hub-api-old
|
5ecf093d88692870982a638ced45de6a82d55672
|
[
"MIT"
] | 1
|
2016-06-01T15:45:21.000Z
|
2016-06-01T15:45:21.000Z
|
from django.test.testcases import TestCase
from companieshouse.sources.matcher import BaseMatcher, FindingResult
class MyMatcher(BaseMatcher):
"""
    Testing Matcher that allows setting findings to check that the logic works
"""
def set_findings(self, findings):
self.findings = findings
class BaseMatcherTestCase(TestCase):
def setUp(self):
self.matcher = MyMatcher(
name='some company', postcode='SW1A 1AA'
)
def test_without_findings(self):
"""
No findings => return None
"""
self.matcher.set_findings([])
best_match = self.matcher.find()
self.assertEqual(best_match, None)
def test_with_one_finding(self):
"""
        Just one finding => return exactly that one
"""
finding = FindingResult(
company_number='0',
name='company name',
postcode='SW1A1AA',
proximity=1,
raw={'company_number': '0'}
)
self.matcher.set_findings([finding])
best_match = self.matcher.find()
self.assertTrue(best_match)
self.assertEqual(best_match, finding)
def test_with_some_findings(self):
"""
        More than one finding => return the one with the highest proximity
"""
finding_025 = FindingResult(
company_number='025',
name='company 025',
postcode='SW1A1AA',
proximity=0.25,
raw={'company_number': '025'}
)
finding_075 = FindingResult(
company_number='075',
name='company 075',
postcode='SW1A1AA',
proximity=0.75,
raw={'company_number': '075'}
)
finding_050 = FindingResult(
company_number='050',
name='company 050',
postcode='SW1A1AA',
proximity=0.5,
raw={'company_number': '050'}
)
self.matcher.set_findings([
finding_025, finding_075, finding_050
])
best_match = self.matcher.find()
self.assertTrue(best_match)
self.assertEqual(best_match, finding_075)
class GetCHPostcodeTestCase(TestCase):
def setUp(self):
self.matcher = MyMatcher(
name='some company', postcode='SW1A 1AA'
)
def test_with_no_props(self):
self.assertEqual(
self.matcher._get_ch_postcode({}),
None
)
def test_with_registered_address_prop(self):
data = {
'registered_office_address': {
'postal_code': 'SW1A 1AA'
}
}
self.assertEqual(
self.matcher._get_ch_postcode(data),
'SW1A 1AA'
)
def test_with_address_prop(self):
data = {
'address': {
'postal_code': 'SW1A 1AA'
}
}
self.assertEqual(
self.matcher._get_ch_postcode(data),
'SW1A 1AA'
)
def test_with_both_registered_address_and_address_props(self):
data = {
'registered_office_address': {
'postal_code': 'SW1A 1AA'
},
'address': {
'postal_code': 'SW1A 1AB'
}
}
self.assertEqual(
self.matcher._get_ch_postcode(data),
'SW1A 1AA'
)
| 26.76378
| 81
| 0.543395
|
50c4453ebba808e61ef6c95e7935a4a1aa928c3f
| 2,900
|
py
|
Python
|
horizon/horizon/dashboards/nova/access_and_security/tests.py
|
ttrifonov/horizon
|
293c2ee9f76231e12142a733f6f916c533648f8f
|
[
"Apache-2.0"
] | 1
|
2016-10-11T10:21:14.000Z
|
2016-10-11T10:21:14.000Z
|
horizon/horizon/dashboards/nova/access_and_security/tests.py
|
ttrifonov/horizon
|
293c2ee9f76231e12142a733f6f916c533648f8f
|
[
"Apache-2.0"
] | null | null | null |
horizon/horizon/dashboards/nova/access_and_security/tests.py
|
ttrifonov/horizon
|
293c2ee9f76231e12142a733f6f916c533648f8f
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from horizon import api
from horizon import test
class AccessAndSecurityTests(test.BaseViewTests):
def setUp(self):
super(AccessAndSecurityTests, self).setUp()
keypair = api.KeyPair(None)
keypair.name = 'keyName'
self.keypairs = (keypair,)
server = api.Server(None, self.request)
server.id = 1
server.name = 'serverName'
self.server = server
self.servers = (server, )
floating_ip = api.FloatingIp(None)
floating_ip.id = 1
floating_ip.fixed_ip = '10.0.0.4'
floating_ip.instance_id = 1
floating_ip.ip = '58.58.58.58'
self.floating_ip = floating_ip
self.floating_ips = (floating_ip,)
security_group = api.SecurityGroup(None)
security_group.id = '1'
security_group.name = 'default'
self.security_groups = (security_group,)
def test_index(self):
self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api, 'security_group_list')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)
api.tenant_floating_ip_list(IsA(http.HttpRequest)).\
AndReturn(self.floating_ips)
api.security_group_list(IsA(http.HttpRequest)).\
AndReturn(self.security_groups)
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:nova:access_and_security:index'))
self.assertTemplateUsed(res, 'nova/access_and_security/index.html')
self.assertItemsEqual(res.context['keypairs_table'].data,
self.keypairs)
self.assertItemsEqual(res.context['security_groups_table'].data,
self.security_groups)
self.assertItemsEqual(res.context['floating_ips_table'].data,
self.floating_ips)
| 36.708861
| 79
| 0.658276
|
1a7283140204b5cf8a1062a7e8563125ec69401c
| 479
|
py
|
Python
|
nomadgram/users/migrations/0008_auto_20180328_0129.py
|
zlyanz13/Yonwongram
|
a340f8ef215d3d8967e6977f89f46fbe2cc1a337
|
[
"MIT"
] | null | null | null |
nomadgram/users/migrations/0008_auto_20180328_0129.py
|
zlyanz13/Yonwongram
|
a340f8ef215d3d8967e6977f89f46fbe2cc1a337
|
[
"MIT"
] | null | null | null |
nomadgram/users/migrations/0008_auto_20180328_0129.py
|
zlyanz13/Yonwongram
|
a340f8ef215d3d8967e6977f89f46fbe2cc1a337
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-03-27 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0007_auto_20180322_0216'),
]
operations = [
migrations.AlterField(
model_name='user',
name='gender',
            field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not specified')], max_length=80, null=True),
),
]
| 25.210526
| 147
| 0.597077
|
058682eec430e51860afd24db0429b517087722d
| 1,277
|
py
|
Python
|
src/2019/second/C.py
|
veltzerdoron/GCJ
|
8976b95e1d3c1806ca10b3df5f0a506adef6f1b0
|
[
"MIT"
] | null | null | null |
src/2019/second/C.py
|
veltzerdoron/GCJ
|
8976b95e1d3c1806ca10b3df5f0a506adef6f1b0
|
[
"MIT"
] | null | null | null |
src/2019/second/C.py
|
veltzerdoron/GCJ
|
8976b95e1d3c1806ca10b3df5f0a506adef6f1b0
|
[
"MIT"
] | null | null | null |
from fractions import *
T = int(input())
for t in range(T):
n = int(input())
a = []
for i in range(n):
a.append([int(i) for i in input().split(' ')])
intersections = set()
inf = 10 ** 20
lower, upper = Fraction(0), Fraction(inf, 1)
impossible = False
for i in range(len(a) - 1):
x1, y1 = a[i]
x2, y2 = a[i + 1]
if y1 == y2:
if x1 >= x2:
impossible = True
break
elif y1 > y2:
upper = min(upper, Fraction(x2 - x1, y1 - y2))
else:
lower = max(lower, Fraction(x2 - x1, y1 - y2))
if not impossible and 0 <= lower < upper:
if upper == Fraction(inf, 1):
de = 1
else:
mean = (lower + upper) / 2
left = 0
right = 10 ** 20
while left + 1 < right:
mid = (left + right) // 2
tmp = mean.limit_denominator(mid)
if lower < tmp < upper:
right = mid
else:
left = mid
de = right
lower = lower * de
upper = upper * de
for i in range(int(lower) - 5, int(upper) + 5):
if lower < i < upper:
nu = i
break
else:
impossible = True
if impossible:
result = 'IMPOSSIBLE'
else:
result = '{} {}'.format(de, nu)
print('Case #{case}: {result}'.format(case= t + 1, result=result))
| 23.648148
| 68
| 0.509789
|
aca727ccc46f157ecb3d5230076b04b96a31f9ca
| 1,006
|
py
|
Python
|
kubernetes/test/test_v1_scale_io_volume_source.py
|
reymont/python
|
02a3a31c630c305527b328af49724f348fbdae15
|
[
"Apache-2.0"
] | 1
|
2018-10-20T19:37:57.000Z
|
2018-10-20T19:37:57.000Z
|
kubernetes/test/test_v1_scale_io_volume_source.py
|
reymont/python
|
02a3a31c630c305527b328af49724f348fbdae15
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_scale_io_volume_source.py
|
reymont/python
|
02a3a31c630c305527b328af49724f348fbdae15
|
[
"Apache-2.0"
] | 2
|
2018-07-27T19:39:34.000Z
|
2020-12-25T02:48:27.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
class TestV1ScaleIOVolumeSource(unittest.TestCase):
""" V1ScaleIOVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOVolumeSource(self):
"""
Test V1ScaleIOVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_scale_io_volume_source.V1ScaleIOVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| 22.355556
| 105
| 0.72167
|
9fd50f693a9ec17dee420093e7ccb3028018a556
| 893
|
py
|
Python
|
2015/2015-day05/nice.py
|
bennettp123/advent-of-code
|
07b2ada43ad16a842b010c852f456c3ed44b1562
|
[
"MIT"
] | null | null | null |
2015/2015-day05/nice.py
|
bennettp123/advent-of-code
|
07b2ada43ad16a842b010c852f456c3ed44b1562
|
[
"MIT"
] | null | null | null |
2015/2015-day05/nice.py
|
bennettp123/advent-of-code
|
07b2ada43ad16a842b010c852f456c3ed44b1562
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import re
strings = []
def vowels(s):
return [c for c in s if c in ('a', 'e', 'i', 'o', 'u')]
def repeats(s):
return re.search(r'(.)\1', s)
def blacklisted(s):
return [b for b in ('ab', 'cd', 'pq', 'xy') if b in s]
def is_nice(s):
return len(vowels(s)) > 2 and repeats(s) and not blacklisted(s)
def repeater(s):
return re.search(r'(..).*\1', s)
def repleater(s):
return re.search(r'(.).\1', s)
def is_nicer(s):
return repeater(s) and repleater(s)
if __name__ == '__main__':
with open('input', 'r') as f:
for line in f:
strings = strings + [line]
nice_strings = [s for s in strings if is_nice(s)]
nicer_strings = [s for s in strings if is_nicer(s)]
print('part 1: {0} nice strings found'.format(len(nice_strings)))
print('part 2: {0} nicer strings found'.format(len(nicer_strings)))
| 19
| 71
| 0.587906
|
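A few quick checks (to be run after the definitions above) using strings from the well-known puzzle examples; the expected outcomes in the comments follow directly from the rules encoded in is_nice and is_nicer.
assert is_nice('ugknbfddgicrmopn')       # three vowels, a double letter, no banned pair
assert not is_nice('haegwjzuvuyypxyu')   # rejected because it contains the banned pair 'xy'
assert is_nicer('qjhvhtzxzqqjkmpb')      # a two-letter pair repeats and a letter repeats with one between
assert not is_nicer('ieodomkazucvgmuy')  # a letter repeats with one between, but no pair appears twice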
ae52d2678db2790d90ca0a12fec91f04c83bd2a5
| 7,773
|
py
|
Python
|
ludwig/utils/batcher.py
|
mehrdad-shokri/ludwig
|
f167981683c067b50be6a3656cbf553efbf192e9
|
[
"Apache-2.0"
] | 2
|
2020-04-02T17:43:34.000Z
|
2021-11-09T07:20:31.000Z
|
ludwig/utils/batcher.py
|
mehrdad-shokri/ludwig
|
f167981683c067b50be6a3656cbf553efbf192e9
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:42:31.000Z
|
2022-02-10T00:16:09.000Z
|
ludwig/utils/batcher.py
|
mehrdad-shokri/ludwig
|
f167981683c067b50be6a3656cbf553efbf192e9
|
[
"Apache-2.0"
] | 2
|
2020-03-09T07:19:05.000Z
|
2020-03-09T07:20:42.000Z
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import numpy as np
from ludwig.utils.data_utils import shuffle_dict_unison_inplace, shuffle_inplace
class Batcher(object):
def __init__(self, dataset, batch_size=128, should_shuffle=True,
ignore_last=False):
self.should_shuffle = should_shuffle
# store our dataset as well
self.dataset = dataset
if should_shuffle:
shuffle_inplace(self.dataset.get_dataset())
self.ignore_last = ignore_last
self.batch_size = batch_size
self.total_size = dataset.size
self.steps_per_epoch = int(math.ceil(self.total_size / self.batch_size))
self.index = 0
self.step = 0
self.epoch = 0
def next_batch(self):
if self.last_batch():
if self.should_shuffle:
self.dataset = shuffle_dict_unison_inplace(self.dataset)
self.reset()
self.epoch += 1
sub_batch = {}
for features_name in self.dataset.features:
sub_batch[features_name] = self.dataset.get(
features_name,
range(
self.index,
min(self.index + self.batch_size, self.total_size)
)
)
self.index += self.batch_size
self.step += 1
return sub_batch
def last_batch(self):
return self.index >= self.total_size or (
self.ignore_last and
self.index + self.batch_size >= self.total_size)
def reset(self):
self.index = 0
self.step = 0
class BucketedBatcher(object):
def __init__(self, dataset, bucketing_field, batch_size=128, buckets=10,
should_shuffle=True, ignore_last=False,
should_trim=False, trim_side='right'):
self.should_shuffle = should_shuffle
self.bucketing_field = bucketing_field
self.should_trim = should_trim
self.trim_side = trim_side
# store our dataset as well
self.dataset = dataset
field = dataset.get_dataset()[bucketing_field]
field_lengths = np.apply_along_axis(lambda x: np.sign(x).sum(), 1,
field)
sorted_idcs = np.argsort(field_lengths)
self.buckets_idcs = []
datapoints_per_bucket = len(field) // buckets
for b in range(buckets):
start = datapoints_per_bucket * b
end = datapoints_per_bucket * (b + 1) if b < buckets - 1 else len(
sorted_idcs)
self.buckets_idcs.append(sorted_idcs[start:end])
if should_shuffle:
self.shuffle(self.buckets_idcs)
self.ignore_last = ignore_last
self.batch_size = batch_size
self.total_size = min(map(len, dataset.get_dataset().values()))
self.bucket_sizes = np.array([x for x in map(len, self.buckets_idcs)])
self.steps_per_epoch = int(
np.asscalar(np.sum(np.ceil(self.bucket_sizes / self.batch_size))))
self.indices = np.array([0] * buckets)
self.step = 0
self.epoch = 0
def shuffle(self, buckets_idcs):
for i in range(len(buckets_idcs)):
np.random.shuffle(buckets_idcs[i])
def next_batch(self):
if self.last_batch():
if self.should_shuffle:
self.shuffle(self.buckets_idcs)
self.reset()
self.epoch += 1
if self.ignore_last:
idcs_below_size = self.indices + self.batch_size < self.bucket_sizes
else:
idcs_below_size = self.indices < self.bucket_sizes
i = np.random.choice(
np.arange(0, len(self.buckets_idcs))[idcs_below_size])
selected_bucket = self.buckets_idcs[i]
selected_idcs = selected_bucket[
self.indices[i]:self.indices[i] + self.batch_size]
sub_batch = {}
for key in self.dataset.get_dataset():
if key == self.bucketing_field and self.should_trim:
selected_samples = self.dataset.get(key, selected_idcs)
max_length = np.sign(selected_samples).sum(axis=1).max()
if self.trim_side == 'right':
sub_batch[key] = selected_samples[:, :max_length]
elif self.trim_side == 'left':
sub_batch[key] = selected_samples[:, -max_length:]
else:
raise ValueError('Invalid trim side:', self.trim_side)
else:
sub_batch[key] = self.dataset.get(key, selected_idcs)
self.indices[i] += self.batch_size
self.step += 1
return sub_batch
def last_batch(self):
return not np.any(self.indices < self.bucket_sizes) \
or (self.ignore_last and
not np.any(
self.indices + self.batch_size < self.bucket_sizes
))
def reset(self):
self.indices = np.array([0] * len(self.buckets_idcs))
self.step = 0
class DistributedBatcher(object):
def __init__(self, dataset, partition_number, horovod, batch_size=128,
should_shuffle=True, ignore_last=False):
self.should_shuffle = should_shuffle
# store our dataset as well
partition_size = dataset.size // horovod.size()
if partition_number == horovod.size() - 1:
self.partition = (partition_size * partition_number, dataset.size)
else:
self.partition = (partition_size * partition_number,
partition_size * (partition_number + 1))
self.dataset = dataset
if should_shuffle:
shuffle_inplace(self.dataset.get_dataset())
self.ignore_last = ignore_last
self.batch_size = batch_size
self.total_size = self.partition[1] - self.partition[0]
self.steps_per_epoch = int(math.ceil(self.total_size / self.batch_size))
self.index = self.partition[0]
self.max_index = self.partition[1]
self.step = 0
self.epoch = 0
def next_batch(self):
if self.last_batch():
if self.should_shuffle:
self.dataset = shuffle_dict_unison_inplace(
self.dataset,
np.random.RandomState(self.epoch)
)
self.reset()
self.epoch += 1
sub_batch = {}
for features_name in self.dataset.features:
sub_batch[features_name] = self.dataset.get(
features_name,
range(
self.index,
min(self.index + self.batch_size, self.max_index)
)
)
self.index += self.batch_size
self.step += 1
return sub_batch
def last_batch(self):
return self.index >= self.max_index or (
self.ignore_last and
self.index + self.batch_size >= self.max_index)
def reset(self):
self.index = self.partition[0]
self.step = 0
| 35.331818
| 80
| 0.584974
|
d8110af2432018d4b13221a62989cd5be9a72023
| 1,516
|
py
|
Python
|
src/spaceone/inventory/lib/ip_address.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/lib/ip_address.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/lib/ip_address.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import ipaddress
class IPAddress(object):
@staticmethod
def get_ip_object(ip_address):
return ipaddress.ip_address(ip_address)
@staticmethod
def get_network_object(network):
return ipaddress.ip_network(network)
@staticmethod
def check_valid_ip(ip_address):
try:
ip = ipaddress.ip_address(ip_address)
return str(ip)
except ValueError:
return False
@staticmethod
def check_valid_network(cidr):
try:
network = ipaddress.ip_network(cidr)
return str(network)
except ValueError:
return False
@staticmethod
def check_duplicate_cidr_range(cidr1, cidr2):
"""
        If one CIDR range contains the other, return True
"""
try:
c1 = ipaddress.ip_network(cidr1)
c2 = ipaddress.ip_network(cidr2)
if c1.subnet_of(c2) or c1.supernet_of(c2):
return True
return False
except ValueError:
return False
@staticmethod
def check_subnet_of_network(subnet_cidr, net_cidr):
subnet_cidr = ipaddress.ip_network(subnet_cidr)
net_cidr = ipaddress.ip_network(net_cidr)
return subnet_cidr.subnet_of(net_cidr)
@staticmethod
def check_valid_ip_in_network(ip, cidr):
_ip = ipaddress.ip_address(ip)
_net = ipaddress.ip_network(cidr)
if _ip in _net:
return True
return False
| 24.063492
| 55
| 0.611478
|
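Illustrative calls to the helpers above using arbitrary private-range addresses; the expected results are noted in the comments (subnet_of/supernet_of require Python 3.7+).
print(IPAddress.check_valid_ip('192.168.0.10'))                            # '192.168.0.10'
print(IPAddress.check_valid_ip('999.1.1.1'))                               # False
print(IPAddress.check_duplicate_cidr_range('10.1.0.0/16', '10.0.0.0/8'))   # True, one contains the other
print(IPAddress.check_valid_ip_in_network('10.1.2.3', '10.1.0.0/16'))      # True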
536ccf9990f6b7e29712409723ba687b52afa677
| 125
|
py
|
Python
|
Inheritance - Exercise/project/food.py
|
DiyanKalaydzhiev23/OOP---Python
|
7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0
|
[
"MIT"
] | null | null | null |
Inheritance - Exercise/project/food.py
|
DiyanKalaydzhiev23/OOP---Python
|
7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0
|
[
"MIT"
] | null | null | null |
Inheritance - Exercise/project/food.py
|
DiyanKalaydzhiev23/OOP---Python
|
7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0
|
[
"MIT"
] | null | null | null |
from project.product import Product
class Food(Product):
def __init__(self, name):
super().__init__(name, 15)
| 15.625
| 35
| 0.68
|
89fdb5767b0ded153c92aa11707943d86d5a2774
| 24,155
|
py
|
Python
|
src/electionguard/decryption.py
|
PradyumnaKrishna/electionguard-python
|
e239478972d76195c64fd715bb57682d526aab6c
|
[
"MIT"
] | null | null | null |
src/electionguard/decryption.py
|
PradyumnaKrishna/electionguard-python
|
e239478972d76195c64fd715bb57682d526aab6c
|
[
"MIT"
] | null | null | null |
src/electionguard/decryption.py
|
PradyumnaKrishna/electionguard-python
|
e239478972d76195c64fd715bb57682d526aab6c
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Optional, Tuple
from electionguard.chaum_pedersen import ChaumPedersenProof, make_chaum_pedersen
from electionguard.elgamal import ElGamalCiphertext
from electionguard.utils import get_optional
from .ballot import (
SubmittedBallot,
CiphertextSelection,
CiphertextContest,
)
from .decryption_share import (
CiphertextDecryptionSelection,
CiphertextCompensatedDecryptionSelection,
CiphertextDecryptionContest,
CiphertextCompensatedDecryptionContest,
create_ciphertext_decryption_selection,
DecryptionShare,
CompensatedDecryptionShare,
)
from .election import CiphertextElectionContext
from .election_polynomial import compute_lagrange_coefficient
from .group import (
ElementModP,
ElementModQ,
ONE_MOD_P,
mult_p,
pow_p,
pow_q,
rand_q,
)
from .key_ceremony import (
CoordinateData,
ElectionKeyPair,
ElectionPartialKeyBackup,
ElectionPublicKey,
get_backup_seed,
)
from .logs import log_warning
from .scheduler import Scheduler
from .tally import CiphertextTally
from .type import ContestId, GuardianId, SelectionId
RecoveryPublicKey = ElementModP
def compute_decryption_share(
key_pair: ElectionKeyPair,
tally: CiphertextTally,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[DecryptionShare]:
"""
Compute the decryption for all of the contests in the Ciphertext Tally
    :param key_pair: Guardian's election key pair
:param tally: Encrypted tally to get decryption share of
:param context: Election context
:param scheduler: Scheduler
:return: Return a guardian's decryption share of tally or None if error
"""
contests: Dict[ContestId, CiphertextDecryptionContest] = {}
for contest in tally.contests.values():
contest_share = compute_decryption_share_for_contest(
key_pair,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
list(contest.selections.values()),
),
context,
scheduler,
)
if contest_share is None:
return None
contests[contest.object_id] = contest_share
return DecryptionShare(
tally.object_id,
key_pair.owner_id,
key_pair.key_pair.public_key,
contests,
)
def compute_compensated_decryption_share(
missing_guardian_coordinate: ElementModQ,
present_guardian_key: ElectionPublicKey,
missing_guardian_key: ElectionPublicKey,
tally: CiphertextTally,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[CompensatedDecryptionShare]:
"""
Compute the compensated decryption for all of the contests in the Ciphertext Tally
    :param missing_guardian_coordinate: Missing guardian's backup coordinate held by this guardian
    :param present_guardian_key: Present guardian's election public key
    :param missing_guardian_key: Missing guardian's election public key
:param tally: Encrypted tally to get decryption share of
:param context: Election context
:param scheduler: Scheduler
:return: Return a guardian's compensated decryption share of tally for the missing guardian
or None if error
"""
contests: Dict[ContestId, CiphertextCompensatedDecryptionContest] = {}
for contest in tally.contests.values():
contest_share = compute_compensated_decryption_share_for_contest(
missing_guardian_coordinate,
present_guardian_key,
missing_guardian_key,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
list(contest.selections.values()),
),
context,
scheduler,
)
if contest_share is None:
return None
contests[contest.object_id] = contest_share
return CompensatedDecryptionShare(
tally.object_id,
present_guardian_key.owner_id,
missing_guardian_key.owner_id,
present_guardian_key.key,
contests,
)
def compute_decryption_share_for_ballot(
key_pair: ElectionKeyPair,
ballot: SubmittedBallot,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[DecryptionShare]:
"""
Compute the decryption for a single ballot
    :param key_pair: Guardian's election key pair
:param ballot: Ballot to be decrypted
:param context: The public election encryption context
:param scheduler: Scheduler
:return: Decryption share for ballot or `None` if there is an error
"""
contests: Dict[ContestId, CiphertextDecryptionContest] = {}
for contest in ballot.contests:
contest_share = compute_decryption_share_for_contest(
key_pair,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
contest.ballot_selections,
),
context,
scheduler,
)
if contest_share is None:
return None
contests[contest.object_id] = contest_share
return DecryptionShare(
ballot.object_id,
key_pair.owner_id,
key_pair.share().key,
contests,
)
def compute_compensated_decryption_share_for_ballot(
missing_guardian_coordinate: ElementModQ,
missing_guardian_key: ElectionPublicKey,
present_guardian_key: ElectionPublicKey,
ballot: SubmittedBallot,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[CompensatedDecryptionShare]:
"""
Compute the compensated decryption for a single ballot
:param missing_guardian_coordinate: Missing guardian's election partial key backup
:param missing_guardian_key: Missing guardian's election public key
:param present_guardian_key: Present guardian's election public key
:param ballot: Encrypted ballot to get decryption share of
:param context: Election context
:param scheduler: Scheduler
:return: Return a guardian's compensated decryption share of ballot for the missing guardian
or None if error
"""
contests: Dict[ContestId, CiphertextCompensatedDecryptionContest] = {}
for contest in ballot.contests:
contest_share = compute_compensated_decryption_share_for_contest(
missing_guardian_coordinate,
present_guardian_key,
missing_guardian_key,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
contest.ballot_selections,
),
context,
scheduler,
)
if contest_share is None:
return None
contests[contest.object_id] = contest_share
return CompensatedDecryptionShare(
ballot.object_id,
present_guardian_key.owner_id,
missing_guardian_key.owner_id,
present_guardian_key.key,
contests,
)
def compute_decryption_share_for_contest(
key_pair: ElectionKeyPair,
contest: CiphertextContest,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[CiphertextDecryptionContest]:
"""
Compute the decryption share for a single contest
    :param key_pair: Guardian's election key pair
:param contest: Contest to be decrypted
:param context: The public election encryption context
:param scheduler: Scheduler
:return: Decryption share for contest or `None` if there is an error
"""
if not scheduler:
scheduler = Scheduler()
selections: Dict[SelectionId, CiphertextDecryptionSelection] = {}
decryptions: List[Optional[CiphertextDecryptionSelection]] = scheduler.schedule(
compute_decryption_share_for_selection,
[(key_pair, selection, context) for selection in contest.selections],
with_shared_resources=True,
)
for decryption in decryptions:
if decryption is None:
return None
selections[decryption.object_id] = decryption
return CiphertextDecryptionContest(
contest.object_id,
key_pair.owner_id,
contest.description_hash,
selections,
)
def compute_compensated_decryption_share_for_contest(
missing_guardian_coordinate: ElementModQ,
present_guardian_key: ElectionPublicKey,
missing_guardian_key: ElectionPublicKey,
contest: CiphertextContest,
context: CiphertextElectionContext,
scheduler: Optional[Scheduler] = None,
) -> Optional[CiphertextCompensatedDecryptionContest]:
"""
Compute the compensated decryption share for a single contest
:param missing_guardian_coordinate: Election partial key backup of the missing guardian
    :param present_guardian_key: The election public key of the available guardian that will partially decrypt the selection
:param missing_guardian_key: Election public key of the guardian that is missing
:param contest: The specific contest to decrypt
:param context: The public election encryption context
:return: a `CiphertextCompensatedDecryptionContest` or `None` if there is an error
"""
if not scheduler:
scheduler = Scheduler()
selections: Dict[SelectionId, CiphertextCompensatedDecryptionSelection] = {}
selection_decryptions: List[
Optional[CiphertextCompensatedDecryptionSelection]
] = scheduler.schedule(
compute_compensated_decryption_share_for_selection,
[
(
missing_guardian_coordinate,
present_guardian_key,
missing_guardian_key,
selection,
context,
)
for selection in contest.selections
],
with_shared_resources=True,
)
for decryption in selection_decryptions:
if decryption is None:
return None
selections[decryption.object_id] = decryption
return CiphertextCompensatedDecryptionContest(
contest.object_id,
present_guardian_key.owner_id,
missing_guardian_key.owner_id,
contest.description_hash,
selections,
)
def compute_decryption_share_for_selection(
key_pair: ElectionKeyPair,
selection: CiphertextSelection,
context: CiphertextElectionContext,
) -> Optional[CiphertextDecryptionSelection]:
"""
Compute a partial decryption for a specific selection
    :param key_pair: Election keys for the guardian who will partially decrypt the selection
:param selection: The specific selection to decrypt
:param context: The public election encryption context
:return: a `CiphertextDecryptionSelection` or `None` if there is an error
"""
(decryption, proof) = partially_decrypt(
key_pair, selection.ciphertext, context.crypto_extended_base_hash
)
if proof.is_valid(
selection.ciphertext,
key_pair.key_pair.public_key,
decryption,
context.crypto_extended_base_hash,
):
return create_ciphertext_decryption_selection(
selection.object_id,
key_pair.owner_id,
decryption,
proof,
)
log_warning(
f"compute decryption share proof failed for guardian {key_pair.owner_id}"
f"and {selection.object_id} with invalid proof"
)
return None
def compute_compensated_decryption_share_for_selection(
missing_guardian_backup: ElementModQ,
available_guardian_key: ElectionPublicKey,
missing_guardian_key: ElectionPublicKey,
selection: CiphertextSelection,
context: CiphertextElectionContext,
) -> Optional[CiphertextCompensatedDecryptionSelection]:
"""
Compute a compensated decryption share for a specific selection using the
available guardian's share of the missing guardian's private key polynomial
:param missing_guardian_backup: The coordinate aka backup of a missing guardian
:param available_guardian_key: Election public key of the guardian that is present
:param missing_guardian_key: Election public key of the guardian that is missing
:param selection: The specific selection to decrypt
:param context: The public election encryption context
:return: a `CiphertextCompensatedDecryptionSelection` or `None` if there is an error
"""
compensated = decrypt_with_threshold(
missing_guardian_backup,
selection.ciphertext,
context.crypto_extended_base_hash,
)
if compensated is None:
log_warning(
(
f"compute compensated decryption share failed for {available_guardian_key.owner_id} "
f"missing: {missing_guardian_key.owner_id} {selection.object_id}"
)
)
return None
(decryption, proof) = compensated
recovery_public_key = compute_recovery_public_key(
available_guardian_key, missing_guardian_key
)
if proof.is_valid(
selection.ciphertext,
recovery_public_key,
decryption,
context.crypto_extended_base_hash,
):
share = CiphertextCompensatedDecryptionSelection(
selection.object_id,
available_guardian_key.owner_id,
missing_guardian_key.owner_id,
decryption,
recovery_public_key,
proof,
)
return share
log_warning(
(
f"compute compensated decryption share proof failed for {available_guardian_key.owner_id} "
f"missing: {missing_guardian_key.owner_id} {selection.object_id}"
)
)
return None
def partially_decrypt(
key_pair: ElectionKeyPair,
elgamal: ElGamalCiphertext,
extended_base_hash: ElementModQ,
nonce_seed: ElementModQ = None,
) -> Tuple[ElementModP, ChaumPedersenProof]:
"""
Compute a partial decryption of an elgamal encryption
:param elgamal: the `ElGamalCiphertext` that will be partially decrypted
:param extended_base_hash: the extended base hash of the election that
        was used to generate the ElGamal Ciphertext
:param nonce_seed: an optional value used to generate the `ChaumPedersenProof`
if no value is provided, a random number will be used.
:return: a `Tuple[ElementModP, ChaumPedersenProof]` of the decryption and its proof
"""
if nonce_seed is None:
nonce_seed = rand_q()
# TODO: ISSUE #47: Decrypt the election secret key
# 𝑀_i = 𝐴^𝑠𝑖 mod 𝑝
partial_decryption = elgamal.partial_decrypt(key_pair.key_pair.secret_key)
# 𝑀_i = 𝐴^𝑠𝑖 mod 𝑝 and 𝐾𝑖 = 𝑔^𝑠𝑖 mod 𝑝
proof = make_chaum_pedersen(
message=elgamal,
s=key_pair.key_pair.secret_key,
m=partial_decryption,
seed=nonce_seed,
hash_header=extended_base_hash,
)
return (partial_decryption, proof)
def decrypt_backup(
guardian_backup: ElectionPartialKeyBackup,
key_pair: ElectionKeyPair,
) -> Optional[ElementModQ]:
"""
Decrypts a compensated partial decryption of an elgamal encryption
on behalf of a missing guardian
:param guardian_backup: Missing guardian's backup
:param key_pair: The present guardian's key pair that will be used to decrypt the backup
:return: a `Tuple[ElementModP, ChaumPedersenProof]` of the decryption and its proof
"""
encryption_seed = get_backup_seed(
key_pair.owner_id,
key_pair.sequence_order,
)
bytes_optional = guardian_backup.encrypted_coordinate.decrypt(
key_pair.key_pair.secret_key, encryption_seed
)
if bytes_optional is None:
return None
coordinate_data: CoordinateData = CoordinateData.from_bytes(
get_optional(bytes_optional)
)
return coordinate_data.coordinate
def decrypt_with_threshold(
coordinate: ElementModQ,
ciphertext: ElGamalCiphertext,
extended_base_hash: ElementModQ,
nonce_seed: ElementModQ = None,
) -> Optional[Tuple[ElementModP, ChaumPedersenProof]]:
"""
Compute a compensated partial decryption of an elgamal encryption
given a coordinate from a missing guardian.
:param coordinate: The coordinate aka backup provided to a present guardian from
a missing guardian
:param ciphertext: the `ElGamalCiphertext` that will be partially decrypted
:param extended_base_hash: the extended base hash of the election that
was used to generate the ElGamal Ciphertext
:param nonce_seed: an optional value used to generate the `ChaumPedersenProof`
if no value is provided, a random number will be used.
:return: a `Tuple[ElementModP, ChaumPedersenProof]` of the decryption and its proof
"""
if nonce_seed is None:
nonce_seed = rand_q()
# 𝑀_{𝑖,l} = 𝐴^P𝑖_{l}
partial_decryption = ciphertext.partial_decrypt(coordinate)
# 𝑀_{𝑖,l} = 𝐴^𝑠𝑖 mod 𝑝 and 𝐾𝑖 = 𝑔^𝑠𝑖 mod 𝑝
proof = make_chaum_pedersen(
ciphertext,
coordinate,
partial_decryption,
nonce_seed,
extended_base_hash,
)
return (partial_decryption, proof)
def compute_recovery_public_key(
guardian_key: ElectionPublicKey,
missing_guardian_key: ElectionPublicKey,
) -> RecoveryPublicKey:
"""
Compute the recovery public key,
corresponding to the secret share Pi(l)
K_ij^(l^j) for j in 0..k-1. K_ij is coefficients[j].public_key
"""
pub_key = ONE_MOD_P
for index, commitment in enumerate(missing_guardian_key.coefficient_commitments):
exponent = pow_q(guardian_key.sequence_order, index)
pub_key = mult_p(pub_key, pow_p(commitment, exponent))
return pub_key
def reconstruct_decryption_share(
missing_guardian_key: ElectionPublicKey,
tally: CiphertextTally,
shares: Dict[GuardianId, CompensatedDecryptionShare],
lagrange_coefficients: Dict[GuardianId, ElementModQ],
) -> DecryptionShare:
"""
Reconstruct the missing Decryption Share for a missing guardian
from the collection of compensated decryption shares
    :param missing_guardian_key: The election public key of the missing guardian
:param tally: The collection of `CiphertextTallyContest` that is cast
:shares: the collection of `CompensatedTallyDecryptionShare` for the missing guardian from available guardians
:lagrange_coefficients: the lagrange coefficients corresponding to the available guardians that provided shares
"""
contests: Dict[ContestId, CiphertextDecryptionContest] = {}
for contest in tally.contests.values():
contests[contest.object_id] = reconstruct_decryption_contest(
missing_guardian_key.owner_id,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
list(contest.selections.values()),
),
shares,
lagrange_coefficients,
)
return DecryptionShare(
tally.object_id,
missing_guardian_key.owner_id,
missing_guardian_key.key,
contests,
)
def reconstruct_decryption_share_for_ballot(
missing_guardian_key: ElectionPublicKey,
ballot: SubmittedBallot,
shares: Dict[GuardianId, CompensatedDecryptionShare],
lagrange_coefficients: Dict[GuardianId, ElementModQ],
) -> DecryptionShare:
"""
Reconstruct a missing ballot Decryption share for a missing guardian
from the collection of compensated decryption shares
    :param missing_guardian_key: The election public key of the missing guardian
:param ballot: The `SubmittedBallot` to reconstruct
:shares: the collection of `CompensatedBallotDecryptionShare` for
the missing guardian, each keyed by the ID of the guardian that produced it from available guardians
:lagrange_coefficients: the lagrange coefficients corresponding to the available guardians that provided shares
"""
contests: Dict[ContestId, CiphertextDecryptionContest] = {}
for contest in ballot.contests:
contests[contest.object_id] = reconstruct_decryption_contest(
missing_guardian_key.owner_id,
CiphertextContest(
contest.object_id,
contest.sequence_order,
contest.description_hash,
contest.ballot_selections,
),
shares,
lagrange_coefficients,
)
return DecryptionShare(
ballot.object_id,
missing_guardian_key.owner_id,
missing_guardian_key.key,
contests,
)
def reconstruct_decryption_contest(
missing_guardian_id: GuardianId,
contest: CiphertextContest,
shares: Dict[GuardianId, CompensatedDecryptionShare],
lagrange_coefficients: Dict[GuardianId, ElementModQ],
) -> CiphertextDecryptionContest:
"""
Reconstruct the missing Decryption Share for a missing guardian
from the collection of compensated decryption shares
:param missing_guardian_id: The guardian id for the missing guardian
:param contest: The CiphertextContest to decrypt
:shares: the collection of `CompensatedDecryptionShare` for the missing guardian from available guardians
:lagrange_coefficients: the lagrange coefficients corresponding to the available guardians that provided shares
"""
contest_shares: Dict[GuardianId, CiphertextCompensatedDecryptionContest] = {
available_guardian_id: compensated_share.contests[contest.object_id]
for available_guardian_id, compensated_share in shares.items()
}
selections: Dict[SelectionId, CiphertextDecryptionSelection] = {}
for selection in contest.selections:
# collect all of the shares generated for each selection
compensated_selection_shares: Dict[
GuardianId, CiphertextCompensatedDecryptionSelection
] = {
available_guardian_id: compensated_contest.selections[selection.object_id]
for available_guardian_id, compensated_contest in contest_shares.items()
}
share_pow_p = []
for available_guardian_id, share in compensated_selection_shares.items():
share_pow_p.append(
pow_p(share.share, lagrange_coefficients[available_guardian_id])
)
reconstructed_share = mult_p(*share_pow_p)
selections[selection.object_id] = create_ciphertext_decryption_selection(
selection.object_id,
missing_guardian_id,
reconstructed_share,
compensated_selection_shares,
)
return CiphertextDecryptionContest(
contest.object_id,
missing_guardian_id,
contest.description_hash,
selections,
)
def compute_lagrange_coefficients_for_guardians(
available_guardians_keys: List[ElectionPublicKey],
) -> Dict[GuardianId, ElementModQ]:
"""
Produce all Lagrange coefficients for a collection of available
Guardians, to be used when reconstructing a missing share.
"""
return {
guardian_keys.owner_id: compute_lagrange_coefficients_for_guardian(
guardian_keys, available_guardians_keys
)
for guardian_keys in available_guardians_keys
}
def compute_lagrange_coefficients_for_guardian(
guardian_key: ElectionPublicKey,
other_guardians_keys: List[ElectionPublicKey],
) -> ElementModQ:
"""
Produce a Lagrange coefficient for a single Guardian, to be used when reconstructing a missing share.
"""
other_guardian_orders = [
g.sequence_order
for g in other_guardians_keys
if g.owner_id != guardian_key.owner_id
]
return compute_lagrange_coefficient(
guardian_key.sequence_order,
*other_guardian_orders,
)
| 34.262411
| 116
| 0.700931
|
bd5249369153d34a07831b5e0b0e37e0c97ab44c
| 528
|
py
|
Python
|
scripts/import_data.py
|
bfontaine/movielens-data-analysis
|
fa8cc7248ba3c1e5a629a6b2291af3ae191cbc82
|
[
"MIT"
] | 3
|
2017-01-13T23:53:37.000Z
|
2019-09-29T06:40:29.000Z
|
scripts/import_data.py
|
bfontaine/movielens-data-analysis
|
fa8cc7248ba3c1e5a629a6b2291af3ae191cbc82
|
[
"MIT"
] | null | null | null |
scripts/import_data.py
|
bfontaine/movielens-data-analysis
|
fa8cc7248ba3c1e5a629a6b2291af3ae191cbc82
|
[
"MIT"
] | 1
|
2021-04-14T14:47:31.000Z
|
2021-04-14T14:47:31.000Z
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
sys.path.insert(0, '%s/..' % os.path.dirname(__file__))
from movies.data_import import import_data
directory = "./data"
if len(sys.argv) == 3:
directory = sys.argv[1]
fmt = sys.argv[2]
else:
print "Usage:\n\t%s [<directory> <format>]\n" % sys.argv[0]
print "Using directory='./data' and format='ml-100k'"
fmt = "ml-100k"
print "Importing data from '%s' using format '%s'" % (directory, fmt)
import_data(directory, fmt, verbose=True)
| 22
| 69
| 0.642045
|
15185a65c3720c6856f2f1625d66557e5b1a9331
| 1,544
|
py
|
Python
|
dm_control/mujoco/wrapper/__init__.py
|
rdaems/dm_control
|
c682e626fde95a98b53f67f07b0c1021e4200bb8
|
[
"Apache-2.0"
] | 1
|
2022-03-22T11:53:38.000Z
|
2022-03-22T11:53:38.000Z
|
dm_control/mujoco/wrapper/__init__.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | null | null | null |
dm_control/mujoco/wrapper/__init__.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python bindings and wrapper classes for MuJoCo."""
from dm_control.mujoco.wrapper import mjbindings
from dm_control.mujoco.wrapper.core import callback_context
from dm_control.mujoco.wrapper.core import enable_timer
from dm_control.mujoco.wrapper.core import Error
from dm_control.mujoco.wrapper.core import get_schema
from dm_control.mujoco.wrapper.core import MjData
from dm_control.mujoco.wrapper.core import MjModel
from dm_control.mujoco.wrapper.core import MjrContext
from dm_control.mujoco.wrapper.core import MjvCamera
from dm_control.mujoco.wrapper.core import MjvFigure
from dm_control.mujoco.wrapper.core import MjvOption
from dm_control.mujoco.wrapper.core import MjvPerturb
from dm_control.mujoco.wrapper.core import MjvScene
from dm_control.mujoco.wrapper.core import save_last_parsed_model_to_xml
from dm_control.mujoco.wrapper.core import set_callback
| 40.631579
| 78
| 0.778497
|
1f362a54b4e7b116abe74f67a7ae2b13b5e9fd59
| 702
|
py
|
Python
|
setup.py
|
paramono/djpp
|
ba70c212595d37e2d3ffbd7313c879979d9d4f3e
|
[
"MIT"
] | 1
|
2020-07-28T19:08:20.000Z
|
2020-07-28T19:08:20.000Z
|
setup.py
|
paramono/djpaypal_subs
|
ba70c212595d37e2d3ffbd7313c879979d9d4f3e
|
[
"MIT"
] | null | null | null |
setup.py
|
paramono/djpaypal_subs
|
ba70c212595d37e2d3ffbd7313c879979d9d4f3e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = ''
setup(
long_description=readme,
name='djpp',
version='0.3.11',
python_requires='==3.*,>=3.5.0',
author='paramono',
author_email='alex@paramono.com',
packages=[
'djpp', 'djpp.management', 'djpp.management.commands',
'djpp.migrations', 'djpp.models'
],
package_dir={"": "."},
package_data={},
install_requires=['paypalrestsdk==1.*,>=1.13.1'],
extras_require={"dev": ["pytest==5.*,>=5.2.0"]},
)
| 23.4
| 62
| 0.618234
|
bd283ffd14e3a18b88ac21397831d4756d7ae3e5
| 329
|
py
|
Python
|
hypervector/errors.py
|
hypervectorio/hypervector-wrapper
|
c90ec91363249ec9401898960ea41cf6aa116ced
|
[
"MIT"
] | 8
|
2021-05-12T15:19:17.000Z
|
2022-03-06T00:59:25.000Z
|
hypervector/errors.py
|
hypervectorio/hypervector-wrapper
|
c90ec91363249ec9401898960ea41cf6aa116ced
|
[
"MIT"
] | null | null | null |
hypervector/errors.py
|
hypervectorio/hypervector-wrapper
|
c90ec91363249ec9401898960ea41cf6aa116ced
|
[
"MIT"
] | 6
|
2021-04-07T14:03:24.000Z
|
2021-04-16T15:38:54.000Z
|
class APIKeyNotSetError(Exception):
pass
class APIConnectionError(Exception):
pass
class HypervectorError(Exception):
def __init__(self, response=None):
self.response = response
if response:
self.status_code = response.status_code
else:
self.status_code = None
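# --- Added usage sketch (not part of the original hypervector source) --------
# Shows how these exception classes are intended to be raised and inspected.
# The `resp` argument is a stand-in for an HTTP response object exposing a
# `status_code` attribute (e.g. requests.Response); it is assumed here for
# illustration only.
def _error_usage_sketch(resp=None):
    try:
        if resp is None:
            raise APIKeyNotSetError("no API key configured")
        if resp.status_code >= 400:
            raise HypervectorError(response=resp)
    except HypervectorError as err:
        return err.status_code  # populated from the wrapped response
    except APIKeyNotSetError:
        return None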
| 16.45
| 51
| 0.653495
|
2e7e7c9f570cf1bd542539dda8da3a7e9463ccd7
| 38,360
|
py
|
Python
|
src/sparsezoo/models/zoo.py
|
signalism/sparsezoo
|
5ca44f8cb514e80844034920d743baba97279ec2
|
[
"Apache-2.0"
] | 116
|
2021-02-04T17:51:22.000Z
|
2022-03-25T03:15:19.000Z
|
src/sparsezoo/models/zoo.py
|
PIlotcnc/new
|
6e6413632de01f6acf691dca8fadb84f841444b9
|
[
"Apache-2.0"
] | 15
|
2021-02-13T12:00:40.000Z
|
2022-03-17T18:44:54.000Z
|
src/sparsezoo/models/zoo.py
|
PIlotcnc/new
|
6e6413632de01f6acf691dca8fadb84f841444b9
|
[
"Apache-2.0"
] | 11
|
2021-02-04T22:20:47.000Z
|
2021-12-03T12:20:09.000Z
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code for managing the search and creation of sparsezoo Model and Recipe objects
"""
from typing import List, Union
from sparsezoo.objects.model import Model
from sparsezoo.objects.recipe import Recipe
from sparsezoo.requests import ModelArgs
__all__ = [
"Zoo",
]
class Zoo:
"""
Provides static functions for loading and searching SparseZoo models and recipes
"""
@staticmethod
def load_model(
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: Union[str, None],
framework: str,
repo: str,
dataset: str,
training_scheme: Union[str, None],
sparse_name: str,
sparse_category: str,
sparse_target: Union[str, None],
release_version: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Model:
"""
Obtains a Model from the model repo
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param release_version: The sparsezoo release version for the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Model instance
"""
return Model.load_model(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def load_model_from_stub(
stub: Union[str, ModelArgs],
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Model:
"""
:param stub: the SparseZoo stub path to the model, can be a string path or
ModelArgs object
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Model instance
"""
return Model.load_model_from_stub(
stub=stub,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
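    # Added usage note (sketch, not part of the original source): a stub-based
    # load typically looks like
    #     model = Zoo.load_model_from_stub(
    #         "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate"
    #     )
    # The stub string above is illustrative only, not a guaranteed SparseZoo entry.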
@staticmethod
def load_model_from_recipe(
recipe: Recipe,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
):
"""
Loads the model associated with a recipe
:param recipe: the Recipe associated with the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Model instance
"""
return Model.load_model_from_recipe(
recipe=recipe,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def load_base_model_from_recipe(
recipe: Recipe,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
):
"""
Loads the base model associated with a recipe
:param recipe: the Recipe associated with the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Model instance
"""
return Model.load_base_model_from_recipe(
recipe=recipe,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def download_model(
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: Union[str, None],
framework: str,
repo: str,
dataset: str,
training_scheme: Union[str, None],
sparse_name: str,
sparse_category: str,
sparse_target: Union[str, None],
release_version: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
overwrite: bool = False,
) -> Model:
"""
Downloads a model from model repo
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param release_version: The sparsezoo release version for the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:param overwrite: True to overwrite the file if it exists, False otherwise
:return: The requested Model instance
"""
return Model.download_model(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
overwrite=overwrite,
)
@staticmethod
def download_model_from_stub(
stub: Union[str, ModelArgs],
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
overwrite: bool = False,
) -> Model:
"""
:param stub: the SparseZoo stub path to the model, can be a string path or
ModelArgs object
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:param overwrite: True to overwrite the file if it exists, False otherwise
:return: The requested Model instance
"""
return Model.download_model_from_stub(
stub=stub,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
overwrite=overwrite,
)
@staticmethod
def search_models(
domain: str,
sub_domain: str,
architecture: Union[str, None] = None,
sub_architecture: Union[str, None] = None,
framework: Union[str, None] = None,
repo: Union[str, None] = None,
dataset: Union[str, None] = None,
training_scheme: Union[str, None] = None,
sparse_name: Union[str, None] = None,
sparse_category: Union[str, None] = None,
sparse_target: Union[str, None] = None,
release_version: Union[str, None] = None,
page: int = 1,
page_length: int = 20,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> List[Model]:
"""
Obtains a list of Models matching the search parameters
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param release_version: The sparsezoo release version for the model
:param page: the page of values to get
:param page_length: the page length of values to get
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Model instance
"""
return Model.search_models(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
page=page,
page_length=page_length,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def search_similar_models(
model: Union[Model, str, ModelArgs],
match_domain: bool = True,
match_sub_domain: bool = True,
match_architecture: bool = True,
match_sub_architecture: bool = True,
match_framework: bool = True,
match_repo: bool = True,
match_dataset: bool = True,
match_training_scheme: bool = False,
match_sparse_name: bool = False,
match_sparse_category: bool = False,
match_sparse_target: bool = False,
) -> List[Model]:
"""
Search for similar models to the given one
:param model: The model object, a SparseZoo model stub path, or a ModelArgs
object representing the base model to search similar models of
:param match_domain: True to match similar models to the current
domain of the model the object belongs to; e.g. cv, nlp
:param match_sub_domain: True to match similar models to the current
sub domain of the model the object belongs to;
e.g. classification, segmentation
:param match_architecture: True to match similar models to the current
architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param match_sub_architecture: True to match similar models to the current
sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param match_framework: True to match similar models to the current
framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param match_repo: True to match similar models to the current
source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param match_dataset: True to match similar models to the current
dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param match_training_scheme: True to match similar models to the current
training scheme used on the model the object
belongs to if any; e.g. augmented
:param match_sparse_name: True to match similar models to the current
name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param match_sparse_category: True to match similar models to the current
degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param match_sparse_target: True to match similar models to the current
deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:return: a list of models matching the given model, if any
"""
if isinstance(model, str):
model = Zoo.load_model_from_stub(model)
return model.search_similar_models(
match_domain=match_domain,
match_sub_domain=match_sub_domain,
match_architecture=match_architecture,
match_sub_architecture=match_sub_architecture,
match_framework=match_framework,
match_repo=match_repo,
match_dataset=match_dataset,
match_training_scheme=match_training_scheme,
match_sparse_name=match_sparse_name,
match_sparse_category=match_sparse_category,
match_sparse_target=match_sparse_target,
)
@staticmethod
def search_sparse_models(
model: Union[Model, str, ModelArgs],
match_framework: bool = True,
match_repo: bool = True,
match_dataset: bool = True,
match_training_scheme: bool = True,
) -> List[Model]:
"""
Search for different available sparse versions based off of the current model
:param model: The model object, a SparseZoo model stub path, or a ModelArgs
object representing the base model to search different sparsifications of
:param match_framework: True to match similar models to the current
framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param match_repo: True to match similar models to the current
source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param match_dataset: True to match similar models to the current
dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param match_training_scheme: True to match similar models to the current
training scheme used on the model the object
belongs to if any; e.g. augmented
:return: the list of matching sparse models, if any
"""
if isinstance(model, str):
model = Zoo.load_model_from_stub(model)
return model.search_sparse_models(
match_framework=match_framework,
match_repo=match_repo,
match_dataset=match_dataset,
match_training_scheme=match_training_scheme,
)
@staticmethod
def search_recipes(
domain: str,
sub_domain: str,
architecture: Union[str, None] = None,
sub_architecture: Union[str, None] = None,
framework: Union[str, None] = None,
repo: Union[str, None] = None,
dataset: Union[str, None] = None,
training_scheme: Union[str, None] = None,
sparse_name: Union[str, None] = None,
sparse_category: Union[str, None] = None,
sparse_target: Union[str, None] = None,
release_version: Union[str, None] = None,
recipe_type: Union[str, None] = None,
page: int = 1,
page_length: int = 20,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> List[Recipe]:
"""
Obtains a list of Recipes matching the model search parameters
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param release_version: The sparsezoo release version for the model
:param recipe_type: The recipe type; e.g. original, transfer_learn
:param page: the page of values to get
:param page_length: the page length of values to get
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: A list of Recipe objects for models that match the given
search parameters
"""
return Recipe.search_recipes(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
recipe_type=recipe_type,
page=page,
page_length=page_length,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def search_sparse_recipes(
model: Union[Model, str, ModelArgs],
recipe_type: Union[str, None] = None,
match_framework: bool = True,
match_repo: bool = True,
match_dataset: bool = True,
match_training_scheme: bool = True,
) -> List[Recipe]:
"""
Search for recipes of the given model
:param model: The model object, a SparseZoo stub model path, or a ModelArgs
object representing the base model to search for recipes
:param match_framework: True to match similar models to the current
framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param match_repo: True to match similar models to the current
source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param match_dataset: True to match similar models to the current
dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param match_training_scheme: True to match similar models to the current
training scheme used on the model the object
belongs to if any; e.g. augmented
:return: the list of matching sparsification recipes, if any
"""
return Recipe.search_sparse_recipes(
model=model,
recipe_type=recipe_type,
match_framework=match_framework,
match_repo=match_repo,
match_dataset=match_dataset,
match_training_scheme=match_training_scheme,
)
@staticmethod
def load_recipe(
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: Union[str, None],
framework: str,
repo: str,
dataset: str,
training_scheme: Union[str, None],
sparse_name: str,
sparse_category: str,
sparse_target: Union[str, None],
recipe_type: Union[str, None] = None,
release_version: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Recipe:
"""
Obtains a Recipe from the model repo
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param recipe_type: The recipe type; e.g. original, transfer_learn
:param release_version: The sparsezoo release version for the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Recipe instance
"""
return Recipe.load_recipe(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
recipe_type=recipe_type,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def load_recipe_from_stub(
stub: Union[str, ModelArgs],
recipe_type: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Recipe:
"""
Loads a recipe from stub. If the stub is a string, it may contain the
recipe type as a stub parameter. i.e.
- "model/stub/path"
- "zoo:model/stub/path",
- "zoo:model/stub/path?recipe_type=original",
- "zoo:model/stub/path/transfer_learn"
:param stub: the SparseZoo stub path to the recipe, can be a string path or
ModelArgs object
:param recipe_type: the recipe type to obtain if not original
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Recipe instance
"""
return Recipe.load_recipe_from_stub(
stub=stub,
recipe_type=recipe_type,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
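    # Added usage note (sketch, not part of the original source): per the
    # docstring above, the recipe type can ride along in the stub itself, e.g.
    #     recipe = Zoo.load_recipe_from_stub("zoo:model/stub/path?recipe_type=transfer_learn")
    # or be passed explicitly via recipe_type; the stub path is illustrative only.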
@staticmethod
def download_recipe(
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: Union[str, None],
framework: str,
repo: str,
dataset: str,
training_scheme: Union[str, None],
sparse_name: str,
sparse_category: str,
sparse_target: Union[str, None],
recipe_type: Union[str, None] = None,
release_version: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
) -> Model:
"""
Downloads a Recipe from the model repo
:param domain: The domain of the model the object belongs to;
e.g. cv, nlp
:param sub_domain: The sub domain of the model the object belongs to;
e.g. classification, segmentation
:param architecture: The architecture of the model the object belongs to;
e.g. resnet_v1, mobilenet_v1
:param sub_architecture: The sub architecture (scaling factor) of the model
the object belongs to; e.g. 50, 101, 152
:param framework: The framework the model the object belongs to was trained on;
e.g. pytorch, tensorflow
:param repo: The source repo for the model the object belongs to;
e.g. sparseml, torchvision
:param dataset: The dataset the model the object belongs to was trained on;
e.g. imagenet, cifar10
:param training_scheme: The training scheme used on the model the object
belongs to if any; e.g. augmented
:param sparse_name: The name describing the sparsification of the model
the object belongs to, e.g. base, pruned, pruned_quant
:param sparse_category: The degree of sparsification of the model the object
belongs to; e.g. none, conservative (~100% baseline metric),
moderate (>=99% baseline metric), aggressive (<99% baseline metric)
:param sparse_target: The deployment target of sparsification of the model
the object belongs to; e.g. edge, deepsparse, deepsparse_throughput, gpu
:param recipe_type: The recipe type; e.g. original, transfer_learn
:param release_version: The sparsezoo release version for the model
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:return: The requested Recipe instance
"""
return Recipe.download_recipe(
domain=domain,
sub_domain=sub_domain,
architecture=architecture,
sub_architecture=sub_architecture,
framework=framework,
repo=repo,
dataset=dataset,
training_scheme=training_scheme,
sparse_name=sparse_name,
sparse_category=sparse_category,
sparse_target=sparse_target,
release_version=release_version,
recipe_type=recipe_type,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
)
@staticmethod
def download_recipe_from_stub(
stub: Union[str, ModelArgs],
recipe_type: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
overwrite: bool = False,
) -> Recipe:
"""
Downloads a recipe from stub. If the stub is a string, it may contain the
recipe type as a stub parameter or part of the stub. i.e.
- "model/stub/path"
- "zoo:model/stub/path",
- "zoo:model/stub/path?recipe_type=original",
- "zoo:model/stub/path/transfer_learn"
:param stub: the SparseZoo stub path to the recipe, can be a string path or
ModelArgs object
:param recipe_type: the recipe_type to download if not original
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:param overwrite: True to overwrite the file if it exists, False otherwise
:return: The requested Recipe instance
"""
return Recipe.download_recipe_from_stub(
stub=stub,
recipe_type=recipe_type,
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
force_token_refresh=force_token_refresh,
overwrite=overwrite,
)
@staticmethod
def download_recipe_base_framework_files(
stub: Union[str, ModelArgs],
recipe_type: Union[str, None] = None,
override_folder_name: Union[str, None] = None,
override_parent_path: Union[str, None] = None,
force_token_refresh: bool = False,
overwrite: bool = False,
extensions: Union[List[str], None] = None,
) -> List[str]:
"""
:param stub: a string model stub that points to a SparseZoo model.
recipe_type may be added as a stub parameter or path of path. i.e.
"model/stub/path", "zoo:model/stub/path",
"zoo:model/stub/path?recipe_type=transfer",
"zoo:model/stub/path/transfer"
:param recipe_type: the recipe_type to download if not original
:param override_folder_name: Override for the name of the folder to save
this file under
:param override_parent_path: Path to override the default save path
for where to save the parent folder for this file under
:param force_token_refresh: True to refresh the auth token, False otherwise
:param overwrite: True to overwrite the file if it exists, False otherwise
:param extensions: List of file extensions to filter for. ex ['.pth', '.ptc'].
If None or empty list, all framework files are downloaded. Default is None
:return: file path to the downloaded framework checkpoint files for the
base weights of this recipe
"""
recipe = Zoo.load_recipe_from_stub(
stub,
recipe_type=recipe_type,
force_token_refresh=force_token_refresh,
)
return recipe.download_base_framework_files(
override_folder_name=override_folder_name,
override_parent_path=override_parent_path,
overwrite=overwrite,
extensions=extensions,
)
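# --- Added usage sketch (not part of the original sparsezoo source) ----------
# Ties the static helpers above together: search for matching models, then pull
# the base framework checkpoint files behind a recipe stub. The domain values
# and the stub string are illustrative assumptions, not guaranteed entries.
def _zoo_usage_sketch():
    models = Zoo.search_models(domain="cv", sub_domain="classification", page_length=5)
    paths = Zoo.download_recipe_base_framework_files(
        "zoo:model/stub/path",   # placeholder stub; see the docstrings above for the format
        extensions=[".pth"],     # only keep PyTorch checkpoint files
    )
    return models, paths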
| 45.830346
| 87
| 0.649088
|
8c95f96085ac2c14d07f2d82ac32691a19bb833d
| 5,397
|
py
|
Python
|
tests/components/panel_custom/test_init.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | null | null | null |
tests/components/panel_custom/test_init.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
tests/components/panel_custom/test_init.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 1
|
2019-06-19T07:43:11.000Z
|
2019-06-19T07:43:11.000Z
|
"""The tests for the panel_custom component."""
from unittest.mock import Mock, patch
from homeassistant import setup
from homeassistant.components import frontend
async def test_webcomponent_custom_path_not_found(hass):
"""Test if a web component is found in config panels dir."""
filename = 'mock.file'
config = {
'panel_custom': {
'name': 'todomvc',
'webcomponent_path': filename,
'sidebar_title': 'Sidebar Title',
'sidebar_icon': 'mdi:iconicon',
'url_path': 'nice_url',
'config': 5,
}
}
with patch('os.path.isfile', Mock(return_value=False)):
result = await setup.async_setup_component(
hass, 'panel_custom', config
)
assert not result
assert len(hass.data.get(frontend.DATA_PANELS, {})) == 0
async def test_webcomponent_custom_path(hass):
"""Test if a web component is found in config panels dir."""
filename = 'mock.file'
config = {
'panel_custom': {
'name': 'todo-mvc',
'webcomponent_path': filename,
'sidebar_title': 'Sidebar Title',
'sidebar_icon': 'mdi:iconicon',
'url_path': 'nice_url',
'config': {
'hello': 'world',
}
}
}
with patch('os.path.isfile', Mock(return_value=True)):
with patch('os.access', Mock(return_value=True)):
result = await setup.async_setup_component(
hass, 'panel_custom', config
)
assert result
panels = hass.data.get(frontend.DATA_PANELS, [])
assert panels
assert 'nice_url' in panels
panel = panels['nice_url']
assert panel.config == {
'hello': 'world',
'_panel_custom': {
'html_url': '/api/panel_custom/todo-mvc',
'name': 'todo-mvc',
'embed_iframe': False,
'trust_external': False,
},
}
assert panel.frontend_url_path == 'nice_url'
assert panel.sidebar_icon == 'mdi:iconicon'
assert panel.sidebar_title == 'Sidebar Title'
async def test_js_webcomponent(hass):
"""Test if a web component is found in config panels dir."""
config = {
'panel_custom': {
'name': 'todo-mvc',
'js_url': '/local/bla.js',
'sidebar_title': 'Sidebar Title',
'sidebar_icon': 'mdi:iconicon',
'url_path': 'nice_url',
'config': {
'hello': 'world',
},
'embed_iframe': True,
'trust_external_script': True,
}
}
result = await setup.async_setup_component(
hass, 'panel_custom', config
)
assert result
panels = hass.data.get(frontend.DATA_PANELS, [])
assert panels
assert 'nice_url' in panels
panel = panels['nice_url']
assert panel.config == {
'hello': 'world',
'_panel_custom': {
'js_url': '/local/bla.js',
'name': 'todo-mvc',
'embed_iframe': True,
'trust_external': True,
}
}
assert panel.frontend_url_path == 'nice_url'
assert panel.sidebar_icon == 'mdi:iconicon'
assert panel.sidebar_title == 'Sidebar Title'
async def test_module_webcomponent(hass):
"""Test if a js module is found in config panels dir."""
config = {
'panel_custom': {
'name': 'todo-mvc',
'module_url': '/local/bla.js',
'sidebar_title': 'Sidebar Title',
'sidebar_icon': 'mdi:iconicon',
'url_path': 'nice_url',
'config': {
'hello': 'world',
},
'embed_iframe': True,
'trust_external_script': True,
'require_admin': True,
}
}
result = await setup.async_setup_component(
hass, 'panel_custom', config
)
assert result
panels = hass.data.get(frontend.DATA_PANELS, [])
assert panels
assert 'nice_url' in panels
panel = panels['nice_url']
assert panel.require_admin
assert panel.config == {
'hello': 'world',
'_panel_custom': {
'module_url': '/local/bla.js',
'name': 'todo-mvc',
'embed_iframe': True,
'trust_external': True,
}
}
assert panel.frontend_url_path == 'nice_url'
assert panel.sidebar_icon == 'mdi:iconicon'
assert panel.sidebar_title == 'Sidebar Title'
async def test_url_option_conflict(hass):
"""Test config with multiple url options."""
to_try = [
{'panel_custom': {
'name': 'todo-mvc',
'module_url': '/local/bla.js',
'js_url': '/local/bla.js',
}
}, {'panel_custom': {
'name': 'todo-mvc',
'webcomponent_path': '/local/bla.html',
'js_url': '/local/bla.js',
}}, {'panel_custom': {
'name': 'todo-mvc',
'webcomponent_path': '/local/bla.html',
'module_url': '/local/bla.js',
'js_url': '/local/bla.js',
}}
]
for config in to_try:
result = await setup.async_setup_component(
hass, 'panel_custom', config
)
assert not result
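# --- Added reference sketch (not part of the original test module) -----------
# Summarizes the panel_custom configuration shape exercised by the tests above.
# Exactly one of module_url / js_url / webcomponent_path should be supplied;
# the conflict test above asserts that combining them fails setup.
EXAMPLE_PANEL_CUSTOM_CONFIG = {
    'panel_custom': {
        'name': 'todo-mvc',              # custom element name registered by the panel
        'module_url': '/local/bla.js',   # JS module that serves the panel
        'sidebar_title': 'Sidebar Title',
        'sidebar_icon': 'mdi:iconicon',
        'url_path': 'nice_url',          # frontend path the panel is mounted under
        'config': {'hello': 'world'},    # arbitrary data passed through to the panel
        'embed_iframe': True,
        'trust_external_script': True,
        'require_admin': True,
    }
}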
| 28.555556
| 64
| 0.531036
|
1297b8371d8583b30f985f727d510fc160fc2ab0
| 93,161
|
py
|
Python
|
Lib/test/test_ipaddress.py
|
mananpal1997/pyston
|
d24bcf5690ab09f37d04549e70561656eaea4445
|
[
"0BSD"
] | 2,441
|
2020-07-31T06:45:53.000Z
|
2022-03-30T15:56:49.000Z
|
Lib/test/test_ipaddress.py
|
mananpal1997/pyston
|
d24bcf5690ab09f37d04549e70561656eaea4445
|
[
"0BSD"
] | 238
|
2020-10-21T04:54:00.000Z
|
2022-03-31T21:49:03.000Z
|
Lib/test/test_ipaddress.py
|
mananpal1997/pyston
|
d24bcf5690ab09f37d04549e70561656eaea4445
|
[
"0BSD"
] | 93
|
2020-08-09T12:00:17.000Z
|
2022-03-25T07:57:24.000Z
|
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import functools
import operator
import pickle
import ipaddress
import weakref
from test.support import LARGEST, SMALLEST
class BaseTestCase(unittest.TestCase):
# One big change in ipaddress over the original ipaddr module is
# error reporting that tries to assume users *don't know the rules*
# for what constitutes an RFC compliant IP address
# Ensuring these errors are emitted correctly in all relevant cases
# meant moving to a more systematic test structure that allows the
# test structure to map more directly to the module structure
# Note that if the constructors are refactored so that addresses with
# multiple problems get classified differently, that's OK - just
# move the affected examples to the newly appropriate test case.
# There is some duplication between the original relatively ad hoc
# test suite and the new systematic tests. While some redundancy in
# testing is considered preferable to accidentally deleting a valid
# test, the original test suite will likely be reduced over time as
# redundant tests are identified.
@property
def factory(self):
raise NotImplementedError
@contextlib.contextmanager
def assertCleanError(self, exc_type, details, *args):
"""
Ensure exception does not display a context by default
Wraps unittest.TestCase.assertRaisesRegex
"""
if args:
details = details % args
cm = self.assertRaisesRegex(exc_type, details)
with cm as exc:
yield exc
# Ensure we produce clean tracebacks on failure
if exc.exception.__context__ is not None:
self.assertTrue(exc.exception.__suppress_context__)
def assertAddressError(self, details, *args):
"""Ensure a clean AddressValueError"""
return self.assertCleanError(ipaddress.AddressValueError,
details, *args)
def assertNetmaskError(self, details, *args):
"""Ensure a clean NetmaskValueError"""
return self.assertCleanError(ipaddress.NetmaskValueError,
details, *args)
def assertInstancesEqual(self, lhs, rhs):
"""Check constructor arguments produce equivalent instances"""
self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
def test_empty_address(self):
with self.assertAddressError("Address cannot be empty"):
self.factory("")
def test_floats_rejected(self):
with self.assertAddressError(re.escape(repr("1.0"))):
self.factory(1.0)
def test_not_an_index_issue15559(self):
# Implementing __index__ makes for a very nasty interaction with the
# bytes constructor. Thus, we disallow implicit use as an integer
self.assertRaises(TypeError, operator.index, self.factory(1))
self.assertRaises(TypeError, hex, self.factory(1))
self.assertRaises(TypeError, bytes, self.factory(1))
def pickle_test(self, addr):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
x = self.factory(addr)
y = pickle.loads(pickle.dumps(x, proto))
self.assertEqual(y, x)
class CommonTestMixin_v4(CommonTestMixin):
def test_leading_zeros(self):
# bpo-36384: no leading zeros to avoid ambiguity with octal notation
msg = "Leading zeros are not permitted in '\d+'"
addresses = [
"000.000.000.000",
"192.168.000.001",
"016.016.016.016",
"192.168.000.001",
"001.000.008.016",
"01.2.3.40",
"1.02.3.40",
"1.2.03.40",
"1.2.3.040",
]
for address in addresses:
with self.subTest(address=address):
with self.assertAddressError(msg):
self.factory(address)
def test_int(self):
self.assertInstancesEqual(0, "0.0.0.0")
self.assertInstancesEqual(3232235521, "192.168.0.1")
def test_packed(self):
self.assertInstancesEqual(bytes.fromhex("00000000"), "0.0.0.0")
self.assertInstancesEqual(bytes.fromhex("c0a80001"), "192.168.0.1")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**32) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % 2**32)):
self.factory(2**32)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = b'\0' * length
msg = "%r (len %d != 4) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(3)
assertBadLength(5)
class CommonTestMixin_v6(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("0000::0000", "::")
self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")
def test_int(self):
self.assertInstancesEqual(0, "::")
self.assertInstancesEqual(3232235521, "::c0a8:1")
def test_packed(self):
addr = b'\0'*12 + bytes.fromhex("00000000")
self.assertInstancesEqual(addr, "::")
addr = b'\0'*12 + bytes.fromhex("c0a80001")
self.assertInstancesEqual(addr, "::c0a8:1")
addr = bytes.fromhex("c0a80001") + b'\0'*12
self.assertInstancesEqual(addr, "c0a8:1::")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**128) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % 2**128)):
self.factory(2**128)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = b'\0' * length
msg = "%r (len %d != 16) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(15)
assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
factory = ipaddress.IPv4Address
def test_network_passed_as_address(self):
addr = "127.0.0.1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv4Address(addr)
def test_bad_address_split(self):
def assertBadSplit(addr):
with self.assertAddressError("Expected 4 octets in %r", addr):
ipaddress.IPv4Address(addr)
assertBadSplit("127.0.1")
assertBadSplit("42.42.42.42.42")
assertBadSplit("42.42.42")
assertBadSplit("42.42")
assertBadSplit("42")
assertBadSplit("42..42.42.42")
assertBadSplit("42.42.42.42.")
assertBadSplit("42.42.42.42...")
assertBadSplit(".42.42.42.42")
assertBadSplit("...42.42.42.42")
assertBadSplit("016.016.016")
assertBadSplit("016.016")
assertBadSplit("016")
assertBadSplit("000")
assertBadSplit("0x0a.0x0a.0x0a")
assertBadSplit("0x0a.0x0a")
assertBadSplit("0x0a")
assertBadSplit(".")
assertBadSplit("bogus")
assertBadSplit("bogus.com")
assertBadSplit("1000")
assertBadSplit("1000000000000000")
assertBadSplit("192.168.0.1.com")
def test_empty_octet(self):
def assertBadOctet(addr):
with self.assertAddressError("Empty octet not permitted in %r",
addr):
ipaddress.IPv4Address(addr)
assertBadOctet("42..42.42")
assertBadOctet("...")
def test_invalid_characters(self):
def assertBadOctet(addr, octet):
msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
assertBadOctet("42.42.42.-0", "-0")
assertBadOctet("42.42.42.+0", "+0")
assertBadOctet("42.42.42.-42", "-42")
assertBadOctet("+1.+2.+3.4", "+1")
assertBadOctet("1.2.3.4e0", "4e0")
assertBadOctet("1.2.3.4::", "4::")
assertBadOctet("1.a.2.3", "a")
def test_octet_length(self):
def assertBadOctet(addr, octet):
msg = "At most 3 characters permitted in %r in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("0000.000.000.000", "0000")
assertBadOctet("12345.67899.-54321.-98765", "12345")
def test_octet_limit(self):
def assertBadOctet(addr, octet):
msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("257.0.0.0", 257)
assertBadOctet("192.168.0.999", 999)
def test_pickle(self):
self.pickle_test('192.0.2.1')
def test_weakref(self):
weakref.ref(self.factory('192.0.2.1'))
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
factory = ipaddress.IPv6Address
def test_network_passed_as_address(self):
addr = "::1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv6Address(addr)
def test_bad_address_split_v6_not_enough_parts(self):
def assertBadSplit(addr):
msg = "At least 3 parts expected in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":")
assertBadSplit(":1")
assertBadSplit("FEDC:9878")
def test_bad_address_split_v6_too_many_colons(self):
def assertBadSplit(addr):
msg = "At most 8 colons permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("9:8:7:6:5:4:3::2:1")
assertBadSplit("10:9:8:7:6:5:4:3:2:1")
assertBadSplit("::8:7:6:5:4:3:2:1")
assertBadSplit("8:7:6:5:4:3:2:1::")
# A trailing IPv4 address is two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
assertBadSplit("9:8:7:6:5:4:3:2:1")
assertBadSplit("7:6:5:4:3:2:1")
# A trailing IPv4 address is two parts
assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
assertBadSplit("7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
def assertBadSplit(addr):
msg = "Expected at most 7 other parts with '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("1:2:3:4::5:6:7:8")
def test_bad_address_split_v6_repeated_double_colon(self):
def assertBadSplit(addr):
msg = "At most one '::' permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe::1::1")
assertBadSplit("1::2::3::4:5")
assertBadSplit("2001::db:::1")
assertBadSplit("3ffe::1::")
assertBadSplit("::3ffe::1")
assertBadSplit(":3ffe::1::1")
assertBadSplit("3ffe::1::1:")
assertBadSplit(":3ffe::1::1:")
assertBadSplit(":::")
assertBadSplit('2001:db8:::1')
def test_bad_address_split_v6_leading_colon(self):
def assertBadSplit(addr):
msg = "Leading ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":2001:db8::1")
assertBadSplit(":1:2:3:4:5:6:7")
assertBadSplit(":1:2:3:4:5:6:")
assertBadSplit(":6:5:4:3:2:1::")
def test_bad_address_split_v6_trailing_colon(self):
def assertBadSplit(addr):
msg = "Trailing ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("2001:db8::1:")
assertBadSplit("1:2:3:4:5:6:7:")
assertBadSplit("::1.2.3.4:")
assertBadSplit("::7:6:5:4:3:2:")
def test_bad_v4_part_in(self):
def assertBadAddressPart(addr, v4_error):
with self.assertAddressError("%s in %r", v4_error, addr):
ipaddress.IPv6Address(addr)
assertBadAddressPart("3ffe::1.net", "Expected 4 octets in '1.net'")
assertBadAddressPart("3ffe::127.0.1",
"Expected 4 octets in '127.0.1'")
assertBadAddressPart("::1.2.3",
"Expected 4 octets in '1.2.3'")
assertBadAddressPart("::1.2.3.4.5",
"Expected 4 octets in '1.2.3.4.5'")
assertBadAddressPart("3ffe::1.1.1.net",
"Only decimal digits permitted in 'net' "
"in '1.1.1.net'")
def test_invalid_characters(self):
def assertBadPart(addr, part):
msg = "Only hex digits permitted in %r in %r" % (part, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv6Address(addr)
assertBadPart("3ffe::goog", "goog")
assertBadPart("3ffe::-0", "-0")
assertBadPart("3ffe::+0", "+0")
assertBadPart("3ffe::-1", "-1")
assertBadPart("1.2.3.4::", "1.2.3.4")
assertBadPart('1234:axy::b', "axy")
def test_part_length(self):
def assertBadPart(addr, part):
msg = "At most 4 characters permitted in %r in %r"
with self.assertAddressError(msg, part, addr):
ipaddress.IPv6Address(addr)
assertBadPart("::00000", "00000")
assertBadPart("3ffe::10000", "10000")
assertBadPart("02001:db8::", "02001")
assertBadPart('2001:888888::1', "888888")
def test_pickle(self):
self.pickle_test('2001:db8::')
def test_weakref(self):
weakref.ref(self.factory('2001:db8::'))
class NetmaskTestMixin_v4(CommonTestMixin_v4):
"""Input validation on interfaces and networks is very similar"""
def test_no_mask(self):
for address in ('1.2.3.4', 0x01020304, b'\x01\x02\x03\x04'):
net = self.factory(address)
self.assertEqual(str(net), '1.2.3.4/32')
self.assertEqual(str(net.netmask), '255.255.255.255')
self.assertEqual(str(net.hostmask), '0.0.0.0')
# IPv4Network has prefixlen, but IPv4Interface doesn't.
# Should we add it to IPv4Interface too? (bpo-36392)
def test_split_netmask(self):
addr = "1.2.3.4/32/24"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("bogus", "Expected 4 octets")
assertBadAddress("google.com", "Expected 4 octets")
assertBadAddress("10/8", "Expected 4 octets")
assertBadAddress("::1.2.3.4", "Only decimal digits")
assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
def test_valid_netmask(self):
self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
'192.0.2.0/24')
for i in range(0, 33):
# Generate and re-parse the CIDR format (trivial).
net_str = '0.0.0.0/%d' % i
net = self.factory(net_str)
self.assertEqual(str(net), net_str)
# Generate and re-parse the expanded netmask.
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('0.0.0.0/0%d' % i)), net_str)
# Generate and re-parse the expanded hostmask. The ambiguous
# cases (/0 and /32) are treated as netmasks.
if i in (32, 0):
net_str = '0.0.0.0/%d' % (32 - i)
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.hostmask)), net_str)
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("1.2.3.4", "")
assertBadNetmask("1.2.3.4", "-1")
assertBadNetmask("1.2.3.4", "+1")
assertBadNetmask("1.2.3.4", " 1 ")
assertBadNetmask("1.2.3.4", "0x1")
assertBadNetmask("1.2.3.4", "33")
assertBadNetmask("1.2.3.4", "254.254.255.256")
assertBadNetmask("1.2.3.4", "1.a.2.3")
assertBadNetmask("1.1.1.1", "254.xyz.2.3")
assertBadNetmask("1.1.1.1", "240.255.0.0")
assertBadNetmask("1.1.1.1", "255.254.128.0")
assertBadNetmask("1.1.1.1", "0.1.127.255")
assertBadNetmask("1.1.1.1", "pudding")
assertBadNetmask("1.1.1.1", "::")
def test_netmask_in_tuple_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory((addr, netmask))
assertBadNetmask("1.1.1.1", -1)
assertBadNetmask("1.1.1.1", 33)
def test_pickle(self):
self.pickle_test('192.0.2.0/27')
self.pickle_test('192.0.2.0/31') # IPV4LENGTH - 1
self.pickle_test('192.0.2.0') # IPV4LENGTH
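# --- Added illustration (not part of the original test suite) ----------------
# The netmask handling asserted above, shown directly with the stdlib factory:
# an expanded netmask and the equivalent hostmask both normalize to CIDR form.
def _netmask_forms_sketch():
    by_netmask = ipaddress.IPv4Network('192.0.2.0/255.255.255.0')   # expanded netmask form
    by_hostmask = ipaddress.IPv4Network('192.0.2.0/0.0.0.255')      # equivalent hostmask form
    assert str(by_netmask) == str(by_hostmask) == '192.0.2.0/24'    # both normalize to CIDR
    return by_netmask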
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Network
def test_subnet_of(self):
# containee left of container
self.assertFalse(
self.factory('10.0.0.0/30').subnet_of(
self.factory('10.0.1.0/24')))
# containee inside container
self.assertTrue(
self.factory('10.0.0.0/30').subnet_of(
self.factory('10.0.0.0/24')))
# containee right of container
self.assertFalse(
self.factory('10.0.0.0/30').subnet_of(
self.factory('10.0.1.0/24')))
# containee larger than container
self.assertFalse(
self.factory('10.0.1.0/24').subnet_of(
self.factory('10.0.0.0/30')))
def test_supernet_of(self):
# containee left of container
self.assertFalse(
self.factory('10.0.0.0/30').supernet_of(
self.factory('10.0.1.0/24')))
# containee inside container
self.assertFalse(
self.factory('10.0.0.0/30').supernet_of(
self.factory('10.0.0.0/24')))
# containee right of container
self.assertFalse(
self.factory('10.0.0.0/30').supernet_of(
self.factory('10.0.1.0/24')))
# containee larger than container
self.assertTrue(
self.factory('10.0.0.0/24').supernet_of(
self.factory('10.0.0.0/30')))
def test_subnet_of_mixed_types(self):
with self.assertRaises(TypeError):
ipaddress.IPv4Network('10.0.0.0/30').supernet_of(
ipaddress.IPv6Network('::1/128'))
with self.assertRaises(TypeError):
ipaddress.IPv6Network('::1/128').supernet_of(
ipaddress.IPv4Network('10.0.0.0/30'))
with self.assertRaises(TypeError):
ipaddress.IPv4Network('10.0.0.0/30').subnet_of(
ipaddress.IPv6Network('::1/128'))
with self.assertRaises(TypeError):
ipaddress.IPv6Network('::1/128').subnet_of(
ipaddress.IPv4Network('10.0.0.0/30'))
class NetmaskTestMixin_v6(CommonTestMixin_v6):
"""Input validation on interfaces and networks is very similar"""
def test_no_mask(self):
for address in ('::1', 1, b'\x00'*15 + b'\x01'):
net = self.factory(address)
self.assertEqual(str(net), '::1/128')
self.assertEqual(str(net.netmask), 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
self.assertEqual(str(net.hostmask), '::')
# IPv6Network has prefixlen, but IPv6Interface doesn't.
            # Should we add it to IPv6Interface too? (bpo-36392)
def test_split_netmask(self):
addr = "cafe:cafe::/128/190"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("google.com", "At least 3 parts")
assertBadAddress("1.2.3.4", "At least 3 parts")
assertBadAddress("10/8", "At least 3 parts")
assertBadAddress("1234:axy::b", "Only hex digits")
def test_valid_netmask(self):
# We only support CIDR for IPv6, because expanded netmasks are not
# standard notation.
self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
for i in range(0, 129):
# Generate and re-parse the CIDR format (trivial).
net_str = '::/%d' % i
self.assertEqual(str(self.factory(net_str)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('::/0%d' % i)), net_str)
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("::1", "")
assertBadNetmask("::1", "::1")
assertBadNetmask("::1", "1::")
assertBadNetmask("::1", "-1")
assertBadNetmask("::1", "+1")
assertBadNetmask("::1", " 1 ")
assertBadNetmask("::1", "0x1")
assertBadNetmask("::1", "129")
assertBadNetmask("::1", "1.2.3.4")
assertBadNetmask("::1", "pudding")
assertBadNetmask("::", "::")
def test_netmask_in_tuple_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory((addr, netmask))
assertBadNetmask("::1", -1)
assertBadNetmask("::1", 129)
def test_pickle(self):
self.pickle_test('2001:db8::1000/124')
self.pickle_test('2001:db8::1000/127') # IPV6LENGTH - 1
self.pickle_test('2001:db8::1000') # IPV6LENGTH
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Network
def test_subnet_of(self):
# containee left of container
self.assertFalse(
self.factory('2000:999::/56').subnet_of(
self.factory('2000:aaa::/48')))
# containee inside container
self.assertTrue(
self.factory('2000:aaa::/56').subnet_of(
self.factory('2000:aaa::/48')))
# containee right of container
self.assertFalse(
self.factory('2000:bbb::/56').subnet_of(
self.factory('2000:aaa::/48')))
# containee larger than container
self.assertFalse(
self.factory('2000:aaa::/48').subnet_of(
self.factory('2000:aaa::/56')))
def test_supernet_of(self):
# containee left of container
self.assertFalse(
self.factory('2000:999::/56').supernet_of(
self.factory('2000:aaa::/48')))
# containee inside container
self.assertFalse(
self.factory('2000:aaa::/56').supernet_of(
self.factory('2000:aaa::/48')))
# containee right of container
self.assertFalse(
self.factory('2000:bbb::/56').supernet_of(
self.factory('2000:aaa::/48')))
# containee larger than container
self.assertTrue(
self.factory('2000:aaa::/48').supernet_of(
self.factory('2000:aaa::/56')))
class FactoryFunctionErrors(BaseTestCase):
def assertFactoryError(self, factory, kind):
"""Ensure a clean ValueError with the expected message"""
addr = "camelot"
msg = '%r does not appear to be an IPv4 or IPv6 %s'
with self.assertCleanError(ValueError, msg, addr, kind):
factory(addr)
def test_ip_address(self):
self.assertFactoryError(ipaddress.ip_address, "address")
def test_ip_interface(self):
self.assertFactoryError(ipaddress.ip_interface, "interface")
def test_ip_network(self):
self.assertFactoryError(ipaddress.ip_network, "network")
class ComparisonTests(unittest.TestCase):
v4addr = ipaddress.IPv4Address(1)
v4net = ipaddress.IPv4Network(1)
v4intf = ipaddress.IPv4Interface(1)
v6addr = ipaddress.IPv6Address(1)
v6net = ipaddress.IPv6Network(1)
v6intf = ipaddress.IPv6Interface(1)
v4_addresses = [v4addr, v4intf]
v4_objects = v4_addresses + [v4net]
v6_addresses = [v6addr, v6intf]
v6_objects = v6_addresses + [v6net]
objects = v4_objects + v6_objects
v4addr2 = ipaddress.IPv4Address(2)
v4net2 = ipaddress.IPv4Network(2)
v4intf2 = ipaddress.IPv4Interface(2)
v6addr2 = ipaddress.IPv6Address(2)
v6net2 = ipaddress.IPv6Network(2)
v6intf2 = ipaddress.IPv6Interface(2)
def test_foreign_type_equality(self):
# __eq__ should never raise TypeError directly
other = object()
for obj in self.objects:
self.assertNotEqual(obj, other)
self.assertFalse(obj == other)
self.assertEqual(obj.__eq__(other), NotImplemented)
self.assertEqual(obj.__ne__(other), NotImplemented)
def test_mixed_type_equality(self):
# Ensure none of the internal objects accidentally
# expose the right set of attributes to become "equal"
for lhs in self.objects:
for rhs in self.objects:
if lhs is rhs:
continue
self.assertNotEqual(lhs, rhs)
def test_same_type_equality(self):
for obj in self.objects:
self.assertEqual(obj, obj)
self.assertLessEqual(obj, obj)
self.assertGreaterEqual(obj, obj)
def test_same_type_ordering(self):
for lhs, rhs in (
(self.v4addr, self.v4addr2),
(self.v4net, self.v4net2),
(self.v4intf, self.v4intf2),
(self.v6addr, self.v6addr2),
(self.v6net, self.v6net2),
(self.v6intf, self.v6intf2),
):
self.assertNotEqual(lhs, rhs)
self.assertLess(lhs, rhs)
self.assertLessEqual(lhs, rhs)
self.assertGreater(rhs, lhs)
self.assertGreaterEqual(rhs, lhs)
self.assertFalse(lhs > rhs)
self.assertFalse(rhs < lhs)
self.assertFalse(lhs >= rhs)
self.assertFalse(rhs <= lhs)
def test_containment(self):
for obj in self.v4_addresses:
self.assertIn(obj, self.v4net)
for obj in self.v6_addresses:
self.assertIn(obj, self.v6net)
for obj in self.v4_objects + [self.v6net]:
self.assertNotIn(obj, self.v6net)
for obj in self.v6_objects + [self.v4net]:
self.assertNotIn(obj, self.v4net)
def test_mixed_type_ordering(self):
for lhs in self.objects:
for rhs in self.objects:
if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
continue
self.assertRaises(TypeError, lambda: lhs < rhs)
self.assertRaises(TypeError, lambda: lhs > rhs)
self.assertRaises(TypeError, lambda: lhs <= rhs)
self.assertRaises(TypeError, lambda: lhs >= rhs)
def test_foreign_type_ordering(self):
other = object()
for obj in self.objects:
with self.assertRaises(TypeError):
obj < other
with self.assertRaises(TypeError):
obj > other
with self.assertRaises(TypeError):
obj <= other
with self.assertRaises(TypeError):
obj >= other
self.assertTrue(obj < LARGEST)
self.assertFalse(obj > LARGEST)
self.assertTrue(obj <= LARGEST)
self.assertFalse(obj >= LARGEST)
self.assertFalse(obj < SMALLEST)
self.assertTrue(obj > SMALLEST)
self.assertFalse(obj <= SMALLEST)
self.assertTrue(obj >= SMALLEST)
def test_mixed_type_key(self):
        # with get_mixed_type_key, you can sort addresses and networks.
v4_ordered = [self.v4addr, self.v4net, self.v4intf]
v6_ordered = [self.v6addr, self.v6net, self.v6intf]
self.assertEqual(v4_ordered,
sorted(self.v4_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v6_ordered,
sorted(self.v6_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v4_ordered + v6_ordered,
sorted(self.objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))
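    # Illustrative sketch: get_mixed_type_key lets a heterogeneous list of
    # addresses, interfaces and networks be sorted without a TypeError, e.g.
    #   >>> mixed = [ipaddress.ip_network('1.2.3.0/24'), ipaddress.ip_address('1.2.3.4')]
    #   >>> sorted(mixed, key=ipaddress.get_mixed_type_key)   # no TypeError raised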
def test_incompatible_versions(self):
# These should always raise TypeError
v4addr = ipaddress.ip_address('1.1.1.1')
v4net = ipaddress.ip_network('1.1.1.1')
v6addr = ipaddress.ip_address('::1')
v6net = ipaddress.ip_network('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
# issue #16531: constructing IPv4Network from an (address, mask) tuple
def testIPv4Tuple(self):
# /32
ip = ipaddress.IPv4Address('192.0.2.1')
net = ipaddress.IPv4Network('192.0.2.1/32')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 32)), net)
self.assertEqual(ipaddress.IPv4Network((ip, 32)), net)
self.assertEqual(ipaddress.IPv4Network((3221225985, 32)), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
'255.255.255.255')), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.255')), net)
self.assertEqual(ipaddress.IPv4Network((3221225985,
'255.255.255.255')), net)
# strict=True and host bits set
with self.assertRaises(ValueError):
ipaddress.IPv4Network(('192.0.2.1', 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((ip, 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((3221225985, 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network(('192.0.2.1', '255.255.255.0'))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((ip, '255.255.255.0'))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((3221225985, '255.255.255.0'))
# strict=False and host bits set
net = ipaddress.IPv4Network('192.0.2.0/24')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((ip, 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((3221225985, 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
'255.255.255.0'),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.0'),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((3221225985,
'255.255.255.0'),
strict=False), net)
# /24
ip = ipaddress.IPv4Address('192.0.2.0')
net = ipaddress.IPv4Network('192.0.2.0/24')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.0',
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network((3221225984,
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.0', 24)), net)
self.assertEqual(ipaddress.IPv4Network((ip, 24)), net)
self.assertEqual(ipaddress.IPv4Network((3221225984, 24)), net)
self.assertEqual(ipaddress.IPv4Interface(('192.0.2.1', 24)),
ipaddress.IPv4Interface('192.0.2.1/24'))
self.assertEqual(ipaddress.IPv4Interface((3221225985, 24)),
ipaddress.IPv4Interface('192.0.2.1/24'))
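    # Note: 3221225985 above is simply 192.0.2.1 expressed as a 32-bit integer
    # (0xC0000201), and 3221225984 is 192.0.2.0 (0xC0000200), so the integer,
    # string and IPv4Address tuple forms all describe the same network.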
# issue #16531: constructing IPv6Network from an (address, mask) tuple
def testIPv6Tuple(self):
# /128
ip = ipaddress.IPv6Address('2001:db8::')
net = ipaddress.IPv6Network('2001:db8::/128')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '128')),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826560, 128)),
net)
self.assertEqual(ipaddress.IPv6Network((ip, '128')),
net)
ip = ipaddress.IPv6Address('2001:db8::')
net = ipaddress.IPv6Network('2001:db8::/96')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '96')),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826560, 96)),
net)
self.assertEqual(ipaddress.IPv6Network((ip, '96')),
net)
# strict=True and host bits set
ip = ipaddress.IPv6Address('2001:db8::1')
with self.assertRaises(ValueError):
ipaddress.IPv6Network(('2001:db8::1', 96))
with self.assertRaises(ValueError):
ipaddress.IPv6Network((
42540766411282592856903984951653826561, 96))
with self.assertRaises(ValueError):
ipaddress.IPv6Network((ip, 96))
# strict=False and host bits set
net = ipaddress.IPv6Network('2001:db8::/96')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::1', 96),
strict=False),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826561, 96),
strict=False),
net)
self.assertEqual(ipaddress.IPv6Network((ip, 96), strict=False),
net)
# /96
self.assertEqual(ipaddress.IPv6Interface(('2001:db8::1', '96')),
ipaddress.IPv6Interface('2001:db8::1/96'))
self.assertEqual(ipaddress.IPv6Interface(
(42540766411282592856903984951653826561, '96')),
ipaddress.IPv6Interface('2001:db8::1/96'))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
ip1 = ipaddress.IPv4Address('10.10.10.10')
ip2 = ipaddress.IPv4Address('10.10.10.11')
ip3 = ipaddress.IPv4Address('10.10.10.12')
self.assertEqual(list(ipaddress._find_address_range([ip1])),
[(ip1, ip1)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip3])),
[(ip1, ip1), (ip3, ip3)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip2, ip3])),
[(ip1, ip3)])
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
def testIPv4Net(self):
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(net.prefixlen, 24)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/120')
hosts = list(ipv6_network.hosts())
self.assertEqual(255, len(hosts))
self.assertEqual(ipaddress.IPv6Address('2001:658:22a:cafe::1'), hosts[0])
self.assertEqual(ipaddress.IPv6Address('2001:658:22a:cafe::ff'), hosts[-1])
# special case where only 1 bit is left for address
addrs = [ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')]
str_args = '2.0.0.0/31'
tpl_args = ('2.0.0.0', 31)
self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
list(ipaddress.ip_network(tpl_args).hosts()))
# special case where the network is a /32
addrs = [ipaddress.IPv4Address('1.2.3.4')]
str_args = '1.2.3.4/32'
tpl_args = ('1.2.3.4', 32)
self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
list(ipaddress.ip_network(tpl_args).hosts()))
addrs = [ipaddress.IPv6Address('2001:658:22a:cafe::'),
ipaddress.IPv6Address('2001:658:22a:cafe::1')]
str_args = '2001:658:22a:cafe::/127'
tpl_args = ('2001:658:22a:cafe::', 127)
self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
list(ipaddress.ip_network(tpl_args).hosts()))
addrs = [ipaddress.IPv6Address('2001:658:22a:cafe::1'), ]
str_args = '2001:658:22a:cafe::1/128'
tpl_args = ('2001:658:22a:cafe::1', 128)
self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
list(ipaddress.ip_network(tpl_args).hosts()))
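    # Summary of the behaviour exercised above: for IPv4, hosts() omits the
    # network and broadcast addresses; for IPv6 only the network
    # (Subnet-Router anycast) address is omitted; and for /31, /32, /127 and
    # /128 every address is yielded, e.g.
    #   >>> list(ipaddress.ip_network('192.0.2.0/31').hosts())
    #   [IPv4Address('192.0.2.0'), IPv4Address('192.0.2.1')]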
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
sorted(self.ipv4_network.subnets(new_prefix=27)))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(new_prefix=23))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(prefixlen_diff=3,
new_prefix=27))
self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
sorted(self.ipv6_network.subnets(new_prefix=68)))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(new_prefix=63))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(prefixlen_diff=4,
new_prefix=68))
def testGetSubnets(self):
self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
self.assertEqual(str(list(
self.ipv4_network.subnets())[0].network_address),
'1.2.3.0')
self.assertEqual(str(list(
self.ipv4_network.subnets())[1].network_address),
'1.2.3.128')
self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4_network.subnets(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testGetSubnets3(self):
subnets = [str(x) for x in self.ipv4_network.subnets(8)]
self.assertEqual(subnets[:3],
['1.2.3.0/32', '1.2.3.1/32', '1.2.3.2/32'])
self.assertEqual(subnets[-3:],
['1.2.3.253/32', '1.2.3.254/32', '1.2.3.255/32'])
self.assertEqual(len(subnets), 256)
ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/120')
subnets = [str(x) for x in ipv6_network.subnets(8)]
self.assertEqual(subnets[:3],
['2001:658:22a:cafe::/128',
'2001:658:22a:cafe::1/128',
'2001:658:22a:cafe::2/128'])
self.assertEqual(subnets[-3:],
['2001:658:22a:cafe::fd/128',
'2001:658:22a:cafe::fe/128',
'2001:658:22a:cafe::ff/128'])
self.assertEqual(len(subnets), 256)
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(65))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(65))
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError,
self.ipv4_interface.network.supernet, 25)
self.assertRaises(ValueError,
self.ipv6_interface.network.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(-1))
def testGetNum_Addresses(self):
self.assertEqual(self.ipv4_network.num_addresses, 256)
self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
128)
self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
9223372036854775808)
self.assertEqual(self.ipv6_network.supernet().num_addresses,
36893488147419103232)
def testContains(self):
self.assertIn(ipaddress.IPv4Interface('1.2.3.128/25'),
self.ipv4_network)
self.assertNotIn(ipaddress.IPv4Interface('1.2.4.1/24'),
self.ipv4_network)
        # We can test addresses and strings as well.
addr1 = ipaddress.IPv4Address('1.2.3.37')
self.assertIn(addr1, self.ipv4_network)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
ipaddress.IPv4Network('1.0.0.0/15')))
def testNth(self):
self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
self.assertEqual(str(self.ipv6_network[5]),
'2001:658:22a:cafe::5')
self.assertRaises(IndexError, self.ipv6_network.__getitem__, 1 << 64)
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
self.assertTrue(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertFalse(self.ipv4_interface == '')
self.assertFalse(self.ipv4_interface == [])
self.assertFalse(self.ipv4_interface == 2)
self.assertTrue(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv6_interface == '')
self.assertFalse(self.ipv6_interface == [])
self.assertFalse(self.ipv6_interface == 2)
def testNotEqual(self):
self.assertFalse(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertTrue(self.ipv4_interface != '')
self.assertTrue(self.ipv4_interface != [])
self.assertTrue(self.ipv4_interface != 2)
self.assertTrue(self.ipv4_address !=
ipaddress.IPv4Address('1.2.3.5'))
self.assertTrue(self.ipv4_address != '')
self.assertTrue(self.ipv4_address != [])
self.assertTrue(self.ipv4_address != 2)
self.assertFalse(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv6_interface != '')
self.assertTrue(self.ipv6_interface != [])
self.assertTrue(self.ipv6_interface != 2)
self.assertTrue(self.ipv6_address !=
ipaddress.IPv4Address('1.2.3.4'))
self.assertTrue(self.ipv6_address != '')
self.assertTrue(self.ipv6_address != [])
self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface(
'1.2.3.4/255.255.255.255')), '1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
ip5 = ipaddress.IPv4Address('1.1.1.4')
ip6 = ipaddress.IPv4Address('1.1.1.0')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses(
[ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30'),
ipaddress.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
#ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
#ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30')])
# test only IP networks
ip1 = ipaddress.IPv4Network('1.1.0.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/24')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
ip4 = ipaddress.IPv4Network('1.1.3.0/24')
ip5 = ipaddress.IPv4Network('1.1.4.0/24')
        # stored in no particular order, because we want collapse_addresses()
        # to have to sort the input itself
ip6 = ipaddress.IPv4Network('1.1.0.0/22')
# check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/22'),
ipaddress.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddress.collapse_addresses([ip1, ip2])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ipaddress.ip_network('1.1.1.1/32')])
ip1 = ipaddress.IPv6Network('2001::/100')
ip2 = ipaddress.IPv6Network('2001::/120')
ip3 = ipaddress.IPv6Network('2001::/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
self.assertEqual(list(collapsed), [ip3])
# the toejam test
addr_tuples = [
(ipaddress.ip_address('1.1.1.1'),
ipaddress.ip_address('::1')),
(ipaddress.IPv4Network('1.1.0.0/24'),
ipaddress.IPv6Network('2001::/120')),
(ipaddress.IPv4Network('1.1.0.0/32'),
ipaddress.IPv6Network('2001::/128')),
]
for ip1, ip2 in addr_tuples:
self.assertRaises(TypeError, ipaddress.collapse_addresses,
[ip1, ip2])
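    # Illustrative sketch of the behaviour tested above: collapse_addresses
    # merges adjacent and overlapping networks into the smallest covering set:
    #   >>> list(ipaddress.collapse_addresses([ipaddress.ip_network('192.0.2.0/25'),
    #   ...                                     ipaddress.ip_network('192.0.2.128/25')]))
    #   [IPv4Network('192.0.2.0/24')]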
def testSummarizing(self):
#ip = ipaddress.ip_address
#ipnet = ipaddress.ip_network
summarize = ipaddress.summarize_address_range
ip1 = ipaddress.ip_address('1.1.1.0')
ip2 = ipaddress.ip_address('1.1.1.255')
# summarize works only for IPv4 & IPv6
class IPv7Address(ipaddress.IPv6Address):
@property
def version(self):
return 7
ip_invalid1 = IPv7Address('::1')
ip_invalid2 = IPv7Address('::1')
self.assertRaises(ValueError, list,
summarize(ip_invalid1, ip_invalid2))
# test that a summary over ip4 & ip6 fails
self.assertRaises(TypeError, list,
summarize(ip1, ipaddress.IPv6Address('::1')))
# test a /24 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('1.1.1.8')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1.1.1.0/29'),
ipaddress.ip_network('1.1.1.8')])
# all!
ip1 = ipaddress.IPv4Address(0)
ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
list(summarize(ip1, ip2)))
ip1 = ipaddress.ip_address('1::')
ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
# test an IPv6 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('2::')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1::/16'),
ipaddress.ip_network('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, list,
summarize(ipaddress.ip_address('1.1.1.0'),
ipaddress.ip_address('1.1.0.0')))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_address('::'),
ipaddress.ip_network('1.1.0.0')))
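    # Illustrative sketch: summarize_address_range yields the shortest list of
    # networks covering an inclusive address range, e.g.
    #   >>> list(ipaddress.summarize_address_range(ipaddress.ip_address('192.0.2.0'),
    #   ...                                         ipaddress.ip_address('192.0.2.130')))
    #   [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), IPv4Network('192.0.2.130/32')]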
def testAddressComparison(self):
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.1'))
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.2'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::1'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::2'))
def testInterfaceComparison(self):
self.assertTrue(ipaddress.ip_interface('1.1.1.1/24') ==
ipaddress.ip_interface('1.1.1.1/24'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1/16') <
ipaddress.ip_interface('1.1.1.1/24'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1/24') <
ipaddress.ip_interface('1.1.1.2/24'))
self.assertTrue(ipaddress.ip_interface('1.1.1.2/16') <
ipaddress.ip_interface('1.1.1.1/24'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1/24') >
ipaddress.ip_interface('1.1.1.1/16'))
self.assertTrue(ipaddress.ip_interface('1.1.1.2/24') >
ipaddress.ip_interface('1.1.1.1/24'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1/24') >
ipaddress.ip_interface('1.1.1.2/16'))
self.assertTrue(ipaddress.ip_interface('::1/64') ==
ipaddress.ip_interface('::1/64'))
self.assertTrue(ipaddress.ip_interface('::1/64') <
ipaddress.ip_interface('::1/80'))
self.assertTrue(ipaddress.ip_interface('::1/64') <
ipaddress.ip_interface('::2/64'))
self.assertTrue(ipaddress.ip_interface('::2/48') <
ipaddress.ip_interface('::1/64'))
self.assertTrue(ipaddress.ip_interface('::1/80') >
ipaddress.ip_interface('::1/64'))
self.assertTrue(ipaddress.ip_interface('::2/64') >
ipaddress.ip_interface('::1/64'))
self.assertTrue(ipaddress.ip_interface('::1/64') >
ipaddress.ip_interface('::2/48'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertIs(ip1.__lt__(ipaddress.ip_address('10.10.10.0')),
NotImplemented)
self.assertIs(ip2.__lt__(ipaddress.ip_address('10.10.10.0')),
NotImplemented)
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')
def testOverlaps(self):
other = ipaddress.IPv4Network('1.2.3.0/30')
other2 = ipaddress.IPv4Network('1.2.2.0/24')
other3 = ipaddress.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4_network.overlaps(other))
self.assertFalse(self.ipv4_network.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddress.IPv4Interface(ipv4_string)
v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
'2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
# RFC4291 2.5.5.2
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
ipaddress.ip_address('::FFFF:c000:201'))
# RFC4291 2.2 (part 3) x::d.d.d.d
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4_address.version, 4)
self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
self.assertEqual(self.ipv6_interface.max_prefixlen, 128)
def testPacked(self):
self.assertEqual(self.ipv4_address.packed,
b'\x01\x02\x03\x04')
self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
b'\xff\xfe\xfd\xfc')
self.assertEqual(self.ipv6_address.packed,
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01')
self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ b'\x00' * 6)
self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpType(self):
ipv4net = ipaddress.ip_network('1.2.3.4')
ipv4addr = ipaddress.ip_address('1.2.3.4')
ipv6net = ipaddress.ip_network('::1.2.3.4')
ipv6addr = ipaddress.ip_address('::1.2.3.4')
self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEqual(True, ipaddress.ip_interface(
'224.1.1.1/31').is_multicast)
self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_interface(
'192.168.1.1/17').is_private)
self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_network(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_network(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_network('169.254.1.0/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_interface(
'169.254.100.200/24').is_link_local)
self.assertEqual(False,
ipaddress.ip_interface(
'169.255.100.200/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_network(
'127.100.200.254/32').is_loopback)
self.assertEqual(True, ipaddress.ip_network(
'127.42.0.0/16').is_loopback)
self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
self.assertEqual(False,
ipaddress.ip_network('100.64.0.0/10').is_private)
self.assertEqual(False, ipaddress.ip_network('100.64.0.0/10').is_global)
self.assertEqual(True,
ipaddress.ip_network('192.0.2.128/25').is_private)
self.assertEqual(True,
ipaddress.ip_network('192.0.3.0/24').is_global)
# test addresses
self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
self.assertEqual(False,
ipaddress.ip_address('239.255.255.255').is_reserved)
self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_address('169.254.100.200').is_link_local)
self.assertEqual(False,
ipaddress.ip_address('169.255.100.200').is_link_local)
self.assertTrue(ipaddress.ip_address('192.0.7.1').is_global)
self.assertFalse(ipaddress.ip_address('203.0.113.1').is_global)
self.assertEqual(True,
ipaddress.ip_address('127.100.200.254').is_loopback)
self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_network(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_network(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_network(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
self.assertEqual(True,
ipaddress.ip_network('2001::1/128').is_private)
self.assertEqual(True,
ipaddress.ip_network('200::1/128').is_global)
# test addresses
self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_address(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_address(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(
ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network('10.1.1.0/26')
addr3 = ipaddress.ip_network('10.2.1.0/24')
addr4 = ipaddress.ip_address('10.1.1.0')
addr5 = ipaddress.ip_network('2001:db8::0/32')
addr6 = ipaddress.ip_network('10.1.1.5/32')
self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
[ipaddress.ip_network('10.1.1.64/26'),
ipaddress.ip_network('10.1.1.128/25')])
self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
self.assertEqual(list(addr1.address_exclude(addr1)), [])
self.assertEqual(sorted(list(addr1.address_exclude(addr6))),
[ipaddress.ip_network('10.1.1.0/30'),
ipaddress.ip_network('10.1.1.4/32'),
ipaddress.ip_network('10.1.1.6/31'),
ipaddress.ip_network('10.1.1.8/29'),
ipaddress.ip_network('10.1.1.16/28'),
ipaddress.ip_network('10.1.1.32/27'),
ipaddress.ip_network('10.1.1.64/26'),
ipaddress.ip_network('10.1.1.128/25')])
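    # Illustrative sketch: address_exclude yields the pieces of the larger
    # network that remain once the excluded subnet is removed, e.g.
    #   >>> sorted(ipaddress.ip_network('192.0.2.0/28').address_exclude(
    #   ...        ipaddress.ip_network('192.0.2.1/32')))
    #   [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
    #    IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]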
def testHash(self):
self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
hash(ipaddress.ip_interface('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
hash(ipaddress.ip_network('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
hash(ipaddress.ip_address('10.1.1.0')))
# i70
self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
hash(ipaddress.ip_address(
int(ipaddress.ip_address('1.2.3.4')._ip))))
ip1 = ipaddress.ip_address('10.1.1.0')
ip2 = ipaddress.ip_address('1::')
dummy = {}
dummy[self.ipv4_address] = None
dummy[self.ipv6_address] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertIn(self.ipv4_address, dummy)
self.assertIn(ip2, dummy)
def testIPBases(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.compressed)
net = self.ipv6_network
self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)
def testIPv6NetworkHelpers(self):
net = self.ipv6_network
self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
net.with_netmask)
self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
net.with_hostmask)
self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.with_prefixlen)
self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
self.assertEqual('1.2.3.0/24', str(net))
def testCopyConstructor(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network(addr1)
addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
addr4 = ipaddress.ip_interface(addr3)
addr5 = ipaddress.IPv4Address('1.1.1.1')
addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddress.IPv6Interface('2001::1')
addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
addr3 = ipaddress.IPv6Network('2001::/96')
addr4 = ipaddress.IPv4Address('192.168.178.1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
addr1.exploded)
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
ipaddress.IPv6Interface('::1/128').exploded)
# issue 77
self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
addr2.exploded)
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
addr3.exploded)
self.assertEqual('192.168.178.1', addr4.exploded)
def testReversePointer(self):
addr1 = ipaddress.IPv4Address('127.0.0.1')
addr2 = ipaddress.IPv6Address('2001:db8::1')
self.assertEqual('1.0.0.127.in-addr.arpa', addr1.reverse_pointer)
self.assertEqual('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.' +
'b.d.0.1.0.0.2.ip6.arpa',
addr2.reverse_pointer)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4_address))
self.assertEqual(42540616829182469433547762482097946625,
int(self.ipv6_address))
def testForceVersion(self):
self.assertEqual(ipaddress.ip_network(1).version, 4)
self.assertEqual(ipaddress.IPv6Network(1).version, 6)
def testWithStar(self):
self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
self.assertEqual(self.ipv4_interface.with_netmask,
"1.2.3.4/255.255.255.0")
self.assertEqual(self.ipv4_interface.with_hostmask,
"1.2.3.4/0.0.0.255")
self.assertEqual(self.ipv6_interface.with_prefixlen,
'2001:658:22a:cafe:200::1/64')
self.assertEqual(self.ipv6_interface.with_netmask,
'2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
        # this probably doesn't make much sense, but it's included for
        # compatibility with ipv4
self.assertEqual(self.ipv6_interface.with_hostmask,
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertNotIn('broadcast_address', self.ipv4_network.__dict__)
self.assertNotIn('hostmask', self.ipv4_network.__dict__)
# V4 - populate and test
self.assertEqual(self.ipv4_network.broadcast_address,
ipaddress.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4_network.hostmask,
ipaddress.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertIn('broadcast_address', self.ipv4_network.__dict__)
self.assertIn('hostmask', self.ipv4_network.__dict__)
# V6 - make sure we're empty
self.assertNotIn('broadcast_address', self.ipv6_network.__dict__)
self.assertNotIn('hostmask', self.ipv6_network.__dict__)
# V6 - populate and test
self.assertEqual(self.ipv6_network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6_interface.network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(
self.ipv6_network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
self.assertEqual(
self.ipv6_interface.network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_interface.network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertIn('broadcast_address', self.ipv6_network.__dict__)
self.assertIn('hostmask', self.ipv6_network.__dict__)
self.assertIn('broadcast_address', self.ipv6_interface.network.__dict__)
self.assertIn('hostmask', self.ipv6_interface.network.__dict__)
def testTeredo(self):
# stolen from wikipedia
server = ipaddress.IPv4Address('65.54.227.120')
client = ipaddress.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
self.assertEqual((server, client),
ipaddress.ip_address(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
# i77
teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
ipaddress.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
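    # Background for the expectations above: a Teredo address lives under
    # 2001::/32; bits 32-63 carry the server's IPv4 address and the low 32
    # bits carry the client's IPv4 address XOR-ed with 0xFFFFFFFF, which is
    # why 0x4136e378 maps to 65.54.227.120 and 0x3ffffdd2 to 192.0.2.45.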
def testsixtofour(self):
sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1')
bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
sixtofouraddr.sixtofour)
self.assertFalse(bad_addr.sixtofour)
# issue41004 Hash collisions in IPv4Interface and IPv6Interface
def testV4HashIsNotConstant(self):
ipv4_address1 = ipaddress.IPv4Interface("1.2.3.4")
ipv4_address2 = ipaddress.IPv4Interface("2.3.4.5")
self.assertNotEqual(ipv4_address1.__hash__(), ipv4_address2.__hash__())
# issue41004 Hash collisions in IPv4Interface and IPv6Interface
def testV6HashIsNotConstant(self):
ipv6_address1 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:1")
ipv6_address2 = ipaddress.IPv6Interface("2001:658:22a:cafe:200:0:0:2")
self.assertNotEqual(ipv6_address1.__hash__(), ipv6_address2.__hash__())
if __name__ == '__main__':
unittest.main()
| 44.341266
| 89
| 0.593972
|
f9217ca43ebaf09ce80fdb76d4adc1d668b66d54
| 482
|
py
|
Python
|
examples/dynamictable/Student.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 739
|
2015-01-01T02:05:11.000Z
|
2022-03-30T15:26:16.000Z
|
examples/dynamictable/Student.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2015-03-25T23:17:04.000Z
|
2021-08-19T08:25:22.000Z
|
examples/dynamictable/Student.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 167
|
2015-01-01T22:27:47.000Z
|
2022-03-17T13:29:19.000Z
|
from Schedule import Schedule
from Person import Person
class Student(Person):
def __init__(self, classSchedule=None, **kwargs):
Person.__init__(self, **kwargs)
if classSchedule is None:
self.classSchedule = Schedule()
else:
self.classSchedule = classSchedule
def getSchedule(self, daysFilter):
return self.classSchedule.getDescription(daysFilter)
def getClassSchedule(self):
return self.classSchedule
| 26.777778
| 60
| 0.682573
|
098245ea28c4266e078062f72da43d181af53666
| 4,962
|
py
|
Python
|
gpMgmt/test/behave/mgmt_utils/steps/gpstate_utils.py
|
haolinw/gpdb
|
16a9465747a54f0c61bac8b676fe7611b4f030d8
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/test/behave/mgmt_utils/steps/gpstate_utils.py
|
haolinw/gpdb
|
16a9465747a54f0c61bac8b676fe7611b4f030d8
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/test/behave/mgmt_utils/steps/gpstate_utils.py
|
haolinw/gpdb
|
16a9465747a54f0c61bac8b676fe7611b4f030d8
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
from behave import given, when, then
import os
import re
from gppylib.db import dbconn
from gppylib.gparray import GpArray, ROLE_MIRROR
from test.behave_utils.utils import check_stdout_msg, check_string_not_present_stdout
from gppylib.commands.gp import get_coordinatordatadir
@then('a sample recovery_progress.file is created from saved lines')
def impl(context):
with open('{}/gpAdminLogs/recovery_progress.file'.format(os.path.expanduser("~")), 'w+') as fp:
fp.writelines(context.recovery_lines)
@given('a sample recovery_progress.file is created with ongoing recoveries in gpAdminLogs')
def impl(context):
with open('{}/gpAdminLogs/recovery_progress.file'.format(os.path.expanduser("~")), 'w+') as fp:
fp.write("full:5: 1164848/1371715 kB (84%), 0/1 tablespace (...t1/demoDataDir0/base/16384/40962)\n")
fp.write("incremental:6: 1/1371875 kB (1%)")
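# The sample lines written above appear to follow the layout of gprecoverseg's
# progress file, i.e. "<recovery_type>:<dbid>: <progress text>" (for example
# "full:5: 1164848/1371715 kB (84%), ..."). That reading is inferred from the
# fixtures in this module, not from separate file-format documentation.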
@then('a sample gprecoverseg.lock directory is created in coordinator_data_directory')
@given('a sample gprecoverseg.lock directory is created in coordinator_data_directory')
def impl(context):
gprecoverseg_lock_dir = os.path.join(get_coordinatordatadir() + '/gprecoverseg.lock')
os.mkdir(gprecoverseg_lock_dir)
@given('a sample recovery_progress.file is created with completed recoveries in gpAdminLogs')
def impl(context):
with open('{}/gpAdminLogs/recovery_progress.file'.format(os.path.expanduser("~")), 'w+') as fp:
fp.write("incremental:5: pg_rewind: Done!\n")
fp.write("full:6: 1164848/1371715 kB (84%), 0/1 tablespace (...t1/demoDataDir0/base/16384/40962)\n")
fp.write("full:7: pg_basebackup: completed")
@then('gpstate output looks like')
def impl(context):
# Check the header line first.
header_pattern = r'[ \t]+'.join(context.table.headings)
check_stdout_msg_in_order(context, header_pattern)
check_rows_exist(context)
@then('gpstate output contains "{recovery_types}" entries for mirrors of content {contents}')
def impl(context, recovery_types, contents):
recovery_types = recovery_types.split(',')
contents = [int(c) for c in contents.split(',')]
contents = set(contents)
all_segments = GpArray.initFromCatalog(dbconn.DbURL()).getDbList()
segments_to_display = []
segments_to_not_display = []
for seg in all_segments:
if seg.getSegmentContentId() in contents and seg.getSegmentRole() == ROLE_MIRROR:
segments_to_display.append(seg)
else:
segments_to_not_display.append(seg)
for index, seg_to_display in enumerate(segments_to_display):
hostname = seg_to_display.getSegmentHostName()
port = seg_to_display.getSegmentPort()
        expected_msg = r"{}[ \t]+{}[ \t]+{}[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+[0-9]+%".format(hostname, port,
                                                                                          recovery_types[index])
check_stdout_msg(context, expected_msg)
#TODO assert that only segments_to_display are printed to the console
# for seg_to_not_display in segments_to_not_display:
# check_string_not_present_stdout(context, str(seg_to_not_display.getSegmentPort()))
@then('gpstate output has rows')
def impl(context):
check_rows_exist(context)
@then('gpstate output has rows with keys values')
def impl(context):
# Check that every row exists in the standard out in the specified order.
# We accept any amount of horizontal whitespace in between columns.
def check_row(row):
split_row = [str.strip() for str in ''.join(row).split('=')]
row_pattern = r'[ \t]+=[ \t]+'.join(split_row)
check_stdout_msg_in_order(context, row_pattern)
check_row(context.table.headings)
for row in context.table:
check_row(row)
def check_rows_exist(context):
# Check that every row exists in the standard out. We accept any amount
# of horizontal whitespace in between columns.
for row in context.table:
row_pattern = r'[ \t]+'.join(row)
check_stdout_msg_in_order(context, row_pattern)
def check_stdout_msg_in_order(context, msg):
"""
Searches forward in context.stdout_message for a string matching the msg
pattern. Once output has been matched, it's no longer considered for future
matching. Use this matcher for order-dependent output tests.
"""
# Lazily initialize the stdout_position -- if this is the first time we've
# called this, start at the beginning.
if 'stdout_position' not in context:
context.stdout_position = 0
pat = re.compile(msg)
match = pat.search(context.stdout_message, pos=context.stdout_position)
if not match:
err_str = (
"Expected stdout string '%s' in remaining output:\n"
"%s\n\n"
"Full output was\n%s"
) % (msg, context.stdout_message[context.stdout_position:], context.stdout_message)
raise Exception(err_str)
context.stdout_position = match.end()
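# --- Illustrative sketch (hypothetical, not part of the original steps module). ---
# check_stdout_msg_in_order() only needs an object with a stdout_message attribute, support for
# the `in` operator, and a writable stdout_position, so its order-dependent matching can be
# demonstrated with a stand-in for behave's Context. The hostnames, ports, and recovery types
# below are made up for the demonstration.
class _FakeContext(object):
    def __init__(self, stdout_message):
        self.stdout_message = stdout_message
    def __contains__(self, name):
        return hasattr(self, name)
def _demo_in_order_matching():
    ctx = _FakeContext("sdw1   7000   full\nsdw2   7001   incremental\n")
    check_stdout_msg_in_order(ctx, r"sdw1[ \t]+7000[ \t]+full")         # matches the first line
    check_stdout_msg_in_order(ctx, r"sdw2[ \t]+7001[ \t]+incremental")  # matches later output only
    # check_stdout_msg_in_order(ctx, r"sdw1[ \t]+7000")  # would raise: that output was already consumed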
| 42.050847 | 111 | 0.696493 |
3a889a4b201ee3856e9d2829dce5ec21bc4ad613 | 6,661 | py | Python | day04.py | htv2012/advent-of-code-2020 | bc884d5e8a8b437d6b3bd52e689b4bd142bb3bfe | ["MIT"] | null | null | null | day04.py | htv2012/advent-of-code-2020 | bc884d5e8a8b437d6b3bd52e689b4bd142bb3bfe | ["MIT"] | null | null | null | day04.py | htv2012/advent-of-code-2020 | bc884d5e8a8b437d6b3bd52e689b4bd142bb3bfe | ["MIT"] | null | null | null |
#!/usr/bin/env python3
"""
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your
North Pole Credentials instead of your passport. While these documents
are extremely similar, North Pole Credentials aren't issued by a
country and therefore aren't actually valid documentation for travel
in most of the world.
It seems like you're not the only one having problems, though; a
very long line has formed for the automatic passport scanners, and
the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might
be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having
trouble detecting which passports have all required fields. The
expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each
passport is represented as a sequence of key:value pairs separated
by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The
second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid,
so it looks like data from North Pole Credentials, not a passport
at all! Surely, nobody would mind if you made the system temporarily
ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid
is fine, but missing any other field is not, so this passport is
invalid.
According to the above rules, your improved system would report 2
valid passports.
Count the number of valid passports - those that have all required
fields. Treat cid as optional. In your batch file, how many passports
are valid?
--- Part Two ---
The line is moving more quickly now, but you overhear airport
security talking about how passports with invalid data are getting
through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has
strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are
both present and valid according to the above rules. Here are some
example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required
fields and valid values. Continue to treat cid as optional. In your
batch file, how many passports are valid?
"""
def parse(record):
fields = record.split()
result = dict(f.split(":") for f in fields)
return result
def in_range(value, lower, upper):
try:
value = int(value)
return lower <= value <= upper
except ValueError:
return False
# TODO: Unit test all functions
def valid_height(passport):
    value = passport["hgt"]
    number, unit = value[:-2], value[-2:]
    # Per the rules above: if cm, at least 150 and at most 193; if in, at least 59 and at most 76.
    ranges = {"cm": (150, 193), "in": (59, 76)}
    return unit in ranges and in_range(number, *ranges[unit])
def has_required_fields(passport):
must_have = set("ecl pid eyr hcl byr iyr hgt".split())
missing = must_have - passport.keys()
return not bool(missing)
def valid_hair_color(passport):
color = passport["hcl"]
if not color.startswith("#"):
return False
color = color[1:]
if len(color) != 6:
return False
return all(digit in set("0123456789abcdef") for digit in color)
def valid_eye_color(passport):
return passport["ecl"] in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
def valid_pid(passport):
pid = passport["pid"]
return len(pid) == 9 and pid.isdigit()
def valid_birth_year(passport):
    # At least 1920 and at most 2002; in_range rejects non-numeric values.
    return in_range(passport["byr"], 1920, 2002)
def valid_issue_year(passport):
    # At least 2010 and at most 2020.
    return in_range(passport["iyr"], 2010, 2020)
def valid_expiration_year(passport):
    # At least 2020 and at most 2030.
    return in_range(passport["eyr"], 2020, 2030)
def validate(passport):
checks = [
has_required_fields,
valid_birth_year,
valid_expiration_year,
valid_eye_color,
valid_hair_color,
valid_height,
valid_issue_year,
valid_pid,
]
if not all(check(passport) for check in checks):
return 0
return 1
def main():
# Parse the input
with open("day04.txt") as stream:
passports = [parse(record) for record in stream.read().split("\n\n")]
print(f"Number of passports to check: {len(passports)}")
# Count the valids
valids = sum(validate(pp) for pp in passports)
print(f"Number of valid: {valids}")
if __name__ == "__main__":
main()
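# --- Minimal unit-test sketch following up on the TODO above (illustrative, not part of the original solution). ---
# The expected values come straight from the example list in the module docstring
# ("byr valid: 2002", "hgt invalid: 190in", and so on). Call test_validators() directly to run it.
def test_validators():
    assert in_range("2002", 1920, 2002)
    assert not in_range("abc", 0, 9999)
    assert valid_birth_year({"byr": "2002"})
    assert not valid_birth_year({"byr": "2003"})
    assert valid_height({"hgt": "60in"})
    assert valid_height({"hgt": "190cm"})
    assert not valid_height({"hgt": "190in"})
    assert not valid_height({"hgt": "190"})
    assert valid_hair_color({"hcl": "#123abc"})
    assert not valid_hair_color({"hcl": "#123abz"})
    assert not valid_hair_color({"hcl": "123abc"})
    assert valid_eye_color({"ecl": "brn"})
    assert not valid_eye_color({"ecl": "wat"})
    assert valid_pid({"pid": "000000001"})
    assert not valid_pid({"pid": "0123456789"})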
| 26.751004 | 79 | 0.720913 |
222f86123c6a3185db4a539b4343788ff0f71e44 | 1,041 | py | Python | tests/sentry/tasks/test_store.py | ChadKillingsworth/sentry | ffcb9007a95a83ee267935fe605f8ee8f03a85a5 | ["BSD-3-Clause"] | null | null | null | tests/sentry/tasks/test_store.py | ChadKillingsworth/sentry | ffcb9007a95a83ee267935fe605f8ee8f03a85a5 | ["BSD-3-Clause"] | null | null | null | tests/sentry/tasks/test_store.py | ChadKillingsworth/sentry | ffcb9007a95a83ee267935fe605f8ee8f03a85a5 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import
import mock
from sentry.plugins import Plugin2
from sentry.tasks.store import preprocess_event
from sentry.testutils import PluginTestCase
class BasicPreprocessorPlugin(Plugin2):
def get_event_preprocessors(self):
def remove_extra(data):
del data['extra']
return data
return [remove_extra, lambda x: None]
def is_enabled(self, project=None):
return True
class PreprocessEventTest(PluginTestCase):
plugin = BasicPreprocessorPlugin
@mock.patch('sentry.tasks.store.save_event')
def test_simple(self, mock_save_event):
project = self.create_project()
data = {
'project': project.id,
'message': 'test',
'extra': {'foo': 'bar'},
}
preprocess_event(data=data)
mock_save_event.delay.assert_called_once_with(
cache_key=None,
data={
'project': project.id,
'message': 'test',
},
)
| 23.659091 | 54 | 0.616715 |
a9501b7e9008ae7402aa838bbdccdf6b1fb6660c | 271 | py | Python | weibo_base/__init__.py | NewKnowledge/weibo-scraper | a136fb6b88f66597ad38ef5deb4ec54e5eb05941 | ["MIT"] | 58 | 2018-05-10T20:10:54.000Z | 2022-03-15T11:11:31.000Z | weibo_base/__init__.py | NewKnowledge/weibo-scraper | a136fb6b88f66597ad38ef5deb4ec54e5eb05941 | ["MIT"] | 18 | 2018-05-21T16:20:30.000Z | 2021-12-15T19:35:21.000Z | weibo_base/__init__.py | NewKnowledge/weibo-scraper | a136fb6b88f66597ad38ef5deb4ec54e5eb05941 | ["MIT"] | 17 | 2018-05-25T09:26:51.000Z | 2022-03-14T01:56:43.000Z |
# -*- coding:utf-8 -*-
"""
Author: Helixcs
Site: https://github.com/Xarrow/weibo-scraper
File: __init__.py
Time: 5/19/18
"""
from .weibo_typing import *
from .weibo_api import *
from .weibo_component import *
from .weibo_util import *
from .weibo_parser import *
| 19.357143 | 46 | 0.708487 |
a7f75db08da6b9d4ceeb786d68355635b4f1a59d | 1,788 | py | Python | Variado_GeekUniversity/guppe/counter.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | ["MIT"] | null | null | null | Variado_GeekUniversity/guppe/counter.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | ["MIT"] | null | null | null | Variado_GeekUniversity/guppe/counter.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | ["MIT"] | null | null | null |
"""
Módulo Collections - Counter (Contador)
https://docs.python.org/3/library/collections.html#collections.Counter
Collections -> High-performance Container Datetypes
Counter -> Recebe um interável como parâmetro e cria um objeto do tipo Collections Counter que é parecido
com um dicionário, contendo como chave o elemento da lista passada como parâmetro e como valor a quantidade
de ocorrências desse elemento.
# Realizando o import
from collections import Counter
# Exemplo 1
# Podemos utilizar qualquer iterável, aqui usamos uma Lista
lista = [1, 1, 1, 2, 2, 3, 3, 3, 3, 1, 1, 2, 2, 4, 4, 4, 5, 5, 5, 5, 3, 45, 45, 66, 66, 43, 34]
# Utilizando o Counter
res = Counter(lista)
print(type(res))
print(res)
# Counter({1: 5, 3: 5, 2: 4, 5: 4, 4: 3, 45: 2, 66: 2, 43: 1, 34: 1})
# Veja que, para cada elemento da lista, o Counter criou uma chave e colocou como valor a quantidade de ocorrências.
# Exemplo 2
print(Counter('Geek University'))
# Counter({'e': 3, 'i': 2, 'G': 1, 'k': 1, ' ': 1, 'U': 1, 'n': 1, 'v': 1, 'r': 1, 's': 1, 't': 1, 'y': 1})
"""
from collections import Counter
# Exemplo 3
texto = """A Wikipédia é um projeto de enciclopédia colaborativa, universal e multilíngue estabelecido na internet
sob o princípio wiki. Tem como propósito fornecer um conteúdo livre, objetivo e verificável, que todos possam editar
e melhorar. O projeto é definido pelos princípios fundadores. O conteúdo é disponibilizado sob a licença Creative
Commons BY-SA e pode ser copiado e reutilizado sob a mesma licença — mesmo para fins comerciais — desde que
respeitando os termos e condições de uso. """
palavras = texto.split()
# print(palavras)
res = Counter(palavras)
print(res)
# Encontrando as 5 palavras com mais ocorrência no texto
print(res.most_common(5))
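# Follow-up example (added for illustration): most_common() on the 'Geek University' string from the
# docstring above is fully predictable, since 'e' occurs 3 times and 'i' occurs twice.
print(Counter('Geek University').most_common(2))  # [('e', 3), ('i', 2)]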
| 28.83871 | 119 | 0.714206 |
c9aef418837a2a7b9060778bb644b37ed53e27c8 | 113 | py | Python | venv/lib/python3.9/site-packages/nbformat/_version.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | null | null | null | venv/lib/python3.9/site-packages/nbformat/_version.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | null | null | null | venv/lib/python3.9/site-packages/nbformat/_version.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | null | null | null |
# Make sure to update package.json, too!
version_info = (5, 3, 0)
__version__ = ".".join(map(str, version_info))
| 28.25 | 46 | 0.690265 |