Dataset schema (column name: type and value range; "nullable" marks columns that may be empty):

hexsha: stringlengths 40..40
size: int64 4..1.02M
ext: stringclasses (8 values)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths 4..209
max_stars_repo_name: stringlengths 5..121
max_stars_repo_head_hexsha: stringlengths 40..40
max_stars_repo_licenses: listlengths 1..10
max_stars_count: int64 1..191k, nullable
max_stars_repo_stars_event_min_datetime: stringlengths 24..24, nullable
max_stars_repo_stars_event_max_datetime: stringlengths 24..24, nullable
max_issues_repo_path: stringlengths 4..209
max_issues_repo_name: stringlengths 5..121
max_issues_repo_head_hexsha: stringlengths 40..40
max_issues_repo_licenses: listlengths 1..10
max_issues_count: int64 1..67k, nullable
max_issues_repo_issues_event_min_datetime: stringlengths 24..24, nullable
max_issues_repo_issues_event_max_datetime: stringlengths 24..24, nullable
max_forks_repo_path: stringlengths 4..209
max_forks_repo_name: stringlengths 5..121
max_forks_repo_head_hexsha: stringlengths 40..40
max_forks_repo_licenses: listlengths 1..10
max_forks_count: int64 1..105k, nullable
max_forks_repo_forks_event_min_datetime: stringlengths 24..24, nullable
max_forks_repo_forks_event_max_datetime: stringlengths 24..24, nullable
content: stringlengths 4..1.02M
avg_line_length: float64 1.07..66.1k
max_line_length: int64 4..266k
alphanum_fraction: float64 0.01..1

Rows follow, one per source file. Each row lists hexsha | size | ext | lang, then for max_stars_repo / max_issues_repo / max_forks_repo: path | name | head_hexsha | licenses | count | event datetimes, then the file content and its line statistics.
hexsha: 64b762b58172911616e280b85678c26dceabe66d | size: 2,782 | ext: py | lang: Python
max_stars_repo: mundiapi/models/update_pricing_scheme_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | ["MIT"] | count: 10 | events: 2017-08-30T15:53:00.000Z to 2021-02-11T18:06:56.000Z
max_issues_repo: mundiapi/models/update_pricing_scheme_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | ["MIT"] | count: 3 | events: 2020-02-20T08:24:05.000Z to 2021-07-22T14:18:33.000Z
max_forks_repo: mundiapi/models/update_pricing_scheme_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | ["MIT"] | count: 7 | events: 2017-04-27T13:46:52.000Z to 2021-04-14T13:44:23.000Z
content:
# -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import mundiapi.models.update_price_bracket_request
class UpdatePricingSchemeRequest(object):
"""Implementation of the 'UpdatePricingSchemeRequest' model.
Request for updating a pricing scheme
Attributes:
scheme_type (string): Scheme type
price_brackets (list of UpdatePriceBracketRequest): Price brackets
price (int): Price
minimum_price (int): Minimum price
        percentage (float): percentage value used when the pricing_scheme type is Percent
"""
# Create a mapping from Model property names to API property names
_names = {
"scheme_type":'scheme_type',
"price_brackets":'price_brackets',
"price":'price',
"minimum_price":'minimum_price',
"percentage":'percentage'
}
def __init__(self,
scheme_type=None,
price_brackets=None,
price=None,
minimum_price=None,
percentage=None):
"""Constructor for the UpdatePricingSchemeRequest class"""
# Initialize members of the class
self.scheme_type = scheme_type
self.price_brackets = price_brackets
self.price = price
self.minimum_price = minimum_price
self.percentage = percentage
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
scheme_type = dictionary.get('scheme_type')
price_brackets = None
        if dictionary.get('price_brackets') is not None:
price_brackets = list()
for structure in dictionary.get('price_brackets'):
price_brackets.append(mundiapi.models.update_price_bracket_request.UpdatePriceBracketRequest.from_dictionary(structure))
price = dictionary.get('price')
minimum_price = dictionary.get('minimum_price')
percentage = dictionary.get('percentage')
# Return an object of this model
return cls(scheme_type,
price_brackets,
price,
minimum_price,
percentage)
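
A minimal usage sketch of the generated model above; the payload values are illustrative and assume the mundiapi package is importable:

from mundiapi.models.update_pricing_scheme_request import UpdatePricingSchemeRequest

payload = {
    "scheme_type": "unit",
    "price": 1000,
    "minimum_price": 500,
}
request = UpdatePricingSchemeRequest.from_dictionary(payload)
print(request.price)           # 1000
print(request.price_brackets)  # None, because the key is absent from the payload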
avg_line_length: 31.977011 | max_line_length: 137 | alphanum_fraction: 0.604242

hexsha: 681a1d923545f802976d4bcf18a4febdb34ddf34 | size: 1,830 | ext: py | lang: Python
max_stars_repo: lr_for_the_masses/rr_mlm/mlm.py | samuel-velez/-533_lab_4_KatheVelez | d4d9b7ea7cd3d0ae896d9aee9ca3b316bd4166b2 | ["MIT"] | count: null | events: null
max_issues_repo: lr_for_the_masses/rr_mlm/mlm.py | samuel-velez/-533_lab_4_KatheVelez | d4d9b7ea7cd3d0ae896d9aee9ca3b316bd4166b2 | ["MIT"] | count: null | events: null
max_forks_repo: lr_for_the_masses/rr_mlm/mlm.py | samuel-velez/-533_lab_4_KatheVelez | d4d9b7ea7cd3d0ae896d9aee9ca3b316bd4166b2 | ["MIT"] | count: null | events: null
content:
class linearmixedeffects():
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.regression import mixed_linear_model
import sklearn as sk
from math import sqrt
import statsmodels.api as sm
def __init__(self, response, fixed, random, predict = 0, mlmf = 0):
self.response = response
self.fixed = fixed
self.random = random
def fitmlm(self):
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
return mlmf
def summarymlm(self):
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
print(" ")
print("The summary of the linear mixed effects model is given below:")
return mlmf.summary()
def plotmlm(self):
import seaborn as sns; sns.set()
import pandas as pd
from statsmodels.regression import mixed_linear_model
mlm = mixed_linear_model.MixedLM(endog = pd.DataFrame(self.response), exog = pd.DataFrame(self.fixed), groups = pd.DataFrame(self.random), formula = 'response ~ fixed')
mlmf = mlm.fit()
db_plot = pd.DataFrame()
db_plot["residuals"] = mlmf.resid.values
db_plot["fixed"] = fixed
db_plot["predicted"] = mlmf.fittedvalues
        sns.lmplot(x = "predicted", y = "residuals", data = db_plot)
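
A rough sketch of how the class above might be driven with synthetic data. It assumes numpy, pandas and statsmodels are installed; whether the underlying MixedLM call accepts these inputs as written depends on the statsmodels version, so this only illustrates the intended call pattern:

import numpy as np

rng = np.random.default_rng(0)
groups = np.repeat(np.arange(5), 20)                # 5 groups, 20 observations each
fixed = rng.normal(size=100)                        # fixed-effect covariate
response = 2.0 * fixed + 0.5 * groups + rng.normal(size=100)

model = linearmixedeffects(response, fixed, groups)
fitted = model.fitmlm()                             # fits response ~ fixed with per-group random effects
print(model.summarymlm())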
avg_line_length: 43.571429 | max_line_length: 176 | alphanum_fraction: 0.660109

hexsha: 3942962e7ee40b2bf1df43a03e223da1c1e5707e | size: 1,037 | ext: py | lang: Python
max_stars_repo: tests/helpers.py | TheMOOCAgency/figures | 4f991ed32626c25b4d67300c9b8019a36c5db297 | ["MIT"] | count: 1 | events: 2020-07-27T19:50:53.000Z to 2020-07-27T19:50:53.000Z
max_issues_repo: tests/helpers.py | TheMOOCAgency/figures | 4f991ed32626c25b4d67300c9b8019a36c5db297 | ["MIT"] | count: 3 | events: 2020-06-05T21:19:02.000Z to 2022-02-10T18:09:04.000Z
max_forks_repo: tests/helpers.py | TheMOOCAgency/figures | 4f991ed32626c25b4d67300c9b8019a36c5db297 | ["MIT"] | count: null | events: null
content:
'''Helper methods for Figures testing
'''
from dateutil.rrule import rrule, DAILY
from packaging import version
import organizations
def make_course_key_str(org, number, run='test-run', **kwargs):
'''
Helper method to create a string representation of a CourseKey
'''
return 'course-v1:{}+{}+{}'.format(org, number, run)
def create_metrics_model_timeseries(factory, first_day, last_day):
return [factory(date_for=dt)
for dt in rrule(DAILY, dtstart=first_day, until=last_day)]
def organizations_support_sites():
"""
This function returns True if organizations supports site-organization
mapping, False otherwise.
This is used to conditionally run tests
"""
orgs_has_site = hasattr(organizations.models.Organization, 'sites')
return orgs_has_site
def django_filters_pre_v1():
"""Returns `True` if the installed Django Filters package is before '1.0.0'
"""
import django_filters
return version.parse(django_filters.__version__) < version.parse('1.0.0')
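
A quick illustration of the course-key helper above (the org, number and run values are made up):

key = make_course_key_str('DemoOrg', 'Course101', run='2021')
print(key)  # course-v1:DemoOrg+Course101+2021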
avg_line_length: 27.289474 | max_line_length: 79 | alphanum_fraction: 0.719383

hexsha: 7bce96eec91113f058c24e88820c97f2d2dc2fc2 | size: 2,607 | ext: py | lang: Python
max_stars_repo: selectable/tests/test_forms.py | affan2/django-selectable | 00a3c3b9eddb706e1b113d1b4cea706a7bf26a44 | ["BSD-2-Clause"] | count: null | events: null
max_issues_repo: selectable/tests/test_forms.py | affan2/django-selectable | 00a3c3b9eddb706e1b113d1b4cea706a7bf26a44 | ["BSD-2-Clause"] | count: null | events: null
max_forks_repo: selectable/tests/test_forms.py | affan2/django-selectable | 00a3c3b9eddb706e1b113d1b4cea706a7bf26a44 | ["BSD-2-Clause"] | count: 1 | events: 2020-01-08T09:08:21.000Z to 2020-01-08T09:08:21.000Z
content:
from django.conf import settings
from ..forms import BaseLookupForm
from .base import BaseSelectableTestCase, PatchSettingsMixin
__all__ = (
'BaseLookupFormTestCase',
)
class BaseLookupFormTestCase(PatchSettingsMixin, BaseSelectableTestCase):
def get_valid_data(self):
data = {
'term': 'foo',
'limit': 10,
}
return data
def test_valid_data(self):
data = self.get_valid_data()
form = BaseLookupForm(data)
self.assertTrue(form.is_valid(), "%s" % form.errors)
def test_invalid_limit(self):
"""
Test giving the form an invalid limit.
"""
data = self.get_valid_data()
data['limit'] = 'bar'
form = BaseLookupForm(data)
self.assertFalse(form.is_valid())
def test_no_limit(self):
"""
If SELECTABLE_MAX_LIMIT is set and limit is not given then
the form will return SELECTABLE_MAX_LIMIT.
"""
data = self.get_valid_data()
if 'limit' in data:
del data['limit']
form = BaseLookupForm(data)
self.assertTrue(form.is_valid(), "%s" % form.errors)
self.assertEqual(form.cleaned_data['limit'], settings.SELECTABLE_MAX_LIMIT)
def test_no_max_set(self):
"""
        If SELECTABLE_MAX_LIMIT is not set but a limit is given, then the
        form will return the given limit.
"""
settings.SELECTABLE_MAX_LIMIT = None
data = self.get_valid_data()
form = BaseLookupForm(data)
self.assertTrue(form.is_valid(), "%s" % form.errors)
if 'limit' in data:
            self.assertEqual(form.cleaned_data['limit'], data['limit'])
def test_no_max_set_not_given(self):
"""
If SELECTABLE_MAX_LIMIT is not set and not given then the form
will return no limit.
"""
settings.SELECTABLE_MAX_LIMIT = None
data = self.get_valid_data()
if 'limit' in data:
del data['limit']
form = BaseLookupForm(data)
self.assertTrue(form.is_valid(), "%s" % form.errors)
self.assertFalse(form.cleaned_data.get('limit'))
def test_over_limit(self):
"""
        If SELECTABLE_MAX_LIMIT is set and the given limit is greater, then
        the form will return SELECTABLE_MAX_LIMIT.
"""
data = self.get_valid_data()
data['limit'] = settings.SELECTABLE_MAX_LIMIT + 100
form = BaseLookupForm(data)
self.assertTrue(form.is_valid(), "%s" % form.errors)
self.assertEqual(form.cleaned_data['limit'], settings.SELECTABLE_MAX_LIMIT)
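
For context, a minimal sketch of the form outside the test case; it assumes Django settings (including SELECTABLE_MAX_LIMIT) are already configured:

form = BaseLookupForm({'term': 'foo', 'limit': 25})
if form.is_valid():
    print(form.cleaned_data['limit'])  # capped at SELECTABLE_MAX_LIMIT when that setting is set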
avg_line_length: 29.965517 | max_line_length: 83 | alphanum_fraction: 0.618719

hexsha: ac41c6b6522930ef696a7a121609414f94dd2b4e | size: 1,051 | ext: py | lang: Python
max_stars_repo: src/pycropml/transpiler/generators/sticsGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | ["MIT"] | count: null | events: null
max_issues_repo: src/pycropml/transpiler/generators/sticsGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | ["MIT"] | count: null | events: null
max_forks_repo: src/pycropml/transpiler/generators/sticsGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | ["MIT"] | count: null | events: null
content:
from pycropml.transpiler.generators.fortranGenerator import FortranGenerator, FortranCompo
import os
from pycropml.transpiler import lib
from path import Path
class SticsGenerator(FortranGenerator):
""" This class contains the specific properties of
Apsim and use the NodeVisitor to generate a csharp
code source from a well formed syntax tree.
"""
def __init__(self, tree=None, model=None, name=None):
self.tree = tree
self.model=model
self.name = name
self.indent_with=' '*4
dir_lib = Path(os.path.dirname(lib.__file__))
self.f_src=dir_lib/"f90"/"list_sub.f90"
FortranGenerator.__init__(self, tree, model, name)
self.f_dest = os.path.join(self.model.path,"src","dssat","list_sub.f90")
class SticsCompo(FortranCompo):
""" This class generates Dssat module
"""
def __init__(self, tree, model=None, name=None):
self.tree = tree
self.model = model
self.name = name
FortranCompo.__init__(self,tree, model, self.name)
avg_line_length: 33.903226 | max_line_length: 90 | alphanum_fraction: 0.679353

hexsha: 7c5271cd2ccc591942d33a357a88f8af16fa2102 | size: 1,053 | ext: py | lang: Python
max_stars_repo: soc_site/discussions/migrations/0001_initial.py | nathanielCherian/socSite | e13ea1be15fde3511b46070038906930250e7f76 | ["MIT"] | count: null | events: null
max_issues_repo: soc_site/discussions/migrations/0001_initial.py | nathanielCherian/socSite | e13ea1be15fde3511b46070038906930250e7f76 | ["MIT"] | count: 8 | events: 2020-07-16T23:37:06.000Z to 2022-03-12T00:35:18.000Z
max_forks_repo: soc_site/discussions/migrations/0001_initial.py | nathanielCherian/socSite | e13ea1be15fde3511b46070038906930250e7f76 | ["MIT"] | count: null | events: null
content:
# Generated by Django 3.0.7 on 2020-06-21 21:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300, unique=True)),
('slug', models.SlugField(max_length=350)),
('content', models.TextField()),
('updated', models.DateTimeField(auto_now=True)),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
],
),
]
avg_line_length: 33.967742 | max_line_length: 123 | alphanum_fraction: 0.631529

hexsha: a33be66a133d7f31597a91f727670ef025e4fb53 | size: 4,846 | ext: py | lang: Python
max_stars_repo: Connector/wsutils/broker.py | swapper-pegasus/NodeChain | e1f7c6b372b1e5032fbeb2767503c1d2e878aa9c | ["MIT"] | count: 4 | events: 2021-05-05T18:34:03.000Z to 2022-01-05T06:41:02.000Z
max_issues_repo: Connector/wsutils/broker.py | swapper-pegasus/NodeChain | e1f7c6b372b1e5032fbeb2767503c1d2e878aa9c | ["MIT"] | count: 94 | events: 2021-05-05T18:38:10.000Z to 2022-03-31T23:00:59.000Z
max_forks_repo: Connector/wsutils/broker.py | swapper-pegasus/NodeChain | e1f7c6b372b1e5032fbeb2767503c1d2e878aa9c | ["MIT"] | count: 4 | events: 2021-05-03T14:44:43.000Z to 2022-01-21T19:56:04.000Z
content:
#!/usr/bin/python3
import threading
from logger import logger
from .singleton import Singleton
from .subscribers import SubscriberInterface
from .constants import *
class Broker(object, metaclass=Singleton):
def __init__(self):
self.topicSubscriptions = {} # Topic -> {"Subs":[Sub1, Sub2], "ClosingFunc":func}
def attach(self, subscriber, topic):
logger.printInfo(f"Attaching subscriber {subscriber.subscriberID} to topic [{topic.name}]")
if not issubclass(type(subscriber), SubscriberInterface):
logger.printWarning("Trying to attach unknown subscriber class")
return {
SUBSCRIBED: False
}
if topic.name not in self.topicSubscriptions:
self.topicSubscriptions[topic.name] = {
SUBSCRIBERS: [],
CLOSING_TOPIC_FUNC: topic.closingHandler
}
if subscriber not in self.topicSubscriptions[topic.name][SUBSCRIBERS]:
logger.printInfo(f"Subscriber {subscriber.subscriberID} atached successfully to topic [{topic.name}]")
self.topicSubscriptions[topic.name][SUBSCRIBERS].append(subscriber)
return {
SUBSCRIBED: True
}
else:
logger.printInfo(f"Subscriber {subscriber.subscriberID} already atached to topic [{topic.name}]")
return {
SUBSCRIBED: False
}
def detach(self, subscriber, topicName=""):
logger.printInfo(f"Detaching subscriber {subscriber.subscriberID} from topic [{topicName}]")
if not issubclass(type(subscriber), SubscriberInterface):
logger.printWarning("Trying to detach unknown subscriber class")
return {
UNSUBSCRIBED: False
}
if topicName not in self.topicSubscriptions:
logger.printWarning(f"Trying to detach subscriber {subscriber.subscriberID} from unknown topic [{topicName}]")
return {
UNSUBSCRIBED: False
}
elif subscriber in self.topicSubscriptions[topicName][SUBSCRIBERS]:
self.topicSubscriptions[topicName][SUBSCRIBERS].remove(subscriber)
logger.printInfo(f"Subscriber {subscriber.subscriberID} detached from topic [{topicName}]")
if len(self.topicSubscriptions[topicName][SUBSCRIBERS]) == 0:
logger.printWarning(f"No more subscribers for topic [{topicName}]")
del self.topicSubscriptions[topicName]
return {
UNSUBSCRIBED: True
}
else:
logger.printWarning(f"Subscriber {subscriber.subscriberID} can not be detached because it is not subscribed to topic [{topicName}]")
return {
UNSUBSCRIBED: False
}
def route(self, topicName="", message=""):
logger.printInfo(f"Routing message of topic [{topicName}]: {message}")
if topicName in self.topicSubscriptions:
for subscriber in self.topicSubscriptions[topicName][SUBSCRIBERS]:
subscriberNotificationThread = threading.Thread(target=_notifySubscriber, args=(subscriber, topicName, message), daemon=True)
subscriberNotificationThread.start()
def removeSubscriber(self, subscriber):
logger.printInfo(f"Removing subscriber {subscriber.subscriberID} from subsbribed topics")
if not issubclass(type(subscriber), SubscriberInterface):
logger.printWarning("Trying to remove unknown subscriber class")
return False
for topicName in subscriber.topicsSubscribed:
topicClosingFunc = self.topicSubscriptions[topicName][CLOSING_TOPIC_FUNC]
self.detach(subscriber, topicName)
if not self.topicHasSubscribers(topicName):
logger.printInfo(f"Calling closing func to topic [{topicName}]")
if topicClosingFunc is not None:
topicClosingFunc(topicName)
def isTopic(self, topicName):
return topicName in self.topicSubscriptions
def getSubTopics(self, topicName):
return [topicSubscription[len(topicName) + 1:] for topicSubscription in self.topicSubscriptions if topicName in topicSubscription]
def topicHasSubscribers(self, topicName):
if topicName in self.topicSubscriptions:
return len(self.topicSubscriptions[topicName][SUBSCRIBERS]) != 0
return False
def getTopicSubscribers(self, topicName):
if topicName in self.topicSubscriptions:
return self.topicSubscriptions[topicName][SUBSCRIBERS]
return []
def getTopicNameSubscriptions(self):
return list(self.topicSubscriptions.keys())
def _notifySubscriber(subscriber, topicName, message):
subscriber.onMessage(topicName, message)
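
A hedged sketch of the intended publish/subscribe flow. DummySubscriber and the topic object are hypothetical stand-ins (the real SubscriberInterface and topic classes live elsewhere in Connector/wsutils), so the calls are shown as comments and only illustrate the call pattern:

class DummySubscriber(SubscriberInterface):
    def onMessage(self, topicName, message):
        print(f"{topicName}: {message}")

broker = Broker()
# A topic object is expected to expose .name and .closingHandler:
# broker.attach(DummySubscriber(), someTopic)
# broker.route(topicName=someTopic.name, message="new block")
# broker.detach(subscriber, topicName=someTopic.name)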
avg_line_length: 38.460317 | max_line_length: 144 | alphanum_fraction: 0.656624

hexsha: 6370b10fc327bc88784fa563f63f9d561b2619a9 | size: 757 | ext: py | lang: Python
max_stars_repo: examples/Python3/demo.py | NaohiroTamura/snowboy | 1aca51b23bddf4b941cb400e8202eeba193bc00e | ["Apache-2.0"] | count: null | events: null
max_issues_repo: examples/Python3/demo.py | NaohiroTamura/snowboy | 1aca51b23bddf4b941cb400e8202eeba193bc00e | ["Apache-2.0"] | count: null | events: null
max_forks_repo: examples/Python3/demo.py | NaohiroTamura/snowboy | 1aca51b23bddf4b941cb400e8202eeba193bc00e | ["Apache-2.0"] | count: null | events: null
content:
import snowboydecoder
import sys
import signal
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
if len(sys.argv) == 1:
print("Error: need to specify model name")
print("Usage: python demo.py your.model")
sys.exit(-1)
model = sys.argv[1]
# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
detector = snowboydecoder.HotwordDetector(model, sensitivity=0.9)
print('Listening... Press Ctrl+C to exit')
# main loop
detector.start(detected_callback=snowboydecoder.play_audio_file,
interrupt_check=interrupt_callback,
sleep_time=0.03)
detector.terminate()
avg_line_length: 21.027778 | max_line_length: 65 | alphanum_fraction: 0.72391

hexsha: 4dff57e7f8b9bc4f0e043c5fadd5fcd0994128b4 | size: 2,694 | ext: py | lang: Python
max_stars_repo: functest/run_all.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | ["MIT"] | count: 101 | events: 2019-05-31T21:27:58.000Z to 2022-02-03T18:54:16.000Z
max_issues_repo: functest/run_all.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | ["MIT"] | count: 11 | events: 2019-09-19T16:03:09.000Z to 2020-12-05T18:37:54.000Z
max_forks_repo: functest/run_all.py | Luvideria/lightmetrica-v3 | 3e83db59998e79648047bac29c37d8eb18d7600d | ["MIT"] | count: 14 | events: 2019-06-05T03:06:09.000Z to 2022-01-15T06:36:24.000Z
content:
"""Run all functional tests"""
import os
import sys
from colorama import Fore, Back, Style
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
import argparse
import jupytext
import nbmerge
from shutil import copyfile
def run_functests(output_dir, lmenv_path):
# Output directory of executed notebooks
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Base directory of the functional tests.
# That is, the directory where this script is located.
base_path = os.path.dirname(os.path.realpath(__file__))
# Copy .lmenv file to the functest directory
copyfile(lmenv_path, os.path.join(base_path, '.lmenv'))
# Tests
tests = [
'example_blank',
'example_quad',
'example_raycast',
'example_pt',
'example_cpp',
'example_custom_renderer',
'example_serialization',
'func_render_all',
'func_render_instancing',
'func_accel_consistency',
'func_error_handling',
'func_obj_loader_consistency',
'func_serial_consistency',
'func_update_asset',
'func_scheduler',
'func_materials',
'func_lights',
'func_renderers',
'perf_accel',
'perf_obj_loader',
'perf_serial'
]
# Execute tests
for test in tests:
print(Fore.GREEN + "Running test [name='{}']".format(test) + Style.RESET_ALL, flush=True)
# Read the requested notebook
nb = jupytext.read(os.path.join(base_path, test + '.py'))
# Execute the notebook
ep = ExecutePreprocessor(timeout=600)
ep.preprocess(nb, {'metadata': {'path': base_path}})
# Write result
with open(os.path.join(output_dir, test + '.ipynb'), mode='w', encoding='utf-8') as f:
nbformat.write(nb, f)
# Merge executed notebooks
print(Fore.GREEN + "Merging notebooks" + Style.RESET_ALL)
notebook_paths = [os.path.join(output_dir, test + '.ipynb') for test in tests]
nb = nbmerge.merge_notebooks(os.getcwd(), notebook_paths)
with open(os.path.join(output_dir, 'merged.ipynb'), mode='w', encoding='utf-8') as f:
nbformat.write(nb, f)
# Notify success
print(Fore.GREEN + "All tests have been executed successfully" + Style.RESET_ALL)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Execute all functional tests')
parser.add_argument('--lmenv', type=str, help='Path to .lmenv file')
parser.add_argument('--output-dir', nargs='?', type=str, default='executed_functest', help='Output directory of executed notebooks')
args = parser.parse_args()
    run_functests(args.output_dir, args.lmenv)
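
The script is intended to be launched from a shell; a hypothetical invocation (the .lmenv path is a placeholder) is shown as a comment:

# python run_all.py --lmenv /path/to/.lmenv --output-dir executed_functest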
avg_line_length: 34.101266 | max_line_length: 136 | alphanum_fraction: 0.659985

hexsha: 565f49629159ceaeba26dfa8da682f4bc4b7c10f | size: 4,643 | ext: bzl | lang: Python
max_stars_repo: tensorflow/core/platform/build_config.bzl | nicolas-harraudeau-sonarsource/tensorflow | f42f57b814b82a217943f621967036a08bb95e88 | ["Apache-2.0"] | count: 2 | events: 2020-10-28T20:24:19.000Z to 2021-02-08T21:24:15.000Z
max_issues_repo: tensorflow/core/platform/build_config.bzl | nicolas-harraudeau-sonarsource/tensorflow | f42f57b814b82a217943f621967036a08bb95e88 | ["Apache-2.0"] | count: 2 | events: 2021-08-25T16:14:20.000Z to 2022-02-10T02:27:45.000Z
max_forks_repo: tensorflow/core/platform/build_config.bzl | nicolas-harraudeau-sonarsource/tensorflow | f42f57b814b82a217943f621967036a08bb95e88 | ["Apache-2.0"] | count: 1 | events: 2020-12-03T21:23:29.000Z to 2020-12-03T21:23:29.000Z
content:
"""Provides a redirection point for platform specific implementations of starlark utilities."""
load(
"//tensorflow/core/platform/default:build_config.bzl",
_if_llvm_aarch64_available = "if_llvm_aarch64_available",
_if_llvm_system_z_available = "if_llvm_system_z_available",
_pyx_library = "pyx_library",
_tf_additional_all_protos = "tf_additional_all_protos",
_tf_additional_binary_deps = "tf_additional_binary_deps",
_tf_additional_core_deps = "tf_additional_core_deps",
_tf_additional_cupti_utils_cuda_deps = "tf_additional_cupti_utils_cuda_deps",
_tf_additional_device_tracer_srcs = "tf_additional_device_tracer_srcs",
_tf_additional_env_hdrs = "tf_additional_env_hdrs",
_tf_additional_lib_deps = "tf_additional_lib_deps",
_tf_additional_lib_hdrs = "tf_additional_lib_hdrs",
_tf_additional_rpc_deps = "tf_additional_rpc_deps",
_tf_additional_tensor_coding_deps = "tf_additional_tensor_coding_deps",
_tf_additional_test_deps = "tf_additional_test_deps",
_tf_additional_test_srcs = "tf_additional_test_srcs",
_tf_fingerprint_deps = "tf_fingerprint_deps",
_tf_google_mobile_srcs_no_runtime = "tf_google_mobile_srcs_no_runtime",
_tf_google_mobile_srcs_only_runtime = "tf_google_mobile_srcs_only_runtime",
_tf_jspb_proto_library = "tf_jspb_proto_library",
_tf_kernel_tests_linkstatic = "tf_kernel_tests_linkstatic",
_tf_lib_proto_parsing_deps = "tf_lib_proto_parsing_deps",
_tf_logging_deps = "tf_logging_deps",
_tf_platform_alias = "tf_platform_alias",
_tf_platform_deps = "tf_platform_deps",
_tf_portable_deps_no_runtime = "tf_portable_deps_no_runtime",
_tf_portable_proto_lib = "tf_portable_proto_lib",
_tf_profiler_client_deps = "tf_profiler_client_deps",
_tf_proto_library = "tf_proto_library",
_tf_proto_library_cc = "tf_proto_library_cc",
_tf_protobuf_compiler_deps = "tf_protobuf_compiler_deps",
_tf_protobuf_deps = "tf_protobuf_deps",
_tf_protos_all = "tf_protos_all",
_tf_protos_all_impl = "tf_protos_all_impl",
_tf_protos_grappler = "tf_protos_grappler",
_tf_protos_grappler_impl = "tf_protos_grappler_impl",
_tf_protos_profiler_impl = "tf_protos_profiler_impl",
_tf_protos_profiler_service = "tf_protos_profiler_service",
_tf_py_clif_cc = "tf_py_clif_cc",
_tf_pyclif_proto_library = "tf_pyclif_proto_library",
_tf_resource_deps = "tf_resource_deps",
_tf_tpu_dependencies = "tf_tpu_dependencies",
_tf_windows_aware_platform_deps = "tf_windows_aware_platform_deps",
)
if_llvm_aarch64_available = _if_llvm_aarch64_available
if_llvm_system_z_available = _if_llvm_system_z_available
pyx_library = _pyx_library
tf_additional_all_protos = _tf_additional_all_protos
tf_additional_binary_deps = _tf_additional_binary_deps
tf_additional_core_deps = _tf_additional_core_deps
tf_additional_cupti_utils_cuda_deps = _tf_additional_cupti_utils_cuda_deps
tf_additional_device_tracer_srcs = _tf_additional_device_tracer_srcs
tf_additional_env_hdrs = _tf_additional_env_hdrs
tf_additional_lib_deps = _tf_additional_lib_deps
tf_additional_lib_hdrs = _tf_additional_lib_hdrs
tf_additional_rpc_deps = _tf_additional_rpc_deps
tf_additional_tensor_coding_deps = _tf_additional_tensor_coding_deps
tf_additional_test_deps = _tf_additional_test_deps
tf_additional_test_srcs = _tf_additional_test_srcs
tf_fingerprint_deps = _tf_fingerprint_deps
tf_google_mobile_srcs_no_runtime = _tf_google_mobile_srcs_no_runtime
tf_google_mobile_srcs_only_runtime = _tf_google_mobile_srcs_only_runtime
tf_jspb_proto_library = _tf_jspb_proto_library
tf_kernel_tests_linkstatic = _tf_kernel_tests_linkstatic
tf_lib_proto_parsing_deps = _tf_lib_proto_parsing_deps
tf_logging_deps = _tf_logging_deps
tf_platform_alias = _tf_platform_alias
tf_platform_deps = _tf_platform_deps
tf_portable_proto_lib = _tf_portable_proto_lib
tf_portable_deps_no_runtime = _tf_portable_deps_no_runtime
tf_proto_library = _tf_proto_library
tf_proto_library_cc = _tf_proto_library_cc
tf_protobuf_compiler_deps = _tf_protobuf_compiler_deps
tf_protobuf_deps = _tf_protobuf_deps
tf_protos_all = _tf_protos_all
tf_protos_all_impl = _tf_protos_all_impl
tf_protos_grappler = _tf_protos_grappler
tf_protos_grappler_impl = _tf_protos_grappler_impl
tf_protos_profiler_impl = _tf_protos_profiler_impl
tf_protos_profiler_service = _tf_protos_profiler_service
tf_profiler_client_deps = _tf_profiler_client_deps
tf_py_clif_cc = _tf_py_clif_cc
tf_pyclif_proto_library = _tf_pyclif_proto_library
tf_resource_deps = _tf_resource_deps
tf_windows_aware_platform_deps = _tf_windows_aware_platform_deps
tf_tpu_dependencies = _tf_tpu_dependencies
avg_line_length: 51.021978 | max_line_length: 95 | alphanum_fraction: 0.855912

hexsha: 02fc36056d37603f7be9820f58470a9641af8aae | size: 19,976 | ext: py | lang: Python
max_stars_repo: pysnmp/DVMRP-STD-MIB-UNI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 11 | events: 2021-02-02T16:27:16.000Z to 2021-08-31T06:22:49.000Z
max_issues_repo: pysnmp/DVMRP-STD-MIB-UNI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 75 | events: 2021-02-24T17:30:31.000Z to 2021-12-08T00:01:18.000Z
max_forks_repo: pysnmp/DVMRP-STD-MIB-UNI.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 10 | events: 2019-04-30T05:51:36.000Z to 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module DVMRP-STD-MIB-UNI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DVMRP-STD-MIB-UNI
# Produced by pysmi-0.3.4 at Mon Apr 29 18:40:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
InterfaceIndex, InterfaceIndexOrZero = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "InterfaceIndexOrZero")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Integer32, TimeTicks, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, MibIdentifier, Unsigned32, ModuleIdentity, iso, IpAddress, Bits, ObjectIdentity, Gauge32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "TimeTicks", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "MibIdentifier", "Unsigned32", "ModuleIdentity", "iso", "IpAddress", "Bits", "ObjectIdentity", "Gauge32", "Counter32")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
usdDvmrpExperiment, = mibBuilder.importSymbols("Unisphere-Data-Experiment", "usdDvmrpExperiment")
dvmrpStdMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1))
dvmrpStdMIB.setRevisions(('1999-10-19 12:00',))
if mibBuilder.loadTexts: dvmrpStdMIB.setLastUpdated('9910191200Z')
if mibBuilder.loadTexts: dvmrpStdMIB.setOrganization('IETF IDMR Working Group.')
dvmrpMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1))
dvmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1))
dvmrpScalar = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1))
dvmrpVersionString = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpVersionString.setStatus('current')
dvmrpGenerationId = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpGenerationId.setStatus('current')
dvmrpNumRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNumRoutes.setStatus('current')
dvmrpReachableRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpReachableRoutes.setStatus('current')
dvmrpInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2), )
if mibBuilder.loadTexts: dvmrpInterfaceTable.setStatus('current')
dvmrpInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1), ).setIndexNames((0, "DVMRP-STD-MIB-UNI", "dvmrpInterfaceIfIndex"))
if mibBuilder.loadTexts: dvmrpInterfaceEntry.setStatus('current')
dvmrpInterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: dvmrpInterfaceIfIndex.setStatus('current')
dvmrpInterfaceLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dvmrpInterfaceLocalAddress.setStatus('current')
dvmrpInterfaceMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dvmrpInterfaceMetric.setStatus('current')
dvmrpInterfaceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dvmrpInterfaceStatus.setStatus('current')
dvmrpInterfaceRcvBadPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpInterfaceRcvBadPkts.setStatus('current')
dvmrpInterfaceRcvBadRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpInterfaceRcvBadRoutes.setStatus('current')
dvmrpInterfaceSentRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpInterfaceSentRoutes.setStatus('current')
dvmrpInterfaceInterfaceKey = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 8), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dvmrpInterfaceInterfaceKey.setStatus('current')
dvmrpInterfaceInterfaceKeyVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 9), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dvmrpInterfaceInterfaceKeyVersion.setStatus('current')
dvmrpNeighborTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3), )
if mibBuilder.loadTexts: dvmrpNeighborTable.setStatus('current')
dvmrpNeighborEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1), ).setIndexNames((0, "DVMRP-STD-MIB-UNI", "dvmrpNeighborIfIndex"), (0, "DVMRP-STD-MIB-UNI", "dvmrpNeighborAddress"))
if mibBuilder.loadTexts: dvmrpNeighborEntry.setStatus('current')
dvmrpNeighborIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: dvmrpNeighborIfIndex.setStatus('current')
dvmrpNeighborAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 2), IpAddress())
if mibBuilder.loadTexts: dvmrpNeighborAddress.setStatus('current')
dvmrpNeighborUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborUpTime.setStatus('current')
dvmrpNeighborExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborExpiryTime.setStatus('current')
dvmrpNeighborGenerationId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborGenerationId.setStatus('current')
dvmrpNeighborMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborMajorVersion.setStatus('current')
dvmrpNeighborMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborMinorVersion.setStatus('current')
dvmrpNeighborCapabilities = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 8), Bits().clone(namedValues=NamedValues(("leaf", 0), ("prune", 1), ("generationID", 2), ("mtrace", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborCapabilities.setStatus('current')
dvmrpNeighborRcvRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborRcvRoutes.setStatus('current')
dvmrpNeighborRcvBadPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborRcvBadPkts.setStatus('current')
dvmrpNeighborRcvBadRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborRcvBadRoutes.setStatus('current')
dvmrpNeighborState = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("oneway", 1), ("active", 2), ("ignoring", 3), ("down", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpNeighborState.setStatus('current')
dvmrpRouteTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4), )
if mibBuilder.loadTexts: dvmrpRouteTable.setStatus('current')
dvmrpRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1), ).setIndexNames((0, "DVMRP-STD-MIB-UNI", "dvmrpRouteSource"), (0, "DVMRP-STD-MIB-UNI", "dvmrpRouteSourceMask"))
if mibBuilder.loadTexts: dvmrpRouteEntry.setStatus('current')
dvmrpRouteSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: dvmrpRouteSource.setStatus('current')
dvmrpRouteSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: dvmrpRouteSourceMask.setStatus('current')
dvmrpRouteUpstreamNeighbor = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteUpstreamNeighbor.setStatus('current')
dvmrpRouteIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteIfIndex.setStatus('current')
dvmrpRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteMetric.setStatus('current')
dvmrpRouteExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteExpiryTime.setStatus('current')
dvmrpRouteUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteUpTime.setStatus('current')
dvmrpRouteNextHopTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5), )
if mibBuilder.loadTexts: dvmrpRouteNextHopTable.setStatus('current')
dvmrpRouteNextHopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1), ).setIndexNames((0, "DVMRP-STD-MIB-UNI", "dvmrpRouteNextHopSource"), (0, "DVMRP-STD-MIB-UNI", "dvmrpRouteNextHopSourceMask"), (0, "DVMRP-STD-MIB-UNI", "dvmrpRouteNextHopIfIndex"))
if mibBuilder.loadTexts: dvmrpRouteNextHopEntry.setStatus('current')
dvmrpRouteNextHopSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: dvmrpRouteNextHopSource.setStatus('current')
dvmrpRouteNextHopSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 2), IpAddress())
if mibBuilder.loadTexts: dvmrpRouteNextHopSourceMask.setStatus('current')
dvmrpRouteNextHopIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: dvmrpRouteNextHopIfIndex.setStatus('current')
dvmrpRouteNextHopType = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("leaf", 1), ("branch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpRouteNextHopType.setStatus('current')
dvmrpPruneTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6), )
if mibBuilder.loadTexts: dvmrpPruneTable.setStatus('current')
dvmrpPruneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1), ).setIndexNames((0, "DVMRP-STD-MIB-UNI", "dvmrpPruneGroup"), (0, "DVMRP-STD-MIB-UNI", "dvmrpPruneSource"), (0, "DVMRP-STD-MIB-UNI", "dvmrpPruneSourceMask"))
if mibBuilder.loadTexts: dvmrpPruneEntry.setStatus('current')
dvmrpPruneGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 1), IpAddress())
if mibBuilder.loadTexts: dvmrpPruneGroup.setStatus('current')
dvmrpPruneSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 2), IpAddress())
if mibBuilder.loadTexts: dvmrpPruneSource.setStatus('current')
dvmrpPruneSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 3), IpAddress())
if mibBuilder.loadTexts: dvmrpPruneSourceMask.setStatus('current')
dvmrpPruneExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dvmrpPruneExpiryTime.setStatus('current')
dvmrpTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0))
dvmrpNeighborLoss = NotificationType((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0, 1)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborState"))
if mibBuilder.loadTexts: dvmrpNeighborLoss.setStatus('current')
dvmrpNeighborNotPruning = NotificationType((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0, 2)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborCapabilities"))
if mibBuilder.loadTexts: dvmrpNeighborNotPruning.setStatus('current')
dvmrpMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2))
dvmrpMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 1))
dvmrpMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2))
dvmrpMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 1, 1)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpGeneralGroup"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceGroup"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborGroup"), ("DVMRP-STD-MIB-UNI", "dvmrpRoutingGroup"), ("DVMRP-STD-MIB-UNI", "dvmrpTreeGroup"), ("DVMRP-STD-MIB-UNI", "dvmrpSecurityGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpMIBCompliance = dvmrpMIBCompliance.setStatus('current')
dvmrpGeneralGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 2)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpVersionString"), ("DVMRP-STD-MIB-UNI", "dvmrpGenerationId"), ("DVMRP-STD-MIB-UNI", "dvmrpNumRoutes"), ("DVMRP-STD-MIB-UNI", "dvmrpReachableRoutes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpGeneralGroup = dvmrpGeneralGroup.setStatus('current')
dvmrpInterfaceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 3)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceMetric"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceStatus"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceRcvBadPkts"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceRcvBadRoutes"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceSentRoutes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpInterfaceGroup = dvmrpInterfaceGroup.setStatus('current')
dvmrpNeighborGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 4)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpNeighborUpTime"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborExpiryTime"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborGenerationId"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborMajorVersion"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborMinorVersion"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborCapabilities"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborRcvRoutes"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborRcvBadPkts"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborRcvBadRoutes"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpNeighborGroup = dvmrpNeighborGroup.setStatus('current')
dvmrpRoutingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 5)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpRouteUpstreamNeighbor"), ("DVMRP-STD-MIB-UNI", "dvmrpRouteIfIndex"), ("DVMRP-STD-MIB-UNI", "dvmrpRouteMetric"), ("DVMRP-STD-MIB-UNI", "dvmrpRouteExpiryTime"), ("DVMRP-STD-MIB-UNI", "dvmrpRouteUpTime"), ("DVMRP-STD-MIB-UNI", "dvmrpRouteNextHopType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpRoutingGroup = dvmrpRoutingGroup.setStatus('current')
dvmrpSecurityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 6)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpInterfaceInterfaceKey"), ("DVMRP-STD-MIB-UNI", "dvmrpInterfaceInterfaceKeyVersion"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpSecurityGroup = dvmrpSecurityGroup.setStatus('current')
dvmrpTreeGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 7)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpPruneExpiryTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpTreeGroup = dvmrpTreeGroup.setStatus('current')
dvmrpNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 8)).setObjects(("DVMRP-STD-MIB-UNI", "dvmrpNeighborLoss"), ("DVMRP-STD-MIB-UNI", "dvmrpNeighborNotPruning"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dvmrpNotificationGroup = dvmrpNotificationGroup.setStatus('current')
mibBuilder.exportSymbols("DVMRP-STD-MIB-UNI", dvmrpInterfaceRcvBadRoutes=dvmrpInterfaceRcvBadRoutes, dvmrpNeighborMinorVersion=dvmrpNeighborMinorVersion, dvmrpNeighborUpTime=dvmrpNeighborUpTime, dvmrpNeighborEntry=dvmrpNeighborEntry, dvmrpReachableRoutes=dvmrpReachableRoutes, dvmrpInterfaceIfIndex=dvmrpInterfaceIfIndex, dvmrpNeighborAddress=dvmrpNeighborAddress, dvmrpPruneGroup=dvmrpPruneGroup, dvmrpPruneExpiryTime=dvmrpPruneExpiryTime, dvmrpMIBCompliances=dvmrpMIBCompliances, dvmrpRouteUpTime=dvmrpRouteUpTime, dvmrpInterfaceInterfaceKey=dvmrpInterfaceInterfaceKey, dvmrp=dvmrp, dvmrpNeighborExpiryTime=dvmrpNeighborExpiryTime, dvmrpNeighborRcvBadRoutes=dvmrpNeighborRcvBadRoutes, dvmrpInterfaceSentRoutes=dvmrpInterfaceSentRoutes, dvmrpRouteSource=dvmrpRouteSource, dvmrpRouteNextHopTable=dvmrpRouteNextHopTable, dvmrpRouteNextHopSourceMask=dvmrpRouteNextHopSourceMask, dvmrpRouteNextHopIfIndex=dvmrpRouteNextHopIfIndex, dvmrpTraps=dvmrpTraps, dvmrpNeighborNotPruning=dvmrpNeighborNotPruning, dvmrpNeighborState=dvmrpNeighborState, dvmrpRouteTable=dvmrpRouteTable, dvmrpNeighborRcvRoutes=dvmrpNeighborRcvRoutes, dvmrpScalar=dvmrpScalar, dvmrpMIBGroups=dvmrpMIBGroups, dvmrpRouteIfIndex=dvmrpRouteIfIndex, dvmrpInterfaceLocalAddress=dvmrpInterfaceLocalAddress, dvmrpNeighborCapabilities=dvmrpNeighborCapabilities, dvmrpRouteNextHopSource=dvmrpRouteNextHopSource, dvmrpRouteNextHopType=dvmrpRouteNextHopType, dvmrpInterfaceRcvBadPkts=dvmrpInterfaceRcvBadPkts, dvmrpRouteSourceMask=dvmrpRouteSourceMask, dvmrpNotificationGroup=dvmrpNotificationGroup, dvmrpInterfaceInterfaceKeyVersion=dvmrpInterfaceInterfaceKeyVersion, dvmrpSecurityGroup=dvmrpSecurityGroup, PYSNMP_MODULE_ID=dvmrpStdMIB, dvmrpRouteEntry=dvmrpRouteEntry, dvmrpNeighborTable=dvmrpNeighborTable, dvmrpRouteUpstreamNeighbor=dvmrpRouteUpstreamNeighbor, dvmrpPruneSource=dvmrpPruneSource, dvmrpGeneralGroup=dvmrpGeneralGroup, dvmrpNeighborRcvBadPkts=dvmrpNeighborRcvBadPkts, dvmrpNeighborLoss=dvmrpNeighborLoss, dvmrpStdMIB=dvmrpStdMIB, dvmrpPruneSourceMask=dvmrpPruneSourceMask, dvmrpRouteExpiryTime=dvmrpRouteExpiryTime, dvmrpNeighborMajorVersion=dvmrpNeighborMajorVersion, dvmrpRouteNextHopEntry=dvmrpRouteNextHopEntry, dvmrpInterfaceMetric=dvmrpInterfaceMetric, dvmrpNeighborGroup=dvmrpNeighborGroup, dvmrpInterfaceEntry=dvmrpInterfaceEntry, dvmrpPruneEntry=dvmrpPruneEntry, dvmrpNeighborIfIndex=dvmrpNeighborIfIndex, dvmrpInterfaceStatus=dvmrpInterfaceStatus, dvmrpPruneTable=dvmrpPruneTable, dvmrpGenerationId=dvmrpGenerationId, dvmrpMIBConformance=dvmrpMIBConformance, dvmrpInterfaceTable=dvmrpInterfaceTable, dvmrpInterfaceGroup=dvmrpInterfaceGroup, dvmrpRouteMetric=dvmrpRouteMetric, dvmrpRoutingGroup=dvmrpRoutingGroup, dvmrpVersionString=dvmrpVersionString, dvmrpMIBCompliance=dvmrpMIBCompliance, dvmrpNeighborGenerationId=dvmrpNeighborGenerationId, dvmrpNumRoutes=dvmrpNumRoutes, dvmrpTreeGroup=dvmrpTreeGroup, dvmrpMIBObjects=dvmrpMIBObjects)
avg_line_length: 126.43038 | max_line_length: 2,921 | alphanum_fraction: 0.743793

hexsha: a16d5b49250da7ede6b55b1d8540b546bc13b27d | size: 1,056 | ext: py | lang: Python
max_stars_repo: app/urls.py | hamzabell/Realtor | 8787f1e2cf05fb165f4cb8e8ae95cdcba1aaf7a0 | ["MIT"] | count: null | events: null
max_issues_repo: app/urls.py | hamzabell/Realtor | 8787f1e2cf05fb165f4cb8e8ae95cdcba1aaf7a0 | ["MIT"] | count: null | events: null
max_forks_repo: app/urls.py | hamzabell/Realtor | 8787f1e2cf05fb165f4cb8e8ae95cdcba1aaf7a0 | ["MIT"] | count: null | events: null
content:
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('pages.urls')),
path('listings/', include('listings.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
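
Following the docstring's own pattern, wiring in an additional (hypothetical) blog app would look like the commented line below; blog.urls is illustrative and not part of this project:

# urlpatterns += [path('blog/', include('blog.urls'))]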
avg_line_length: 37.714286 | max_line_length: 77 | alphanum_fraction: 0.717803

hexsha: 9330254eeace87c12a8d2a5f8e6d8b009be2a421 | size: 1,332 | ext: py | lang: Python
max_stars_repo: pennylane/numpy/__init__.py | theRoughCode/pennylane | 317f82ef00c752beeef7d2412b88119a753467b4 | ["Apache-2.0"] | count: 1 | events: 2020-10-28T11:08:37.000Z to 2020-10-28T11:08:37.000Z
max_issues_repo: pennylane/numpy/__init__.py | markhop20/pennylane | 8792f0f88178f70a04d6f7afbbb9dd90d2e758b3 | ["Apache-2.0"] | count: 1 | events: 2020-10-04T22:45:45.000Z to 2020-10-04T22:45:45.000Z
max_forks_repo: pennylane/numpy/__init__.py | markhop20/pennylane | 8792f0f88178f70a04d6f7afbbb9dd90d2e758b3 | ["Apache-2.0"] | count: null | events: null
content:
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package provides a wrapped version of autograd.numpy, such that
it works with the PennyLane :class:`~.tensor` class.
"""
# pylint: disable=wrong-import-position,wildcard-import,undefined-variable
from autograd import numpy as _np
from autograd.numpy import *
from .wrapper import wrap_arrays, extract_tensors
wrap_arrays(_np.__dict__, globals())
# Delete the unwrapped fft, linalg, random modules
# so that we can re-import our wrapped versions.
del fft
del linalg
del random
from . import fft
from . import linalg
from . import random
from .tensor import tensor, NonDifferentiableError
__doc__ = "NumPy with automatic differentiation support, provided by Autograd and PennyLane."
avg_line_length: 34.153846 | max_line_length: 94 | alphanum_fraction: 0.758258

hexsha: 34c2149af459ddcc1b2d51293b9d821028a813eb | size: 26,862 | ext: py | lang: Python
max_stars_repo: nova/tests/virt/libvirt/test_imagebackend.py | bopopescu/nova-35 | c32c01e08dccad921b4af6fc03d971d6e74c990f | ["Apache-2.0"] | count: null | events: null
max_issues_repo: nova/tests/virt/libvirt/test_imagebackend.py | bopopescu/nova-35 | c32c01e08dccad921b4af6fc03d971d6e74c990f | ["Apache-2.0"] | count: null | events: null
max_forks_repo: nova/tests/virt/libvirt/test_imagebackend.py | bopopescu/nova-35 | c32c01e08dccad921b4af6fc03d971d6e74c990f | ["Apache-2.0"] | count: 1 | events: 2020-07-24T09:13:22.000Z to 2020-07-24T09:13:22.000Z
content:
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
import inspect
from nova import exception
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
INSTANCES_PATH = '/instances_path'
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
fn(target=base, *args, **kwargs)
image.create_image = create_image
def setUp(self):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
self.INSTANCE = {'name': 'instance',
'uuid': uuidutils.generate_uuid()}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
self.OLD_STYLE_INSTANCE_PATH = \
fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
self.PATH = os.path.join(
fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
# TODO(mikal): rename template_dir to base_dir and template_path
# to cached_image_path. This will be less confusing.
self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: True)
# Call twice to verify testing fallocate is only called once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(),
['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
def test_prealloc_image_without_write_access(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, '_can_fallocate', lambda: True)
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: False)
# Testing fallocate is only called when user has write access.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Raw
super(RawTestCase, self).setUp()
self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
self.mox.VerifyAll()
def test_create_image_generated(self):
fn = self.prepare_mocks()
fn(target=self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_extend(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
self.mox.VerifyAll()
def test_correct_format(self):
self.stubs.UnsetAll()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
os.path.exists(self.PATH).AndReturn(True)
info = self.mox.CreateMockAnything()
info.file_format = 'foo'
imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
self.assertEqual(image.driver_format, 'foo')
self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = units.Gi
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / units.Gi))
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'create_cow_image')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_too_small(self):
fn = self.prepare_mocks()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(exception.FlavorDiskTooSmall,
image.create_image, fn, self.TEMPLATE_PATH, 1)
self.mox.VerifyAll()
def test_generate_resized_backing_files(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(self.QCOW2_BASE)
os.path.exists(self.QCOW2_BASE).AndReturn(False)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
self.QCOW2_BASE)
imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_qcow2_exists_and_has_no_backing_file(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(None)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(images_volume_group=self.VG, group='libvirt')
self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.disk, 'resize2fs')
self.mox.StubOutWithMock(self.libvirt_utils, 'create_lvm_image')
self.mox.StubOutWithMock(self.disk, 'get_disk_size')
self.mox.StubOutWithMock(self.utils, 'execute')
return fn
def _create_image(self, sparse):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def _create_image_generated(self, sparse):
fn = self.prepare_mocks()
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn(target=self.PATH, ephemeral_size=None)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
self.mox.VerifyAll()
def _create_image_resize(self, sparse):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
def test_create_image_negative(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False
).AndRaise(RuntimeError())
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_generated_negative(self):
fn = self.prepare_mocks()
fn(target=self.PATH,
ephemeral_size=None).AndRaise(RuntimeError())
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE,
ephemeral_size=None)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
POOL = "FakePool"
USER = "FakeUser"
CONF = "FakeConf"
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Rbd
super(RbdTestCase, self).setUp()
self.flags(images_rbd_pool=self.POOL,
rbd_user=self.USER,
images_rbd_ceph_conf=self.CONF,
group='libvirt')
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
self.rbd = self.mox.CreateMockAnything()
self.rados = self.mox.CreateMockAnything()
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend, 'rbd')
self.mox.StubOutWithMock(imagebackend, 'rados')
return fn
def test_cache(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
image.check_image_exists().AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH)
self.rbd.RBD_FEATURE_LAYERING = 1
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
self.mox.StubOutWithMock(imagebackend, 'rbd')
self.mox.StubOutWithMock(imagebackend, 'rados')
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
def fake_resize(rbd_name, size):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
inspect.getargspec(self.image_class.libvirt_info))
def test_image_path(self):
conf = "FakeConf"
pool = "FakePool"
user = "FakeUser"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
user, conf)
self.assertEqual(image.path, rbd_path)
class BackendTestCase(test.NoDBTestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).image(self.INSTANCE,
self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertIsInstance(instance, class_object, msg=failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_raw(self):
self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
def test_image_rbd(self):
conf = "FakeConf"
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
--- setup.py | Robbybp/pymumps @ 698339dc | Python | 1,310 bytes | BSD-3-Clause ---
#!/usr/bin/env python
from setuptools import setup, Extension
try:
import Cython
except ImportError:
raise ImportError('''
Cython is required for building this package. Please install using
pip install cython
or upgrade to a recent PIP release.
''')
with open('README.md') as f:
long_description = f.read()
setup(
name='PyMUMPS',
version='0.3.2',
description='Python bindings for MUMPS, a parallel sparse direct solver',
long_description=long_description,
long_description_content_type='text/markdown',
author='Bradley M. Froehle',
author_email='brad.froehle@gmail.com',
maintainer='Stephan Rave',
maintainer_email='stephan.rave@uni-muenster.de',
license='BSD',
url='http://github.com/pymumps/pymumps',
packages=['mumps'],
ext_modules=[
Extension(
'mumps._dmumps',
sources=['mumps/_dmumps.pyx'],
libraries=['dmumps'],
),
],
install_requires=['mpi4py'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Mathematics',
],
)
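# Build sketch (added note, not part of the original file): assuming a system
# MUMPS installation providing libdmumps, plus MPI and mpi4py, the extension is
# typically built from the repository root with
#
#     pip install cython
#     pip install .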
--- client/verta/tests/test_versioning/test_configuration.py | fool-sec-review/modeldb @ 44e7f3c1 | Python | 3,299 bytes | Apache-2.0 ---
import pytest
import verta.configuration
class TestInternalFunctions:
def test_value_to_msg(self):
fn = verta.configuration.Hyperparameters._value_to_msg
assert fn(0)
assert fn(0.3)
assert fn("coconut")
def test_hyperparameter_to_msg(self):
fn = verta.configuration.Hyperparameters._hyperparameter_to_msg
assert fn("banana", 0)
assert fn("banana", 0.3)
assert fn("banana", "foo")
def test_hyperparamater_range_to_msg(self):
fn = verta.configuration.Hyperparameters._hyperparameter_range_to_msg
assert fn("banana", (0, 12, 3))
with pytest.raises(TypeError):
fn("banana", 0)
with pytest.raises(ValueError):
fn("banana", (0, 12))
def test_hyperparameter_set_to_msg(self):
fn = verta.configuration.Hyperparameters._hyperparameter_set_to_msg
assert fn("banana", list(range(0, 12, 3)))
with pytest.raises(TypeError):
fn("banana", 0)
class TestHyperparameters:
HYPERPARAMETERS = {'banana': "foo"}
HYPERPARAMETER_RANGES = {'coconut': (0, 12, 3)}
HYPERPARAMETER_SETS = {'durian': list(range(0, 12, 3))}
def test_hyperparameters(self):
config = verta.configuration.Hyperparameters(
hyperparameters=self.HYPERPARAMETERS,
)
name, value = list(self.HYPERPARAMETERS.items())[0]
hyperparam_msg = config._msg.hyperparameters[0]
assert hyperparam_msg.name == name
value_msg = hyperparam_msg.value
assert getattr(value_msg, value_msg.WhichOneof('value')) == value
def test_hyperparamater_ranges(self):
config = verta.configuration.Hyperparameters(
hyperparameter_ranges=self.HYPERPARAMETER_RANGES,
)
name, (begin, end, step) = list(self.HYPERPARAMETER_RANGES.items())[0]
hyperparam_msg = config._msg.hyperparameter_set[0]
assert hyperparam_msg.name == name
begin_msg = hyperparam_msg.continuous.interval_begin
assert getattr(begin_msg, begin_msg.WhichOneof('value')) == begin
end_msg = hyperparam_msg.continuous.interval_end
assert getattr(end_msg, end_msg.WhichOneof('value')) == end
step_msg = hyperparam_msg.continuous.interval_step
assert getattr(step_msg, step_msg.WhichOneof('value')) == step
def test_hyperparameter_sets(self):
config = verta.configuration.Hyperparameters(
hyperparameter_sets=self.HYPERPARAMETER_SETS,
)
name, sequence = list(self.HYPERPARAMETER_SETS.items())[0]
hyperparam_msg = config._msg.hyperparameter_set[0]
assert hyperparam_msg.name == name
for value_msg, value in zip(hyperparam_msg.discrete.values, sequence):
assert getattr(value_msg, value_msg.WhichOneof('value')) == value
def test_repr(self):
"""Tests that __repr__() executes without error"""
config = verta.configuration.Hyperparameters(
hyperparameters={
'a': 1, 'b': 1,
},
hyperparameter_ranges={
'c': (1, 5, 1), 'd': (1, 5, 1),
},
hyperparameter_sets={
'e': [1, 2], 'f': [1, 2],
},
)
assert config.__repr__()
--- Algorithms/Easy/561. Array Partition I/answer.py | KenWoo/Algorithm @ 4012a2f0 | Python | 339 bytes | Apache-2.0 ---
from typing import List
class Solution:
def arrayPairSum(self, nums: List[int]) -> int:
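        # Added note: after sorting, pairing adjacent elements (nums[0], nums[1]),
        # (nums[2], nums[3]), ... maximizes the sum of the per-pair minimums, so
        # summing every other element starting at index 0 gives the answer,
        # e.g. [1, 4, 3, 2] -> sorted [1, 2, 3, 4] -> 1 + 3 = 4.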
nums.sort()
n = len(nums)
sum = 0
for i in range(0, n, 2):
sum += nums[i]
return sum
if __name__ == "__main__":
s = Solution()
result = s.arrayPairSum([1, 4, 3, 2])
print(result)
--- main.py | abhijit-buet/Learning-to-Detect-Pedestrian-Flow-inTraffic-Intersections-from-Synthetic-Data @ c30334f2 | Python | 14,044 bytes | MIT ---
from __future__ import division
import argparse
import os
import shutil
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import alexnet as modifiednet
import vgg16 as modified_vgg16net
# set the seed
torch.manual_seed(1)
torch.cuda.manual_seed(1)
import sys
import gc
cwd = os.getcwd()
sys.path.append(cwd + '/../')
import datasets as datasets
import datasets.transforms as transforms
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='vgg16',
help='model architecture (default: alexnet)')
parser.add_argument('--data', metavar='DATA_PATH',
default='./home/yuxiang/FiberTrack/YX_data/generated_ellipse/Lan/image/',
help='path to imagenet data (default: ./home/yuxiang/FiberTrack/YX_data/generated_ellipse/Lan/image/)')
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 1)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N', help='mini-batch size (default: 10)')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.90, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')  # set this to a checkpoint path when resuming training
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
default=True, help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
use_gpu = torch.cuda.is_available()
def main():
global args
args = parser.parse_args()
args.distributed = args.world_size > 1
# create model
if args.arch == 'alexnet':
model = modifiednet.main(args.arch)
if args.arch == 'vgg16':
model = modified_vgg16net.main(args.arch)
if use_gpu:
model.cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss() # loss for classification
# smooth_l1 = nn.SmoothL1Loss() # loss for regression
# mse = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a pretrained model(checkpoint)
if args.resume:
# if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
cudnn.benchmark = True
# load train data
data_transforms = {
'train': transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.RandomSizedCrop(input_size),
]),
'val': transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.RandomSizedCrop(input_size),
])
}
# need to define
num_class = 3
train_dataset = datasets.customData(img_path='',
txt_path='/home/hongkai/PyTorch_Tutorial/pytorch_MultiTask-two-branches/data/train.txt',
data_transforms=data_transforms,
dataset='train')
val_dataset = datasets.customData(img_path='',
txt_path='/home/hongkai/PyTorch_Tutorial/pytorch_MultiTask-two-branches/data/val.txt',
data_transforms=data_transforms,
dataset='val')
# wrap your data and label into Tensor
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=1,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=1,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
# train
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False, args.arch + '-epoch-' + str(args.epochs)+'-checkpoint.pth.tar')
def main_testing():
global args
args = parser.parse_args()
# create model
model = modifiednet.main(args.arch)
if use_gpu:
model.cuda()
checkpoint = torch.load('./AlexNet-epoch50-checkpoint.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
criterion = nn.CrossEntropyLoss() # loss for classification
# load train data
data_transforms = {
'train': transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.RandomSizedCrop(input_size),
]),
'val': transforms.Compose([
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.RandomSizedCrop(input_size),
])
}
# need to define
num_class = 3
val_dataset = datasets.customData(img_path='',
txt_path='/home/hongkai/PyTorch_Tutorial/pytorch_MultiTask-two-branches/data/val.txt',
data_transforms=data_transforms,
dataset='val')
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=1,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
# loss1_am = AverageMeter()
# loss2_am = AverageMeter()
loss_am = AverageMeter()
# loss2_angle_am = AverageMeter()
# loss2_point_am = AverageMeter()
# switch to train mode
model.train()
end = time.time()
accu_no_train = 0
for i, (input_left, input_right, target_cls) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if use_gpu: # transfer to GPU
input_left_var = Variable(input_left.cuda())
input_right_var = Variable(input_right.cuda())
target_cls = Variable(target_cls.cuda())
else:
input_left_var = Variable(input_left)
input_right_var = Variable(input_right)
target_cls = Variable(target_cls)
# compute output
output = model(input_left_var, input_right_var)
loss = criterion(output, target_cls.data)
# measure accuracy and record loss
pred = output.max(1)[1]
accu_no_train += pred.eq(target_cls.view_as(pred)).sum().item() # compute the accuracy of classification
accu_no_train_per = '{:.2%}'.format(accu_no_train / (i + 1))
loss_am.update(loss.item(), input_left.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Accuracy: {3}\t'
'Loss: {loss.val:.4f}'.format(epoch, i, len(train_loader), accu_no_train_per, loss=loss_am))
gc.collect()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
data_time = AverageMeter()
loss_am = AverageMeter()
class_num = 3
classes = ('A', 'B', 'C')
class_correct = list(0. for i in range(class_num))
class_total = list(0. for i in range(class_num))
# switch to evaluate mode
model.eval()
end = time.time()
accu_no_val = 0
# predict_v = torch.from_numpy([])
for i, (input_left, input_right, target_cls) in enumerate(val_loader):
# measure data loading time
data_time.update(time.time() - end)
# target = target.cuda(async=True)
if use_gpu:
input_left_var = Variable(input_left.cuda())
input_right_var = Variable(input_right.cuda())
target_cls = Variable(target_cls.cuda())
else:
input_left_var = Variable(input_left)
input_right_var = Variable(input_right)
target_cls = Variable(target_cls)
output = model(input_left_var, input_right_var)
loss = criterion(output, target_cls.data) # compute two losses
# measure accuracy and record loss
pred = output.max(1)[1]
accu_no_val += pred.eq(target_cls.view_as(pred)).sum().item()
accu_no_val_per = '{:.4%}'.format(accu_no_val / (i + 1))
loss_am.update(loss.item(), input_left.size(0))
c = (pred == target_cls).squeeze()
for j in range(len(target_cls)):
label = target_cls[j]
class_correct[label] += c[j].item()
class_total[label] += 1
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i+1) % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Accuracy: {2}: \t'
'Loss: {loss.val:.4f} ({loss.avg:.4f})'.format(i+1, len(val_loader), accu_no_val_per, loss=loss_am))
mean_accuracy = 0
for i in range(class_num):
print('Final Accuracy of %2s : %.4f %%' % (classes[i], 100 * class_correct[i] / float(class_total[i])))
mean_accuracy += 100 * class_correct[i] / float(class_total[i])
print('Final Overall Mean Accuracy: %.4f %%' % (mean_accuracy / float(class_num)))
return loss.data
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 40 epochs"""
lr = args.lr * (0.1 ** (epoch // 15))
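    # e.g. with the default lr of 1e-4: epochs 0-14 use 1e-4, epochs 15-29 use
    # 1e-5, epochs 30-44 use 1e-6, and so on.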
    print('Learning rate: {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy_multiclass(output, target, topk=(1, 5)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
# for training and validation
main()
# for testing/validation
# main_testing()
--- vddb_async/src/task_manager/tests/clear_hbase.py | dlf412/thunderCopyright @ c736c9ee | Python | 531 bytes | MIT ---
#!/usr/bin/python
import happybase
import uuid
import sys
hbpool = happybase.ConnectionPool(1,host="master",port=9090)
if len(sys.argv) < 2:
print "usage: python %s table" % sys.argv[0]
sys.exit(1)
table = sys.argv[1]
print table
with hbpool.connection() as conn:
if table == 'all':
for x in ['crr', 'finished', 'matches', 'unpush', 'task', 'sid_tids']:
t = conn.table(x)
for row in t.scan():
t.delete(row[0])
print row[0] + " deleted"
print "success"
--- hazelcast/protocol/client_message.py | SaitTalhaNisanci/hazelcast-python-client @ af382081 | Python | 9,235 bytes | Apache-2.0 ---
"""
Client Message is the carrier framed data as defined below.
Any request parameter, response or event data will be carried in the payload.
0                   1                   2                   3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|R|                         Frame Length                         |
+-------------+---------------+---------------------------------+
|  Version    |B|E|  Flags    |              Type               |
+-------------+---------------+---------------------------------+
|                                                               |
+                         CorrelationId                         +
|                                                               |
+---------------------------------------------------------------+
|                          PartitionId                          |
+-----------------------------+---------------------------------+
|         Data Offset         |                                 |
+-----------------------------+                                 |
|                     Message Payload Data                     ...
|                                                              ...
"""
import binascii
import logging
import struct
from hazelcast.serialization.data import *
# constants
VERSION = 0
BEGIN_FLAG = 0x80
END_FLAG = 0x40
BEGIN_END_FLAG = BEGIN_FLAG | END_FLAG
LISTENER_FLAG = 0x01
PAYLOAD_OFFSET = 18
SIZE_OFFSET = 0
FRAME_LENGTH_FIELD_OFFSET = 0
VERSION_FIELD_OFFSET = FRAME_LENGTH_FIELD_OFFSET + INT_SIZE_IN_BYTES
FLAGS_FIELD_OFFSET = VERSION_FIELD_OFFSET + BYTE_SIZE_IN_BYTES
TYPE_FIELD_OFFSET = FLAGS_FIELD_OFFSET + BYTE_SIZE_IN_BYTES
CORRELATION_ID_FIELD_OFFSET = TYPE_FIELD_OFFSET + SHORT_SIZE_IN_BYTES
PARTITION_ID_FIELD_OFFSET = CORRELATION_ID_FIELD_OFFSET + LONG_SIZE_IN_BYTES
DATA_OFFSET_FIELD_OFFSET = PARTITION_ID_FIELD_OFFSET + INT_SIZE_IN_BYTES
HEADER_SIZE = DATA_OFFSET_FIELD_OFFSET + SHORT_SIZE_IN_BYTES
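# Worked example of the resulting header layout (derived from the constants
# above, assuming the usual 1/2/4/8-byte sizes for BYTE/SHORT/INT/LONG):
#   frame length    bytes  0-3
#   version         byte   4
#   flags           byte   5
#   message type    bytes  6-7
#   correlation id  bytes  8-15
#   partition id    bytes 16-19
#   data offset     bytes 20-21
# i.e. HEADER_SIZE == 22, and the payload starts at the offset stored in the
# data-offset field (HEADER_SIZE for messages built by this client).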
class ClientMessage(object):
def __init__(self, buff=None, payload_size=0):
if buff:
self.buffer = buff
self._read_index = 0
else:
self.buffer = bytearray(HEADER_SIZE + payload_size)
self.set_data_offset(HEADER_SIZE)
self._write_index = 0
self._retryable = False
# HEADER ACCESSORS
def get_correlation_id(self):
return struct.unpack_from(FMT_LE_LONG, self.buffer, CORRELATION_ID_FIELD_OFFSET)[0]
def set_correlation_id(self, val):
struct.pack_into(FMT_LE_LONG, self.buffer, CORRELATION_ID_FIELD_OFFSET, val)
return self
def get_partition_id(self):
return struct.unpack_from(FMT_LE_INT, self.buffer, PARTITION_ID_FIELD_OFFSET)[0]
def set_partition_id(self, val):
struct.pack_into(FMT_LE_INT, self.buffer, PARTITION_ID_FIELD_OFFSET, val)
return self
def get_message_type(self):
return struct.unpack_from(FMT_LE_UINT16, self.buffer, TYPE_FIELD_OFFSET)[0]
def set_message_type(self, val):
struct.pack_into(FMT_LE_UINT16, self.buffer, TYPE_FIELD_OFFSET, val)
return self
def get_flags(self):
return struct.unpack_from(FMT_LE_UINT8, self.buffer, FLAGS_FIELD_OFFSET)[0]
def set_flags(self, val):
struct.pack_into(FMT_LE_UINT8, self.buffer, FLAGS_FIELD_OFFSET, val)
return self
def has_flags(self, flags):
return self.get_flags() & flags
def get_frame_length(self):
return struct.unpack_from(FMT_LE_INT, self.buffer, FRAME_LENGTH_FIELD_OFFSET)[0]
def set_frame_length(self, val):
struct.pack_into(FMT_LE_INT, self.buffer, FRAME_LENGTH_FIELD_OFFSET, val)
return self
def get_data_offset(self):
return struct.unpack_from(FMT_LE_UINT16, self.buffer, DATA_OFFSET_FIELD_OFFSET)[0]
def set_data_offset(self, val):
struct.pack_into(FMT_LE_UINT16, self.buffer, DATA_OFFSET_FIELD_OFFSET, val)
return self
def _write_offset(self):
return self.get_data_offset() + self._write_index
def _read_offset(self):
return self.get_data_offset() + self._read_index
# PAYLOAD
def append_byte(self, val):
struct.pack_into(FMT_LE_UINT8, self.buffer, self._write_offset(), val)
self._write_index += BYTE_SIZE_IN_BYTES
return self
def append_bool(self, val):
return self.append_byte(1 if val else 0)
def append_int(self, val):
struct.pack_into(FMT_LE_INT, self.buffer, self._write_offset(), val)
self._write_index += INT_SIZE_IN_BYTES
return self
def append_long(self, val):
struct.pack_into(FMT_LE_LONG, self.buffer, self._write_offset(), val)
self._write_index += LONG_SIZE_IN_BYTES
return self
def append_str(self, val):
self.append_byte_array(val.encode("utf-8"))
return self
def append_data(self, val):
self.append_byte_array(val.to_bytes())
return self
def append_byte_array(self, arr):
length = len(arr)
# length
self.append_int(length)
# copy content
self.buffer[self._write_offset(): self._write_offset() + length] = arr[:]
self._write_index += length
def append_tuple(self, entry_tuple):
self.append_data(entry_tuple[0]).append_data(entry_tuple[1])
return self
# PAYLOAD READ
def _read_from_buff(self, fmt, size):
val = struct.unpack_from(fmt, self.buffer, self._read_offset())
self._read_index += size
return val[0]
def read_byte(self):
return self._read_from_buff(FMT_LE_UINT8, BYTE_SIZE_IN_BYTES)
def read_bool(self):
return True if self.read_byte() else False
def read_int(self):
return self._read_from_buff(FMT_LE_INT, INT_SIZE_IN_BYTES)
def read_long(self):
return self._read_from_buff(FMT_LE_LONG, LONG_SIZE_IN_BYTES)
def read_str(self):
return self.read_byte_array().decode("utf-8")
def read_data(self):
return Data(self.read_byte_array())
def read_byte_array(self):
length = self.read_int()
result = bytearray(self.buffer[self._read_offset(): self._read_offset() + length])
self._read_index += length
return result
# helpers
def is_retryable(self):
return self._retryable
def set_retryable(self, val):
self._retryable = val
return self
def is_complete(self):
try:
return (self._read_offset() >= HEADER_SIZE) and (self._read_offset() == self.get_frame_length())
except AttributeError:
return False
def is_flag_set(self, flag):
i = self.get_flags() & flag
return i == flag
def add_flag(self, flags):
self.set_flags(self.get_flags() | flags)
return self
def update_frame_length(self):
self.set_frame_length(self._write_offset())
return self
def accumulate(self, client_message):
start = client_message.get_data_offset()
end = client_message.get_frame_length()
self.buffer += client_message.buffer[start:end]
self.set_frame_length(len(self.buffer))
def __repr__(self):
return binascii.hexlify(self.buffer)
def __str__(self):
return "ClientMessage:{{" \
"length={}, " \
"correlationId={}, " \
"messageType={}, " \
"partitionId={}, " \
"isComplete={}, " \
"isRetryable={}, " \
"isEvent={}, " \
"writeOffset={}}}".format(self.get_frame_length(),
self.get_correlation_id(),
self.get_message_type(),
self.get_partition_id(),
self.is_complete(),
self.is_retryable(),
self.is_flag_set(LISTENER_FLAG),
self.get_data_offset())
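# A short usage sketch (illustrative only, not part of the original module):
#
#   msg = ClientMessage(payload_size=INT_SIZE_IN_BYTES)
#   msg.set_message_type(0x1234).set_correlation_id(1).set_partition_id(-1)
#   msg.append_int(42)
#   msg.update_frame_length()
#   # msg.buffer now holds the framed request: a 22-byte header followed by a
#   # 4-byte payload, with the frame-length field set to 26.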
class ClientMessageBuilder(object):
def __init__(self, message_callback):
self.logger = logging.getLogger("ClientMessageBuilder:")
self._incomplete_messages = dict()
self._message_callback = message_callback
def on_message(self, client_message):
if client_message.is_flag_set(BEGIN_END_FLAG):
# handle message
self._message_callback(client_message)
elif client_message.is_flag_set(BEGIN_FLAG):
self._incomplete_messages[client_message.get_correlation_id()] = client_message
else:
try:
message = self._incomplete_messages[client_message.get_correlation_id()]
except KeyError:
self.logger.warning("A message without the begin part is received.")
return
message.accumulate(client_message)
if client_message.is_flag_set(END_FLAG):
message.add_flag(BEGIN_END_FLAG)
self._message_callback(message)
del self._incomplete_messages[client_message.get_correlation_id()]
| 35.383142
| 108
| 0.585815
|
f45dd4ecb9da591671a70c4b6210a4fce324773b
| 700
|
py
|
Python
|
bin/run_psql.py
|
jfeser/castor-opt
|
b377c589707fcba68f74d85eba6c4eb394ac2c05
|
[
"MIT"
] | null | null | null |
bin/run_psql.py
|
jfeser/castor-opt
|
b377c589707fcba68f74d85eba6c4eb394ac2c05
|
[
"MIT"
] | null | null | null |
bin/run_psql.py
|
jfeser/castor-opt
|
b377c589707fcba68f74d85eba6c4eb394ac2c05
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import os
import re
from subprocess import Popen, PIPE
import sys
def run_sql(db, sql_file, params):
# Substitute parameters into sql query.
with open(sql_file, "r") as f:
sql_query = f.read()
for i, param_value in enumerate(params):
sql_query = re.sub(":%d(?![0-9]+)" % (i + 1), str(param_value), sql_query)
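    # Note: the (?![0-9]+) negative lookahead keeps ":1" from also matching the
    # prefix of ":10", ":11", etc., so parameters substitute positionally and exactly.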
# Run query and write results.
p = Popen(["psql", "-t", "-A", "-F", "|", db], stdin=PIPE)
p.communicate(input=(sql_query).encode())
p.wait()
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print('Usage: run_psql.py DB SQL PARAMS')
        sys.exit(1)
    run_sql(sys.argv[1], sys.argv[2], json.loads(sys.argv[3]))
--- HackerEarth/Basic Programming/Basics of Implementation/the_great_kian.py | nsudhanva/Competetive @ d9e93fde | Python | 331 bytes | Unlicense ---
n = raw_input()
x = raw_input().split()
#print x
sum1 = 0
sum2 = 0
sum3 = 0
for i in range(int(n)):
if int(x[i]) % 3 == 1:
sum1 = sum1 + int(x[i])
if int(x[i]) % 3 == 2:
sum2 = sum2 + int(x[i])
if int(x[i]) % 3 == 0:
sum3 = sum3 + int(x[i])
print str(sum1) + " " + str(sum2) + " " + str(sum3)
--- applications/utopianIdentificationNumber.py | silvioedu/HackerRank-Regex-Practice @ dc2f2719 | Python | 215 bytes | MIT ---
import re
if __name__ == '__main__':
regex = r'^[a-z]{0,3}\d{2,8}[A-Z]{3,}$'
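    # Pattern breakdown: up to three lowercase letters, then 2-8 digits,
    # then at least three uppercase letters, anchored to the whole string.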
dict = {True: "VALID", False: "INVALID"}
for _ in range(int(input())):
print(dict[bool(re.search(regex, input()))])
--- tests/test_tf_converter.py | susemeee/tf-coreml @ 244a7705 | Python | 46,124 bytes | Apache-2.0 ---
import unittest
import shutil
import tempfile
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from coremltools.proto import NeuralNetwork_pb2
from tensorflow.python.tools.freeze_graph import freeze_graph
from tensorflow.tools.graph_transforms import TransformGraph
import tfcoreml as tf_converter
np.random.seed(34)
"""IMPORTANT NOTE TO ADD NEW TESTS:
For each test function you should set up your own graph and session.
Otherwise TF will carry all ops and tensors from previously run tests.
"""
def _tf_transpose(x, is_sequence=False):
if not hasattr(x, "shape"):
return x
if len(x.shape) == 4:
# [Batch, Height, Width, Channels] --> [Batch, Channels, Height, Width]
x = np.transpose(x, [0,3,1,2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# We only deal with non-recurrent networks for now
# [Batch, (Sequence) Length, Channels] --> [1,B, Channels, 1, Seq]
# [0,1,2] [0,2,1]
return np.transpose(x, [0,2,1])[None,:,:,None,:]
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
        else:  # (N,C) --> (1,N,C)
return x.reshape((1, ) + x.shape) # Dense
elif len(x.shape) == 1:
if is_sequence: # (S) --> (S,N,1,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names,add_custom_layers=False,custom_conversion_functions={}):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions)
return model
def _generate_data(input_shape, mode = 'random'):
"""
Generate some random data according to a shape.
"""
if input_shape is None or len(input_shape) == 0:
return 0.5
if mode == 'zeros':
X = np.zeros(input_shape)
elif mode == 'ones':
X = np.ones(input_shape)
elif mode == 'linear':
X = np.array(range(np.product(input_shape))).reshape(input_shape)*1.0
elif mode == 'random':
X = np.random.rand(*input_shape)
elif mode == 'random_zero_mean':
X = np.random.rand(*input_shape)-0.5
return X
class TFNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(self):
""" Set up the unit test by loading common utilities.
"""
def _simple_freeze(self, input_graph, input_checkpoint, output_graph,
output_node_names):
# output_node_names is a string of names separated by comma
freeze_graph(input_graph=input_graph,
input_saver="",
input_binary=False,
input_checkpoint=input_checkpoint,
output_node_names=output_node_names,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=output_graph,
clear_devices=True,
initializer_nodes="")
def _test_coreml_accuracy(self, coreml_model,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only):
# evaluate coreml
coreml_inputs = {}
for idx, in_tensor_name in enumerate(input_tensor_shapes):
in_shape = input_tensor_shapes[in_tensor_name]
coreml_in_name = in_tensor_name.replace(':', '__').replace('/', '__')
if one_dim_seq_flags is None:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name]).copy()
else:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name], one_dim_seq_flags[idx]).copy()
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=use_cpu_only)
for idx, out_name in enumerate(output_node_names):
tp = _tf_transpose(tf_result[idx]).flatten()
out_tensor_name = out_name.replace('/','__') + '__0'
cp = coreml_output[out_tensor_name].flatten()
self.assertEqual(len(tp), len(cp))
for i in range(len(tp)):
max_den = max(1.0, tp[i], cp[i])
self.assertAlmostEqual(tp[i]/max_den, cp[i]/max_den, delta=delta)
def _test_tf_model(self, graph, input_tensor_shapes, output_node_names,
data_mode = 'random', delta = 1e-2, is_quantized = False, use_cpu_only = False,
one_dim_seq_flags = None, check_numerical_accuracy=True,
add_custom_layers = False, custom_conversion_functions={}):
""" Common entry to testing routine.
graph - defined TensorFlow graph.
input_tensor_shapes - dict str:shape for each input (placeholder)
output_node_names - output_node_names, a list of strings
output_tensor_names - output tensor names, a list of strings, usually
just output_node_names each appended with ':0'
"""
# Some file processing
model_dir = tempfile.mkdtemp()
graph_def_file = os.path.join(model_dir, 'tf_graph.pbtxt')
checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
# add a saver
tf.reset_default_graph()
with graph.as_default() as g:
saver = tf.train.Saver()
with tf.Session(graph = graph) as sess:
# initialize
sess.run(tf.global_variables_initializer())
# prepare the tensorflow inputs
feed_dict = {}
for in_tensor_name in input_tensor_shapes:
in_tensor_shape = input_tensor_shapes[in_tensor_name]
feed_dict[in_tensor_name] = _generate_data(in_tensor_shape, data_mode)
# run the result
fetches = [graph.get_operation_by_name(name).outputs[0] for name in \
output_node_names]
tf_result = sess.run(fetches, feed_dict=feed_dict)
# save graph definition somewhere
tf.train.write_graph(sess.graph, model_dir, graph_def_file)
# save the weights
saver.save(sess, checkpoint_file)
# freeze the graph
self._simple_freeze(
input_graph=graph_def_file,
input_checkpoint=checkpoint_file,
output_graph=frozen_model_file,
output_node_names=",".join(output_node_names))
if is_quantized:
tf_model_path = frozen_model_file
with open(tf_model_path, 'rb') as f:
serialized = f.read()
gdef = tf.GraphDef()
gdef.ParseFromString(serialized)
input_names = []
output_names = output_node_names
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default() as g:
transforms = ["add_default_attributes",
"remove_nodes(op=Identity, op=CheckNumerics)",
"fold_constants(ignore_errors=true)",
"fold_batch_norms",
"fold_old_batch_norms",
"quantize_weights(minimum_size=1)",
"quantize_nodes",
"strip_unused_nodes",
"sort_by_execution_order"]
transformed_graph_def = TransformGraph(gdef, input_names, output_names, transforms)
tf.import_graph_def(transformed_graph_def, name='')
tf.train.write_graph(graph, model_dir, "./tf_quantized_frozen.pb", as_text=False)
frozen_model_file = os.path.join(model_dir, 'tf_quantized_frozen.pb')
# convert the tensorflow model
output_tensor_names = [name + ':0' for name in output_node_names]
coreml_model = _convert_to_coreml(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file,
input_name_shape_dict=input_tensor_shapes,
output_names=output_tensor_names,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions)
#test numerical accuracy with CoreML
if check_numerical_accuracy:
self._test_coreml_accuracy(coreml_model,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only)
# Cleanup files - models on disk no longer useful
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
return coreml_model
def _test_tf_model_constant(self, graph, input_tensor_shapes, output_node_names,
data_mode='random', delta=1e-2, use_cpu_only=False,
one_dim_seq_flags=None):
""" Common entry to testing routine for graphs that have no variables.
graph - defined TensorFlow graph.
input_tensor_shapes - dict str:shape for each input (placeholder)
output_node_names - output_node_names, a list of strings
output_tensor_names - output tensor names, a list of strings, usually
just output_node_names each appended with ':0'
"""
model_dir = tempfile.mkdtemp()
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
with tf.Session(graph = graph) as sess:
# initialize
sess.run(tf.global_variables_initializer())
# prepare the tensorflow inputs
feed_dict = {}
for in_tensor_name in input_tensor_shapes:
in_tensor_shape = input_tensor_shapes[in_tensor_name]
feed_dict[in_tensor_name] = _generate_data(in_tensor_shape, data_mode)
# run the result
fetches = [graph.get_operation_by_name(name).outputs[0] for name in \
output_node_names]
tf_result = sess.run(fetches, feed_dict=feed_dict)
#save the frozen .pb
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
            output_node_names  # The output node names are used to select the useful nodes
)
with tf.gfile.GFile(frozen_model_file, "wb") as f:
f.write(output_graph_def.SerializeToString())
# convert the tensorflow model
output_tensor_names = [name + ':0' for name in output_node_names]
coreml_model = _convert_to_coreml(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file,
input_name_shape_dict=input_tensor_shapes,
output_names=output_tensor_names)
#test numerical accuracy with CoreML
self._test_coreml_accuracy(coreml_model,
output_node_names, input_tensor_shapes, one_dim_seq_flags,
feed_dict, tf_result, delta, use_cpu_only)
# Cleanup files - models on disk no longer useful
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
class TFSimpleNetworkTest(TFNetworkTest):
def test_toy(self):
# Define your TF graph here
graph = tf.Graph()
with graph.as_default() as g:
# matrix1 is input of shape (Batch=1,Channels=2)
matrix1 = tf.placeholder(tf.float32, shape=[1,2], name="test_toy/input")
matrix2 = tf.Variable(tf.truncated_normal([2,1]))
product = tf.matmul(matrix1, matrix2, name = "test_toy/product")
saver = tf.train.Saver()
self._test_tf_model(graph, {"test_toy/input:0":[1,2]},
["test_toy/product"], delta=1e-2)
def test_linear(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20], name="test_linear/input")
# Make a redundant tensor. It should get trimmed
gt = tf.placeholder(tf.float32, shape=[None,10])
W = tf.Variable(tf.ones([20,10]))
b = tf.Variable(tf.ones([10]))
y = tf.matmul(x,W) + b
output_name = [y.op.name]
# not batched
self._test_tf_model(graph, {"test_linear/input:0":[1,20]},
output_name, delta=1e-2)
# batched
self._test_tf_model(graph, {"test_linear/input:0":[8,20]},
output_name, delta=1e-2)
def test_log(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20], name="test_log/input")
# Make a redundant tensor. It should get trimmed
gt = tf.placeholder(tf.float32, shape=[None,10])
W = tf.Variable(tf.ones([20,10]))
b = tf.Variable(tf.ones([10]))
y = tf.log(tf.matmul(x,W) + b)
output_name = [y.op.name]
self._test_tf_model(graph, {"test_log/input:0":[1,20]},
output_name, delta=1e-2)
def test_simple_convnet(self):
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
graph = tf.Graph()
with graph.as_default() as g:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.placeholder(tf.float32, shape=[None,28,28,1],
name="test_simple_conv/input")
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
output_name = [h_pool2.op.name]
self._test_tf_model(graph,
{"test_simple_conv/input:0":[1,28,28,1]},
output_name, delta=1e-2)
def test_convnet(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_convnet/input")
W_conv1 = tf.Variable(tf.truncated_normal([3,3,3,2], stddev=0.3))
h_conv1 = tf.nn.conv2d(x_image,W_conv1, strides=[1,1,1,1], padding='SAME')
h_conv1_flat = tf.reshape(h_conv1, [-1, 8*8*2])
W_fc1 = tf.Variable(tf.truncated_normal([8*8*2,4], stddev=0.3))
h_fc1 = tf.matmul(h_conv1_flat, W_fc1)
output_name = [h_fc1.op.name]
# not batched
self._test_tf_model(graph,
{"test_convnet/input:0":[1,8,8,3]}, output_name, delta=1e-2)
# batched
self._test_tf_model(graph,
{"test_convnet/input:0":[10,8,8,3]}, output_name, delta=1e-2)
def test_convnet_quantized(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_convnet/input")
W_conv1 = tf.Variable(tf.truncated_normal([3,3,3,2], stddev=0.3))
h_conv1 = tf.nn.conv2d(x_image,W_conv1, strides=[1,1,1,1], padding='SAME')
h_conv1_flat = tf.reshape(h_conv1, [-1, 8*8*2])
W_fc1 = tf.Variable(tf.truncated_normal([8*8*2,4], stddev=0.3))
h_fc1 = tf.matmul(h_conv1_flat, W_fc1)
output_name = [h_fc1.op.name]
# quantized
self._test_tf_model(graph,
{"test_convnet/input:0":[1,8,8,3]}, output_name, delta=0.20,is_quantized=True)
def test_reduce_max(self):
graph = tf.Graph()
with graph.as_default() as g:
# placeholder constructor returns a tensor not an op
x = tf.placeholder(tf.float32, shape=[None,20],
name="test_reduce_max/input")
W = tf.Variable(tf.ones([20,10]))
y = tf.matmul(x,W)
output = tf.reduce_max(y, axis=-1)
output_name = [output.op.name]
# not batched
self._test_tf_model(graph, {"test_reduce_max/input:0":[1,20]},
output_name, delta=1e-2)
def test_pad_conv_fuse(self):
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,32,18,3],
name="test_pad_conv/input")
W = tf.Variable(tf.truncated_normal([9,9,3,5], stddev=1))
paddings = tf.constant([[0, 0], [5,5], [1,1], [0, 0]])
x_pad = tf.pad(x, paddings, "CONSTANT")
output = tf.nn.conv2d(x_pad,W,strides=[1,1,1,1], padding='VALID')
output_name = [output.op.name]
self._test_tf_model(graph,
{"test_pad_conv/input:0":[1,32,18,3]}, output_name, delta=.05)
def test_dilated_conv(self):
# params: (Hin, Win, K, dilation)
Cin = 3
Cout = 5
params = [(32,18,3,3),
(14,13,3,4),
(14,19,1,3),
(17,18,5,3),
(14,20,3,3)]
for param in params:
Hin, Win, K, d = param
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,Hin,Win,Cin],
name="test_pad_conv/input")
W = tf.Variable(tf.truncated_normal([K,K,Cin,Cout], stddev=1))
output = tf.nn.convolution(x,W,strides=[1,1], padding='VALID',
dilation_rate=[d,d])
output_name = [output.op.name]
self._test_tf_model(graph,
{"test_pad_conv/input:0":[1,Hin,Win,Cin]}, output_name, delta=.05)
class TFSingleLayersTest(TFNetworkTest):
""" Small models from tensorflow.layers
"""
def test_dense(self):
# dense layer with some activation
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,10],
name="test_dense/input")
y = tf.layers.dense(inputs=x, units=16, activation=tf.sigmoid)
output_name = [y.op.name]
self._test_tf_model(graph,
{"test_dense/input:0":[1,10]}, output_name, delta=1e-2,is_quantized=False)
def test_dense_quantized(self):
# dense layer with some activation
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None,10],
name="test_dense/input")
y = tf.layers.dense(inputs=x, units=16, activation=tf.sigmoid)
output_name = [y.op.name]
self._test_tf_model(graph,
{"test_dense/input:0":[1,10]}, output_name, delta=0.05,is_quantized=True)
def test_dense_concat(self):
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None, 10],
name="test_dense/input")
y = tf.layers.dense(inputs=x, units=16, activation=tf.nn.relu)
z1 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z2 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z3 = tf.layers.dense(inputs=y, units=20, activation=tf.nn.relu)
z = tf.concat([z1,z2,z3], axis=1)
output_name = [z.op.name]
self._test_tf_model(graph,
{"test_dense/input:0": [1, 10]}, output_name, delta=1e-2)
def test_conv2d(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[5,5],
padding='same', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d/input:0":[1,8,8,3]}, output_name, delta=1e-2, is_quantized=False)
def test_conv2d_quantized(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[5,5],
padding='same', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d/input:0":[1,8,8,3]}, output_name, delta=0.05, is_quantized=True)
def test_conv2d_valid(self):
# conv layer with "fused activation"
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d_valid/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_valid/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_stride2(self):
# conv layer with stride 2 (no activation)
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2d_stride2/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', strides=(2,2))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_stride2/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_dilated(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,32,32,3],
name="test_conv2d_dilated/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='valid', dilation_rate=(3,4))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2d_dilated/input:0":[1,32,32,3]}, output_name, delta=1e-2)
def test_conv2dt(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='same', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2dt_valid(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt_valid/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', activation=tf.nn.relu)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt_valid/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2dt_stride2(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_conv2dt_stride2/input")
conv1 = tf.layers.conv2d_transpose(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', strides=(2,2))
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv2dt_stride2/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv2d_avepool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_avepool/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
pool1 = tf.layers.average_pooling2d(inputs=conv1, pool_size=[2, 2],
strides=2)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv2d_avepool/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_maxpool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_maxpool/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[3, 3], strides=1,
padding='same')
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv2d_maxpool/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_bn(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_bn/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=4, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
bn1 = tf.layers.batch_normalization(inputs=conv1, axis=-1)
output_name = [bn1.op.name]
self._test_tf_model(graph,
{"test_conv2d_bn/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_conv2d_spatial_bn(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_bn/input")
bn1 = tf.layers.batch_normalization(inputs=x_image, axis=2)
output_name = [bn1.op.name]
self._test_tf_model(graph,
{"test_conv2d_bn/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_separable_conv2d(self):
# separable conv layer
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
name="test_separable_conv2d/input")
conv1 = tf.layers.separable_conv2d(inputs=x_image, filters=4,
kernel_size=[3,3], padding='valid', depth_multiplier=2)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_separable_conv2d/input:0":[1,8,8,3]}, output_name, delta=1e-2)
def test_conv1d(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='valid', use_bias=True)
output_name = [conv1.op.name]
self._test_tf_model(graph,
{"test_conv1d/input:0":[1,8,3]}, output_name, data_mode='linear',
delta=.05)
def test_conv1d_dense(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_dense/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='same')
conv1_flat = tf.reshape(conv1,[-1,8*2])
y = tf.layers.dense(inputs=conv1_flat, units=6, activation=tf.nn.relu)
output_name = [y.op.name]
# not batched
self._test_tf_model(graph,
{"test_conv1d_dense/input:0":[1,8,3]}, output_name, delta=1e-2)
# batched
self._test_tf_model(graph,
{"test_conv1d_dense/input:0":[10,8,3]}, output_name, delta=1e-2)
def test_conv1d_avepool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_avepool/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=5,
padding='same')
pool1 = tf.layers.average_pooling1d(inputs=conv1, pool_size=2,
strides=2)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv1d_avepool/input:0":[1,8,3]}, output_name, delta=1e-2)
def test_conv1d_maxpool(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,8,3],
name="test_conv1d_maxpool/input")
conv1 = tf.layers.conv1d(inputs=x_image, filters=2, kernel_size=3,
padding='same')
pool1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2,
strides=1)
output_name = [pool1.op.name]
self._test_tf_model(graph,
{"test_conv1d_maxpool/input:0":[1,8,3]}, output_name, delta=1e-2)
def test_conv2d_resize_bilinear(self):
graph = tf.Graph()
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None,16,16,3],
name="test_conv2d_resize_bl/input")
conv1 = tf.layers.conv2d(inputs=x_image, filters=3, kernel_size=[3,3],
padding='same', activation=tf.nn.relu)
bl1 = tf.image.resize_bilinear(images=conv1, size=[32,32])
output_name = [bl1.op.name]
self._test_tf_model(graph,
{"test_conv2d_resize_bl/input:0":[1,16,16,3]}, output_name, delta=1e-2)
def test_concat_constants(self):
graph = tf.Graph()
x, y = np.meshgrid(np.linspace(0., 1., 256), np.linspace(0., 1., 256))
x = np.reshape(x, [1, 256, 256, 1])
y = np.reshape(y, [1, 256, 256, 1])
with graph.as_default() as g:
x_image = tf.placeholder(tf.float32, shape=[None, 256, 256, 3],
name="input_image")
xx = tf.constant(x, dtype=tf.float32)
yy = tf.constant(y, dtype=tf.float32)
img_concatenated = tf.concat([x_image, xx, yy], -1, name='concat')
output_name = [img_concatenated.op.name]
self._test_tf_model_constant(graph,
{"input_image:0": [1, 256, 256, 3]}, output_name, delta=1e-2)
def test_split(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None,10,10,6], name="input")
y1, y2 = tf.split(x_input, 2, axis=3)
z = tf.add(y1, y2, name='output')
output_name = [z.op.name]
self._test_tf_model_constant(graph,
{"input:0":[1,10,10,6]}, output_name, delta=1e-2)
def test_sqrt(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None,10,10,6], name="input")
z = tf.sqrt(x_input, name='output')
output_name = [z.op.name]
self._test_tf_model_constant(graph,
{"input:0":[1,10,10,6]}, output_name, delta=1e-2)
def test_pow(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None,5,5,6], name="input")
z = tf.pow(x_input, 4, name='output')
output_name = [z.op.name]
self._test_tf_model_constant(graph,
{"input:0":[1,5,5,6]}, output_name, delta=1e-2)
def test_leaky_relu(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None,5,5,6], name="input")
z = tf.nn.leaky_relu(x_input, 0.2, name='output')
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0":[1,5,5,6]},
output_name, delta=1e-2,
data_mode="random_zero_mean")
def test_resize_bilinear_non_fractional(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None, 10, 10, 3], name="input")
z = tf.image.resize_bilinear(x_input, size=[20, 30], align_corners=True)
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0":[1,10,10,3]}, output_name, delta=1e-2)
def test_resize_bilinear_non_fractional_upsample_mode(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None, 10, 10, 3], name="input")
z = tf.image.resize_bilinear(x_input, size=[20, 30], align_corners=False)
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0":[1,10,10,3]}, output_name, delta=1e-2)
def test_resize_bilinear_fractional(self):
graph = tf.Graph()
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None, 10, 10, 3], name="input")
z = tf.image.resize_bilinear(x_input, size=[25, 45], align_corners=False)
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0":[1,10,10,3]}, output_name, delta=1e-2)
def test_crop_resize(self):
graph = tf.Graph()
roi = np.zeros((2, 4), dtype=np.float32)
box_ind = np.zeros((2))
roi[0, :] = [0.24, 0.34, 0.8, 0.9]
roi[1, :] = [0.05, 0.25, 0.5, 0.7]  # second crop box
with graph.as_default() as g:
x_input = tf.placeholder(tf.float32, shape=[None, 10, 10, 3], name="input")
z = tf.image.crop_and_resize(x_input, roi, box_ind, crop_size=[6, 7])
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0":[1,10,10,3]}, output_name, delta=1e-2)
def _test_reorganize_data(self, op, shape):
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=shape, name="input")
z = op(x, block_size=2, name='output')
output_name = [z.op.name]
self._test_tf_model_constant(graph, {"input:0": shape}, output_name)
def test_depth_to_space(self):
self._test_reorganize_data(tf.depth_to_space, [1, 1, 1, 4])
def test_space_to_depth(self):
self._test_reorganize_data(tf.space_to_depth, [1, 2, 2, 1])
class TFSlimTest(TFNetworkTest):
"""Small models for tf.slim layers
"""
def test_slim_stacked_conv2d(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_stacked_conv2d/input')
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
net = slim.conv2d(net, 4, [3, 3], padding='VALID', scope='conv2')
net = slim.conv2d(net, 8, [3, 3], scope='conv3')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_stacked_conv2d/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_repeat(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_repeat/input')
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_repeat/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_fc(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,8],
name='test_slim_vgg_fc/input')
with slim.arg_scope([slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.fully_connected(inputs, 10, scope='fc')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_vgg_fc/input:0":[1,8]},
output_name, delta=1e-2)
def test_slim_convnet(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,8,8,3],
name='test_slim_convnet/input')
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(inputs, 2, [3, 3], scope='conv1')
net = slim.flatten(net, scope='flatten3')
net = slim.fully_connected(net, 6, scope='fc6')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_convnet/input:0":[1,8,8,3]},
output_name, delta=1e-2)
def test_slim_lenet(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,28,28,1],
name='test_slim_lenet/input')
net = slim.conv2d(inputs, 4, [5,5], scope='conv1')
net = slim.avg_pool2d(net, [2,2], scope='pool1')
net = slim.conv2d(net, 6, [5,5], scope='conv2')
net = slim.max_pool2d(net, [2,2], scope='pool2')
net = slim.flatten(net, scope='flatten3')
net = slim.fully_connected(net, 10, scope='fc4')
net = slim.fully_connected(net, 10, activation_fn=None, scope='fc5')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_lenet/input:0":[1,28,28,1]},
output_name, delta=1e-2)
def test_slim_one_hot(self):
graph = tf.Graph()
with graph.as_default() as g:
# input is usually a known / unknown batch size
inputs = tf.placeholder(tf.int64, shape=[None],
name='test_slim_one_hot/input')
net = slim.one_hot_encoding(inputs, 10)
net = slim.fully_connected(net, 6)
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_one_hot/input:0":[3]},
output_name, delta=1e-2, data_mode='linear',
one_dim_seq_flags=[True])
def test_slim_conv_bn(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_conv2d_bn/input')
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
net = slim.batch_norm(net, center=True, scale=True, is_training=False)
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_conv2d_bn/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_conv_bn_no_beta(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_conv_bn_no_beta/input')
with slim.arg_scope([slim.conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
net = slim.batch_norm(net, center=False, scale=False, is_training=False)
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_conv_bn_no_beta/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_separable_conv(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_separable_conv2d/input')
with slim.arg_scope([slim.separable_conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
net = slim.separable_conv2d(inputs, 2, [5, 5], 2, scope='conv1')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_separable_conv2d/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_dilated_depthwise_conv(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_separable_conv2d/input')
with slim.arg_scope([slim.separable_conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
net = slim.separable_conv2d(inputs,
num_outputs=None,
stride=1,
depth_multiplier=1,
kernel_size=[3, 3],
rate=2,
scope='conv1')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_separable_conv2d/input:0":[1,16,16,3]},
output_name, delta=1e-2)
def test_slim_deconv(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_decconv2d/input')
with slim.arg_scope([slim.separable_conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
net = slim.conv2d_transpose(inputs, 2, [3, 3], scope='conv1')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_decconv2d/input:0":[1,16,16,3]},
output_name, delta=1e-2)
# TODO - this fails due to unsupported op "Tile"
@unittest.skip
def test_slim_plane_conv(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
name='test_slim_plane_conv2d/input')
with slim.arg_scope([slim.separable_conv2d], padding='SAME',
weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
net = slim.conv2d_in_plane(inputs, 2, [3, 3], scope='conv1')
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_plane_conv2d/input:0":[1,16,16,3]},
output_name, delta=1e-2)
# TODO - this fails due to unsupported op "Tile"
@unittest.skip
def test_slim_unit_norm(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None,8],
name='test_slim_unit_norm/input')
with slim.arg_scope([slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.fully_connected(inputs, 10, scope='fc')
net = slim.unit_norm(net,1)
output_name = [net.op.name]
self._test_tf_model(graph,
{"test_slim_unit_norm/input:0":[1,8]},
output_name, delta=1e-2)
class TFCustomLayerTest(TFNetworkTest):
"""
Test the arguments "add_custom_layers" and "custom_conversion_functions",
that are used to insert custom layers during conversion
"""
def test_custom_tile(self):
graph = tf.Graph()
with graph.as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None, 8], name='input')
with slim.arg_scope([slim.fully_connected],
weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
weights_regularizer=slim.l2_regularizer(0.0005)):
y = slim.fully_connected(inputs, 10, scope='fc')
y = slim.unit_norm(y, dim=1)
output_name = [y.op.name]
coreml_model = self._test_tf_model(graph,
{"input:0": [1, 8]},
output_name,
check_numerical_accuracy=False,
add_custom_layers=True)
spec = coreml_model.get_spec()
layers = spec.neuralNetwork.layers
self.assertIsNotNone(layers[9].custom)
self.assertEqual('Tile', layers[9].custom.className)
def test_custom_topk(self):
def _convert_topk(**kwargs):
tf_op = kwargs["op"]
coreml_nn_builder = kwargs["nn_builder"]
constant_inputs = kwargs["constant_inputs"]
params = NeuralNetwork_pb2.CustomLayerParams()
params.className = 'Top_K'
params.description = "Custom layer that corresponds to the top_k TF op"
params.parameters["sorted"].boolValue = tf_op.get_attr('sorted')
# get the value of k
k = constant_inputs.get(tf_op.inputs[1].name, 3)
params.parameters["k"].intValue = k
coreml_nn_builder.add_custom(name=tf_op.name,
input_names=[tf_op.inputs[0].name],
output_names=[tf_op.outputs[0].name],
custom_proto_spec=params)
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None, 8], name="input")
y = tf.layers.dense(inputs=x, units=12, activation=tf.nn.relu)
y = tf.nn.softmax(y, axis=1)
y = tf.nn.top_k(y, k=3, sorted=False, name='output')
output_name = ['output']
coreml_model = self._test_tf_model(graph,
{"input:0": [1, 8]},
output_name,
check_numerical_accuracy=False,
add_custom_layers=True,
custom_conversion_functions = {'TopKV2': _convert_topk})
spec = coreml_model.get_spec()
layers = spec.neuralNetwork.layers
self.assertIsNotNone(layers[3].custom)
self.assertEqual('Top_K', layers[3].custom.className)
self.assertEqual(3, layers[3].custom.parameters['k'].intValue)
self.assertEqual(False, layers[3].custom.parameters['sorted'].boolValue)
def test_custom_slice(self):
def _convert_slice(**kwargs):
tf_op = kwargs["op"]
coreml_nn_builder = kwargs["nn_builder"]
constant_inputs = kwargs["constant_inputs"]
params = NeuralNetwork_pb2.CustomLayerParams()
params.className = 'Slice'
params.description = "Custom layer that corresponds to the slice TF op"
# get the value of begin
begin = constant_inputs.get(tf_op.inputs[1].name, [0, 0, 0, 0])
size = constant_inputs.get(tf_op.inputs[2].name, [0, 0, 0, 0])
# add begin and size as two repeated weight fields
begin_as_weights = params.weights.add()
begin_as_weights.floatValue.extend(map(float, begin))
size_as_weights = params.weights.add()
size_as_weights.floatValue.extend(map(float, size))
coreml_nn_builder.add_custom(name=tf_op.name,
input_names=[tf_op.inputs[0].name],
output_names=[tf_op.outputs[0].name],
custom_proto_spec=params)
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default() as g:
x = tf.placeholder(tf.float32, shape=[None, 10, 10, 3], name="input")
W = tf.Variable(tf.truncated_normal([1, 1, 3, 5], stddev=0.1))
y = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
y = tf.slice(y, begin=[0, 1, 1, 1], size=[1, 2, 2, 2], name='output')
output_name = [y.op.name]
for key in [y.op.name, y.op.type]:
coreml_model = self._test_tf_model(graph,
{"input:0": [1, 10,10, 3]},
output_name,
check_numerical_accuracy=False,
add_custom_layers=True,
custom_conversion_functions={key: _convert_slice})
spec = coreml_model.get_spec()
layers = spec.neuralNetwork.layers
self.assertIsNotNone(layers[1].custom)
self.assertEqual('Slice', layers[1].custom.className)
self.assertEqual(2, len(layers[1].custom.weights))
if __name__ == '__main__':
unittest.main()
## To run a specific test:
# suite = unittest.TestSuite()
# suite.addTest(TFSimpleNetworkTest("test_convnet"))
# unittest.TextTestRunner().run(suite)
| 38.923207
| 93
| 0.643158
|
8e3a4bf28409d75b784cbfb3af8c89052b95fdd1
| 3,000
|
py
|
Python
|
test/azure/low-level/Expected/AcceptanceTests/HeadLowLevel/headlowlevel/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/azure/version-tolerant/Expected/AcceptanceTests/HeadVersionTolerant/headversiontolerant/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/azure/legacy/Expected/AcceptanceTests/Head/head/_configuration.py
|
Azure/autorest.python
|
c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class AutoRestHeadTestServiceConfiguration(Configuration):
"""Configuration for AutoRestHeadTestService.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(AutoRestHeadTestServiceConfiguration, self).__init__(**kwargs)
self.credential = credential
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "autorestheadtestservice/{}".format(VERSION))
self._configure(**kwargs)
def _configure(
self, **kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
| 45.454545
| 107
| 0.686333
|
101c6558c815f29471902457f0569a399aed0a71
| 895
|
py
|
Python
|
test/test_ezsignfolder_reorder_v1_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_reorder_v1_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_reorder_v1_request.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition (Full)
This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsignfolder_reorder_v1_request import EzsignfolderReorderV1Request
class TestEzsignfolderReorderV1Request(unittest.TestCase):
"""EzsignfolderReorderV1Request unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignfolderReorderV1Request(self):
"""Test EzsignfolderReorderV1Request"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsignfolderReorderV1Request() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.189189
| 97
| 0.719553
|
fe6aaf572146bcf151542b348b03d777fc1cbebb
| 1,231
|
py
|
Python
|
pySDC/projects/parallelSDC/Van_der_Pol_implicit_Jac.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 20
|
2015-03-21T09:02:55.000Z
|
2022-02-26T20:22:21.000Z
|
pySDC/projects/parallelSDC/Van_der_Pol_implicit_Jac.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 61
|
2015-03-02T09:35:55.000Z
|
2022-03-17T12:42:48.000Z
|
pySDC/projects/parallelSDC/Van_der_Pol_implicit_Jac.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 19
|
2015-02-20T11:52:33.000Z
|
2022-02-02T10:46:27.000Z
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
# noinspection PyUnusedLocal
class vanderpol_jac(vanderpol):
def eval_jacobian(self, u):
"""
Evaluation of the Jacobian of the right-hand side
Args:
u: space values
Returns:
Jacobian matrix
"""
x1 = u[0]
x2 = u[1]
dfdu = np.array([[0, 1], [-2 * self.params.mu * x1 * x2 - 1, self.params.mu * (1 - x1 ** 2)]])
return dfdu
def solve_system_jacobian(self, dfdu, rhs, factor, u0, t):
"""
Simple linear solver for (I-dtA)u = rhs
Args:
dfdu: the Jacobian of the RHS of the ODE
rhs: right-hand side for the linear system
factor: abbrev. for the node-to-node stepsize (or any other factor required)
u0: initial guess for the iterative solver (not used here so far)
t: current time (e.g. for time-dependent BCs)
Returns:
solution as mesh
"""
me = self.dtype_u(2)
me[:] = spsolve(sp.eye(2) - factor * dfdu, rhs)
return me
| 26.191489
| 102
| 0.582453
|
153a7394598bad5db23d4c5d3798a4194bff3120
| 544
|
py
|
Python
|
os_detector.py
|
cswpy/Head_position_orientation
|
6b1a10393a4e73fa02ee14382d5e6c997a45574a
|
[
"MIT"
] | null | null | null |
os_detector.py
|
cswpy/Head_position_orientation
|
6b1a10393a4e73fa02ee14382d5e6c997a45574a
|
[
"MIT"
] | null | null | null |
os_detector.py
|
cswpy/Head_position_orientation
|
6b1a10393a4e73fa02ee14382d5e6c997a45574a
|
[
"MIT"
] | null | null | null |
"""Detect OS"""
from platform import system
def detect_os(bypass=False):
"""Check OS, as multiprocessing may not work properly on Windows and macOS"""
if bypass is True:
return
os_name = system()
if os_name in ['Windows']:
print("It seems that you are running this code from {}, on which the Python multiprocessing may not work properly. Consider running this code on Linux.".format(os_name))
print("Exiting..")
#exit()
else:
print("Linux is fine! Python multiprocessing works.")
| 30.222222
| 177
| 0.661765
|
7ccbf87e037e64c4bd4725b64dc53eaeda818089
| 1,161
|
py
|
Python
|
spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2019-01-23T21:54:51.000Z
|
2019-01-23T21:54:51.000Z
|
spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | 1
|
2018-04-06T19:51:23.000Z
|
2018-04-06T19:51:23.000Z
|
spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py
|
nielm/google-cloud-python
|
fd126fdea34206109eb00d675374ff7dc4dcc5ef
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class Database(object):
class State(enum.IntEnum):
"""
Indicates the current state of the database.
Attributes:
STATE_UNSPECIFIED (int): Not specified.
CREATING (int): The database is still being created. Operations on the database may fail
with ``FAILED_PRECONDITION`` in this state.
READY (int): The database is fully created and ready for use.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
READY = 2
| 32.25
| 98
| 0.686477
|
cce13f0852a62cfeffdde976838b907e2e22aae4
| 805
|
py
|
Python
|
reclib/nn/regularizers/regularizers.py
|
tingkai-zhang/reclib
|
3c56dd7f811ab4d4f9f692efd0ee5e171a5f818b
|
[
"Apache-2.0"
] | 4
|
2019-09-16T08:33:19.000Z
|
2020-12-15T09:06:38.000Z
|
reclib/nn/regularizers/regularizers.py
|
tingkai-zhang/reclib
|
3c56dd7f811ab4d4f9f692efd0ee5e171a5f818b
|
[
"Apache-2.0"
] | null | null | null |
reclib/nn/regularizers/regularizers.py
|
tingkai-zhang/reclib
|
3c56dd7f811ab4d4f9f692efd0ee5e171a5f818b
|
[
"Apache-2.0"
] | 1
|
2020-03-04T21:33:44.000Z
|
2020-03-04T21:33:44.000Z
|
import torch
from reclib.nn.regularizers.regularizer import Regularizer
@Regularizer.register("l1")
class L1Regularizer(Regularizer):
"""Represents a penalty proportional to the sum of the absolute values of the parameters"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.abs(parameter))
@Regularizer.register("l2")
class L2Regularizer(Regularizer):
"""Represents a penalty proportional to the sum of squared values of the parameters"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.pow(parameter, 2))
| 30.961538
| 95
| 0.70559
|
cce3b9de994fc14f5b11219083bc77b54b81929f
| 15,771
|
py
|
Python
|
django/db/backends/mysql/base.py
|
christianbundy/django
|
f2ff1b2fabbe26d5e61d690c4c5a47f9582f9300
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/backends/mysql/base.py
|
christianbundy/django
|
f2ff1b2fabbe26d5e61d690c4c5a47f9582f9300
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/backends/mysql/base.py
|
christianbundy/django
|
f2ff1b2fabbe26d5e61d690c4c5a47f9582f9300
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
'Error loading MySQLdb module: %s.\n'
'Did you install mysqlclient or MySQL-python?' % e
)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (
version[:3] == (1, 2, 1) and (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
def adapt_datetime_warn_on_aware_datetime(value, conv):
# Remove this function and rely on the default adapter in Django 2.0.
if settings.USE_TZ and timezone.is_aware(value):
warnings.warn(
"The MySQL database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango20Warning)
# This doesn't account for the database connection's timezone,
# which isn't known. (That's why this adapter is deprecated.)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: backend_utils.typecast_time,
FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
datetime.datetime: adapt_datetime_warn_on_aware_datetime,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior.
self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
_data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BigAutoField': 'bigint AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
@cached_property
def data_types(self):
if self.features.supports_microsecond_precision:
return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
else:
return self._data_types
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
if six.PY2:
kwargs['use_unicode'] = True
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.encoders[SafeText] = conn.encoders[six.text_type]
conn.encoders[SafeBytes] = conn.encoders[bytes]
return conn
def init_connection_state(self):
if self.features.is_sql_auto_is_null_enabled:
with self.cursor() as cursor:
# SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on
# a recently inserted row will return when the field is tested
# for NULL. Disabling this brings this aspect of MySQL in line
# with SQL standards.
cursor.execute('SET SQL_AUTO_IS_NULL = 0')
def create_cursor(self):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
""" % (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection() as cursor:
cursor.execute('SELECT VERSION()')
server_info = cursor.fetchone()[0]
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
| 42.395161
| 117
| 0.641557
|
1fd2913dd99613b9dc8f2442c61ef034da1097a4
| 110
|
py
|
Python
|
python/timer_test.py
|
tbedford/code-snippets
|
9afe36c2726829f14fa5ec11acb8214bed704938
|
[
"MIT"
] | null | null | null |
python/timer_test.py
|
tbedford/code-snippets
|
9afe36c2726829f14fa5ec11acb8214bed704938
|
[
"MIT"
] | null | null | null |
python/timer_test.py
|
tbedford/code-snippets
|
9afe36c2726829f14fa5ec11acb8214bed704938
|
[
"MIT"
] | 1
|
2018-10-09T02:03:12.000Z
|
2018-10-09T02:03:12.000Z
|
import threading
def printit():
threading.Timer(1.0, printit).start()
print ("Hello, World!")
printit()
| 13.75
| 39
| 0.681818
|
08f7452f91737ccea888796c9f0016990ce6c4a5
| 41
|
py
|
Python
|
pylie/__init__.py
|
Ostoic/pylie
|
d568fda94ce4b0e249fcf9ffad83860aed5771a5
|
[
"MIT"
] | null | null | null |
pylie/__init__.py
|
Ostoic/pylie
|
d568fda94ce4b0e249fcf9ffad83860aed5771a5
|
[
"MIT"
] | null | null | null |
pylie/__init__.py
|
Ostoic/pylie
|
d568fda94ce4b0e249fcf9ffad83860aed5771a5
|
[
"MIT"
] | null | null | null |
import sl2
from constant import constants
| 20.5
| 30
| 0.878049
|
8bcea9c095553d0f4fa5b7eb7ae79632735b10cc
| 6,308
|
py
|
Python
|
PSD/finetune.py
|
zychen-ustc/PSD-Principled-Synthetic-to-Real-Dehazing-Guided-by-Physical-Priors
|
f200c202d08ecf8c8ef31d7821307b242e5767a0
|
[
"MIT"
] | 52
|
2021-06-14T07:59:08.000Z
|
2022-03-30T03:13:27.000Z
|
PSD/finetune.py
|
zychen-ustc/PSD-Principled-Synthetic-to-Real-Dehazing-Guided-by-Physical-Priors
|
f200c202d08ecf8c8ef31d7821307b242e5767a0
|
[
"MIT"
] | 16
|
2021-07-17T07:20:12.000Z
|
2022-03-28T09:04:31.000Z
|
PSD/finetune.py
|
zychen-ustc/PSD-Principled-Synthetic-to-Real-Dehazing-Guided-by-Physical-Priors
|
f200c202d08ecf8c8ef31d7821307b242e5767a0
|
[
"MIT"
] | 11
|
2021-06-22T08:09:27.000Z
|
2022-03-28T11:19:59.000Z
|
import os
import argparse
import time
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import vgg16
import torchvision.utils as vutils
from datasets.pretrain_datasets import *
from datasets.finetune_datasets import *
from datasets.concat_dataset import ConcatDataset
from utils import *
from losses.energy_functions import *
from losses.loss_functions import *
parser = argparse.ArgumentParser()
# Input Parameters
parser.add_argument('--backbone', type=str, default='MSBDNNet', help='Backbone model(GCANet/FFANet/MSBDNNet)')
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--num_epochs', type=int, default=4)
parser.add_argument('--train_batch_size', type=int, default=6)
parser.add_argument('--val_batch_size', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--crop_size', type=int, default=256, help='size of random crop')
parser.add_argument('--category', type=str, default='outdoor', help='dataset type: indoor/outdoor')
parser.add_argument('--print_freq', type=int, default=20)
parser.add_argument('--label_dir', type=str, default='/data/nnice1216/RESIDE/OTS/')
parser.add_argument('--unlabel_dir', type=str, default='/data/nnice1216/Dehazing/')
parser.add_argument('--pseudo_gt_dir', type=str, default='/data/nnice1216/Dehazing/')
parser.add_argument('--val_dir', type=str, default='/data/nnice1216/RESIDE/SOTS/outdoor/')
parser.add_argument('--pretrain_model_dir', type=str, default='/model/nnice1216/DAD/')
parser.add_argument('--lambda_dc', type=float, default=2e-3)
parser.add_argument('--lambda_bc', type=float, default=3e-2)
parser.add_argument('--lambda_CLAHE', type=float, default=1)
parser.add_argument('--lambda_rec', type=float, default=1)
parser.add_argument('--lambda_lwf_label', type=float, default=1)
parser.add_argument('--lambda_lwf_unlabel', type=float, default=1)
parser.add_argument('--lambda_lwf_sky', type=float, default=1)
parser.add_argument('--lambda_tv', type=float, default=1e-4)
opt = parser.parse_known_args()[0]
device_ids = [Id for Id in range(torch.cuda.device_count())]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
crop_size = [opt.crop_size, opt.crop_size]
net = load_model(opt.backbone, opt.pretrain_model_dir, device, device_ids)
net_o = copy.deepcopy(net)
net_o.eval()
optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr)
print('Begin DataLoader!')
train_data_loader = DataLoader(
ConcatDataset(
TrainData(crop_size, opt.label_dir), # synthetic
RealTrainData_CLAHE(crop_size, opt.unlabel_dir) # real
#RealTrainData(crop_size, unlabel_data_dir)
#RealTrainData_pseudo_gt(crop_size, unlabel_data_dir_pseudo_gt)
),
batch_size=opt.train_batch_size,
shuffle=False,
num_workers=opt.num_workers,
drop_last=True)
val_data_loader = DataLoader(
ValData(opt.val_dir),
batch_size=opt.val_batch_size,
shuffle=False,
num_workers=opt.num_workers)
print('Dataloader Done')
train_psnr = 0
loss_f = energy_dc_loss()
loss_f2 = energy_bc_loss()
loss_f3 = energy_cap_loss()
for epoch in range(opt.num_epochs):
psnr_list = []
start_time = time.time()
adjust_learning_rate(optimizer, epoch, category=opt.category)
for batch_id, (label_train_data, unlabel_train_data) in enumerate(train_data_loader):
# --- load data --- #
label_haze, label_gt = label_train_data
unlabel_haze, unlabel_gt, haze_name = unlabel_train_data
unlabel_haze = unlabel_haze.to(device)
unlabel_gt = unlabel_gt.to(device)
label_haze = label_haze.to(device)
label_gt = label_gt.to(device)
# --- train --- #
optimizer.zero_grad()
net.train()
out, J_label, T_label, _, _ = net(label_haze)
out_o, J_label_o, T_label_o, _, _ = net_o(label_haze)
out1, J, T, A, I = net(unlabel_haze)
out1_o, J_1, T_o, _, _ = net_o(unlabel_haze)
I2 = T * unlabel_gt + (1 - T) * A
# --- losses --- #
#dc_loss = DCLoss(J, 35)
bc_loss = bright_channel(unlabel_haze, T)
energy_dc_loss = loss_f(unlabel_haze, T)
#tv_loss = tv_loss_f()(J)
rec_loss = F.smooth_l1_loss(I, unlabel_haze)
CLAHE_loss = F.smooth_l1_loss(I2, unlabel_haze)
#CLAHE_loss = F.smooth_l1_loss(J, unlabel_gt)
#rec3_loss = F.smooth_l1_loss(J, unlabel_gt)
lwf_loss_label = F.smooth_l1_loss(out, out_o)
lwf_loss_unlabel = F.smooth_l1_loss(out1, out1_o)
lwf_loss_sky = lwf_sky(unlabel_haze, J, J_1)
loss = opt.lambda_rec * rec_loss + opt.lambda_lwf_label * lwf_loss_label\
+ opt.lambda_lwf_unlabel * lwf_loss_unlabel + opt.lambda_lwf_sky * lwf_loss_sky\
+ opt.lambda_bc * bc_loss + opt.lambda_dc * energy_dc_loss\
+ opt.lambda_CLAHE * CLAHE_loss
#loss = opt.lambda_rec * rec_loss\
# + opt.lambda_bc * bc_loss + opt.lambda_dc * energy_dc_loss\
# + opt.lambda_CLAHE * CLAHE_loss
loss.backward()
optimizer.step()
if not (batch_id % opt.print_freq):
print('Epoch: {}, Iteration: {}, Loss: {}'.format(epoch, batch_id, loss))
# print('Epoch: {}, Iteration: {}, Loss: {}, energy_dc: {}, energy_bc: {}'.format(epoch, batch_id, loss, energy_cap_loss, energy_bc_loss))
# --- save model --- #
torch.save(net.state_dict(), '/output/{}_epoch{}.pth'.format(opt.backbone, epoch))
#torch.save(net.state_dict(), '/code/{}_epoch{}.pth'.format(opt.backbone, epoch))
# --- Use the evaluation model in testing --- #
net.eval()
val_psnr, val_ssim = validation(net, opt.backbone, val_data_loader, device, opt.category)
one_epoch_time = time.time() - start_time
print_log(epoch+1, opt.num_epochs, one_epoch_time, train_psnr, val_psnr, val_ssim, opt.category)
# --- output test images --- #
generate_test_images(net, TestData, opt.num_epochs, (0, opt.num_epochs - 1))
| 37.772455
| 152
| 0.675491
|
466e800a421ac009f0d81562ca0ff4ac1685b0f8
| 834
|
py
|
Python
|
detector/settings.py
|
gleseur/room-status
|
f6804618b83a96d85dbc6bf92e4a8f4efa48003e
|
[
"MIT"
] | null | null | null |
detector/settings.py
|
gleseur/room-status
|
f6804618b83a96d85dbc6bf92e4a8f4efa48003e
|
[
"MIT"
] | null | null | null |
detector/settings.py
|
gleseur/room-status
|
f6804618b83a96d85dbc6bf92e4a8f4efa48003e
|
[
"MIT"
] | null | null | null |
u"""
Here we define the settings for the detection
"""
from __future__ import unicode_literals
import local_settings
DETECTION_PAIRS = {
"male": {
"room_id": 1, # To communicate with Meteor API
"pir": 7,
"light": 17,
"free_time": 30, # time in seconds after which we deem the room freed
"lock_time": 1, # time in seconds before which we deem there can be someone else in the room
},
"female": {
"room_id": 2, # To communicate with Meteor API
"pir": 8,
"light": 23,
"free_time": 30, # time in seconds after which we deem the room freed
"lock_time": 1, # time in seconds before which we deem there can be someone else in the room
}
}
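# Example of how these settings are read by the detector script (the consumer is assumed,
# not shown here):
#   DETECTION_PAIRS["male"]["pir"] -> 7 (GPIO pin of the PIR motion sensor)
#   DETECTION_PAIRS["male"]["free_time"] -> 30 (seconds without motion before the room is marked free)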
METEOR_API_URL = "http://wc-status.meteor.com"
METEOR_PASSWORD = local_settings.METEOR_PASSWORD
| 29.785714
| 101
| 0.639089
|
f16b84cb4b2c268bcb742db09aa83e3e165a9c8c
| 4,610
|
py
|
Python
|
src/spn/algorithms/CnetStructureLearning.py
|
felixdivo/SPFlow
|
682f044751145915680c6c82f0aa347e464620de
|
[
"Apache-2.0"
] | null | null | null |
src/spn/algorithms/CnetStructureLearning.py
|
felixdivo/SPFlow
|
682f044751145915680c6c82f0aa347e464620de
|
[
"Apache-2.0"
] | null | null | null |
src/spn/algorithms/CnetStructureLearning.py
|
felixdivo/SPFlow
|
682f044751145915680c6c82f0aa347e464620de
|
[
"Apache-2.0"
] | null | null | null |
"""
Created on October 27, 2018
@author: Nicola Di Mauro
"""
import logging
from collections import deque
from spn.algorithms.StructureLearning import Operation, default_slicer
logger = logging.getLogger(__name__)
try:
from time import perf_counter
except:
from time import time
perf_counter = time
import numpy as np
from spn.algorithms.TransformStructure import Prune
from spn.algorithms.Validity import is_valid
from spn.structure.Base import Product, Sum, assign_ids
import multiprocessing
import os
cpus = max(1, os.cpu_count() - 2) # - int(os.getloadavg()[2])
# pool = multiprocessing.Pool(processes=cpus)
def get_next_operation_cnet(min_instances_slice=100, min_features_slice=1):
def next_operation_cnet(data, scope):
minimalFeatures = len(scope) == min_features_slice
minimalInstances = data.shape[0] <= min_instances_slice
if minimalFeatures or minimalInstances:
return Operation.CREATE_LEAF, None
else:
return Operation.CONDITIONING, None
return next_operation_cnet
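# Example of the decision rule above: with the defaults min_instances_slice=100 and
# min_features_slice=1, a slice with 80 rows, or with a single remaining variable in its
# scope, becomes a leaf; a slice with 500 rows and 10 variables is split further by
# CONDITIONING on one binary feature.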
def learn_structure_cnet(
dataset,
ds_context,
conditioning,
create_leaf,
next_operation_cnet=get_next_operation_cnet(),
initial_scope=None,
data_slicer=default_slicer,
):
assert dataset is not None
assert ds_context is not None
assert create_leaf is not None
assert next_operation_cnet is not None
root = Product()
root.children.append(None)
if initial_scope is None:
initial_scope = list(range(dataset.shape[1]))
tasks = deque()
tasks.append((dataset, root, 0, initial_scope))
while tasks:
local_data, parent, children_pos, scope = tasks.popleft()
operation, op_params = next_operation_cnet(local_data, scope)
logging.debug("OP: {} on slice {} (remaining tasks {})".format(operation, local_data.shape, len(tasks)))
if operation == Operation.CONDITIONING:
from spn.algorithms.splitting.Base import split_data_by_clusters
conditioning_start_t = perf_counter()
col_conditioning, found_conditioning = conditioning(local_data)
if not found_conditioning:
node = create_leaf(local_data, ds_context, scope)
parent.children[children_pos] = node
continue
clusters = (local_data[:, col_conditioning] == 1).astype(int)
data_slices = split_data_by_clusters(local_data, clusters, scope, rows=True)
node = Sum()
node.scope.extend(scope)
parent.children[children_pos] = node
for data_slice, scope_slice, proportion in data_slices:
assert isinstance(scope_slice, list), "slice must be a list"
node.weights.append(proportion)
product_node = Product()
node.children.append(product_node)
node.children[-1].scope.extend(scope)
right_data_slice = np.hstack(
(data_slice[:, :col_conditioning], data_slice[:, (col_conditioning + 1) :])
).reshape(data_slice.shape[0], data_slice.shape[1] - 1)
product_node.children.append(None)
tasks.append(
(
right_data_slice,
product_node,
len(product_node.children) - 1,
scope_slice[:col_conditioning] + scope_slice[col_conditioning + 1 :],
)
)
left_data_slice = data_slice[:, col_conditioning].reshape(data_slice.shape[0], 1)
product_node.children.append(None)
tasks.append(
(left_data_slice, product_node, len(product_node.children) - 1, [scope_slice[col_conditioning]])
)
conditioning_end_t = perf_counter()
logging.debug("\t\tconditioning (in {:.5f} secs)".format(conditioning_end_t - conditioning_start_t))
continue
elif operation == Operation.CREATE_LEAF:
cltree_start_t = perf_counter()
node = create_leaf(local_data, ds_context, scope)
parent.children[children_pos] = node
cltree_end_t = perf_counter()
else:
raise Exception("Invalid operation: " + str(operation))
node = root.children[0]
assign_ids(node)
valid, err = is_valid(node)
assert valid, "invalid spn: " + err
node = Prune(node)
valid, err = is_valid(node)
assert valid, "invalid spn: " + err
return node
| 31.360544
| 116
| 0.634707
|
e2a9da298a21566d7596e287cf46cfe9590b22a5
| 3,744
|
py
|
Python
|
examples/tensorflow2_keras_mnist.py
|
hcyang99/horovod
|
825cc197468548da47dcd38872d5b4ba6e6a125b
|
[
"Apache-2.0"
] | 2
|
2020-08-31T18:52:20.000Z
|
2020-09-01T00:13:41.000Z
|
examples/tensorflow2_keras_mnist.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | null | null | null |
examples/tensorflow2_keras_mnist.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | 3
|
2021-05-29T06:12:49.000Z
|
2022-02-24T06:34:59.000Z
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import horovod.tensorflow.keras as hvd
# Horovod: initialize Horovod.
hvd.init()
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
(mnist_images, mnist_labels), _ = \
tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
tf.cast(mnist_labels, tf.int64))
)
dataset = dataset.repeat().shuffle(10000).batch(128)
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),
tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
# Horovod: adjust learning rate based on number of GPUs.
scaled_lr = 0.001 * hvd.size()
opt = tf.optimizers.Adam(scaled_lr)
# Horovod: add Horovod DistributedOptimizer.
opt = hvd.DistributedOptimizer(opt)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),
optimizer=opt,
metrics=['accuracy'],
experimental_run_tf_function=False)
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=3, initial_lr=scaled_lr, verbose=1),
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
callbacks.append(tf.keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
# Train the model.
# Horovod: adjust number of steps based on number of GPUs.
mnist_model.fit(dataset, steps_per_epoch=500 // hvd.size(), callbacks=callbacks, epochs=24, verbose=verbose)
| 41.142857
| 108
| 0.710203
|
0555019766ae773c3ea9c6c4009bd9ff9f064d74
| 460
|
py
|
Python
|
project-master/proj01/proj01/proj01.py
|
dstingley22/project
|
6557409ae149d24791c8f2da4f8d7b9be62c8012
|
[
"MIT"
] | null | null | null |
project-master/proj01/proj01/proj01.py
|
dstingley22/project
|
6557409ae149d24791c8f2da4f8d7b9be62c8012
|
[
"MIT"
] | null | null | null |
project-master/proj01/proj01/proj01.py
|
dstingley22/project
|
6557409ae149d24791c8f2da4f8d7b9be62c8012
|
[
"MIT"
] | null | null | null |
# Name:Damian Stingley
# Date:6/19/17
# proj01: A Simple Program
# This program asks the user for his/her name and age.
# Then, it prints a sentence that says when the user will turn 100.
# If you complete extensions, describe your extensions here!
name = raw_input('Enter your name ')
age = int(raw_input('How old are you '))
print name + " will turn 100 in " + str(2017 - age + 100)
if age == 10:
print "you're 10"
elif age == 20:
print "you're 20"
else:
print "you're not 10 or 20."
| 25.555556
| 67
| 0.693478
|
d746bc226f2b6e97930b078db84a7d27ad3e8ba4
| 3,131
|
py
|
Python
|
cellular_automata.py
|
ferakon/elementary-cellular-automata-py
|
4dc14ef635c8ffd7faab741a65ea6a633c351f30
|
[
"MIT"
] | null | null | null |
cellular_automata.py
|
ferakon/elementary-cellular-automata-py
|
4dc14ef635c8ffd7faab741a65ea6a633c351f30
|
[
"MIT"
] | null | null | null |
cellular_automata.py
|
ferakon/elementary-cellular-automata-py
|
4dc14ef635c8ffd7faab741a65ea6a633c351f30
|
[
"MIT"
] | null | null | null |
"""Simple code for a Wolfram elementary cellular automata in python"""
import os
import time
import numpy as np
from skimage.io import imsave
def define_rule_sets(set_size):
"""Find every possible rule for given set size"""
rule_sets = []
for n in range(1<<set_size):
s = bin(n)[2:]
s = '0'*(set_size-len(s))+s
rule_sets.append((list(map(int, list(s)))))
return rule_sets
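# Example: define_rule_sets(8)[30] == [0, 0, 0, 1, 1, 1, 1, 0], i.e. Wolfram rule 30.
# The entries are the outputs for the neighbourhoods 111, 110, 101, 100, 011, 010, 001, 000,
# in that order, matching the lookup order used by apply_rules below.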
def apply_rules(l, m, r, rules):
"""Apply selected rule to cell
Apply the selected rule to a cell, given its current state m and its neighbours'
states l and r.
Args:
l: left neighbour cell state.
m: current cell state.
r: right neighbour cell state.
rules: the current rule, as an array of 8 outputs.
"""
if l == 1 and m == 1 and r == 1:
return rules[0]
if l == 1 and m == 1 and r == 0:
return rules[1]
if l == 1 and m == 0 and r == 1:
return rules[2]
if l == 1 and m == 0 and r == 0:
return rules[3]
if l == 0 and m == 1 and r == 1:
return rules[4]
if l == 0 and m == 1 and r == 0:
return rules[5]
if l == 0 and m == 0 and r == 1:
return rules[6]
if l == 0 and m == 0 and r == 0:
return rules[7]
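# Example: with rules = define_rule_sets(8)[30] (Wolfram rule 30),
# apply_rules(0, 1, 1, rules) returns rules[4] == 1.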
def update_ca(current, rules):
"""Get next ca state given current state
Get the next generations state from the current cell map for a
specified rule.
Args:
current: array of current cell states.
rules: array current rule.
"""
next_generation = np.ndarray([current.size])
i = 0
while i < current.size:
if i == 0:
left = current[current.size-1]
else:
left = current[i-1]
me = current[i]
if i == current.size-1:
right = current[0]
else:
right = current[i+1]
next_generation[i] = apply_rules(left, me, right, rules)
i+=1
return next_generation
def run_ca(generations, map_size, rule_sets):
"""Run the CA
Run CA for a number of generations for all possible
rules, and save each rule image.
Args:
generations: int number of generations to iterate.
map_size: int number of cells in each generation.
rule_sets: list of rules to implement.
"""
final_img = np.ndarray((map_size, generations)).astype(np.uint8)
rule_n = 0
for rs in rule_sets:
cell_map = np.zeros([map_size])
cell_map[int(map_size/2)] = 1
for r in range(generations):
final_img[:,r] = cell_map[:]
final_img[final_img == 1] = 255
#print(cell_map)
#time.sleep(0.1)
next_generation = update_ca(cell_map, rs)
cell_map[:] = next_generation[:]
imsave(os.path.join('outputs','{0}{1}'.format(rule_n,'_ca.png')), final_img.T)
rule_n+=1
if __name__ == '__main__':
generations = 100
map_size = 100
rule_sets = define_rule_sets(8)
run_ca(generations, map_size, rule_sets)
| 24.084615
| 86
| 0.547429
|
2aa3848cec961bd31440d2a74cc81278c17bdfd9
| 2,955
|
py
|
Python
|
paramak/parametric_components/port_cutters_rectangular.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | 9
|
2019-12-28T16:48:49.000Z
|
2020-05-20T13:38:49.000Z
|
paramak/parametric_components/port_cutters_rectangular.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | 8
|
2019-08-23T10:17:19.000Z
|
2020-05-21T18:47:22.000Z
|
paramak/parametric_components/port_cutters_rectangular.py
|
zmarkan/paramak
|
ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba
|
[
"MIT"
] | 1
|
2020-05-07T16:14:26.000Z
|
2020-05-07T16:14:26.000Z
|
from typing import Optional
from paramak import ExtrudeStraightShape
class PortCutterRectangular(ExtrudeStraightShape):
"""Creates an extruded shape with a rectangular section that is used to cut
other components (e.g. blanket, vessel, ...) in order to create ports.
Args:
height: height (cm) of the port cutter.
width: width (cm) of the port cutter.
distance: extruded distance (cm) of the port cutter.
center_point: Center point of the port cutter. Defaults to (0, 0).
workplane: workplane in which the port cutters are created. Defaults
to "ZY".
rotation_axis: axis around which the port cutters are rotated and
placed. Defaults to "Z".
extrusion_start_offset (float, optional): the distance between 0 and
the start of the extrusion. Defaults to 1.0.
fillet_radius (float, optional): If not None, radius (cm) of fillets
added to edges orthogonal to the Z direction. Defaults to None.
name (str, optional): defaults to "rectangular_port_cutter".
"""
def __init__(
self,
height: float,
width: float,
distance: float,
center_point: Optional[tuple] = (0, 0),
workplane: Optional[str] = "ZY",
rotation_axis: Optional[str] = "Z",
extrusion_start_offset: Optional[float] = 1.,
fillet_radius: Optional[float] = None,
name: Optional[str] = "rectangular_port_cutter",
**kwargs
):
super().__init__(
workplane=workplane,
rotation_axis=rotation_axis,
extrusion_start_offset=extrusion_start_offset,
extrude_both=False,
name=name,
distance=distance,
**kwargs
)
self.height = height
self.width = width
self.center_point = center_point
self.fillet_radius = fillet_radius
def find_points(self):
if self.workplane[0] < self.workplane[1]:
parameter_1 = self.width
parameter_2 = self.height
else:
parameter_1 = self.height
parameter_2 = self.width
points = [
(-parameter_1 / 2, parameter_2 / 2),
(parameter_1 / 2, parameter_2 / 2),
(parameter_1 / 2, -parameter_2 / 2),
(-parameter_1 / 2, -parameter_2 / 2),
]
points = [(e[0] + self.center_point[0], e[1] +
self.center_point[1]) for e in points]
self.points = points
def add_fillet(self, solid):
if "X" not in self.workplane:
filleting_edge = "|X"
if "Y" not in self.workplane:
filleting_edge = "|Y"
if "Z" not in self.workplane:
filleting_edge = "|Z"
if self.fillet_radius is not None and self.fillet_radius != 0:
solid = solid.edges(filleting_edge).fillet(self.fillet_radius)
return solid
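# Minimal usage sketch (parameter values are illustrative only):
#   cutter = PortCutterRectangular(height=40, width=40, distance=300,
#                                  center_point=(0, 100), fillet_radius=10)
# The resulting solid is intended to be used to cut ports into other paramak components
# (e.g. a blanket or vessel), as described in the class docstring.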
| 33.965517
| 79
| 0.595939
|
19a58188c483d65481b9bd9e03b914c4366122c2
| 291
|
py
|
Python
|
contact_app/contact_app/doctype/booking/booking.py
|
victor-abz/app-mis-with-frappe
|
2b4a08606244368c16f980e88e5c6fdb4903d9dc
|
[
"MIT"
] | null | null | null |
contact_app/contact_app/doctype/booking/booking.py
|
victor-abz/app-mis-with-frappe
|
2b4a08606244368c16f980e88e5c6fdb4903d9dc
|
[
"MIT"
] | null | null | null |
contact_app/contact_app/doctype/booking/booking.py
|
victor-abz/app-mis-with-frappe
|
2b4a08606244368c16f980e88e5c6fdb4903d9dc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Abizeyimana Victor and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.website.website_generator import WebsiteGenerator
class Booking(WebsiteGenerator):
pass
| 26.454545
| 61
| 0.797251
|
87036694ccdac0fff5114da35b65ea9bc64dc633
| 1,283
|
py
|
Python
|
djangocms_teaser/models.py
|
sbahri81/djangocms-teaser
|
b3aa0e635244b7a0e3b19ce544c04f818cb24fd0
|
[
"BSD-3-Clause"
] | 5
|
2015-08-23T05:09:10.000Z
|
2018-05-07T07:28:38.000Z
|
djangocms_teaser/models.py
|
sbahri81/djangocms-teaser
|
b3aa0e635244b7a0e3b19ce544c04f818cb24fd0
|
[
"BSD-3-Clause"
] | 6
|
2015-07-08T10:02:55.000Z
|
2018-05-18T19:01:18.000Z
|
djangocms_teaser/models.py
|
sbahri81/djangocms-teaser
|
b3aa0e635244b7a0e3b19ce544c04f818cb24fd0
|
[
"BSD-3-Clause"
] | 19
|
2015-02-26T12:34:22.000Z
|
2021-09-03T14:00:58.000Z
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page
try:
from cms.models import get_plugin_media_path
except ImportError:
def get_plugin_media_path(instance, filename):
"""
See cms.models.pluginmodel.get_plugin_media_path on django CMS 3.0.4+
for information
"""
return instance.get_media_path(filename)
from cms.utils.compat.dj import python_2_unicode_compatible
@python_2_unicode_compatible
class Teaser(CMSPlugin):
"""
A Teaser
"""
title = models.CharField(_("title"), max_length=255)
image = models.ImageField(
_("image"), upload_to=get_plugin_media_path, blank=True, null=True)
page_link = models.ForeignKey(
Page, verbose_name=_("page"),
help_text=_("If present image will be clickable"), blank=True,
null=True, limit_choices_to={'publisher_is_draft': True})
url = models.CharField(
_("link"), max_length=255, blank=True, null=True,
help_text=_("If present image will be clickable."))
description = models.TextField(_("description"), blank=True, null=True)
def __str__(self):
return self.title
search_fields = ('description',)
| 29.159091
| 77
| 0.687451
|
99144518cfcd0350e9b8ea8454f9a074ca985cdd
| 7,505
|
py
|
Python
|
ex4_part1.py
|
arkushin/MIP5
|
38b5d660087d7b7762d6f311801495c20e85c0a3
|
[
"MIT"
] | null | null | null |
ex4_part1.py
|
arkushin/MIP5
|
38b5d660087d7b7762d6f311801495c20e85c0a3
|
[
"MIT"
] | null | null | null |
ex4_part1.py
|
arkushin/MIP5
|
38b5d660087d7b7762d6f311801495c20e85c0a3
|
[
"MIT"
] | null | null | null |
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
from utils import *
from skimage import transform as tf
import copy
########################################################################################################################
# USER GUIDE:
# The given images should be in the same directory as the .py file, or full path should be provided
# main function can be uncommented at the end of the file for running the different functions
########################################################################################################################
def display_matches(BL_im, FU_im, BL_points, FU_points, inliers=[]):
"""
A function that displays the two given images and plots the matching points in each of the corresponding images.
"""
fig = plt.figure()
fig.add_subplot(1, 2, 1)
plt.imshow(BL_im)
plt.title('BL01')
for point in range(len(BL_points)):
if len(inliers) > 0:
if point in inliers:
plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o', c='r')
else:
plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o', c='b')
plt.annotate(str(point + 1), (BL_points[point, 0], BL_points[point, 1]))
continue
plt.scatter(BL_points[point, 0], BL_points[point, 1], marker='o')
plt.annotate(str(point + 1), (BL_points[point, 0], BL_points[point, 1]))
fig.add_subplot(1, 2, 2)
plt.imshow(FU_im)
plt.title('FU01')
for point in range(len(FU_points)):
if len(inliers) > 0:
if point in inliers:
plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o', c='r')
else:
plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o', c='b')
plt.annotate(str(point + 1), (FU_points[point, 0], FU_points[point, 1]))
continue
plt.scatter(FU_points[point, 0], FU_points[point, 1], marker='o')
plt.annotate(str(point + 1), (FU_points[point, 0], FU_points[point, 1]))
plt.show()
def calcPointBasedReg(BLPoints, FUPoints):
"""
A function that calculates the rigid transformation between the two given sets of points using an SVD computation
"""
n = len(BLPoints)
# compute the centroid of each points set:
BL_centroid = [np.mean(BLPoints[:, 0]), np.mean(BLPoints[:, 1])]
FU_centroid = [np.mean(FUPoints[:, 0]), np.mean(FUPoints[:, 1])]
# compute the 2x2 covariance matrix:
X = FUPoints - FU_centroid
W = np.eye(n) / n # create a diagonal matrix with equal wights for all points
Y = BLPoints - BL_centroid
S = np.matmul(np.matmul(X.T, W), Y)
# compute the SVD and find the rotation:
u, sigma, vh = np.linalg.svd(S)
s_mat = np.eye(2)
s_mat[1, 1] = np.linalg.det(np.matmul(vh.T, u.T))
R = np.matmul(np.matmul(vh.T, s_mat), u.T)
# find the optimal translation:
t = BL_centroid - np.dot(R, FU_centroid)
rigidReg = np.zeros((3, 3))
rigidReg[0:2, 0:2] = R
rigidReg[0, 2] = t[0]
rigidReg[1, 2] = t[1]
rigidReg[2, 2] = 1
return rigidReg
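# Sanity-check sketch (assumes [N, 2] point arrays): a pure rotation should be recovered
# exactly, e.g.
#   pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
#   rot = np.array([[0., -1.], [1., 0.]])          # 90 degree rotation
#   reg = calcPointBasedReg(pts @ rot.T, pts)      # BL points are the rotated FU points
#   np.allclose(reg[0:2, 0:2], rot)                # -> True, recovered rotation
#   np.allclose(reg[0:2, 2], 0)                    # -> True, no translation in this example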
def apply_registration(points, rigidReg):
"""
A function that receives an array with points and transforms the points using the given rigid registration
:param points: The points that should be transformed, as a [N,2] array
:param rigidReg: The registration that should be used for transforming the points
:return: An array of shape [N, 2] with the coordinates of 'points' after the transformation
"""
# transform the points into homogeneous points:
n = len(points)
homog_points = np.ones((3, n)) # create homogeneous representation to calculate the transformation
homog_points[0:2, :] = points.T
return np.matmul(rigidReg, homog_points)[0:2, :].T # drop the homogeneous row and return an [N, 2] array
def calcDist(BL_Points, FU_Points, rigidReg):
"""
A function that calculates the distance between every pair of matching points in BLPoints and FUPoints, after
first applying the rigid registration to FUPoints
"""
est_BL = apply_registration(FU_Points, rigidReg) # [N, 2] estimated BL coordinates of the FU points
return np.power((BL_Points[:, 0] - est_BL[:, 0]) ** 2 + (BL_Points[:, 1] - est_BL[:, 1]) ** 2, .5)
def display_registration(BL_im, FU_im, BLPoints, FUPoints):
"""
A function that receives two images and the matching points between them, calculates the registration between them
and then warps one image towards the other and displays them together.
"""
BL_copy = copy.deepcopy(BL_im)
rigidReg = calcPointBasedReg(BLPoints, FUPoints)
warped_im = tf.warp(FU_im, np.linalg.inv(rigidReg))
kernel = np.ones((5, 5), np.float32) / 25
warped_im = cv2.filter2D(warped_im, -1, kernel)
warped_im[warped_im < 0.3] = 0
warped_im[warped_im > 0] = 1
FU_im = cv2.filter2D(FU_im, -1, kernel)
FU_im[FU_im < 0.25 * 255] = 0
FU_im[FU_im > 0] = 1
fig = plt.figure()
fig.add_subplot(1, 3, 1)
plt.imshow(BL_copy)
plt.title('original BL image')
fig.add_subplot(1, 3, 2)
BL_copy[FU_im == 0] = (255, 56, 56)
plt.imshow(BL_copy)
plt.title('before registration')
fig.add_subplot(1, 3, 3)
BL_im[warped_im == 0] = (255, 56, 56) # mark the edges of the warped image in red
plt.imshow(BL_im)
plt.title('after registration')
plt.show()
def calcRobustPointBasedReg(FUPoints, BLPoints):
return ransac(BLPoints, FUPoints, calcPointBasedReg, calcDist, minPtNum=3, iterNum=100, thDist=15, thInlrRatio=0.1)
########################################################################################################################
# UNCOMMENT main and the functions to execute the different steps in the exercise
########################################################################################################################
# if __name__ == '__main__':
# FU_im_orig = Image.open('FU01.tif')
# FU_im = np.array(FU_im_orig)
# BL_im_orig = Image.open('BL01.tif')
# BL_im = np.array(BL_im_orig)
# BL_points, FU_points = getPoints('with_outliers') # change here between 'no/with _outliers'
# rigidReg = calcPointBasedReg(BL_points, FU_points)
# Question 1
# UNCOMMENT: to display the graph of the matches between the two images
# display_matches(BL_im, FU_im, BL_points, FU_points)
# Question 3
# UNCOMMENT: to calculate the mean distance in pixels
# dist_vec = calcDist(BL_points, FU_points, rigidReg)
# RMSE = np.mean(dist_vec)
# print('RMSE: ', RMSE)
# Question 4
# UNCOMMENT: to display the images before and after registration
# display_registration(BL_im, FU_im[:, :, 0], BL_points, FU_points)
# Question 7
# UNCOMMENT: to repeat steps 1-4 when using the robust function. Outliers status should be 'with_outliers'.
# f, inliers = calcRobustPointBasedReg(BL_points, FU_points)
# rigidReg = calcPointBasedReg(BL_points[inliers, :], FU_points[inliers, :])
# display_matches(BL_im, FU_im, BL_points, FU_points, inliers)
# dist_vec = calcDist(BL_points[inliers, :], FU_points[inliers, :], rigidReg)
# RMSE = np.mean(dist_vec)
# print('RMSE: ', RMSE)
# display_registration(BL_im, FU_im[:, :, 0], BL_points[inliers, :], FU_points[inliers, :])
| 42.40113
| 121
| 0.602265
|
24e85555790f65c2bfac1335348a0c18fb0f985b
| 20,551
|
py
|
Python
|
log_complete_bcl2/model_629.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete_bcl2/model_629.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete_bcl2/model_629.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 157250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.143519
| 798
| 0.804146
|
91328bfd5e049e074b14c8949cfef8ac0eb4a1ad
| 15,170
|
py
|
Python
|
lib/exchange_rate.py
|
argentumproject/electrum
|
e76622bc95b7ace5be3a7e54ed2416001a41bc20
|
[
"MIT"
] | 5
|
2017-06-17T16:50:49.000Z
|
2021-03-16T14:19:27.000Z
|
lib/exchange_rate.py
|
argentumproject/electrum
|
e76622bc95b7ace5be3a7e54ed2416001a41bc20
|
[
"MIT"
] | 1
|
2017-02-19T23:10:56.000Z
|
2017-02-19T23:10:56.000Z
|
lib/exchange_rate.py
|
argentumproject/electrum
|
e76622bc95b7ace5be3a7e54ed2416001a41bc20
|
[
"MIT"
] | 15
|
2017-04-02T17:25:55.000Z
|
2021-03-16T14:19:27.000Z
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from bitcoin import COIN
from i18n import _
from util import PrintError, ThreadJob
from util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return [str(a) for (a, b) in rates.iteritems() if b is not None]
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinaverage.com', '/ticker/global/all')
return dict([(r, Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def historical_rates(self, ccy):
history = self.get_csv('api.bitcoinaverage.com',
"/history/%s/per_day_all_time_history.csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def historical_rates(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
ccys = [d['currency'] for d in dicts]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['bpi'][ccy]['rate_float'])
return result
def history_starts(self):
return { 'USD': '2012-11-30' }
def history_ccys(self):
return self.history_starts().keys()
def historical_rates(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/argentum/')
return {'USD': Decimal(json['price_usd'])}
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice'] / 100.0 )}
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', '/trade?buy')
return {'INR': Decimal(json)}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD', 'CAD', 'GBP', 'JPY']
pairs = ['XBT%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def historical_rates(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
def dictinvert(d):
inv = {}
for k, vlist in d.iteritems():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
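# Illustrative sketch (hypothetical exchange/currency names, not part of Electrum):
# dictinvert flips a mapping of exchange -> list of currencies into
# currency -> list of exchanges. Note it relies on dict.iteritems(), i.e. Python 2.
def _dictinvert_example():
    example = {'ExchangeA': ['USD', 'EUR'], 'ExchangeB': ['USD']}
    # Expected result: {'USD': ['ExchangeA', 'ExchangeB'], 'EUR': ['ExchangeA']}
    return dictinvert(example)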
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
return json.loads(open(path, 'r').read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
except:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 ARG~%s %s" % (self.value_str(COIN, rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
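# Standalone sketch (hypothetical values, no Electrum objects needed) of the format
# string FxThread.ccy_amount_str builds above: thousands separators are optional and
# the precision comes from CCY_PRECISIONS.
def _fiat_format_example(amount=1234.5678, prec=2, commas=True):
    fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
    return fmt_str.format(round(amount, prec))  # defaults -> '1,234.57'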
| 34.555809
| 121
| 0.594133
|
b450cc5e99cb9cbd0cbaf831b2d7fe7bc918c2ce
| 2,352
|
py
|
Python
|
components/expat/expat/expat/tests/udiffer.py
|
ebertn/esp-idf-nes
|
9dc7e9a258fd378d36408a1348253b373974b431
|
[
"Apache-2.0"
] | 2
|
2020-06-23T08:05:58.000Z
|
2020-06-24T01:25:51.000Z
|
components/expat/expat/expat/tests/udiffer.py
|
ebertn/esp-idf-nes
|
9dc7e9a258fd378d36408a1348253b373974b431
|
[
"Apache-2.0"
] | 2
|
2022-03-29T05:16:50.000Z
|
2022-03-29T05:16:50.000Z
|
vendors/espressif/esp-idf/components/expat/expat/expat/tests/udiffer.py
|
ictk-solution-dev/amazon-freertos
|
cc76512292ddfb70bba3030dbcb740ef3c6ead8b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# __ __ _
# ___\ \/ /_ __ __ _| |_
# / _ \\ /| '_ \ / _` | __|
# | __// \| |_) | (_| | |_
# \___/_/\_\ .__/ \__,_|\__|
# |_| XML parser
#
# Copyright (c) 2017 Expat development team
# Licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import difflib
import sys
def _read_lines(filename):
try:
with open(filename) as f:
return f.readlines()
except UnicodeDecodeError:
with open(filename, encoding='utf_16') as f:
return f.readlines()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('first', metavar='FILE')
parser.add_argument('second', metavar='FILE')
config = parser.parse_args()
first = _read_lines(config.first)
second = _read_lines(config.second)
diffs = list(difflib.unified_diff(first, second, fromfile=config.first,
tofile=config.second))
if diffs:
sys.stdout.writelines(diffs)
sys.exit(1)
if __name__ == '__main__':
main()
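# Not part of the original helper: a minimal sketch of the difflib call udiffer wraps,
# using inline data instead of files. Typical command-line use would be something like
# `udiffer.py expected.out actual.out` (hypothetical file names).
def _unified_diff_example():
    first = ["alpha\n", "beta\n"]
    second = ["alpha\n", "gamma\n"]
    return list(difflib.unified_diff(first, second, fromfile="a.txt", tofile="b.txt"))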
| 37.333333
| 76
| 0.610969
|
62f31b609370a153a68a12d00d6e66391859a5a5
| 8,298
|
py
|
Python
|
labml_nn/diffusion/ddpm/vqvae.py
|
gqkc/annotated_deep_learning_paper_implementations
|
677a6e9dfa626ba825b5a123adb6c60d947b6a92
|
[
"MIT"
] | null | null | null |
labml_nn/diffusion/ddpm/vqvae.py
|
gqkc/annotated_deep_learning_paper_implementations
|
677a6e9dfa626ba825b5a123adb6c60d947b6a92
|
[
"MIT"
] | null | null | null |
labml_nn/diffusion/ddpm/vqvae.py
|
gqkc/annotated_deep_learning_paper_implementations
|
677a6e9dfa626ba825b5a123adb6c60d947b6a92
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, RelaxedOneHotCategorical
import math
class VQEmbeddingEMA(nn.Module):
def __init__(self, latent_dim, num_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5):
super(VQEmbeddingEMA, self).__init__()
self.commitment_cost = commitment_cost
self.decay = decay
self.epsilon = epsilon
embedding = torch.zeros(latent_dim, num_embeddings, embedding_dim)
embedding.uniform_(-1/num_embeddings, 1/num_embeddings)
self.register_buffer("embedding", embedding)
self.register_buffer("ema_count", torch.zeros(latent_dim, num_embeddings))
self.register_buffer("ema_weight", self.embedding.clone())
def quantize(self, prior_logits: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
prior_logits: Logits computed by the trained prior, one hot encoded, with shape
`(N, B, H, W, M)`.
Returns
-------
Quantized tensor with shape `(B, N*D, H, W)`.
"""
N, B, H, W, M = prior_logits.shape
N, M_, D = self.embedding.size()
C = N * D
        assert M == M_, f"Input and self embedding dimension mismatch, got {M} from input and {M_} from self."
distances = prior_logits.view(N, B*H*W, M)
indices = torch.argmin(distances, dim=-1)
encodings = F.one_hot(indices, M).float()
quantized = torch.gather(self.embedding, 1, indices.unsqueeze(-1).expand(-1, -1, D))
quantized = quantized.view(N, B, H, W, D)
return quantized.permute(1, 0, 4, 2, 3).reshape(B, C, H, W)
def forward(self, x, prior_logits=None):
B, C, H, W = x.size()
N, M, D = self.embedding.size()
assert C == N * D
x = x.view(B, N, D, H, W).permute(1, 0, 3, 4, 2)
x_flat = x.detach().reshape(N, -1, D)
distances = torch.baddbmm(torch.sum(self.embedding ** 2, dim=2).unsqueeze(1) +
torch.sum(x_flat ** 2, dim=2, keepdim=True),
x_flat, self.embedding.transpose(1, 2),
alpha=-2.0, beta=1.0)
# Save distances for export
self.distances = distances.view(N, B, W, H, M)
indices = torch.argmin(distances, dim=-1)
encodings = F.one_hot(indices, M).float()
quantized = torch.gather(self.embedding, 1, indices.unsqueeze(-1).expand(-1, -1, D))
quantized = quantized.view_as(x)
if self.training:
self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=1)
n = torch.sum(self.ema_count, dim=-1, keepdim=True)
self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n
dw = torch.bmm(encodings.transpose(1, 2), x_flat)
self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw
self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)
e_latent_loss = F.mse_loss(x, quantized.detach())
loss = self.commitment_cost * e_latent_loss
quantized = x + (quantized - x).detach()
avg_probs = torch.mean(encodings, dim=1)
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10), dim=-1))
return quantized.permute(1, 0, 4, 2, 3).reshape(B, C, H, W), loss, perplexity.sum()
class VQEmbeddingGSSoft(nn.Module):
def __init__(self, latent_dim, num_embeddings, embedding_dim):
super(VQEmbeddingGSSoft, self).__init__()
self.embedding = nn.Parameter(torch.Tensor(latent_dim, num_embeddings, embedding_dim))
nn.init.uniform_(self.embedding, -1/num_embeddings, 1/num_embeddings)
def forward(self, x):
B, C, H, W = x.size()
N, M, D = self.embedding.size()
assert C == N * D
x = x.view(B, N, D, H, W).permute(1, 0, 3, 4, 2)
x_flat = x.reshape(N, -1, D)
distances = torch.baddbmm(torch.sum(self.embedding ** 2, dim=2).unsqueeze(1) +
torch.sum(x_flat ** 2, dim=2, keepdim=True),
x_flat, self.embedding.transpose(1, 2),
alpha=-2.0, beta=1.0)
distances = distances.view(N, B, H, W, M)
dist = RelaxedOneHotCategorical(0.5, logits=-distances)
if self.training:
samples = dist.rsample().view(N, -1, M)
else:
samples = torch.argmax(dist.probs, dim=-1)
samples = F.one_hot(samples, M).float()
samples = samples.view(N, -1, M)
quantized = torch.bmm(samples, self.embedding)
quantized = quantized.view_as(x)
KL = dist.probs * (dist.logits + math.log(M))
KL[(dist.probs == 0).expand_as(KL)] = 0
KL = KL.sum(dim=(0, 2, 3, 4)).mean()
avg_probs = torch.mean(samples, dim=1)
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10), dim=-1))
return quantized.permute(1, 0, 4, 2, 3).reshape(B, C, H, W), KL, perplexity.sum()
class Residual(nn.Module):
def __init__(self, channels):
super(Residual, self).__init__()
self.block = nn.Sequential(
nn.ReLU(True),
nn.Conv2d(channels, channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Conv2d(channels, channels, 1, bias=False),
nn.BatchNorm2d(channels)
)
def forward(self, x):
return x + self.block(x)
class Encoder(nn.Module):
def __init__(self, channels, latent_dim, embedding_dim):
super(Encoder, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(3, channels, 4, 2, 1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Conv2d(channels, channels, 4, 2, 1, bias=False),
nn.BatchNorm2d(channels),
Residual(channels),
Residual(channels),
nn.Conv2d(channels, latent_dim * embedding_dim, 1)
)
def forward(self, x):
return self.encoder(x)
class Decoder(nn.Module):
def __init__(self, channels, latent_dim, embedding_dim):
super(Decoder, self).__init__()
self.decoder = nn.Sequential(
nn.Conv2d(latent_dim * embedding_dim, channels, 1, bias=False),
nn.BatchNorm2d(channels),
Residual(channels),
Residual(channels),
nn.ConvTranspose2d(channels, channels, 4, 2, 1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.ConvTranspose2d(channels, channels, 4, 2, 1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Conv2d(channels, 3 * 256, 1)
)
def forward(self, x):
x = self.decoder(x)
B, _, H, W = x.size()
x = x.view(B, 3, 256, H, W).permute(0, 1, 3, 4, 2)
dist = Categorical(logits=x)
return dist
class VQVAE(nn.Module):
def __init__(self, channels, latent_dim, num_embeddings, embedding_dim):
super(VQVAE, self).__init__()
self.encoder = Encoder(channels, latent_dim, embedding_dim)
self.codebook = VQEmbeddingEMA(latent_dim, num_embeddings, embedding_dim)
self.decoder = Decoder(channels, latent_dim, embedding_dim)
def forward(self, x):
x = self.encoder(x)
x, loss, perplexity = self.codebook(x)
dist = self.decoder(x)
return dist, loss, perplexity
def decode(self, prior_logits):
x = self.codebook.quantize(prior_logits=prior_logits)
dist = self.decoder(x)
return dist
class GSSOFT(nn.Module):
def __init__(self, channels, latent_dim, num_embeddings, embedding_dim):
super(GSSOFT, self).__init__()
self.encoder = Encoder(channels, latent_dim, embedding_dim)
self.codebook = VQEmbeddingGSSoft(latent_dim, num_embeddings, embedding_dim)
self.decoder = Decoder(channels, latent_dim, embedding_dim)
def forward(self, x):
x = self.encoder(x)
x, KL, perplexity = self.codebook(x)
dist = self.decoder(x)
return dist, KL, perplexity
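# Illustrative smoke test with hypothetical sizes (not part of the original module):
# push a random batch of RGB images through VQVAE and inspect the decoder output.
def _vqvae_example():
    model = VQVAE(channels=64, latent_dim=1, num_embeddings=128, embedding_dim=32)
    images = torch.randn(2, 3, 32, 32)             # batch of two 32x32 RGB images
    dist, vq_loss, perplexity = model(images)      # dist is a Categorical over 256 pixel values
    return dist.logits.shape, vq_loss, perplexity  # logits shape: (2, 3, 32, 32, 256)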
| 37.547511
| 115
| 0.591106
|
b6f01f36eeb3fc8152629687e30eb1f2de09ffe6
| 4,658
|
py
|
Python
|
llvm/tools/clang/tools/scan-build-py/tests/unit/test_intercept.py
|
vusec/typesan
|
831ca2af1a629e8ea93bb8c5b4215f12247b595c
|
[
"Apache-2.0"
] | 30
|
2016-09-06T06:58:43.000Z
|
2021-12-23T11:59:38.000Z
|
llvm/tools/clang/tools/scan-build-py/tests/unit/test_intercept.py
|
vusec/typesan
|
831ca2af1a629e8ea93bb8c5b4215f12247b595c
|
[
"Apache-2.0"
] | 1
|
2018-05-15T00:55:37.000Z
|
2018-05-15T00:55:37.000Z
|
llvm/tools/clang/tools/scan-build-py/tests/unit/test_intercept.py
|
vusec/typesan
|
831ca2af1a629e8ea93bb8c5b4215f12247b595c
|
[
"Apache-2.0"
] | 17
|
2016-10-24T06:08:16.000Z
|
2022-02-18T17:27:14.000Z
|
# -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.intercept as sut
from . import fixtures
import os.path
class InterceptUtilTest(fixtures.TestCase):
def test_is_compiler_call_filter(self):
def test(command):
return sut.is_compiler_call({'command': [command]})
self.assertTrue(test('clang'))
self.assertTrue(test('clang-3.6'))
self.assertTrue(test('clang++'))
self.assertTrue(test('clang++-3.5.1'))
self.assertTrue(test('cc'))
self.assertTrue(test('c++'))
self.assertTrue(test('gcc'))
self.assertTrue(test('g++'))
self.assertTrue(test('/usr/local/bin/gcc'))
self.assertTrue(test('/usr/local/bin/g++'))
self.assertTrue(test('/usr/local/bin/clang'))
self.assertTrue(test('armv7_neno-linux-gnueabi-g++'))
self.assertFalse(test(''))
self.assertFalse(test('ld'))
self.assertFalse(test('as'))
self.assertFalse(test('/usr/local/bin/compiler'))
def test_format_entry_filters_action(self):
def test(command):
return list(sut.format_entry(
{'command': command, 'directory': '/opt/src/project'}))
self.assertTrue(test(['cc', '-c', 'file.c', '-o', 'file.o']))
self.assertFalse(test(['cc', '-E', 'file.c']))
self.assertFalse(test(['cc', '-MM', 'file.c']))
self.assertFalse(test(['cc', 'this.o', 'that.o', '-o', 'a.out']))
self.assertFalse(test(['cc', '-print-prog-name']))
def test_format_entry_normalize_filename(self):
directory = os.path.join(os.sep, 'home', 'me', 'project')
def test(command):
result = list(sut.format_entry(
{'command': command, 'directory': directory}))
return result[0]['file']
self.assertEqual(test(['cc', '-c', 'file.c']),
os.path.join(directory, 'file.c'))
self.assertEqual(test(['cc', '-c', './file.c']),
os.path.join(directory, 'file.c'))
self.assertEqual(test(['cc', '-c', '../file.c']),
os.path.join(os.path.dirname(directory), 'file.c'))
self.assertEqual(test(['cc', '-c', '/opt/file.c']),
'/opt/file.c')
def test_sip(self):
def create_status_report(filename, message):
content = """#!/usr/bin/env sh
echo 'sa-la-la-la'
echo 'la-la-la'
echo '{0}'
echo 'sa-la-la-la'
echo 'la-la-la'
""".format(message)
lines = [line.strip() for line in content.split('\n')]
with open(filename, 'w') as handle:
handle.write('\n'.join(lines))
handle.close()
os.chmod(filename, 0x1ff)
def create_csrutil(dest_dir, status):
filename = os.path.join(dest_dir, 'csrutil')
message = 'System Integrity Protection status: {0}'.format(status)
return create_status_report(filename, message)
def create_sestatus(dest_dir, status):
filename = os.path.join(dest_dir, 'sestatus')
message = 'SELinux status:\t{0}'.format(status)
return create_status_report(filename, message)
ENABLED = 'enabled'
DISABLED = 'disabled'
OSX = 'darwin'
LINUX = 'linux'
with fixtures.TempDir() as tmpdir:
try:
saved = os.environ['PATH']
os.environ['PATH'] = tmpdir + ':' + saved
create_csrutil(tmpdir, ENABLED)
self.assertTrue(sut.is_preload_disabled(OSX))
create_csrutil(tmpdir, DISABLED)
self.assertFalse(sut.is_preload_disabled(OSX))
create_sestatus(tmpdir, ENABLED)
self.assertTrue(sut.is_preload_disabled(LINUX))
create_sestatus(tmpdir, DISABLED)
self.assertFalse(sut.is_preload_disabled(LINUX))
finally:
os.environ['PATH'] = saved
try:
saved = os.environ['PATH']
os.environ['PATH'] = ''
# shall be false when it's not in the path
self.assertFalse(sut.is_preload_disabled(OSX))
self.assertFalse(sut.is_preload_disabled(LINUX))
self.assertFalse(sut.is_preload_disabled('unix'))
finally:
os.environ['PATH'] = saved
| 37.564516
| 78
| 0.546801
|
d2f8aa917a6a90c0b7125e2ffccdc982061e0a97
| 10,232
|
py
|
Python
|
owtf/db/models.py
|
Lonewolf-Information-systems/owtf
|
65355ce8bf4a4ea0177e24ee106f77e2f87c17fa
|
[
"BSD-3-Clause"
] | 1
|
2018-02-05T12:10:28.000Z
|
2018-02-05T12:10:28.000Z
|
owtf/db/models.py
|
Lonewolf-Information-systems/owtf
|
65355ce8bf4a4ea0177e24ee106f77e2f87c17fa
|
[
"BSD-3-Clause"
] | 2
|
2021-03-11T03:35:23.000Z
|
2022-02-10T23:40:23.000Z
|
owtf/db/models.py
|
Lonewolf-Information-systems/owtf
|
65355ce8bf4a4ea0177e24ee106f77e2f87c17fa
|
[
"BSD-3-Clause"
] | null | null | null |
"""
owtf.db.models
~~~~~~~~~~~~~~
The SQLAlchemy models for every table in the OWTF DB.
"""
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import Table, Column, Integer, String, Boolean, Float, DateTime, ForeignKey, Text, Index
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
Base = declarative_base()
# This table actually allows us to make a many to many relationship
# between transactions table and grep_outputs table
target_association_table = Table(
'target_session_association',
Base.metadata,
Column('target_id', Integer, ForeignKey('targets.id')),
Column('session_id', Integer, ForeignKey('sessions.id'))
)
Index('target_id_idx', target_association_table.c.target_id, postgresql_using='btree')
class Session(Base):
__tablename__ = "sessions"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True)
active = Column(Boolean, default=False)
targets = relationship("Target", secondary=target_association_table, backref="sessions")
class Target(Base):
__tablename__ = "targets"
id = Column(Integer, primary_key=True, autoincrement=True)
target_url = Column(String, unique=True)
host_ip = Column(String)
port_number = Column(String)
url_scheme = Column(String)
    alternative_ips = Column(String, nullable=True)  # Comma separated
host_name = Column(String)
host_path = Column(String)
ip_url = Column(String)
top_domain = Column(String)
top_url = Column(String)
scope = Column(Boolean, default=True)
transactions = relationship("Transaction", cascade="delete")
poutputs = relationship("PluginOutput", cascade="delete")
urls = relationship("Url", cascade="delete")
commands = relationship("Command", cascade="delete")
# Also has a column session specified as backref in
# session model
works = relationship("Work", backref="target", cascade="delete")
@hybrid_property
def max_user_rank(self):
user_ranks = [-1]
user_ranks += [poutput.user_rank for poutput in self.poutputs]
return(max(user_ranks))
@hybrid_property
def max_owtf_rank(self):
owtf_ranks = [-1]
owtf_ranks += [poutput.owtf_rank for poutput in self.poutputs]
return(max(owtf_ranks))
def __repr__(self):
return "<Target (url='%s')>" % (self.target_url)
# This table actually allows us to make a many to many relationship
# between transactions table and grep_outputs table
transaction_association_table = Table(
'transaction_grep_association',
Base.metadata,
Column('transaction_id', Integer, ForeignKey('transactions.id')),
Column('grep_output_id', Integer, ForeignKey('grep_outputs.id'))
)
Index('transaction_id_idx', transaction_association_table.c.transaction_id, postgresql_using='btree')
class Transaction(Base):
__tablename__ = "transactions"
target_id = Column(Integer, ForeignKey("targets.id"))
id = Column(Integer, primary_key=True)
url = Column(String)
scope = Column(Boolean, default=False)
method = Column(String)
data = Column(String, nullable=True) # Post DATA
time = Column(Float(precision=10))
time_human = Column(String)
local_timestamp = Column(DateTime)
raw_request = Column(Text)
response_status = Column(String)
response_headers = Column(Text)
response_size = Column(Integer, nullable=True)
response_body = Column(Text, nullable=True)
binary_response = Column(Boolean, nullable=True)
session_tokens = Column(String, nullable=True)
login = Column(Boolean, nullable=True)
logout = Column(Boolean, nullable=True)
grep_outputs = relationship(
"GrepOutput",
secondary=transaction_association_table,
cascade="delete",
backref="transactions"
)
def __repr__(self):
return "<HTTP Transaction (url='%s' method='%s' response_status='%s')>" % (self.url, self.method,
self.response_status)
class GrepOutput(Base):
__tablename__ = "grep_outputs"
target_id = Column(Integer, ForeignKey("targets.id"))
id = Column(Integer, primary_key=True)
name = Column(String)
output = Column(Text)
# Also has a column transactions, which is added by
# using backref in transaction
__table_args__ = (UniqueConstraint('name', 'output', target_id),)
class Url(Base):
__tablename__ = "urls"
target_id = Column(Integer, ForeignKey("targets.id"))
url = Column(String, primary_key=True)
visited = Column(Boolean, default=False)
scope = Column(Boolean, default=True)
def __repr__(self):
return "<URL (url='%s')>" % (self.url)
class PluginOutput(Base):
__tablename__ = "plugin_outputs"
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
# There is a column named plugin which is caused by backref from the plugin class
id = Column(Integer, primary_key=True)
plugin_code = Column(String) # OWTF Code
plugin_group = Column(String)
plugin_type = Column(String)
date_time = Column(DateTime, default=datetime.datetime.now())
start_time = Column(DateTime)
end_time = Column(DateTime)
output = Column(String, nullable=True)
error = Column(String, nullable=True)
status = Column(String, nullable=True)
user_notes = Column(String, nullable=True)
user_rank = Column(Integer, nullable=True, default=-1)
owtf_rank = Column(Integer, nullable=True, default=-1)
output_path = Column(String, nullable=True)
@hybrid_property
def run_time(self):
return self.end_time - self.start_time
__table_args__ = (UniqueConstraint('plugin_key', 'target_id'),)
class Command(Base):
__tablename__ = "command_register"
start_time = Column(DateTime)
end_time = Column(DateTime)
success = Column(Boolean, default=False)
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
modified_command = Column(String)
original_command = Column(String, primary_key=True)
@hybrid_property
def run_time(self):
return self.end_time - self.start_time
class Error(Base):
__tablename__ = "errors"
id = Column(Integer, primary_key=True)
owtf_message = Column(String)
traceback = Column(String, nullable=True)
user_message = Column(String, nullable=True)
reported = Column(Boolean, default=False)
github_issue_url = Column(String, nullable=True)
def __repr__(self):
return "<Error (traceback='%s')>" % (self.traceback)
class Resource(Base):
__tablename__ = "resources"
id = Column(Integer, primary_key=True)
dirty = Column(Boolean, default=False) # Dirty if user edited it. Useful while updating
resource_name = Column(String)
resource_type = Column(String)
resource = Column(String)
__table_args__ = (UniqueConstraint('resource', 'resource_type', 'resource_name'),)
class ConfigSetting(Base):
__tablename__ = "configuration"
key = Column(String, primary_key=True)
value = Column(String)
section = Column(String)
descrip = Column(String, nullable=True)
dirty = Column(Boolean, default=False)
def __repr__(self):
return "<ConfigSetting (key='%s', value='%s', dirty='%r')>" % (self.key, self.value, self.dirty)
class TestGroup(Base):
__tablename__ = "test_groups"
code = Column(String, primary_key=True)
group = Column(String) # web, network
descrip = Column(String)
hint = Column(String, nullable=True)
url = Column(String)
priority = Column(Integer)
plugins = relationship("Plugin")
class Plugin(Base):
__tablename__ = "plugins"
key = Column(String, primary_key=True) # key = type@code
title = Column(String)
name = Column(String)
code = Column(String, ForeignKey("test_groups.code"))
group = Column(String)
type = Column(String)
descrip = Column(String, nullable=True)
file = Column(String)
attr = Column(String, nullable=True)
works = relationship("Work", backref="plugin", cascade="delete")
outputs = relationship("PluginOutput", backref="plugin")
def __repr__(self):
return "<Plugin (code='%s', group='%s', type='%s')>" % (self.code, self.group, self.type)
@hybrid_property
def min_time(self):
"""
Consider last 5 runs only, better performance and accuracy
"""
poutputs_num = len(self.outputs)
if poutputs_num != 0:
if poutputs_num < 5:
run_times = [poutput.run_time for poutput in self.outputs]
else:
run_times = [poutput.run_time for poutput in self.outputs[-5:]]
return min(run_times)
else:
return None
@hybrid_property
def max_time(self):
"""
Consider last 5 runs only, better performance and accuracy
"""
poutputs_num = len(self.outputs)
if poutputs_num != 0:
if poutputs_num < 5:
run_times = [poutput.run_time for poutput in self.outputs]
else:
run_times = [poutput.run_time for poutput in self.outputs[-5:]]
return max(run_times)
else:
return None
__table_args__ = (UniqueConstraint('type', 'code'),)
class Work(Base):
__tablename__ = "worklist"
id = Column(Integer, primary_key=True, autoincrement=True)
target_id = Column(Integer, ForeignKey("targets.id"))
plugin_key = Column(String, ForeignKey("plugins.key"))
active = Column(Boolean, default=True)
# Columns plugin and target are created using backrefs
__table_args__ = (UniqueConstraint('target_id', 'plugin_key'),)
def __repr__(self):
return "<Work (target='%s', plugin='%s')>" % (self.target_id, self.plugin_key)
class Mapping(Base):
__tablename__ = 'mappings'
owtf_code = Column(String, primary_key=True)
mappings = Column(String)
category = Column(String, nullable=True)
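# Illustrative sketch (not part of OWTF): the declarative models above can be materialised
# in an in-memory SQLite database; PostgreSQL-only options such as postgresql_using are
# simply ignored by other dialects. Names below are hypothetical.
def _create_demo_db():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    db = sessionmaker(bind=engine)()  # local name avoids clashing with the Session model
    target = Target(target_url="http://example.com")
    db.add(Session(name="default session", active=True, targets=[target]))
    db.commit()
    return db.query(Target).count()  # -> 1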
| 32.48254
| 105
| 0.678069
|
bc83afb8c23b476f4a8b00d0c7192bcd4dc9e7f9
| 946
|
py
|
Python
|
engine/src/hopeit/server/version.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | null | null | null |
engine/src/hopeit/server/version.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | null | null | null |
engine/src/hopeit/server/version.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | null | null | null |
"""
Engine version constants.
Increment on release
To ensure configuration files from example apps and plugins have the same version as the engine,
environment variables `HOPEIT_ENGINE_VERSION` and `HOPEIT_APPS_API_VERSION` are set.
"""
import os
import sys
ENGINE_NAME = "hopeit.engine"
ENGINE_VERSION = "0.12.1"
# Major.Minor version to be used in App versions and Api endpoints for Apps/Plugins
APPS_API_VERSION = '.'.join(ENGINE_VERSION.split('.')[0:2])
APPS_ROUTE_VERSION = APPS_API_VERSION.replace('.', 'x')
os.environ['HOPEIT_ENGINE_VERSION'] = ENGINE_VERSION
os.environ['HOPEIT_APPS_API_VERSION'] = APPS_API_VERSION
os.environ['HOPEIT_APPS_ROUTE_VERSION'] = APPS_ROUTE_VERSION
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "APPS_API_VERSION":
print(APPS_API_VERSION)
elif len(sys.argv) > 1 and sys.argv[1] == "APPS_ROUTE_VERSION":
print(APPS_ROUTE_VERSION)
else:
print(ENGINE_VERSION)
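# Not part of hopeit.engine; a quick sketch of how the derived constants relate to the
# version string (sample value shown).
def _version_derivation_example(sample="0.12.1"):
    api = '.'.join(sample.split('.')[0:2])  # -> "0.12"
    route = api.replace('.', 'x')           # -> "0x12"
    return api, route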
| 33.785714
| 88
| 0.748414
|
7990150def023dad3e39bf904e671169e9c81ce1
| 2,924
|
py
|
Python
|
day23/code/main.py
|
JoseTomasTocino/AdventOfCode2020
|
19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9
|
[
"MIT"
] | null | null | null |
day23/code/main.py
|
JoseTomasTocino/AdventOfCode2020
|
19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9
|
[
"MIT"
] | null | null | null |
day23/code/main.py
|
JoseTomasTocino/AdventOfCode2020
|
19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
def play_cups_game(inp, max_moves=100, use_extended_rules=False):
labels = [int(x) for x in inp]
# Map where the key is the label, and the value is the next element
next_map = {}
for i in range(len(labels)):
# The next of the last element is the first element
if i == len(labels) - 1:
next_map[labels[i]] = labels[0]
else:
next_map[labels[i]] = labels[i + 1]
current = labels[0]
max_cup = max(labels)
# With extended rules, add the rest of the numbers up until 1 million in consecutive order
if use_extended_rules:
for i in range(max_cup + 1, 1000000):
next_map[i] = i + 1
next_map[1000000] = next_map[labels[-1]]
next_map[labels[-1]] = max_cup + 1
max_cup = 1000000
for move in range(max_moves):
# The crab picks up the three cups that are immediately clockwise of the current cup.
next_1 = next_map[current]
next_2 = next_map[next_1]
next_3 = next_map[next_2]
# They are removed from the circle
next_map[current] = next_map[next_3]
# The crab selects a destination cup: the cup with a label equal to the current cup's label minus one.
destination = current - 1
while True:
# If at any point in this process the value goes below the lowest value on any cup's label,
# it wraps around to the highest value on any cup's label instead.
if destination == 0:
destination = max_cup
# If this would select one of the cups that was just picked up, the crab will keep subtracting one until it
# finds a cup that wasn't just picked up.
if destination not in [next_1, next_2, next_3]:
break
destination -= 1
# The crab places the cups it just picked up so that they are immediately clockwise of the destination cup.
next_map[next_3] = next_map[destination]
next_map[destination] = next_1
# The crab selects a new current cup: the cup which is immediately clockwise of the current cup.
current = next_map[current]
if use_extended_rules:
# The crab is going to hide your stars - one each - under the two cups that will end up immediately clockwise
# of cup 1. What do you get if you multiply their labels together?
retval = next_map[1] * next_map[next_map[1]]
else:
# Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no extra
# characters; each number except 1 should appear exactly once.
labels_in_order = []
n = next_map[1]
while n != 1:
labels_in_order.append(n)
n = next_map[n]
retval = ''.join(str(x) for x in labels_in_order)
return retval
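# Illustrative usage (not part of the solution file): the Advent of Code 2020 day 23
# puzzle text uses the sample input '389125467'; per the published example it yields
# '67384529' after 100 moves, and 149245887792 with the extended rules over 10 million moves.
def _cups_example():
    part1 = play_cups_game('389125467', max_moves=100)
    # part2 = play_cups_game('389125467', max_moves=10000000, use_extended_rules=True)  # slow
    return part1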
| 35.658537
| 119
| 0.629617
|
d663520666817edfd4c03096c8b80b976ad2d182
| 50
|
py
|
Python
|
rpc/consts.py
|
ryanrain2016/rpc
|
6988ad6a26312e27270fc4439c0392751670c1cc
|
[
"MIT"
] | null | null | null |
rpc/consts.py
|
ryanrain2016/rpc
|
6988ad6a26312e27270fc4439c0392751670c1cc
|
[
"MIT"
] | null | null | null |
rpc/consts.py
|
ryanrain2016/rpc
|
6988ad6a26312e27270fc4439c0392751670c1cc
|
[
"MIT"
] | null | null | null |
MAX_PAYLOAD_LENGTH = 1024 * 1024 * 5
TIMEOUT = 60
| 16.666667
| 36
| 0.72
|
749498c888733cdda4529c37f89cf2b99a5a5b58
| 2,562
|
py
|
Python
|
Dinos/BoaFrill/BoaFrill_Character_BP.BoaFrill_Character_BP.py
|
cutec-chris/sce-PrimalEarth
|
4e7a45acffc57a455a7668af1a954004668c3085
|
[
"MIT"
] | null | null | null |
Dinos/BoaFrill/BoaFrill_Character_BP.BoaFrill_Character_BP.py
|
cutec-chris/sce-PrimalEarth
|
4e7a45acffc57a455a7668af1a954004668c3085
|
[
"MIT"
] | null | null | null |
Dinos/BoaFrill/BoaFrill_Character_BP.BoaFrill_Character_BP.py
|
cutec-chris/sce-PrimalEarth
|
4e7a45acffc57a455a7668af1a954004668c3085
|
[
"MIT"
] | null | null | null |
import sys,sce
class BoaFrill_Character_BP(sce.Dinos):
def __init__(self):
super().__init__(self)
name = 'Titanoboa'
blueprintPath = '/Game/PrimalEarth/Dinos/BoaFrill/BoaFrill_Character_BP.BoaFrill_Character_BP'
fullStatsRaw = [[170, 0.2, 0.27, 0.5, 0], [150, 0.1, 0.1, 0, 0], [175, 0.06, 0, 0.5, 0], [150, 0.1, 0.1, 0, 0], [1200, 0.1, 0.1, 0, 0], None, None, [150, 0.02, 0.04, 0, 0], [1, 0.05, 0.1, 0.5, 0.4], [1, 0, 0.01, 0.2, 0], None, None]
altBaseStats = {'0': 300}
immobilizedBy = []
noGender = True
colors = [{'name': 'Body', 'colors': ['Black', 'Dark Grey', 'Dino Albino', 'Dino Dark Blue', 'Dino Dark Brown', 'Dino Dark Green', 'Dino Dark Orange', 'Dino Dark Purple', 'Dino Dark Red', 'Dino Dark Yellow', 'Dino Darker Grey', 'Dino Medium Brown', 'Dino Medium Green']}, {'name': 'Frill Middle Line 1', 'colors': ['Black', 'Dino Albino', 'Dino Light Blue', 'Dino Light Brown', 'Dino Light Green', 'Dino Light Orange', 'Dino Light Purple', 'Dino Light Red', 'Dino Light Yellow', 'Dino Medium Brown', 'Dino Medium Green', 'Light Grey']}, {'name': 'Frill Middle Line 2', 'colors': ['Black', 'Dino Albino', 'Dino Light Blue', 'Dino Light Brown', 'Dino Light Green', 'Dino Light Orange', 'Dino Light Purple', 'Dino Light Red', 'Dino Light Yellow', 'Dino Medium Brown', 'Dino Medium Green', 'Light Grey']}, {'name': 'Frill Middle Line 3', 'colors': ['Black', 'Dino Albino', 'Dino Light Blue', 'Dino Light Brown', 'Dino Light Green', 'Dino Light Orange', 'Dino Light Purple', 'Dino Light Red', 'Dino Light Yellow', 'Dino Medium Brown', 'Dino Medium Green', 'Light Grey']}, {'name': 'Body Accent and Inner Frill', 'colors': ['Black', 'Dino Albino', 'Dino Light Blue', 'Dino Light Brown', 'Dino Light Green', 'Dino Light Orange', 'Dino Light Purple', 'Dino Light Red', 'Dino Light Yellow', 'Dino Medium Brown', 'Dino Medium Green', 'Light Grey']}, {'name': 'Outer Frill', 'colors': ['Black', 'Dino Albino', 'Dino Light Blue', 'Dino Light Brown', 'Dino Light Green', 'Dino Light Orange', 'Dino Light Purple', 'Dino Light Red', 'Dino Light Yellow', 'Dino Medium Brown', 'Dino Medium Green', 'Light Grey']}]
taming = {'nonViolent': True, 'violent': False, 'tamingIneffectiveness': 3, 'affinityNeeded0': 1950, 'affinityIncreasePL': 90, 'wakeAffinityMult': 1.6, 'wakeFoodDeplMult': 2, 'foodConsumptionBase': 0.001543, 'foodConsumptionMult': 648.0881}
boneDamageAdjusters = {'c_head': 2.5, 'c_jaw': 2.5, 'l_jaw': 2.5, 'r_jaw': 2.5}
TamedBaseHealthMultiplier = 1
displayedStats = 919
| 160.125
| 1,603
| 0.648322
|
2740520fa124a060bc32be056577159164b41067
| 3,351
|
py
|
Python
|
strategies/bank_put_kamas.py
|
ProjectBlackFalcon/BlackFalconCore
|
23af1829224738c06092e3e513a0bf2753b4c35f
|
[
"MIT"
] | 8
|
2019-05-26T19:44:28.000Z
|
2021-01-31T14:53:17.000Z
|
strategies/bank_put_kamas.py
|
ProjectBlackFalcon/BlackFalconCore
|
23af1829224738c06092e3e513a0bf2753b4c35f
|
[
"MIT"
] | 8
|
2019-07-17T21:19:47.000Z
|
2019-09-28T12:52:39.000Z
|
strategies/bank_put_kamas.py
|
ProjectBlackFalcon/BlackFalconCore
|
23af1829224738c06092e3e513a0bf2753b4c35f
|
[
"MIT"
] | null | null | null |
import json
import time
from tools import logger as log
import strategies
def bank_put_kamas(**kwargs):
"""
A strategy to put kamas into the bank
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
global_start, start = time.time(), time.time()
kamas_to_transfer = 'all'
if 'parameters' in strategy.keys() and 'quantity' in strategy['parameters'].keys() and strategy['parameters']['quantity'] is not None:
kamas_to_transfer = strategy['parameters']['quantity']
if not listener.game_state['storage_open']:
logger.warning('Bank is not open')
strategy['report'] = {
'success': False,
'details': {'Execution time': 0, 'Reason': 'Bank is not open'}
}
log.close_logger(logger)
return strategy
kamas_in_inventory = listener.game_state['kamas']
kamas_to_transfer = kamas_in_inventory if kamas_to_transfer == 'all' else kamas_to_transfer
if kamas_to_transfer > kamas_in_inventory:
logger.warning('Cannot put {} kamas in the bank, only {} are available'.format(kamas_to_transfer, kamas_in_inventory))
strategy['report'] = {
'success': False,
'details': {'Execution time': time.time() - global_start, 'Reason': 'Cannot put {} kamas in the bank, only {} are available'.format(kamas_to_transfer, kamas_in_inventory)}
}
log.close_logger(logger)
return strategy
if kamas_to_transfer == 0:
logger.info('No kamas to transfer')
strategy['report'] = {
'success': True,
'details': {'Execution time': time.time() - global_start}
}
log.close_logger(logger)
return strategy
kamas_before = listener.game_state['kamas']
order = {
"command": "move_kamas",
"parameters": {
"quantity": kamas_to_transfer
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 10 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'kamas' in listener.game_state.keys():
if listener.game_state['kamas'] != kamas_before:
waiting = False
time.sleep(0.05)
execution_time = time.time() - global_start
if waiting:
logger.warning('Failed to put {} kamas in the bank in {}s'.format(kamas_to_transfer, execution_time))
strategy['report'] = {
'success': False,
'details': {
'Execution time': execution_time,
'Reason': 'Failed to put {} kamas in the bank in {}s'.format(kamas_to_transfer, execution_time)
}
}
log.close_logger(logger)
return strategy
logger.info('{} kamas transferred from inventory to storage'.format(kamas_to_transfer))
strategy['report'] = {
'success': True,
'details': {'Execution time': time.time() - global_start}
}
log.close_logger(logger)
return strategy
| 34.546392
| 183
| 0.622202
|
d090e571a8b970e21c3efd18099e7361dbb4b3ef
| 11,503
|
py
|
Python
|
homeassistant/helpers/device_registry.py
|
thecrazymonkey/home-assistant
|
348b347ed165c88559b40ce04feb5720e91253bf
|
[
"Apache-2.0"
] | 1
|
2019-12-06T23:19:51.000Z
|
2019-12-06T23:19:51.000Z
|
homeassistant/helpers/device_registry.py
|
ljmerza/home-assistant
|
7fd2e67d1121dfa026f37b30c84ce8ae2ce2e1ba
|
[
"Apache-2.0"
] | 4
|
2021-03-19T01:22:18.000Z
|
2022-01-13T01:19:34.000Z
|
homeassistant/helpers/device_registry.py
|
ljmerza/home-assistant
|
7fd2e67d1121dfa026f37b30c84ce8ae2ce2e1ba
|
[
"Apache-2.0"
] | 1
|
2020-11-15T17:46:09.000Z
|
2020-11-15T17:46:09.000Z
|
"""Provide a way to connect entities belonging to one device."""
import logging
import uuid
from asyncio import Event
from collections import OrderedDict
from typing import List, Optional, cast
import attr
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
DATA_REGISTRY = 'device_registry'
EVENT_DEVICE_REGISTRY_UPDATED = 'device_registry_updated'
STORAGE_KEY = 'core.device_registry'
STORAGE_VERSION = 1
SAVE_DELAY = 10
CONNECTION_NETWORK_MAC = 'mac'
CONNECTION_UPNP = 'upnp'
CONNECTION_ZIGBEE = 'zigbee'
@attr.s(slots=True, frozen=True)
class DeviceEntry:
"""Device Registry Entry."""
config_entries = attr.ib(type=set, converter=set,
default=attr.Factory(set))
connections = attr.ib(type=set, converter=set, default=attr.Factory(set))
identifiers = attr.ib(type=set, converter=set, default=attr.Factory(set))
manufacturer = attr.ib(type=str, default=None)
model = attr.ib(type=str, default=None)
name = attr.ib(type=str, default=None)
sw_version = attr.ib(type=str, default=None)
hub_device_id = attr.ib(type=str, default=None)
area_id = attr.ib(type=str, default=None)
name_by_user = attr.ib(type=str, default=None)
id = attr.ib(type=str, default=attr.Factory(lambda: uuid.uuid4().hex))
# This value is not stored, just used to keep track of events to fire.
is_new = attr.ib(type=bool, default=False)
def format_mac(mac):
"""Format the mac address string for entry into dev reg."""
to_test = mac
if len(to_test) == 17 and to_test.count(':') == 5:
return to_test.lower()
if len(to_test) == 17 and to_test.count('-') == 5:
to_test = to_test.replace('-', '')
elif len(to_test) == 14 and to_test.count('.') == 2:
to_test = to_test.replace('.', '')
if len(to_test) == 12:
# no : included
return ':'.join(to_test.lower()[i:i + 2] for i in range(0, 12, 2))
# Not sure how formatted, return original
return mac
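# Illustrative checks (not part of Home Assistant): format_mac normalises the common
# MAC notations and leaves anything it does not recognise untouched.
def _format_mac_examples():
    assert format_mac('AA:BB:CC:DD:EE:FF') == 'aa:bb:cc:dd:ee:ff'
    assert format_mac('AA-BB-CC-DD-EE-FF') == 'aa:bb:cc:dd:ee:ff'
    assert format_mac('AABB.CCDD.EEFF') == 'aa:bb:cc:dd:ee:ff'
    assert format_mac('not-a-mac') == 'not-a-mac'  # unknown formats are returned unchanged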
class DeviceRegistry:
"""Class to hold a registry of devices."""
def __init__(self, hass):
"""Initialize the device registry."""
self.hass = hass
self.devices = None
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
@callback
def async_get(self, device_id: str) -> Optional[DeviceEntry]:
"""Get device."""
return self.devices.get(device_id)
@callback
def async_get_device(self, identifiers: set, connections: set):
"""Check if device is registered."""
for device in self.devices.values():
if any(iden in device.identifiers for iden in identifiers) or \
any(conn in device.connections for conn in connections):
return device
return None
@callback
def async_get_or_create(self, *, config_entry_id, connections=None,
identifiers=None, manufacturer=_UNDEF,
model=_UNDEF, name=_UNDEF, sw_version=_UNDEF,
via_hub=None):
"""Get device. Create if it doesn't exist."""
if not identifiers and not connections:
return None
if identifiers is None:
identifiers = set()
if connections is None:
connections = set()
connections = {
(key, format_mac(value)) if key == CONNECTION_NETWORK_MAC
else (key, value)
for key, value in connections
}
device = self.async_get_device(identifiers, connections)
if device is None:
device = DeviceEntry(is_new=True)
self.devices[device.id] = device
if via_hub is not None:
hub_device = self.async_get_device({via_hub}, set())
hub_device_id = hub_device.id if hub_device else _UNDEF
else:
hub_device_id = _UNDEF
return self._async_update_device(
device.id,
add_config_entry_id=config_entry_id,
hub_device_id=hub_device_id,
merge_connections=connections or _UNDEF,
merge_identifiers=identifiers or _UNDEF,
manufacturer=manufacturer,
model=model,
name=name,
sw_version=sw_version
)
@callback
def async_update_device(
self, device_id, *, area_id=_UNDEF, name_by_user=_UNDEF,
new_identifiers=_UNDEF):
"""Update properties of a device."""
return self._async_update_device(
device_id, area_id=area_id, name_by_user=name_by_user,
new_identifiers=new_identifiers)
@callback
def _async_update_device(self, device_id, *, add_config_entry_id=_UNDEF,
remove_config_entry_id=_UNDEF,
merge_connections=_UNDEF,
merge_identifiers=_UNDEF,
new_identifiers=_UNDEF,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
sw_version=_UNDEF,
hub_device_id=_UNDEF,
area_id=_UNDEF,
name_by_user=_UNDEF):
"""Update device attributes."""
old = self.devices[device_id]
changes = {}
config_entries = old.config_entries
if (add_config_entry_id is not _UNDEF and
add_config_entry_id not in old.config_entries):
config_entries = old.config_entries | {add_config_entry_id}
if (remove_config_entry_id is not _UNDEF and
remove_config_entry_id in config_entries):
config_entries = config_entries - {remove_config_entry_id}
if config_entries is not old.config_entries:
changes['config_entries'] = config_entries
for attr_name, value in (
('connections', merge_connections),
('identifiers', merge_identifiers),
):
old_value = getattr(old, attr_name)
# If not undefined, check if `value` contains new items.
if value is not _UNDEF and not value.issubset(old_value):
changes[attr_name] = old_value | value
if new_identifiers is not _UNDEF:
changes['identifiers'] = new_identifiers
for attr_name, value in (
('manufacturer', manufacturer),
('model', model),
('name', name),
('sw_version', sw_version),
('hub_device_id', hub_device_id),
):
if value is not _UNDEF and value != getattr(old, attr_name):
changes[attr_name] = value
if (area_id is not _UNDEF and area_id != old.area_id):
changes['area_id'] = area_id
if (name_by_user is not _UNDEF and
name_by_user != old.name_by_user):
changes['name_by_user'] = name_by_user
if old.is_new:
changes['is_new'] = False
if not changes:
return old
new = self.devices[device_id] = attr.evolve(old, **changes)
self.async_schedule_save()
self.hass.bus.async_fire(EVENT_DEVICE_REGISTRY_UPDATED, {
'action': 'create' if 'is_new' in changes else 'update',
'device_id': new.id,
})
return new
def _async_remove_device(self, device_id):
del self.devices[device_id]
self.hass.bus.async_fire(EVENT_DEVICE_REGISTRY_UPDATED, {
'action': 'remove',
'device_id': device_id,
})
self.async_schedule_save()
async def async_load(self):
"""Load the device registry."""
data = await self._store.async_load()
devices = OrderedDict()
if data is not None:
for device in data['devices']:
devices[device['id']] = DeviceEntry(
config_entries=set(device['config_entries']),
connections={tuple(conn) for conn
in device['connections']},
identifiers={tuple(iden) for iden
in device['identifiers']},
manufacturer=device['manufacturer'],
model=device['model'],
name=device['name'],
sw_version=device['sw_version'],
id=device['id'],
# Introduced in 0.79
hub_device_id=device.get('hub_device_id'),
# Introduced in 0.87
area_id=device.get('area_id'),
name_by_user=device.get('name_by_user')
)
self.devices = devices
@callback
def async_schedule_save(self):
"""Schedule saving the device registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of device registry to store in a file."""
data = {}
data['devices'] = [
{
'config_entries': list(entry.config_entries),
'connections': list(entry.connections),
'identifiers': list(entry.identifiers),
'manufacturer': entry.manufacturer,
'model': entry.model,
'name': entry.name,
'sw_version': entry.sw_version,
'id': entry.id,
'hub_device_id': entry.hub_device_id,
'area_id': entry.area_id,
'name_by_user': entry.name_by_user
} for entry in self.devices.values()
]
return data
@callback
def async_clear_config_entry(self, config_entry_id):
"""Clear config entry from registry entries."""
remove = []
for dev_id, device in self.devices.items():
if device.config_entries == {config_entry_id}:
remove.append(dev_id)
else:
self._async_update_device(
dev_id, remove_config_entry_id=config_entry_id)
for dev_id in remove:
self._async_remove_device(dev_id)
@callback
def async_clear_area_id(self, area_id: str) -> None:
"""Clear area id from registry entries."""
for dev_id, device in self.devices.items():
if area_id == device.area_id:
self._async_update_device(dev_id, area_id=None)
@bind_hass
async def async_get_registry(hass: HomeAssistantType) -> DeviceRegistry:
"""Return device registry instance."""
reg_or_evt = hass.data.get(DATA_REGISTRY)
if not reg_or_evt:
evt = hass.data[DATA_REGISTRY] = Event()
reg = DeviceRegistry(hass)
await reg.async_load()
hass.data[DATA_REGISTRY] = reg
evt.set()
return reg
if isinstance(reg_or_evt, Event):
evt = reg_or_evt
await evt.wait()
return cast(DeviceRegistry, hass.data.get(DATA_REGISTRY))
return cast(DeviceRegistry, reg_or_evt)
@callback
def async_entries_for_area(registry: DeviceRegistry, area_id: str) \
-> List[DeviceEntry]:
"""Return entries that match an area."""
return [device for device in registry.devices.values()
if device.area_id == area_id]
| 34.235119
| 78
| 0.587499
|
96626016f3fe2470827b49e356f8eb4569fd1ade
| 1,934
|
py
|
Python
|
src/config/helper.py
|
moraisaugusto/schedule_backup
|
00fb3117508ea0493cb5dab1122e1ceaba129245
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T01:24:02.000Z
|
2021-03-06T01:24:02.000Z
|
src/config/helper.py
|
moraisaugusto/schedule_backup
|
00fb3117508ea0493cb5dab1122e1ceaba129245
|
[
"BSD-3-Clause"
] | null | null | null |
src/config/helper.py
|
moraisaugusto/schedule_backup
|
00fb3117508ea0493cb5dab1122e1ceaba129245
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import re
import subprocess
from loguru import logger
def ask_question(question, default_answer=None):
"""Ask a question to the user
@param question: question to be displayed
@type question: string
@param default_answer: default answer for the question
@type default_answer: bool
@return: answer
@rtype : bool
@raise e: None
"""
answers = {"y": True, "n": False}
question = f"\033[1m{question}\033[0m"
answer = input(question).lower() or default_answer
while True:
if answer in ["y", "n"]:
return answers[answer]
elif answer == default_answer:
return default_answer
else:
answer = input(question).lower() or default_answer
def replace_env_var(param):
"""Replace a string that contains a env variable
@param param: raw string that may have the env variable
@type param: string
@return: string replaced
@rtype : string
@raise e: None
"""
search_env_var = re.match(r"\$[A-Z0-1]+", param)
param_replaced = param
if search_env_var:
env_var = search_env_var.group()
env_var_value = os.getenv(env_var[1:])
param_replaced = param.replace(env_var, env_var_value)
return param_replaced
def subprocess_cmd(cmd):
"""execute a subprocess
@param cmd: command to be executed
@type cmd: string
@return: result of the command
@rtype : string
@raise e: None
"""
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
except subprocess.SubprocessError as e:
logger.error(e)
except OSError as e:
logger.error(e)
except ValueError as e:
logger.error(e)
except Exception as e:
logger.error(e)
finally:
SystemExit("Aborting...")
return proc_stdout
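# Illustrative sketch (hypothetical variable name, not part of the project):
# replace_env_var substitutes a leading $NAME token with its value from the environment.
# Note the pattern as written only matches names built from A-Z, 0 and 1.
def _replace_env_var_example():
    os.environ["BACKUPDIR"] = "/tmp/backups"
    return replace_env_var("$BACKUPDIR/daily")  # -> "/tmp/backups/daily"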
| 23.301205
| 75
| 0.630817
|
90f8284494b46b630a86e00bb63ce01db77dbe88
| 1,325
|
py
|
Python
|
test/test_issue363.py
|
gromgull/rdflib
|
7c90f646e3734ee6d3081b5d3f699f0f501f6a39
|
[
"BSD-3-Clause"
] | 4
|
2019-01-07T06:55:58.000Z
|
2021-07-16T13:34:58.000Z
|
test/test_issue363.py
|
gromgull/rdflib
|
7c90f646e3734ee6d3081b5d3f699f0f501f6a39
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_issue363.py
|
gromgull/rdflib
|
7c90f646e3734ee6d3081b5d3f699f0f501f6a39
|
[
"BSD-3-Clause"
] | 2
|
2018-05-01T13:18:13.000Z
|
2018-11-15T04:58:05.000Z
|
import rdflib
from nose import SkipTest
from nose.tools import assert_raises
data = '''<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:http="http://www.w3.org/2011/http#">
<http:HeaderElement rdf:about="#he0">
<http:params>
<http:Parameter rdf:about="#param0_0" />
<http:Parameter rdf:about="#param0_1" />
</http:params>
</http:HeaderElement>
</rdf:RDF>
'''
data2 = '''<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns="http://www.example.org/meeting_organization#">
<rdf:Description about="http://meetings.example.com/cal#m1">
<Location rdf:parseType="Resource">
<zip xmlns="http://www.another.example.org/geographical#">02139</zip>
<lat xmlns="http://www.another.example.org/geographical#">14.124425</lat>
</Location>
</rdf:Description>
</rdf:RDF>
'''
def test_broken_rdfxml():
#import ipdb; ipdb.set_trace()
def p():
rdflib.Graph().parse(data=data)
assert_raises(Exception, p)
def test_parsetype_resource():
g = rdflib.Graph().parse(data=data2)
print(g.serialize(format='n3'))
if __name__ == '__main__':
test_broken_rdfxml()
test_parsetype_resource()
| 28.804348
| 85
| 0.642264
|
259f23ec406b66330a85500e16d61e3721f5184e
| 833
|
py
|
Python
|
201-300/227.basic-calculator-ii.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
201-300/227.basic-calculator-ii.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
201-300/227.basic-calculator-ii.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=227 lang=python3
#
# [227] Basic Calculator II
#
# @lc code=start
class Solution:
def calculate(self, s: str) -> int:
res, prev, cur, operator = 0, 0, 0, "+"
for ch in s + "+":
if ch == " ":
continue
if ch.isdigit():
cur = cur * 10 + int(ch)
elif operator == '+':
res += prev
prev, cur, operator = cur, 0, ch
elif operator == '-':
res += prev
prev, cur, operator = -cur, 0, ch
elif operator == '*':
prev, cur, operator = prev * cur, 0, ch
elif operator == '/':
prev, cur, operator = int(prev / cur), 0, ch # in python, -3//2 = -2 not -1
return res + prev
# @lc code=end
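# Illustrative checks outside the submitted region (helper name is hypothetical):
def _calculate_examples():
    s = Solution()
    return s.calculate("3+2*2"), s.calculate(" 3/2 "), s.calculate(" 3+5 / 2")  # -> (7, 1, 5)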
| 27.766667
| 91
| 0.42617
|
e8a990994a3f31c160e6a38a24ecb298e15467b8
| 3,855
|
py
|
Python
|
scienz/sport/nfl_2020_pbp.py
|
Vibrant-Planet/aorist
|
067e119ef4d0d40802ce74a8e47d882e557ce195
|
[
"MIT"
] | 16
|
2021-08-14T10:20:16.000Z
|
2022-03-31T04:19:26.000Z
|
hub/sport/nfl_2020_pbp.py
|
scie-nz/aorist
|
ac1e31251af7d851c4491a310b417de880b79d09
|
[
"MIT"
] | 5
|
2021-08-15T23:19:10.000Z
|
2021-09-26T20:50:41.000Z
|
scienz/sport/nfl_2020_pbp.py
|
Vibrant-Planet/aorist
|
067e119ef4d0d40802ce74a8e47d882e557ce195
|
[
"MIT"
] | 1
|
2022-01-06T01:26:24.000Z
|
2022-01-06T01:26:24.000Z
|
from aorist import (
Attribute,
NaturalNumber,
StringIdentifier,
DateString,
Year,
POSIXTimestamp,
PositiveFloat,
default_tabular_schema,
RowStruct,
StaticDataTable,
DataSchema,
StorageSetup,
RemoteStorageSetup,
Storage,
RemoteStorage,
RemoteLocation,
CSVEncoding,
Encoding,
DataSet,
DatumTemplate,
Asset,
WebLocation,
FileBasedStorageLayout,
CSVHeader,
FileHeader,
APIOrFileLayout,
SingleFileLayout,
Empty,
FreeText,
IntegerNumber,
)
attributes = [
Attribute(StringIdentifier("GameId")),
Attribute(DateString("GameDate")),
Attribute(NaturalNumber("Quarter")),
Attribute(NaturalNumber("Minute")),
Attribute(NaturalNumber("Second")),
Attribute(StringIdentifier("OffenseTeam")),
Attribute(StringIdentifier("DefenseTeam")),
Attribute(NaturalNumber("Down")),
Attribute(NaturalNumber("ToGo")),
Attribute(NaturalNumber("YardLine")),
Attribute(Empty("")),
Attribute(NaturalNumber("SeriesFirstDown")),
Attribute(Empty("")),
Attribute(NaturalNumber("NextScore")),
Attribute(FreeText("Description")),
Attribute(NaturalNumber("TeamWin")),
Attribute(Empty("")),
Attribute(Empty("")),
Attribute(Year("SeasonYear")),
Attribute(IntegerNumber("Yards")),
Attribute(StringIdentifier("Formation")),
Attribute(StringIdentifier("PlayType")),
Attribute(NaturalNumber("IsRush")),
Attribute(NaturalNumber("IsPass")),
Attribute(NaturalNumber("IsIncomplete")),
Attribute(NaturalNumber("IsTouchdown")),
Attribute(StringIdentifier("PassType")),
Attribute(NaturalNumber("IsSack")),
Attribute(NaturalNumber("IsChallenge")),
Attribute(NaturalNumber("IsChallengeReversed")),
Attribute(Empty("Challenger")),
Attribute(NaturalNumber("IsMeasurement")),
Attribute(NaturalNumber("IsInterception")),
Attribute(NaturalNumber("IsFumble")),
Attribute(NaturalNumber("IsPenalty")),
Attribute(NaturalNumber("IsTwoPointConversion")),
Attribute(NaturalNumber("IsTwoPointConversionSuccessful")),
Attribute(StringIdentifier("RushDirection")),
Attribute(NaturalNumber("YardLineFixed")),
Attribute(StringIdentifier("YardLineDirection")),
Attribute(NaturalNumber("IsPenaltyAccepted")),
Attribute(StringIdentifier("PenaltyTeam")),
Attribute(NaturalNumber("IsNoPlay")),
Attribute(StringIdentifier("PenaltyType")),
Attribute(NaturalNumber("PenaltyYards")),
]
nfl_2020_pbp_datum = RowStruct(
name="nfl_2020_pbp_datum",
attributes=attributes,
)
nfl_2020_pbp_schema = default_tabular_schema(
DatumTemplate(nfl_2020_pbp_datum), attributes
)
table = Asset(StaticDataTable(
name="nfl_2020_pbp_table",
schema=DataSchema(nfl_2020_pbp_schema),
setup=StorageSetup(RemoteStorageSetup(
remote=Storage(RemoteStorage(
location=RemoteLocation(
WebLocation(
address=("http://nflsavant.com/pbp_data.php?year=2020"),
)
),
layout=APIOrFileLayout(
FileBasedStorageLayout(
SingleFileLayout()
),
),
encoding=Encoding(CSVEncoding(header=FileHeader(
CSVHeader(num_lines=1)
))),
)),
)),
tag="nfl_play_by_play",
))
nfl_2020_pbp_dataset = DataSet(
name="nfl_2020_pbp_dataset",
description="""
2020 NFL play-by-play data.
""",
source_path=__file__,
datum_templates=[DatumTemplate(nfl_2020_pbp_datum)],
assets={
"NFL_2020_play_by_play_data": table,
},
access_policies=[]
)
| 30.595238
| 84
| 0.64332
|
28246d9d7476ce91dcbbd13f7681030d3c1f825c
| 5,506
|
py
|
Python
|
corehq/tests/test_middleware.py
|
andyasne/commcare-hq
|
c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/tests/test_middleware.py
|
andyasne/commcare-hq
|
c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/tests/test_middleware.py
|
andyasne/commcare-hq
|
c59a24e57bdd4d2536493f9ecdcc9906f4ae1b88
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
import time
import mock
from django.http import HttpResponse
from django.test import override_settings, SimpleTestCase, TestCase
from django.urls import path, include
from django.views import View
from testil import Regex
from corehq.apps.domain.models import Domain
from corehq.apps.reports.dispatcher import ReportDispatcher
from corehq.apps.reports.generic import GenericReportView
from corehq.apps.users.models import WebUser
from corehq.util.timer import set_request_duration_reporting_threshold, TimingContext
@set_request_duration_reporting_threshold(0.1)
class SlowClassView(View):
def get(self, request):
time.sleep(0.2)
return HttpResponse()
@set_request_duration_reporting_threshold(0.1)
def slow_function_view(request):
timer = TimingContext()
with timer("sleep"):
time.sleep(0.2)
response = HttpResponse()
response.request_timer = timer
return response
class TestReportDispatcher(ReportDispatcher):
map_name = "REPORTS"
prefix = "test"
@classmethod
def get_reports(cls, domain):
return [('All Reports', [
SlowReport,
FastReport,
])]
class TestNoDomainReportDispatcher(ReportDispatcher):
map_name = "REPORTS"
prefix = "test_no_domain"
@classmethod
def get_reports(cls, domain):
return [('All Reports', [
NoDomainReport,
])]
class TestCustomReportDispatcher(TestNoDomainReportDispatcher):
map_name = "REPORTS"
prefix = "test_custom"
def dispatch(self, request, *args, **kwargs):
return CustomReport(request).view_response
@classmethod
def get_report_class_name(cls, domain, report_slug):
        raise Exception("Custom dispatchers don't like this method")
class BaseReport(GenericReportView):
name = "Test report"
section_name = "test"
@property
def view_response(self):
return HttpResponse(200)
@set_request_duration_reporting_threshold(0.1)
class SlowReport(BaseReport):
dispatcher = TestReportDispatcher
slug = 'slow_report'
@property
def view_response(self):
time.sleep(0.2)
return HttpResponse(200)
@set_request_duration_reporting_threshold(1)
class FastReport(BaseReport):
dispatcher = TestReportDispatcher
slug = 'fast_report'
class CustomReport(BaseReport):
dispatcher = TestCustomReportDispatcher
slug = 'custom_report'
class NoDomainReport(BaseReport):
dispatcher = TestNoDomainReportDispatcher
slug = 'admin_report'
urlpatterns = [
path('slow_class', SlowClassView.as_view()),
path('slow_function', slow_function_view),
TestNoDomainReportDispatcher.url_pattern(),
path('<domain>/', include([TestReportDispatcher.url_pattern()])),
path('<domain>/custom/', include([TestCustomReportDispatcher.url_pattern()])),
]
@override_settings(
ROOT_URLCONF='corehq.tests.test_middleware',
MIDDLEWARE=('corehq.middleware.LogLongRequestMiddleware',)
)
@mock.patch('corehq.middleware.add_breadcrumb')
@mock.patch('corehq.middleware.notify_exception')
class TestLogLongRequestMiddleware(SimpleTestCase):
def test_middleware_reports_slow_class_view(self, notify_exception, add_breadcrumb):
res = self.client.get('/slow_class')
self.assertEqual(res.status_code, 200)
notify_exception.assert_called_once()
add_breadcrumb.assert_not_called()
def test_middleware_reports_slow_function_view_with_timer(self, notify_exception, add_breadcrumb):
res = self.client.get('/slow_function')
self.assertEqual(res.status_code, 200)
notify_exception.assert_called_once()
add_breadcrumb.assert_has_calls([
mock.call(category="timing", message=Regex(r"^sleep: 0.\d+"), level="info")
])
@override_settings(
ROOT_URLCONF='corehq.tests.test_middleware',
DOMAIN_MODULE_MAP={"test_middleware": "corehq.tests.test_middleware"}
)
@mock.patch('corehq.middleware.notify_exception')
class TestLogLongRequestMiddlewareReports(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.domain = Domain(name="long_request", is_active=True)
cls.domain.save()
cls.username = 'fingile'
cls.password = '*******'
cls.user = WebUser.create(cls.domain.name, cls.username, cls.password, None, None)
cls.user.set_role(cls.domain.name, 'admin')
cls.user.save()
def setUp(self):
self.client.login(username=self.username, password=self.password)
@classmethod
def tearDownClass(cls):
cls.user.delete(cls.domain.name, deleted_by=None)
cls.domain.delete()
super().tearDownClass()
def test_slow_domain_report(self, notify_exception):
res = self.client.get('/domain1/slow_report/')
self.assertEqual(res.status_code, 200)
notify_exception.assert_called_once()
def test_fast_domain_report(self, notify_exception):
res = self.client.get('/domain1/fast_report/')
self.assertEqual(res.status_code, 200)
notify_exception.assert_not_called()
def test_no_domain_report(self, notify_exception):
res = self.client.get('/admin_report/')
self.assertEqual(res.status_code, 200)
notify_exception.assert_not_called()
def test_custom_report(self, notify_exception):
res = self.client.get('/domain2/custom/custom_report/')
self.assertEqual(res.status_code, 200)
notify_exception.assert_not_called()
| 30.087432
| 102
| 0.715946
|
4b0db1fe8e58ec6b9ce2d189d6e2544aa1b2608a
| 2,702
|
py
|
Python
|
cfgov/ask_cfpb/migrations/0037_recreated.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/migrations/0037_recreated.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/migrations/0037_recreated.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-12-20 16:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
replaces = [
('ask_cfpb', '0001_initial'),
('ask_cfpb', '0002_answeraudiencepage'),
('ask_cfpb', '0003_add_answercategorypage'),
('ask_cfpb', '0004_add_ask_category_images'),
('ask_cfpb', '0005_delete_answertagproxy'),
('ask_cfpb', '0006_update_help_text'),
('ask_cfpb', '0007_subcategory_prefixes'),
('ask_cfpb', '0008_fix_verbose_name_plural'),
('ask_cfpb', '0009_update_social_image_help_text'),
('ask_cfpb', '0010_answerpage_sidebar'),
('ask_cfpb', '0011_move_reusable_text_chooser_block'),
('ask_cfpb', '0012_add_rule_option_to_module'),
('ask_cfpb', '0013_recreated'),
('ask_cfpb', '0014_recreated_2'),
('ask_cfpb', '0015_update_email_signup_options'),
('ask_cfpb', '0016_modify_help_text_social_sharing_image'),
('ask_cfpb', '0017_adjust_fields_for_editing'),
('ask_cfpb', '0018_migrate_answer_field_help_text'),
('ask_cfpb', '0019_add_disclaimer_pagechooserblock_to_emailsignup'),
('ask_cfpb', '0020_rm_formfieldwithbutton'),
('ask_cfpb', '0021_rssfeed_improvements'),
('ask_cfpb', '0022_add_remaining_answer_fields_to_answer_page'),
('ask_cfpb', '0023_rename_snippet_to_short_answer'),
('ask_cfpb', '0024_add_portal_links_to_answerpage'),
('ask_cfpb', '0025_remove_answer_audience_page'),
('ask_cfpb', '0026_remove_redirect_to_and_answer_id_from_answer_page'),
('ask_cfpb', '0027_portalsearchpage'),
('ask_cfpb', '0028_answerpage_answer_content'),
('ask_cfpb', '0029_answer_schema_blocks'),
('ask_cfpb', '0030_remove_answer_category_page'),
('ask_cfpb', '0031_remove_deprecated_models_and_fields'),
('ask_cfpb', '0032_remove_html_editor'),
('ask_cfpb', '0033_add_article_page'),
('ask_cfpb', '0034_remove_answerresultspage_content'),
('ask_cfpb', '0035_move_glossaryterm'),
('ask_cfpb', '0036_auto_20191219_1445'),
]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-id'],
},
),
]
| 41.569231
| 114
| 0.642857
|
02b32ff1637fdba1dad275cca3755a4d8ba5e14c
| 41,085
|
py
|
Python
|
lib/doconce/sphinx.py
|
aless80/doconce_hplgit_fork
|
23fb3a7206fccafb7ef829a9a37bea2298b3ddb9
|
[
"BSD-3-Clause"
] | 305
|
2015-01-07T06:57:14.000Z
|
2022-02-16T01:45:25.000Z
|
lib/doconce/sphinx.py
|
aless80/doconce-1
|
23fb3a7206fccafb7ef829a9a37bea2298b3ddb9
|
[
"BSD-3-Clause"
] | 163
|
2015-01-08T11:03:26.000Z
|
2020-12-17T12:54:46.000Z
|
lib/doconce/sphinx.py
|
aless80/doconce-1
|
23fb3a7206fccafb7ef829a9a37bea2298b3ddb9
|
[
"BSD-3-Clause"
] | 91
|
2015-03-19T17:17:34.000Z
|
2022-01-02T15:45:20.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
# http://sphinx.pocoo.org/ext/math.html#
# can reuse most of rst module:
from .rst import *
from .common import align2equations, online_python_tutor, \
get_legal_pygments_lexers, has_custom_pygments_lexer
from .misc import option, _abort
from .doconce import errwarn
# RunestoneInteractive book counters
question_counter = 0
video_counter = 0
edit_markup_warning = False
def sphinx_figure(m):
result = ''
# m is a MatchObject
filename = m.group('filename')
caption = m.group('caption').strip()
    # Substitute DocOnce label with rst label in caption
# (also, remove final period in caption since caption is used as hyperlink
# text to figures).
m_label = re.search(r'label\{(.+?)\}', caption)
if m_label:
label = m_label.group(1)
result += '\n.. _%s:\n' % label
# remove . at the end of the caption text
parts = caption.split('label')
parts[0] = parts[0].rstrip()
if parts[0] and parts[0][-1] == '.':
parts[0] = parts[0][:-1]
parts[0] = parts[0].strip()
# insert emphasize marks if not latex $ at the
# beginning or end (math subst does not work for *$I=1$*)
# or if not boldface or emphasize already in the caption
caption_font = option('sphinx_figure_captions=', 'emphasize')
if parts[0] and \
caption_font == 'emphasize' and \
not parts[0].startswith('$') and \
not parts[0].endswith('$') and \
not '*' in parts[0] and \
not '_' in parts[0]:
parts[0] = '*' + parts[0] + '*'
#caption = ' label'.join(parts)
caption = parts[0]
# contrary to rst_figure, we do not write label into caption
# since we just want to remove the whole label as part of
# the caption (otherwise done when handling ref and label)
else:
if caption and caption[-1] == '.':
caption = caption[:-1]
# math is ignored in references to figures, test for math only
if caption.startswith('$') and caption.endswith('$'):
errwarn('*** warning: math only in sphinx figure caption (it will be ignored by sphinx, resulting in empty caption)\n %s\n FIGURE: [%s' % (caption, filename))
#stem = os.path.splitext(filename)[0]
#result += '\n.. figure:: ' + stem + '.*\n' # utilize flexibility # does not work yet
result += '\n.. figure:: ' + filename + '\n'
opts = m.group('options')
if opts:
# opts: width=600 frac=0.5 align=center
# opts: width=600, frac=0.5, align=center
info = [s.split('=') for s in opts.split()]
fig_info = [' :%s: %s' % (opt, value.replace(',', ''))
for opt, value in info
if opt not in ['frac', 'sidecap']]
result += '\n'.join(fig_info)
if caption:
result += '\n\n ' + caption + '\n'
else:
result += '\n\n'
#errwarn('sphinx figure: caption=\n', caption, '\nresult:\n', result)
return result
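# Illustrative example (not from the original source): a DocOnce line such as
#   FIGURE: [fig/plot.png, width=600 frac=0.7] A caption. label{fig:plot}
# is rewritten by sphinx_figure into roughly
#   .. _fig:plot:
#
#   .. figure:: fig/plot.png
#      :width: 600
#
#      *A caption*
# (the frac option is dropped and the trailing period is stripped from the caption).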
def sphinx_movie(m):
filename = m.group('filename')
special_movie = '*' in filename or '->' in filename or 'youtu.be' in filename or 'youtube.com' in filename or 'vimeo.com' in filename
if option('runestone') and not special_movie:
# Use RunestoneInteractive video environment
global video_counter
video_counter += 1
text = """
.. video:: video_%d
:controls:
%s
""" % (video_counter, filename)
return text
else:
# Use plain html code
return rst_movie(m)
def sphinx_quiz_runestone(quiz):
quiz_feedback = option('quiz_explanations=', 'on')
text = ''
if 'new page' in quiz:
text += '.. !split\n%s\n%s' % (quiz['new page'], '-'*len(quiz['new page']))
text += '.. begin quiz\n\n'
global question_counter
question_counter += 1
# Multiple correct answers?
if sum([1 for choice in quiz['choices'] if choice[0] == 'right']) > 1:
text += '.. mchoicema:: question_%d' % question_counter + '\n'
else:
text += '.. mchoicemf:: question_%d' % question_counter + '\n'
def fix_text(s, tp='answer'):
"""
Answers and feedback in RunestoneInteractive book quizzes
cannot contain math, figure and rst markup. Perform fixes.
"""
drop = False
if 'math::' in s:
errwarn('\n*** warning: quiz %s with math block not supported:' % tp)
errwarn(s)
drop = True
if '.. code-block::' in s:
errwarn('\n*** warning: quiz %s with code block not supported:' % tp)
errwarn(s)
drop = True
if '.. figure::' in s:
errwarn('\n*** warning: quiz %s with figure not supported:' % tp)
errwarn(s)
drop = True
if drop:
return ''
# Make multi-line paragraph a one-liner
s = ' '.join(s.splitlines()).rstrip()
# Fixes
pattern = r'`(.+?) (<https?.+?)>`__' # URL
s = re.sub(pattern, '<a href="\g<2>"> \g<1> </a>', s)
pattern = r'``(.+?)``' # verbatim
s = re.sub(pattern, '<tt>\g<1></tt>', s)
pattern = r':math:`(.+?)`' # inline math
s = re.sub(pattern, '<em>\g<1></em>', s) # mimic italic....
pattern = r':\*(.+?)\*' # emphasize
s = re.sub(pattern, '\g<1>', s, flags=re.DOTALL)
return s
import string
correct = []
for i, choice in enumerate(quiz['choices']):
if i > 4: # not supported
errwarn('*** warning: quiz with %d choices gets truncated (first 5)' % len(quiz['choices']))
break
letter = string.ascii_lowercase[i]
text += ' :answer_%s: ' % letter
answer = fix_text(choice[1], tp='answer')
if not answer:
answer = 'Too advanced typesetting prevents the text from being rendered'
text += answer + '\n'
if choice[0] == 'right':
correct.append(letter)
if correct:
text += ' :correct: ' + ', '.join(correct) + '\n'
else:
errwarn('*** error: correct choice in quiz has index > 5 (max 5 allowed for RunestoneInteractive books)')
errwarn(quiz['question'])
_abort()
for i, choice in enumerate(quiz['choices']):
if i > 4: # not supported
break
letter = string.ascii_lowercase[i]
text += ' :feedback_%s: ' % letter # must be present
if len(choice) == 3 and quiz_feedback == 'on':
feedback = fix_text(choice[2], tp='explanation')
if not feedback:
feedback = '(Too advanced typesetting prevents the text from being rendered)'
text += feedback
text += '\n'
text += '\n' + indent_lines(quiz['question'], 'sphinx', ' '*3) + '\n\n\n'
return text
def sphinx_quiz(quiz):
if option('runestone'):
return sphinx_quiz_runestone(quiz)
else:
return rst_quiz(quiz)
from .latex import fix_latex_command_regex as fix_latex
def sphinx_code(filestr, code_blocks, code_block_types,
tex_blocks, format):
# In rst syntax, code blocks are typeset with :: (verbatim)
# followed by intended blocks. This function indents everything
# inside code (or TeX) blocks.
# default mappings of !bc environments and pygments languages:
envir2pygments = dict(
cod='python', pro='python',
pycod='python', cycod='cython',
pypro='python', cypro='cython',
fcod='fortran', fpro='fortran',
ccod='c', cppcod='c++',
cpro='c', cpppro='c++',
mcod='matlab', mpro='matlab',
plcod='perl', plpro='perl',
shcod='bash', shpro='bash',
rbcod='ruby', rbpro='ruby',
#sys='console',
sys='text',
rst='rst',
css='css', csspro='css', csscod='css',
dat='text', csv='text', txt='text',
cc='text', ccq='text', # not possible with extra indent for ccq
ipy='ipy',
xmlcod='xml', xmlpro='xml', xml='xml',
htmlcod='html', htmlpro='html', html='html',
texcod='latex', texpro='latex', tex='latex',
latexcod='latex', latexpro='latex', latex='latex',
do='doconce',
pyshell='python',
pyoptpro='python', pyscpro='python',
)
# grab line with: # sphinx code-blocks: cod=python cpp=c++ etc
# (do this before code is inserted in case verbatim blocks contain
# such specifications for illustration)
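    # For example (illustrative): a DocOnce source line like
    #   .. Sphinx code-blocks: pycod=python cppcod=c++ sys=console
    # overrides the default envir2pygments mappings set up above.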
m = re.search(r'.. *[Ss]phinx +code-blocks?:(.+)', filestr)
if m:
defs_line = m.group(1)
# turn specifications into a dictionary:
for definition in defs_line.split():
key, value = definition.split('=')
envir2pygments[key] = value
# First indent all code blocks
for i in range(len(code_blocks)):
if code_block_types[i].startswith('pyoptpro') and not option('runestone'):
code_blocks[i] = online_python_tutor(code_blocks[i],
return_tp='iframe')
if code_block_types[i].endswith('-h'):
indentation = ' '*8
else:
indentation = ' '*4
code_blocks[i] = indent_lines(code_blocks[i], format,
indentation)
# After transforming align environments to separate equations
# the problem with math labels in multiple eqs has disappeared.
# (doconce.py applies align2equations, which takes all align
# envirs and translates them to separate equations, but align*
# environments are allowed.
# Any output of labels in align means an error in the
# align -> equation transformation...)
math_labels = []
multiple_math_labels = [] # sphinx has problems with multiple math labels
for i in range(len(tex_blocks)):
tex_blocks[i] = indent_lines(tex_blocks[i], format)
# extract all \label{}s inside tex blocks and typeset them
# with :label: tags
label_regex = fix_latex( r'label\{(.+?)\}', application='match')
labels = re.findall(label_regex, tex_blocks[i])
if len(labels) == 1:
tex_blocks[i] = ' :label: %s\n' % labels[0] + tex_blocks[i]
elif len(labels) > 1:
multiple_math_labels.append(labels)
if len(labels) > 0:
math_labels.extend(labels)
tex_blocks[i] = re.sub(label_regex, '', tex_blocks[i])
# fix latex constructions that do not work with sphinx math
# (just remove them)
commands = [r'\begin{equation}',
r'\end{equation}',
r'\begin{equation*}',
r'\end{equation*}',
#r'\begin{eqnarray}',
#r'\end{eqnarray}',
#r'\begin{eqnarray*}',
#r'\end{eqnarray*}',
#r'\begin{align}',
#r'\end{align}',
#r'\begin{align*}',
#r'\end{align*}',
r'\begin{multline}',
r'\end{multline}',
r'\begin{multline*}',
r'\end{multline*}',
#r'\begin{split}',
#r'\end{split}',
#r'\begin{gather}',
#r'\end{gather}',
#r'\begin{gather*}',
#r'\end{gather*}',
r'\[',
r'\]',
# some common abbreviations (newcommands):
r'\beqan',
r'\eeqan',
r'\beqa',
r'\eeqa',
r'\balnn',
r'\ealnn',
r'\baln',
r'\ealn',
r'\beq',
r'\eeq', # the simplest name, contained in others, must come last!
]
for command in commands:
tex_blocks[i] = tex_blocks[i].replace(command, '')
# &=& -> &=
tex_blocks[i] = re.sub('&\s*=\s*&', ' &= ', tex_blocks[i])
# provide warnings for problematic environments
# Replace all references to equations that have labels in math environments:
for label in math_labels:
filestr = filestr.replace('(:ref:`%s`)' % label, ':eq:`%s`' % label)
multiple_math_labels_with_refs = [] # collect the labels with references
for labels in multiple_math_labels:
for label in labels:
ref = ':eq:`%s`' % label # ref{} is translated to eq:``
if ref in filestr:
multiple_math_labels_with_refs.append(label)
if multiple_math_labels_with_refs:
errwarn("""
*** warning: detected non-align math environment with multiple labels
(Sphinx cannot handle this equation system - labels will be removed
and references to them will be empty):""")
for label in multiple_math_labels_with_refs:
errwarn(' label{%s}' % label)
print()
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'sphinx')
# Remove all !bc ipy and !bc pyshell since interactive sessions
# are automatically handled by sphinx without indentation
# (just a blank line before and after)
    filestr = re.sub(r'^!bc +d?ipy *\n(.*?)^!ec *\n',
                     '\n\g<1>\n', filestr, flags=re.DOTALL|re.MULTILINE)
    filestr = re.sub(r'^!bc +d?pyshell *\n(.*?)^!ec *\n',
                     '\n\g<1>\n', filestr, flags=re.DOTALL|re.MULTILINE)
# Check if we have custom pygments lexers
if 'ipy' in code_block_types:
if not has_custom_pygments_lexer('ipy'):
envir2pygments['ipy'] = 'python'
if 'do' in code_block_types:
if not has_custom_pygments_lexer('doconce'):
envir2pygments['do'] = 'text'
# Make correct code-block:: language constructions
legal_pygments_languages = get_legal_pygments_lexers()
for key in set(code_block_types):
if key in envir2pygments:
if not envir2pygments[key] in legal_pygments_languages:
errwarn("""*** warning: %s is not a legal Pygments language (lexer)
found in line:
%s
The 'text' lexer will be used instead.
""" % (envir2pygments[key], defs_line))
envir2pygments[key] = 'text'
#filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
# '\n.. code-block:: %s\n\n' % envir2pygments[key], filestr,
# flags=re.MULTILINE)
# Check that we have code installed to handle pyscpro
if 'pyscpro' in filestr and key == 'pyscpro':
try:
import icsecontrib.sagecellserver
except ImportError:
errwarn("""
*** warning: pyscpro for computer code (sage cells) is requested, but
icsecontrib.sagecellserver from https://github.com/kriskda/sphinx-sagecell
is not installed. Using plain Python typesetting instead.""")
key = 'pypro'
if key == 'pyoptpro':
if option('runestone'):
filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
'\n.. codelens:: codelens_\n :showoutput:\n\n',
filestr, flags=re.MULTILINE)
else:
filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
'\n.. raw:: html\n\n',
filestr, flags=re.MULTILINE)
elif key == 'pyscpro':
if option('runestone'):
filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
"""
.. activecode:: activecode_
:language: python
""", filestr, flags=re.MULTILINE)
else:
filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
'\n.. sagecellserver::\n\n',
filestr, flags=re.MULTILINE)
elif key == 'pysccod':
if option('runestone'):
# Include (i.e., run) all previous code segments...
# NOTE: this is most likely not what we want
                include = ', '.join([str(i) for i in range(1, activecode_counter)])
filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
"""
.. activecode:: activecode_
   :language: python
   :include: %s
""" % include, filestr, flags=re.MULTILINE)
else:
errwarn('*** error: pysccod for sphinx is not supported without the --runestone flag\n (but pyscpro is via Sage Cell Server)')
_abort()
elif key == '':
# any !bc with/without argument becomes a text block:
filestr = re.sub(r'^!bc$', '\n.. code-block:: text\n\n', filestr,
flags=re.MULTILINE)
elif key.endswith('hid'):
if key in ('pyhid', 'jshid', 'htmlhid') and option('runestone'):
# Allow runestone books to run hidden code blocks
# (replace pyhid by pycod, then remove all !bc *hid)
for i in range(len(code_block_types)):
if code_block_types[i] == key:
code_block_types[i] = key.replace('hid', 'cod')
key2language = dict(py='python', js='javascript', html='html')
language = key2language[key.replace('hid', '')]
                include = ', '.join([str(i) for i in range(1, activecode_counter)])
filestr = re.sub(r'^!bc +%s\s*\n' % key,
"""
.. activecode:: activecode_
:language: %s
:include: %s
:hidecode:
""" % (language, include), filestr, flags=re.MULTILINE)
else:
# Remove hidden code block
pattern = r'^!bc +%s\n.+?^!ec' % key
filestr = re.sub(pattern, '', filestr,
flags=re.MULTILINE|re.DOTALL)
else:
show_hide = False
if key.endswith('-h'):
key_orig = key
key = key[:-2]
show_hide = True
# Use the standard sphinx code-block directive
if key in envir2pygments:
pygments_language = envir2pygments[key]
elif key in legal_pygments_languages:
pygments_language = key
else:
errwarn('*** error: detected code environment "%s"' % key)
errwarn(' which is not registered in sphinx.py (sphinx_code)')
errwarn(' or not a language registered in pygments')
_abort()
if show_hide:
filestr = re.sub(r'^!bc +%s\s*\n' % key_orig,
'\n.. container:: toggle\n\n .. container:: header\n\n **Show/Hide Code**\n\n .. code-block:: %s\n\n' % \
pygments_language, filestr, flags=re.MULTILINE)
# Must add 4 indent in corresponding code_blocks[i], done above
else:
filestr = re.sub(r'^!bc +%s\s*\n' % key,
'\n.. code-block:: %s\n\n' % \
pygments_language, filestr, flags=re.MULTILINE)
# any !bc with/without argument becomes a text block:
filestr = re.sub(r'^!bc.*$', '\n.. code-block:: text\n\n', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'^!ec *\n', '\n', filestr, flags=re.MULTILINE)
#filestr = re.sub(r'^!ec\n', '\n', filestr, flags=re.MULTILINE)
#filestr = re.sub(r'^!ec\n', '', filestr, flags=re.MULTILINE)
filestr = re.sub(r'^!bt *\n', '\n.. math::\n', filestr, flags=re.MULTILINE)
filestr = re.sub(r'^!et *\n', '\n', filestr, flags=re.MULTILINE)
# Fix lacking blank line after :label:
filestr = re.sub(r'^( :label: .+?)(\n *[^ ]+)', r'\g<1>\n\n\g<2>',
filestr, flags=re.MULTILINE)
# Insert counters for runestone blocks
if option('runestone'):
codelens_counter = 0
activecode_counter = 0
lines = filestr.splitlines()
for i in range(len(lines)):
if '.. codelens:: codelens_' in lines[i]:
codelens_counter += 1
lines[i] = lines[i].replace('codelens_', 'codelens_%d' %
codelens_counter)
if '.. activecode:: activecode_' in lines[i]:
activecode_counter += 1
lines[i] = lines[i].replace('activecode_', 'activecode_%d' %
activecode_counter)
filestr = '\n'.join(lines)
# Final fixes
filestr = fix_underlines_in_headings(filestr)
# Ensure blank line before and after comments
filestr = re.sub(r'([.:;?!])\n^\.\. ', r'\g<1>\n\n.. ',
filestr, flags=re.MULTILINE)
filestr = re.sub(r'(^\.\. .+)\n([^ \n]+)', r'\g<1>\n\n\g<2>',
filestr, flags=re.MULTILINE)
    # Line breaks interfere with tables and need a final blank line too
lines = filestr.splitlines()
inside_block = False
for i in range(len(lines)):
if lines[i].startswith('<linebreakpipe>') and not inside_block:
inside_block = True
lines[i] = lines[i].replace('<linebreakpipe> ', '') + '\n'
continue
if lines[i].startswith('<linebreakpipe>') and inside_block:
lines[i] = '|' + lines[i].replace('<linebreakpipe>', '')
continue
if inside_block and not lines[i].startswith('<linebreakpipe>'):
inside_block = False
lines[i] = '| ' + lines[i] + '\n'
filestr = '\n'.join(lines)
# Remove double !split (TOC with a prefix !split gives two !splits)
pattern = '^.. !split\s+.. !split'
filestr = re.sub(pattern, '.. !split', filestr, flags=re.MULTILINE)
if option('html_links_in_new_window'):
# Insert a comment to be recognized by automake_sphinx.py such that it
# can replace the default links by proper modified target= option.
#filestr = '\n\n.. NOTE: Open external links in new windows.\n\n' + filestr
# Use JavaScript instead
filestr = """.. raw:: html
<script type="text/javascript">
$(document).ready(function() {
$("a[href^='http']").attr('target','_blank');
});
</script>
""" + filestr
# Remove too much vertical space
filestr = re.sub(r'\n{3,}', '\n\n', filestr)
return filestr
def sphinx_ref_and_label(section_label2title, format, filestr):
# Special fix early in the process:
# Deal with !split - by default we place splits before
# the all the topmost sections
# (This must be done before labels are put above section
# headings)
if '!split' in filestr and not option('sphinx_keep_splits'):
errwarn('*** warning: new !split inserted (override all existing !split)')
# Note: the title is at this stage translated to a chapter heading!
# This title/heading must be removed for the algorithm below to work
# (remove it, then insert afterwards)
pattern = r'^.. Document title:\n\n={3,9}.+?={3,9}'
m = re.search(pattern, filestr, flags=re.MULTILINE)
title_replacement = '<<<<<<<DOCUMENT TITLE>>>>>>>>>>>>' # "unlikely" str
if m:
title = m.group()
filestr = filestr.replace(title, title_replacement)
else:
title = ''
topmost_section = 0
for i in [9, 7, 5]:
if re.search(r'^%s' % ('='*i), filestr, flags=re.MULTILINE):
topmost_section = i
errwarn(' before every %s heading %s' % \
('='*topmost_section, '='*topmost_section))
errwarn(' because this strategy gives a well-functioning')
errwarn(' table of contents in Sphinx')
errwarn(' (use --sphinx_keep_splits to enforce your own !split commands)')
break
if topmost_section:
# First remove all !split
filestr = re.sub(r'^!split *\n', '', filestr, flags=re.MULTILINE)
# Insert new splits before all topmost sections
pattern = r'^%s (.+?) %s' % \
('='*topmost_section, '='*topmost_section)
lines = filestr.splitlines()
for i in range(len(lines)):
if re.search(pattern, lines[i]):
lines[i] = '!split\n' + lines[i]
filestr = '\n'.join(lines)
filestr = filestr.replace(title_replacement, title)
filestr = ref_and_label_commoncode(section_label2title, format, filestr)
# replace all references to sections:
for label in section_label2title:
filestr = filestr.replace('ref{%s}' % label, ':ref:`%s`' % label)
# Not of interest after sphinx got equation references:
#from common import ref2equations
#filestr = ref2equations(filestr)
# Replace remaining ref{x} as :ref:`x`
filestr = re.sub(r'ref\{(.+?)\}', ':ref:`\g<1>`', filestr)
return filestr
def sphinx_index_bib(filestr, index, citations, pubfile, pubdata):
# allow user to force the use of original bibliography keys instead of numbered labels
numbering = not option('sphinx_preserve_bib_keys', False)
filestr = rst_bib(filestr, citations, pubfile, pubdata, numbering=numbering)
from .common import INLINE_TAGS
for word in index:
# Drop verbatim, emphasize, bold, and math in index
word2 = word.replace('`', '')
word2 = word2.replace('$', '').replace('\\', '')
word2 = re.sub(INLINE_TAGS['bold'],
r'\g<begin>\g<subst>\g<end>', word2,
flags=re.MULTILINE)
word2 = re.sub(INLINE_TAGS['emphasize'],
r'\g<begin>\g<subst>\g<end>', word2,
flags=re.MULTILINE)
# Typeset idx{word} as ..index::
if '!' not in word and ',' not in word:
# .. index:: keyword
filestr = filestr.replace(
'idx{%s}' % word,
'\n.. index:: ' + word2 + '\n')
elif '!' not in word:
# .. index::
# single: keyword with comma
filestr = filestr.replace(
'idx{%s}' % word,
'\n.. index::\n single: ' + word2 + '\n')
else:
# .. index::
# single: keyword; subentry
word3 = word2.replace('!', '; ')
filestr = filestr.replace(
'idx{%s}' % word,
'\n.. index::\n single: ' + word3 + '\n')
# Symmetric keyword; subentry and subentry; keyword
#filestr = filestr.replace(
# 'idx{%s}' % word,
# '\n.. index::\n pair: ' + word3 + '\n')
return filestr
def sphinx_inline_comment(m):
# Explicit HTML typesetting does not work, we just use bold
name = m.group('name').strip()
comment = m.group('comment').strip()
global edit_markup_warning
if (not edit_markup_warning) and \
(name[:3] in ('add', 'del', 'edi') or '->' in comment):
errwarn('*** warning: sphinx/rst is a suboptimal format for')
errwarn(' typesetting edit markup such as')
errwarn(' ' + m.group())
errwarn(' Use HTML or LaTeX output instead, implement the')
errwarn(' edits (doconce apply_edit_comments) and then use sphinx.')
edit_markup_warning = True
chars = {',': 'comma', ';': 'semicolon', '.': 'period'}
if name[:4] == 'del ':
for char in chars:
if comment == char:
return r' (**edit %s**: delete %s)' % (name[4:], chars[char])
return r'(**edit %s**: **delete** %s)' % (name[4:], comment)
elif name[:4] == 'add ':
for char in chars:
if comment == char:
return r'%s (**edit %s: add %s**)' % (comment, name[4:], chars[char])
return r' (**edit %s: add**) %s (**end add**)' % (name[4:], comment)
else:
# Ordinary name
comment = ' '.join(comment.splitlines()) # '\s->\s' -> ' -> '
if ' -> ' in comment:
# Replacement
if comment.count(' -> ') != 1:
errwarn('*** wrong syntax in inline comment:')
errwarn(comment)
                errwarn('(more than one ->)')
_abort()
orig, new = comment.split(' -> ')
return r'(**%s: remove** %s) (**insert:**)%s (**end insert**)' % (name, orig, new)
else:
# Ordinary comment
return r'[**%s**: %s]' % (name, comment)
def define(FILENAME_EXTENSION,
BLANKLINE,
INLINE_TAGS_SUBST,
CODE,
LIST,
ARGLIST,
TABLE,
EXERCISE,
FIGURE_EXT,
CROSS_REFS,
INDEX_BIB,
TOC,
ENVIRS,
QUIZ,
INTRO,
OUTRO,
filestr):
if not 'rst' in BLANKLINE:
# rst.define is not yet ran on these dictionaries, do it:
from . import rst
rst.define(FILENAME_EXTENSION,
BLANKLINE,
INLINE_TAGS_SUBST,
CODE,
LIST,
ARGLIST,
TABLE,
FIGURE_EXT,
INTRO,
OUTRO,
filestr)
FILENAME_EXTENSION['sphinx'] = FILENAME_EXTENSION['rst']
BLANKLINE['sphinx'] = BLANKLINE['rst']
CODE['sphinx'] = CODE['rst']
LIST['sphinx'] = LIST['rst']
FIGURE_EXT['sphinx'] = {
'search': ('.png', '.gif', '.jpg', '.jpeg'),
'convert': ('.png', '.gif', '.jpg')}
CROSS_REFS['sphinx'] = sphinx_ref_and_label
INDEX_BIB['sphinx'] = sphinx_index_bib
TABLE['sphinx'] = TABLE['rst']
EXERCISE['sphinx'] = EXERCISE['rst']
ENVIRS['sphinx'] = ENVIRS['rst']
INTRO['sphinx'] = INTRO['rst'].replace(
'.. Automatically generated reStructuredText',
'.. Automatically generated Sphinx-extended reStructuredText')
# make true copy of INLINE_TAGS_SUBST:
INLINE_TAGS_SUBST['sphinx'] = {}
for tag in INLINE_TAGS_SUBST['rst']:
INLINE_TAGS_SUBST['sphinx'][tag] = INLINE_TAGS_SUBST['rst'][tag]
# modify some tags:
#INLINE_TAGS_SUBST['sphinx']['math'] = r'\g<begin>:math:`\g<subst>`\g<end>'
# Important to strip the math expression
INLINE_TAGS_SUBST['sphinx']['math'] = lambda m: r'%s:math:`%s`%s' % (m.group('begin'), m.group('subst').strip(), m.group('end'))
#INLINE_TAGS_SUBST['sphinx']['math2'] = r'\g<begin>:math:`\g<latexmath>`\g<end>'
INLINE_TAGS_SUBST['sphinx']['math2'] = lambda m: r'%s:math:`%s`%s' % (m.group('begin'), m.group('latexmath').strip(), m.group('end'))
INLINE_TAGS_SUBST['sphinx']['figure'] = sphinx_figure
INLINE_TAGS_SUBST['sphinx']['movie'] = sphinx_movie
INLINE_TAGS_SUBST['sphinx']['inlinecomment'] = sphinx_inline_comment
CODE['sphinx'] = sphinx_code # function for typesetting code
ARGLIST['sphinx'] = {
'parameter': ':param',
'keyword': ':keyword',
'return': ':return',
'instance variable': ':ivar',
'class variable': ':cvar',
'module variable': ':var',
}
TOC['sphinx'] = lambda s, f: '' # Sphinx automatically generates a toc
QUIZ['sphinx'] = sphinx_quiz
#---------------------------------------------------------------------------
def sphinx_code_orig(filestr, format):
# NOTE: THIS FUNCTION IS NOT USED!!!!!!
# In rst syntax, code blocks are typeset with :: (verbatim)
# followed by intended blocks. This function indents everything
# inside code (or TeX) blocks.
# grab #sphinx code-blocks: cod=python cpp=c++ etc line
# (do this before code is inserted in case verbatim blocks contain
# such specifications for illustration)
m = re.search(r'#\s*[Ss]phinx\s+code-blocks?:(.+?)\n', filestr)
if m:
defs_line = m.group(1)
# turn defs into a dictionary definition:
defs = {}
for definition in defs_line.split():
key, value = definition.split('=')
defs[key] = value
else:
# default mappings:
defs = dict(cod='python',
pro='python',
pycod='python', cycod='cython',
pypro='python', cypro='cython',
fcod='fortran', fpro='fortran',
ccod='c', cppcod='c++',
cpro='c', cpppro='c++',
mcod='matlab', mpro='matlab',
plcod='perl', plpro='perl',
shcod='bash', shpro='bash',
rbcod='ruby', rbpro='ruby',
sys='console',
dat='python',
ipy='python',
xmlcod='xml', xmlpro='xml', xml='xml',
htmlcod='html', htmlpro='html', html='html',
texcod='latex', texpro='latex', tex='latex',
)
# (the "python" typesetting is neutral if the text
# does not parse as python)
# first indent all code/tex blocks by 1) extracting all blocks,
    # 2) indenting each block, and 3) inserting the blocks:
filestr, code_blocks, tex_blocks = remove_code_and_tex(filestr, format)
for i in range(len(code_blocks)):
code_blocks[i] = indent_lines(code_blocks[i], format)
for i in range(len(tex_blocks)):
tex_blocks[i] = indent_lines(tex_blocks[i], format)
# remove all \label{}s inside tex blocks:
tex_blocks[i] = re.sub(fix_latex(r'\label\{.+?\}', application='match'),
'', tex_blocks[i])
# remove those without \ if there are any:
tex_blocks[i] = re.sub(r'label\{.+?\}', '', tex_blocks[i])
# side effects: `label{eq1}` as verbatim, but this is mostly a
# problem for doconce documentation and can be rephrased...
# fix latex constructions that do not work with sphinx math
commands = [r'\begin{equation}',
r'\end{equation}',
r'\begin{equation*}',
r'\end{equation*}',
r'\begin{eqnarray}',
r'\end{eqnarray}',
r'\begin{eqnarray*}',
r'\end{eqnarray*}',
r'\begin{align}',
r'\end{align}',
r'\begin{align*}',
r'\end{align*}',
r'\begin{multline}',
r'\end{multline}',
r'\begin{multline*}',
r'\end{multline*}',
r'\begin{split}',
r'\end{split}',
r'\begin{gather}',
r'\end{gather}',
r'\begin{gather*}',
r'\end{gather*}',
r'\[',
r'\]',
# some common abbreviations (newcommands):
r'\beqan',
r'\eeqan',
r'\beqa',
r'\eeqa',
r'\balnn',
r'\ealnn',
r'\baln',
r'\ealn',
r'\beq',
r'\eeq', # the simplest, contained in others, must come last!
]
for command in commands:
tex_blocks[i] = tex_blocks[i].replace(command, '')
tex_blocks[i] = re.sub('&\s*=\s*&', ' &= ', tex_blocks[i])
# provide warnings for problematic environments
#if '{alignat' in tex_blocks[i]:
# errwarn('*** warning: the "alignat" environment will give errors in Sphinx:\n' + tex_blocks[i] + '\n')
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'rst')
for key in defs:
language = defs[key]
if not language in legal_pygments_languages:
raise TypeError('%s is not a legal Pygments language '\
'(lexer) in line with:\n %s' % \
(language, defs_line))
#filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
# '\n.. code-block:: %s\n\n' % defs[key], filestr,
# flags=re.MULTILINE)
cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
filestr, n = cpattern.subn('\n.. code-block:: %s\n\n' % defs[key], filestr)
        errwarn(key + ' ' + str(n))
if n > 0:
errwarn('sphinx: %d subst %s by %s' % (n, key, defs[key]))
# any !bc with/without argument becomes a py (python) block:
#filestr = re.sub(r'^!bc.+\n', '\n.. code-block:: py\n\n', filestr,
# flags=re.MULTILINE)
cpattern = re.compile(r'^!bc.+$', flags=re.MULTILINE)
filestr = cpattern.sub('\n.. code-block:: py\n\n', filestr)
filestr = re.sub(r'^!ec *\n', '\n', filestr, flags=re.MULTILINE)
#filestr = re.sub(r'^!ec\n', '\n', filestr, flags=re.MULTILINE)
#filestr = re.sub(r'^!ec\n', '', filestr, flags=re.MULTILINE)
filestr = re.sub(r'^!bt *\n', '\n.. math::\n\n', filestr,
flags=re.MULTILINE)
filestr = re.sub(r'^!et *\n', '\n\n', filestr,
flags=re.MULTILINE)
return filestr
def sphinx_code_newmathlabels(filestr, format):
# NOTE: THIS FUNCTION IS NOT USED!!!!!!
# In rst syntax, code blocks are typeset with :: (verbatim)
    # followed by indented blocks. This function indents everything
# inside code (or TeX) blocks.
# grab #sphinx code-blocks: cod=python cpp=c++ etc line
# (do this before code is inserted in case verbatim blocks contain
# such specifications for illustration)
m = re.search(r'#\s*[Ss]phinx\s+code-blocks?:(.+?)\n', filestr)
if m:
defs_line = m.group(1)
# turn defs into a dictionary definition:
defs = {}
for definition in defs_line.split():
key, value = definition.split('=')
defs[key] = value
else:
# default mappings:
defs = dict(cod='python', pycod='python', cppcod='c++',
fcod='fortran', ccod='c',
pro='python', pypro='python', cpppro='c++',
fpro='fortran', cpro='c',
sys='console', dat='python')
# (the "python" typesetting is neutral if the text
# does not parse as python)
# First indent all code/tex blocks by 1) extracting all blocks,
    # 2) indenting each block, and 3) inserting the blocks.
# In between, handle the math blocks.
filestr, code_blocks, tex_blocks = remove_code_and_tex(filestr, format)
for i in range(len(code_blocks)):
code_blocks[i] = indent_lines(code_blocks[i], format)
math_labels = []
for i in range(len(tex_blocks)):
tex_blocks[i] = indent_lines(tex_blocks[i], format)
# extract all \label{}s inside tex blocks and typeset them
# with :label: tags
label_regex1 = fix_latex(r'\label\{(.+?)\}', application='match')
label_regex2 = fix_latex( r'label\{(.+?)\}', application='match')
math_labels.extend(re.findall(label_regex1, tex_blocks[i]))
tex_blocks[i] = re.sub(label_regex1,
r' :label: \g<1> ', tex_blocks[i])
# handle also those without \ if there are any:
math_labels.extend(re.findall(label_regex2, tex_blocks[i]))
tex_blocks[i] = re.sub(label_regex2, r' :label: \g<1> ', tex_blocks[i])
# replace all references to equations:
for label in math_labels:
filestr = filestr.replace(':ref:`%s`' % label, ':eq:`%s`' % label)
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, 'rst')
for key in defs:
language = defs[key]
if not language in legal_pygments_languages:
raise TypeError('%s is not a legal Pygments language '\
'(lexer) in line with:\n %s' % \
(language, defs_line))
#filestr = re.sub(r'^!bc\s+%s\s*\n' % key,
# '\n.. code-block:: %s\n\n' % defs[key], filestr,
# flags=re.MULTILINE)
cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
filestr = cpattern.sub('\n.. code-block:: %s\n\n' % defs[key], filestr)
# any !bc with/without argument becomes a py (python) block:
#filestr = re.sub(r'^!bc.+\n', '\n.. code-block:: py\n\n', filestr,
# flags=re.MULTILINE)
cpattern = re.compile(r'^!bc.+$', flags=re.MULTILINE)
filestr = cpattern.sub('\n.. code-block:: py\n\n', filestr)
filestr = re.sub(r'!ec *\n', '\n', filestr)
#filestr = re.sub(r'!ec\n', '\n', filestr)
#filestr = re.sub(r'!ec\n', '', filestr)
filestr = re.sub(r'!bt *\n', '\n.. math::\n :nowrap:\n\n', filestr)
filestr = re.sub(r'!et *\n', '\n\n', filestr)
return filestr
| 41.25
| 170
| 0.531605
|
aac4eabfd085825de699c46b43c72cbf1178a03e
| 7,545
|
py
|
Python
|
scannet/scannet_dataset_sw_rgb.py
|
MatteoPerotto/pointconv
|
204a0d534c4d75e80bde7722c075a78365a64929
|
[
"MIT"
] | 471
|
2019-03-26T02:01:55.000Z
|
2022-03-10T03:09:10.000Z
|
scannet/scannet_dataset_sw_rgb.py
|
MatteoPerotto/pointconv
|
204a0d534c4d75e80bde7722c075a78365a64929
|
[
"MIT"
] | 35
|
2019-03-28T05:28:17.000Z
|
2021-08-19T10:22:47.000Z
|
scannet/scannet_dataset_sw_rgb.py
|
MatteoPerotto/pointconv
|
204a0d534c4d75e80bde7722c075a78365a64929
|
[
"MIT"
] | 115
|
2019-04-21T07:33:00.000Z
|
2022-03-04T07:21:12.000Z
|
""" ScanNet Class From Charles R. Qi, Hao Su.
    Modified to support point-wise evaluation in ScanNet v2.
Author: Wenxuan Wu
Date: July 2018
"""
import pickle
import os
import sys
import numpy as np
class ScannetDatasetWholeScene_evaluation():
    # prepare to give predictions on each point
def __init__(self, root, split='test', num_class = 21, block_points = 8192, with_rgb = True):
self.root = root
self.split = split
self.with_rgb = with_rgb
self.block_points = block_points
self.point_num = []
self.data_filename = os.path.join(self.root, 'scannet_%s_rgb21c_pointid.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
self.scene_points_id = pickle.load(fp)
self.scene_points_num = pickle.load(fp)
if split=='train':
labelweights = np.zeros(num_class)
for seg in self.semantic_labels_list:
self.point_num.append(seg.shape[0])
tmp,_ = np.histogram(seg,range(num_class+1))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
#self.labelweights = 1/np.log(1.2+labelweights)
self.labelweights = np.power(np.amax(labelweights) / labelweights, 1/3.0)
else:
self.labelweights = np.ones(num_class)
for seg in self.semantic_labels_list:
self.point_num.append(seg.shape[0])
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
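        # Example (illustrative): list(self.chunks(list(range(10)), 4)) returns
        # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]; __getitem__ pads the index array
        # to a multiple of block_points before chunking, so chunks come out full.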
def split_data(self, data, idx):
new_data = []
for i in range(len(idx)):
new_data += [np.expand_dims(data[idx[i]], axis = 0)]
return new_data
def nearest_dist(self, block_center, block_center_list):
num_blocks = len(block_center_list)
dist = np.zeros(num_blocks)
for i in range(num_blocks):
dist[i] = np.linalg.norm(block_center_list[i] - block_center, ord = 2) #i->j
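        # pick the closest remaining block centre; np.argsort(dist)[0] is
        # equivalent to np.argmin(dist)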
return np.argsort(dist)[0]
def __getitem__(self, index):
delta = 0.5
if self.with_rgb:
point_set_ini = self.scene_points_list[index]
else:
point_set_ini = self.scene_points_list[index][:, 0:3]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini[:, 0:3],axis=0)
coordmin = np.min(point_set_ini[:, 0:3],axis=0)
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/delta).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/delta).astype(np.int32)
point_sets = []
semantic_segs = []
sample_weights = []
point_idxs = []
block_center = []
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*delta,j*delta,0]
curmax = curmin+[1.5,1.5,coordmax[2]-coordmin[2]]
curchoice = np.sum((point_set_ini[:,0:3]>=(curmin-0.2))*(point_set_ini[:,0:3]<=(curmax+0.2)),axis=1)==3
curchoice_idx = np.where(curchoice)[0]
cur_point_set = point_set_ini[curchoice,:]
cur_semantic_seg = semantic_seg_ini[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set[:,0:3]>=(curmin-0.001))*(cur_point_set[:,0:3]<=(curmax+0.001)),axis=1)==3
sample_weight = self.labelweights[cur_semantic_seg]
sample_weight *= mask # N
point_sets.append(cur_point_set) # 1xNx3/6
semantic_segs.append(cur_semantic_seg) # 1xN
sample_weights.append(sample_weight) # 1xN
point_idxs.append(curchoice_idx) #1xN
block_center.append((curmin[0:2] + curmax[0:2]) / 2.0)
# merge small blocks
num_blocks = len(point_sets)
block_idx = 0
while block_idx < num_blocks:
if point_sets[block_idx].shape[0] > 4096:
block_idx += 1
continue
small_block_data = point_sets[block_idx].copy()
small_block_seg = semantic_segs[block_idx].copy()
small_block_smpw = sample_weights[block_idx].copy()
small_block_idxs = point_idxs[block_idx].copy()
small_block_center = block_center[block_idx].copy()
point_sets.pop(block_idx)
semantic_segs.pop(block_idx)
sample_weights.pop(block_idx)
point_idxs.pop(block_idx)
block_center.pop(block_idx)
nearest_block_idx = self.nearest_dist(small_block_center, block_center)
point_sets[nearest_block_idx] = np.concatenate((point_sets[nearest_block_idx], small_block_data), axis = 0)
semantic_segs[nearest_block_idx] = np.concatenate((semantic_segs[nearest_block_idx], small_block_seg), axis = 0)
sample_weights[nearest_block_idx] = np.concatenate((sample_weights[nearest_block_idx], small_block_smpw), axis = 0)
point_idxs[nearest_block_idx] = np.concatenate((point_idxs[nearest_block_idx], small_block_idxs), axis = 0)
num_blocks = len(point_sets)
#divide large blocks
num_blocks = len(point_sets)
div_blocks = []
div_blocks_seg = []
div_blocks_smpw = []
div_blocks_idxs = []
div_blocks_center = []
for block_idx in range(num_blocks):
cur_num_pts = point_sets[block_idx].shape[0]
point_idx_block = np.array([x for x in range(cur_num_pts)])
if point_idx_block.shape[0]%self.block_points != 0:
makeup_num = self.block_points - point_idx_block.shape[0]%self.block_points
np.random.shuffle(point_idx_block)
point_idx_block = np.concatenate((point_idx_block,point_idx_block[0:makeup_num].copy()))
np.random.shuffle(point_idx_block)
sub_blocks = list(self.chunks(point_idx_block, self.block_points))
div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
div_blocks_seg += self.split_data(semantic_segs[block_idx], sub_blocks)
div_blocks_smpw += self.split_data(sample_weights[block_idx], sub_blocks)
div_blocks_idxs += self.split_data(point_idxs[block_idx], sub_blocks)
div_blocks_center += [block_center[block_idx].copy() for i in range(len(sub_blocks))]
div_blocks = np.concatenate(tuple(div_blocks),axis=0)
div_blocks_seg = np.concatenate(tuple(div_blocks_seg),axis=0)
div_blocks_smpw = np.concatenate(tuple(div_blocks_smpw),axis=0)
div_blocks_idxs = np.concatenate(tuple(div_blocks_idxs),axis=0)
return div_blocks, div_blocks_seg, div_blocks_smpw, div_blocks_idxs
def __len__(self):
return len(self.scene_points_list)
if __name__=='__main__':
import pdb
pdb.set_trace()
#d = ScannetDataset(root = '../data/scannet/scannet_v2', split='test', npoints=8192)
d = ScannetDatasetWholeScene_evaluation(root = './data_v2')
labelweights_vox = np.zeros(21)
for ii in range(len(d)):
print(ii)
ps,seg,smpw, idxs = d[ii]
print(labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32)))
exit()
| 45.727273
| 127
| 0.625845
|
6e2f4ebf65ca2f4cfc420698f04a1867c7461f1b
| 2,454
|
py
|
Python
|
setup.py
|
ColinDuquesnoy/pyqode.rst
|
b94078193e597633db6220e498d738f4abbb8f52
|
[
"MIT"
] | null | null | null |
setup.py
|
ColinDuquesnoy/pyqode.rst
|
b94078193e597633db6220e498d738f4abbb8f52
|
[
"MIT"
] | null | null | null |
setup.py
|
ColinDuquesnoy/pyqode.rst
|
b94078193e597633db6220e498d738f4abbb8f52
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for pyqode.rst
"""
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from pyqode.rst import __version__
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def run_tests(self):
        # import here, because outside the test command the eggs aren't loaded
import pytest
if self.pytest_args:
self.pytest_args = self.pytest_args.replace('"', '').split(' ')
else:
self.pytest_args = []
print('running test command: py.test "%s"' % ' '.join(
self.pytest_args))
errno = pytest.main(self.pytest_args)
sys.exit(errno)
cmdclass = {'test': PyTest}
DESCRIPTION = 'Adds RestructuredText support to pyqode.core'
def readme():
if 'bdist_deb' in sys.argv:
return DESCRIPTION
return str(open('README.rst').read())
setup(
name='pyqode.rst',
namespace_packages=['pyqode'],
version=__version__,
packages=[p for p in find_packages() if 'test' not in p],
keywords=['RestructuredText', 'editor', 'pyqode'],
url='https://github.com/pyQode/pyqode.rst',
license='MIT',
author='Colin Duquesnoy',
author_email='colin.duquesnoy@gmail.com',
description=DESCRIPTION,
long_description=readme(),
install_requires=['pyqode.core', 'restructuredtext_lint', 'docutils'],
tests_require=['pytest-cov', 'pytest-pep8', 'pytest'],
cmdclass=cmdclass,
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: X11 Applications :: Qt',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Widget Sets',
'Topic :: Text Editors :: Integrated Development Environments (IDE)'])
| 33.162162
| 79
| 0.632029
|
be46cca8d2c9bcb02807280deaf0c07ea11d7174
| 3,361
|
py
|
Python
|
tests/parsers/custom_destinations.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
tests/parsers/custom_destinations.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
tests/parsers/custom_destinations.py
|
roshanmaskey/plaso
|
637856f578eb4bc81f62b97d7f483f69314e7f47
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the .customDestinations-ms file parser."""
import unittest
from plaso.lib import definitions
from plaso.parsers import custom_destinations
from tests.parsers import test_lib
class CustomDestinationsParserTest(test_lib.ParserTestCase):
"""Tests for the .customDestinations-ms file parser."""
def testParse(self):
"""Tests the Parse function."""
parser = custom_destinations.CustomDestinationsParser()
storage_writer = self._ParseFile(
['5afe4de1b92fc382.customDestinations-ms'], parser)
self.assertEqual(storage_writer.number_of_events, 126)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# The shortcut last accessed event.
expected_event_values = {
'data_type': 'windows:lnk:link',
'date_time': '2009-07-13 23:55:56.2481035',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[121], expected_event_values)
# The shortcut creation event.
expected_event_values = {
'data_type': 'windows:lnk:link',
'date_time': '2009-07-13 23:55:56.2481035',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[122], expected_event_values)
# The shortcut last modification event.
expected_event_values = {
'command_line_arguments': (
'{DE3895CB-077B-4C38-B6E3-F3DE1E0D84FC} %systemroot%\\system32\\'
'control.exe /name Microsoft.Display'),
'data_type': 'windows:lnk:link',
'date_time': '2009-07-14 01:39:11.3880000',
'description': '@%systemroot%\\system32\\oobefldr.dll,-1262',
'drive_serial_number': 0x24ba718b,
'drive_type': 3,
'env_var_location': '%SystemRoot%\\system32\\GettingStarted.exe',
'file_attribute_flags': 0x00000020,
'file_size': 11776,
'icon_location': '%systemroot%\\system32\\display.dll',
'link_target': (
'<My Computer> C:\\Windows\\System32\\GettingStarted.exe'),
'local_path': 'C:\\Windows\\System32\\GettingStarted.exe',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION}
self.CheckEventValues(storage_writer, events[123], expected_event_values)
# A shell item event.
expected_event_values = {
'data_type': 'windows:shell_item:file_entry',
'date_time': '2010-11-10 07:41:04',
'file_reference': '2331-1',
'long_name': 'System32',
'name': 'System32',
'origin': '5afe4de1b92fc382.customDestinations-ms',
'shell_item_path': '<My Computer> C:\\Windows\\System32'}
self.CheckEventValues(storage_writer, events[18], expected_event_values)
# A distributed link tracking event.
expected_event_values = {
'data_type': 'windows:distributed_link_tracking:creation',
'date_time': '2010-11-10 19:08:32.6562596',
'mac_address': '00:0c:29:03:1e:1e',
'origin': '5afe4de1b92fc382.customDestinations-ms',
'uuid': 'e9215b24-ecfd-11df-a81c-000c29031e1e'}
self.CheckEventValues(storage_writer, events[12], expected_event_values)
if __name__ == '__main__':
unittest.main()
| 37.344444
| 77
| 0.689081
|
f9a70f74983463938c5ec58e048cbbb9eaeca1e5
| 2,666
|
py
|
Python
|
pymunk-pymunk-4.0.0/build/lib/pymunk/_chipmunk_ffi.py
|
adrienchevrier/FollE_ML
|
56d39bcbafa33d336255d21379c4fa3b7268fe20
|
[
"MIT"
] | 1
|
2017-04-05T13:00:51.000Z
|
2017-04-05T13:00:51.000Z
|
pymunk-pymunk-4.0.0/pymunk/_chipmunk_ffi.py
|
adrienchevrier/FollE_ML
|
56d39bcbafa33d336255d21379c4fa3b7268fe20
|
[
"MIT"
] | null | null | null |
pymunk-pymunk-4.0.0/pymunk/_chipmunk_ffi.py
|
adrienchevrier/FollE_ML
|
56d39bcbafa33d336255d21379c4fa3b7268fe20
|
[
"MIT"
] | null | null | null |
"""
Contains low level wrapper around the chipmunk_ffi methods exported by
chipmunk_ffi.h as those methods are not automatically generated by the wrapper
generator.
You usually don't need to use this module directly; instead, use the high level binding in pymunk.
"""
from ctypes import *
from .vec2d import Vec2d
from ._chipmunk import cpBool, cpFloat
from ._chipmunk import cpBB, cpBody, cpVect, cpArbiter, cpShape, cpConstraint, cpGrooveJoint
from ._chipmunk import chipmunk_lib
from ._chipmunk import function_pointer
cpBodyIsSleeping = (function_pointer(cpBool, POINTER(cpBody))).in_dll(chipmunk_lib, '_cpBodyIsSleeping')
cpBodyIsRogue = (function_pointer(cpBool, POINTER(cpBody))).in_dll(chipmunk_lib, '_cpBodyIsRogue')
cpBodyIsStatic = (function_pointer(cpBool, POINTER(cpBody))).in_dll(chipmunk_lib, '_cpBodyIsStatic')
cpBodyLocal2World = (function_pointer(cpVect, POINTER(cpBody), cpVect)).in_dll(chipmunk_lib, '_cpBodyLocal2World')
cpBodyWorld2Local = (function_pointer(cpVect, POINTER(cpBody), cpVect)).in_dll(chipmunk_lib, '_cpBodyWorld2Local')
cpArbiterGetShapes = (function_pointer(None, POINTER(cpArbiter), POINTER(POINTER(cpShape)), POINTER(POINTER(cpShape)))).in_dll(chipmunk_lib, '_cpArbiterGetShapes')
cpArbiterIsFirstContact = (function_pointer(cpBool, POINTER(cpArbiter))).in_dll(chipmunk_lib, '_cpArbiterIsFirstContact')
cpConstraintGetImpulse = (function_pointer(cpFloat, POINTER(cpConstraint))).in_dll(chipmunk_lib, '_cpConstraintGetImpulse')
cpBBNew = (function_pointer(cpBB, cpFloat, cpFloat, cpFloat, cpFloat)).in_dll(chipmunk_lib, '_cpBBNew')
cpBBIntersects = (function_pointer(c_int, cpBB, cpBB)).in_dll(chipmunk_lib, '_cpBBIntersects')
cpBBContainsBB = (function_pointer(c_int, cpBB, cpBB)).in_dll(chipmunk_lib, '_cpBBContainsBB')
cpBBContainsVect = (function_pointer(c_int, cpBB, cpVect)).in_dll(chipmunk_lib, '_cpBBContainsVect')
cpBBMerge = (function_pointer(cpBB, cpBB, cpBB)).in_dll(chipmunk_lib, '_cpBBMerge')
cpBBExpand = (function_pointer(cpBB, cpBB, cpVect)).in_dll(chipmunk_lib, '_cpBBExpand')
cpShapeGetBB = (function_pointer(cpBB, POINTER(cpShape))).in_dll(chipmunk_lib, '_cpShapeGetBB')
#TODO: Use cpBBClampVect from Chipmunk when it's available.
cpfclamp = (function_pointer(cpFloat, cpFloat, cpFloat, cpFloat)).in_dll(chipmunk_lib, '_cpfclamp')
def cpBBClampVect(bb, v):
return Vec2d(cpfclamp(v.x, bb.l, bb.r), cpfclamp(v.y, bb.b, bb.t))
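# Illustrative sketch (not part of the original module): how the wrapped bounding-box
# helpers might be combined once chipmunk_lib has loaded. The bounds and the point
# below are hypothetical example values.
def _bb_clamp_example():
    bb = cpBBNew(0.0, 0.0, 10.0, 10.0)      # bounding box spanning (0,0)-(10,10)
    outside = Vec2d(15.0, 5.0)              # a point to the right of the box
    return cpBBClampVect(bb, outside)       # expected: Vec2d(10.0, 5.0), clamped to the box edge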
#unused for now..
#cpGrooveJointGetGrooveA = (function_pointer(cpVect, POINTER(cpGrooveJoint))).in_dll(chipmunk_lib, '_cpGrooveJointGetGrooveA')
#cpGrooveJointGetGrooveB = (function_pointer(cpVect, POINTER(cpGrooveJoint))).in_dll(chipmunk_lib, '_cpGrooveJointGetGrooveA')
| 60.590909
| 163
| 0.803826
|
32065c60f833b07b4cc1f1a03b9708c6471069e3
| 7,544
|
py
|
Python
|
tests/components/deconz/test_services.py
|
pawelka/home-assistant
|
df6c7b97f5a6df962114ef1a07676ef11d8757cd
|
[
"Apache-2.0"
] | null | null | null |
tests/components/deconz/test_services.py
|
pawelka/home-assistant
|
df6c7b97f5a6df962114ef1a07676ef11d8757cd
|
[
"Apache-2.0"
] | null | null | null |
tests/components/deconz/test_services.py
|
pawelka/home-assistant
|
df6c7b97f5a6df962114ef1a07676ef11d8757cd
|
[
"Apache-2.0"
] | null | null | null |
"""deCONZ service tests."""
from copy import deepcopy
from asynctest import Mock, patch
import pytest
import voluptuous as vol
from homeassistant.components import deconz
from homeassistant.components.deconz.const import CONF_BRIDGEID
from .test_gateway import (
BRIDGEID,
DECONZ_WEB_REQUEST,
ENTRY_CONFIG,
setup_deconz_integration,
)
GROUP = {
"1": {
"id": "Group 1 id",
"name": "Group 1 name",
"type": "LightGroup",
"state": {},
"action": {},
"scenes": [{"id": "1", "name": "Scene 1"}],
"lights": ["1"],
}
}
LIGHT = {
"1": {
"id": "Light 1 id",
"name": "Light 1 name",
"state": {"reachable": True},
"type": "Light",
"uniqueid": "00:00:00:00:00:00:00:01-00",
}
}
SENSOR = {
"1": {
"id": "Sensor 1 id",
"name": "Sensor 1 name",
"type": "ZHALightLevel",
"state": {"lightlevel": 30000, "dark": False},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
}
}
async def test_service_setup(hass):
"""Verify service setup works."""
assert deconz.services.DECONZ_SERVICES not in hass.data
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await deconz.services.async_setup_services(hass)
assert hass.data[deconz.services.DECONZ_SERVICES] is True
assert async_register.call_count == 2
async def test_service_setup_already_registered(hass):
"""Make sure that services are only registered once."""
hass.data[deconz.services.DECONZ_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_register", return_value=Mock(True)
) as async_register:
await deconz.services.async_setup_services(hass)
async_register.assert_not_called()
async def test_service_unload(hass):
"""Verify service unload works."""
hass.data[deconz.services.DECONZ_SERVICES] = True
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await deconz.services.async_unload_services(hass)
assert hass.data[deconz.services.DECONZ_SERVICES] is False
assert async_remove.call_count == 2
async def test_service_unload_not_registered(hass):
"""Make sure that services can only be unloaded once."""
with patch(
"homeassistant.core.ServiceRegistry.async_remove", return_value=Mock(True)
) as async_remove:
await deconz.services.async_unload_services(hass)
assert deconz.services.DECONZ_SERVICES not in hass.data
async_remove.assert_not_called()
async def test_configure_service_with_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {
deconz.services.SERVICE_FIELD: "/light/2",
CONF_BRIDGEID: BRIDGEID,
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with(
"put", "/light/2", json={"on": True, "attr1": 10, "attr2": 20}
)
async def test_configure_service_with_entity(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
gateway.deconz_ids["light.test"] = "/light/1"
data = {
deconz.services.SERVICE_ENTITY: "light.test",
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with(
"put", "/light/1", json={"on": True, "attr1": 10, "attr2": 20}
)
async def test_configure_service_with_entity_and_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
gateway.deconz_ids["light.test"] = "/light/1"
data = {
deconz.services.SERVICE_ENTITY: "light.test",
deconz.services.SERVICE_FIELD: "/state",
deconz.services.SERVICE_DATA: {"on": True, "attr1": 10, "attr2": 20},
}
with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_called_with(
"put", "/light/1/state", json={"on": True, "attr1": 10, "attr2": 20}
)
async def test_configure_service_with_faulty_field(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {deconz.services.SERVICE_FIELD: "light/2", deconz.services.SERVICE_DATA: {}}
with pytest.raises(vol.Invalid):
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
async def test_configure_service_with_faulty_entity(hass):
"""Test that service invokes pydeconz with the correct path and data."""
data = deepcopy(DECONZ_WEB_REQUEST)
await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {
deconz.services.SERVICE_ENTITY: "light.nonexisting",
deconz.services.SERVICE_DATA: {},
}
with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_CONFIGURE_DEVICE, service_data=data
)
await hass.async_block_till_done()
put_state.assert_not_called()
async def test_service_refresh_devices(hass):
"""Test that service can refresh devices."""
data = deepcopy(DECONZ_WEB_REQUEST)
gateway = await setup_deconz_integration(
hass, ENTRY_CONFIG, options={}, get_state_response=data
)
data = {CONF_BRIDGEID: BRIDGEID}
with patch(
"pydeconz.DeconzSession.request",
return_value={"groups": GROUP, "lights": LIGHT, "sensors": SENSOR},
):
await hass.services.async_call(
deconz.DOMAIN, deconz.services.SERVICE_DEVICE_REFRESH, service_data=data
)
await hass.async_block_till_done()
assert gateway.deconz_ids == {
"light.group_1_name": "/groups/1",
"light.light_1_name": "/lights/1",
"scene.group_1_name_scene_1": "/groups/1/scenes/1",
"sensor.sensor_1_name": "/sensors/1",
}
| 33.678571
| 87
| 0.666357
|
150ac11554b97f7b967f92ba46148391bdd2abfd
| 856
|
py
|
Python
|
thingsboard_gateway/connectors/opcua/opcua_converter.py
|
eric-erki/thingsboard-gateway
|
8522f6d63903475f4212b5af843c7ad98c5b855f
|
[
"Apache-2.0"
] | 1
|
2020-04-30T01:37:44.000Z
|
2020-04-30T01:37:44.000Z
|
thingsboard_gateway/connectors/opcua/opcua_converter.py
|
xinge-ok/thingsboard-gateway
|
6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/connectors/opcua/opcua_converter.py
|
xinge-ok/thingsboard-gateway
|
6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from abc import ABC, abstractmethod
from thingsboard_gateway.connectors.converter import Converter, ABC, abstractmethod, log
class OpcUaConverter(ABC):
@abstractmethod
def convert(self, config, data):
pass
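# Minimal sketch (not part of the original file) of what a concrete converter could look
# like. The config keys ("deviceName", "key") and the telemetry layout below are
# assumptions made purely for illustration, not the gateway's actual contract.
class ExampleOpcUaConverter(OpcUaConverter):
    def convert(self, config, data):
        # Wrap a single raw OPC-UA value into a device/telemetry style dictionary.
        return {
            "deviceName": config.get("deviceName", "OPC-UA Device"),
            "telemetry": [{"values": {config.get("key", "value"): data}}],
        }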
| 35.666667
| 88
| 0.728972
|
6207600e91571f4fe045ad84dae965f73274179c
| 12,018
|
py
|
Python
|
scantools/.history/scanbak/scanbackup_20210224144804.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2019-05-19T11:54:26.000Z
|
2019-05-19T12:03:49.000Z
|
scantools/.history/scanbak/scanbackup_20210224144804.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 1
|
2020-11-27T07:55:15.000Z
|
2020-11-27T07:55:15.000Z
|
scantools/.history/scanbak/scanbackup_20210224144804.py
|
Octoberr/swm0920
|
8f05a6b91fc205960edd57f9076facec04f49a1a
|
[
"Apache-2.0"
] | 2
|
2021-09-06T18:06:12.000Z
|
2021-12-31T07:44:43.000Z
|
"""
1、文件到这里
一份给ES 一份给自己
新增ES旧索引入库
在继承原有功能的基础上
重构备份程序,按照数据内的
国家-当前时间(年-月-日)
如果按照数据内的时间的话也会面临和按国家端口备份的问题
不用再分端口了
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
def __init__(self) -> None:
# super().__init__()
        # All data arrives here first
self._input = None
        # A copy of every file goes here; this folder is not managed by this program
self._esinput = None
        # Data to be backed up goes here; everything that needs processing lives in this folder
self._dbu_input = None
self._databack = None
self._zipdata: Path = None
self._zip_size = None
        # One backup thread by default; can be changed in the config and applied after a restart
self.backup_thread = 1
self.zip_thread = 1
        # Switch controlling whether files are also copied to the ES input folder
self.copy_esinput_enable = True
self._tmp = Path('./tmp')
self._tmp.mkdir(exist_ok=True)
        # Whether a copy of each file should also go to the old ES index
self._old_esinput = None
self.config_path = Path(r'./config_path.json')
try:
self._init_cpinfo()
except:
raise Exception(
f"初始化配置参数失败,请检查配置文件\nerror:{traceback.format_exc()}")
        # Parameters used below
        # File lock: only one thread may operate on files at a time
self.__file_locker = threading.Lock()
self.__scan_file_locker = threading.Lock()
self._zipfile_locker = threading.Lock()
        # Zipping can take a long time, so keep a map of folders currently being compressed
self._zip_dealing = {}
        # Pending queues assigned by file suffix; currently only iscan
self.iscan_task_queue = Queue()
self._zip_queue = Queue()
self.iscan_suffix = '.iscan_search'
# try:
# self._restore_existdata()
# except:
# raise Exception(
# "There's something wrong with restoring the environment")
def _init_cpinfo(self):
"""
        Initialize the paths and parameters defined in the config file.
:return:
"""
conf_str = self.config_path.read_text(encoding='utf-8')
conf_dict = json.loads(conf_str)
_input = conf_dict.get('data_input')
if not isinstance(_input, str):
raise Exception("Unknown data_input path")
self._input = Path(_input)
self._input.mkdir(exist_ok=True)
print(
f"Start scan data file, input_file_path:{self._input.as_posix()}")
_esinput = conf_dict.get('es_input')
if not isinstance(_esinput, str):
raise Exception("Unknown es_input path")
self._esinput = Path(_esinput)
self._esinput.mkdir(exist_ok=True)
print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
_dbuinput = conf_dict.get('backup_input')
if not isinstance(_dbuinput, str):
raise Exception("Unkown backup_input path")
self._dbu_input = Path(_dbuinput)
self._dbu_input.mkdir(exist_ok=True)
print(f"Data backup process path:{self._dbu_input.as_posix()}")
_databack = conf_dict.get('databackup')
if not isinstance(_databack, str):
raise Exception("Unknown databackup path")
self._databack = Path(_databack)
self._databack.mkdir(exist_ok=True)
print(f"Data save backup path:{self._databack.as_posix()}")
_zipdata = conf_dict.get('zipdata')
if not isinstance(_zipdata, str):
raise Exception("Unkown zipdata path")
self._zipdata = Path(_zipdata)
self._zipdata.mkdir(exist_ok=True)
print(f"Zipdata save path:{self._zipdata.as_posix()}")
_zip_size = conf_dict.get('zip_size')
if not isinstance(_zip_size, int):
raise Exception("Unknown zip_size type")
        # Convert the unit to bytes (B)
self._zip_size = _zip_size * 1024 * 1024
print(f"Zip data size:{_zip_size}MB")
backupthread = conf_dict.get('backup_thread')
if not isinstance(backupthread, int):
raise Exception("Unknown backupthread type")
self.backup_thread = backupthread
zipthread = conf_dict.get('zipdata_thread')
if not isinstance(zipthread, int):
raise Exception("Unknown zipthread type")
self.zip_thread = zipthread
time_limit = conf_dict.get('time_limit')
if not isinstance(time_limit, int):
raise Exception("Unknown time_limit type")
self._backup_interval_time = time_limit * 24 * 60 * 60
print(f"Zip data time expired after {time_limit} days")
        # Copying to the ES input folder is enabled by default
copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
self.copy_esinput_enable = copy_esinput_enable
        # Copy data for the old ES index
_esinput = conf_dict.get('es_input')
if not isinstance(_esinput, str):
raise Exception("Unknown es_input path")
self._esinput = Path(_esinput)
self._esinput.mkdir(exist_ok=True)
print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
def scan_file(self):
"""
        Scan the input files.
        Classify them by suffix and put them into the pending queue.
:return:
"""
while True:
try:
for file in self._input.iterdir():
name = file.name
                    # Move everything into the tmp directory
tmpname = self._tmp / name
# file.replace(tmpname)
with self.__scan_file_locker:
                        # Move this file into the tmp folder as fast as possible, otherwise the next scan would pick it up again and cause problems
shutil.move(file.as_posix(), tmpname.as_posix())
try:
if tmpname.suffix == self.iscan_suffix:
                            # Only perform a copy here
# source: Path = self._input / name
target: Path = self._dbu_input / name
copyfile(tmpname.as_posix(), target.as_posix())
self.iscan_task_queue.put(target)
print(
f"Backup iscan_search data, filename:{file.as_posix()}")
except:
print(
f'Scan list file error, err:{traceback.format_exc()}')
finally:
                        # In any case, finally hand the file off to the ES input folder
if self.copy_esinput_enable:
outname = self._esinput / name
tmpname.replace(outname)
                        # Normally no file should be left here, but accidents happen, so check and delete any leftover file
if tmpname.exists():
tmpname.unlink()
except:
print(f'Scan task file error, err:{traceback.format_exc()}')
continue
finally:
print("There is no scan data to back up")
time.sleep(0.5)
def _process_file(self, tmpfile: Path):
"""
        Open the file, read the data it contains to extract the needed info, then close it.
"""
with tmpfile.open('r', encoding='utf-8') as fp:
j_text = fp.read()
d_text = json.loads(j_text)
# scan_time = d_text.get('time')
# if scan_time is None:
# scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
country = d_text.get('geoinfo').get('country').get('code')
except:
country = 'UNKNOWN'
return country
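    # Illustrative input shape (an assumption inferred from the fields read above):
    # {"time": "...", "geoinfo": {"country": {"code": "US", ...}, ...}, ...}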
def back_file(self):
"""
        Start backing up data: first save the files into a folder;
        once that folder reaches a certain size it is compressed and archived.
:return:
"""
got = False
while True:
got = False
if self.iscan_task_queue.empty():
time.sleep(0.5)
continue
try:
bfile: Path = self.iscan_task_queue.get()
got = True
name = bfile.name
                # Now read the country directly from the file contents
country = self._process_file(bfile)
                # Before each save, check whether the file name needs adjusting and a compressed backup is due
date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
                # Only creating the directory needs the lock; elsewhere a plain move is enough
with self.__file_locker:
                    # Move the file over first
dirname: Path = self._databack / country / date_now_str
dirname.mkdir(exist_ok=True, parents=True)
                    # Name of the file after the move
filename = dirname / name
                    # Move it into the target folder
bfile.replace(filename)
print(
f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
except:
print(f'Backup file error:\n{traceback.format_exc()}')
finally:
if got:
self.iscan_task_queue.task_done()
def scan_zip_file(self):
"""
        Zipping thread: scans once a day
        and compresses earlier days' folders into the zip folder.
"""
while True:
try:
date_now = datetime.datetime.now().date()
for country in self._databack.iterdir():
if not country.exists():
continue
country_name = country.name
for d_file in country.iterdir():
if self._zip_dealing.__contains__(d_file):
continue
d_name = d_file.name
d_date = datetime.datetime.strptime(
d_name, "%Y-%m-%d").date()
                        # If the data is from before today, compress it
if date_now > d_date:
self._zip_queue.put((d_file, country_name))
with self._zipfile_locker:
                                # Add to the in-progress map
self._zip_dealing[d_file] = 1
print(
f"A file wait to zip, filename:{d_file.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
print("There is no scan data to zip")
time.sleep(3600)
def process_zip_file(self):
"""
        Compress folders dated before today.
"""
got = False
zipfile_path = None
while True:
got = False
if self._zip_queue.empty():
time.sleep(1)
continue
try:
zipfile_path, country = self._zip_queue.get()
got = True
zip_store_file = self._zipdata / country
zip_store_file.mkdir(exist_ok=True)
zipname = zip_store_file/f"{zipfile_path.name}.zip"
print(
f"Start zipfile, filename:{zipname.as_posix()}")
                # Add a write restriction
with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
for file in zipfile_path.iterdir():
write.write(file.as_posix())
                        # Delete the file after it has been written to the archive
file.unlink()
write.close()
                # Finally remove the folder that has just been zipped
zipfile_path.rmdir()
print(
f"Store zipfile success, filename:{zipname.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
if got:
self._zip_queue.task_done()
with self._zipfile_locker:
self._zip_dealing.pop(zipfile_path, None)
def start(self):
"""
        Start the program.
:return:
"""
thread1 = threading.Thread(target=self.scan_file, name="scanfile")
thread1.start()
for i in range(self.backup_thread):
t = threading.Thread(target=self.back_file, name=f"backfile{i}")
t.start()
thread2 = threading.Thread(
target=self.scan_zip_file, name=f"scan_zipfile")
thread2.start()
for j in range(self.zip_thread):
tz = threading.Thread(
target=self.process_zip_file, name=f"zipfile{j}")
tz.start()
if __name__ == "__main__":
scup = ScanBackUP()
scup.start()
| 36.865031
| 93
| 0.525628
|
1b045ae3f3bd1daaecd3461015ade90edea9354c
| 5,476
|
py
|
Python
|
obsolete files/linux_int/python_int.py
|
uwacits4419/DSR
|
208fcb37a4b5b2e10e7f96585d1eb10a932457a8
|
[
"Unlicense"
] | null | null | null |
obsolete files/linux_int/python_int.py
|
uwacits4419/DSR
|
208fcb37a4b5b2e10e7f96585d1eb10a932457a8
|
[
"Unlicense"
] | null | null | null |
obsolete files/linux_int/python_int.py
|
uwacits4419/DSR
|
208fcb37a4b5b2e10e7f96585d1eb10a932457a8
|
[
"Unlicense"
] | 4
|
2020-02-20T08:54:10.000Z
|
2022-03-29T07:45:12.000Z
|
#!/usr/bin/env python
import socket
import threading
import Packet
import Route
import PacketBuffer
import time
buffer = None
defaultSocket = None
rreqList= []
HOST = "localhost"
DSR_PORT = 60000
ACT_PORT = 61000
PROM_PORT = 62000
PROTOCOL_VALUE = 60000
EXPIRE_REQ_TIMER = 30
#def testFunction() :
# rawSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
# rawSocket.bind(("lo", 0x0003))
#
# i = 10
#
# buffer = PacketBuffer()
#
# while i > 0 :
# pkt = rawSocket.recvfrom(65565)
#
# print pkt
#
# packet = Packet(pkt)
#
# buffer.add(packet)
#
# print "Added Packet to buffer: " + str(packet.ip_dst)
#
# i = i - 1
#
# print i
#
# #print buffer
# #print buffer.qMap['74.125.237.130']
# list1 = buffer.qMap.keys()
# print list1
#
# route = Route("8.8.8.8", "10.211.55.1", "eth0")
# route.add()
# buffer.release('8.8.8.8', rawSocket)
#
#
#testingThread = threading.Thread(target=testFunction)
#testingThread.start()
#--------------------------------------------------------------------------------------------------------------
def sendRouteRequestForDestination(destination, socket) :
#check if destination is in the list
for index, value in enumerate(rreqList) :
if destination == rreqList[index][0] :
#print "DEF: destination " + destination + " is in the list"
return None
#do some things
print "ROUT_T: Sending RRQ for: " + str(destination) + " on UDP " + str(DSR_PORT)
socket.sendto(destination, (HOST, DSR_PORT))
#add the destination to a list of sent requests
curTime = time.time()
rreqList.append( (destination, curTime) )
#find the expired request - ie older than EXPIRE_REQ_TIMER
for index, value in enumerate(rreqList) :
if curTime - rreqList[index][1] > EXPIRE_REQ_TIMER :
print "ROUT_T: Expiring request to destination " + rreqList[index][0]
del rreqList[index]
def receiveRouteAction() :
receiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
receiveSocket.bind((HOST, ACT_PORT))
while True :
data = receiveSocket.recvfrom(65535)
print "ROUT_T: Recieved Route Action on UDP " + str(ACT_PORT) + ": (" + str(data) + ")"
#do something with the data
#data = action-destination-gw
#e.g. "add-192.168.1.0-10.211.55.1"
#e.g. "del-192.168.1.0-10.211.55.1"
print "Rec Route Act: data: " + str(data)
destination = "192.168.1.0"
gateway = "10.211.55.1"
action = "add"
if action == "add" :
receiveRouteAdd(destination, gateway)
elif action == "del" :
receiveRouteDel(destination, gateway)
def receiveRouteAdd(destination, gateway) :
print "ROUT_T: Adding Route: dst: " + destination + " gw: " + gateway
newRoute = Route.Route(destination, gateway, "eth0")
newRoute.add()
buffer.release(destination, defaultSocket)
#remove the destination from the list
def receiveRouteDel(destination, gateway) :
print "ROUT_T: Deleting Route: dst: " + destination + " gw: " + gateway
oldRoute = Route.Route(destination, gateway, "eth0")
oldRoute.delete()
#sets up the socket for default packets and listens for them.
def defaultPacketInterception() :
#open up a raw socket to the loopback interface.
global defaultSocket
defaultSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
defaultSocket.bind(("lo:0", 0x0003))
#open a socket to the routing daemon
routingSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#init out buffer
global buffer
buffer = PacketBuffer.PacketBuffer()
#start receiving packets
while True :
#get a packet from the socket (blocks waiting)
pkt = defaultSocket.recvfrom(65565)
#parse the packet into it's nice fields
packet = Packet.Packet(pkt)
#get the destination of the packet
destination = packet.ip_dst
#send route request message to the routing daemon
if packet.ip_dst != "10.211.55.4" and packet.ip_src != "127.0.0.1" and packet.ip_dst != "127.0.0.1":
sendRouteRequestForDestination(destination, routingSocket)
print "DEFAULT: Rec Pkt: src: " + packet.ip_src + " dst: " + packet.ip_dst
#add the packet to our buffer
buffer.add(packet)
#end while
def listenForPromisc() :
promiscSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
promiscSocket.bind(("eth0", 0x0003))
#open a socket to the daemon
daemonSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True :
#process each packet and look for a specific protocol value
pkt = promiscSocket.recvfrom(65565)
packet = Packet.Packet(pkt)
#print "PROMISC: Rec Pkt: src: " + str(packet.ip_src) + " dst: " + str(packet.ip_dst) + " proto: " + str(packet.ip_protocol)
if(packet.ip_protocol == 17) :
#print "PROMISC: Rec Pkt: src: " + str(packet.ip_src) + " dst: " + str(packet.ip_dst) + " proto: " + str(packet.ip_protocol)
print "PROMIS: UDP: dst_port: " + str(packet.udp_dst_port) + " UDP Data: " + str(packet.udp_data)
if(packet.udp_dst_port == PROTOCOL_VALUE) :
#this is interesting to us.
print "PROMISC: Heard DSR packet: src: " + str(packet.ip_src) + " dst: " + str(packet.ip_dst) + " data: " + packet.udp_data
promiscSocket.sendto(packet.packet, (HOST, PROM_PORT))
# Main Code
defaultThread = threading.Thread(target=defaultPacketInterception)
defaultThread.start()
routeAddThread = threading.Thread(target=receiveRouteAction)
routeAddThread.start()
promiscThread = threading.Thread(target=listenForPromisc)
promiscThread.start()
| 24.230088
| 127
| 0.68718
|
2c7e57f8f91a6d651a732a5ccf3736d4db5716e5
| 3,644
|
py
|
Python
|
task_manager/task_manager/settings.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | 1
|
2022-03-03T10:16:14.000Z
|
2022-03-03T10:16:14.000Z
|
task_manager/task_manager/settings.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
task_manager/task_manager/settings.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-b%$bj+j%4ify!a1cbaxy&_8czti1()@(@*e^)m!dmv49f-9-3s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'127.0.0.1',
]
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = ()
PROJECT_APPS = (
'task_manager.tasks',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + PROJECT_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'task_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'task_manager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGGING = {
'version': 1,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
}
}
}
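# Note (added for clarity, not part of the original settings): because of the
# 'require_debug_true' filter above, the 'django.db.backends' logger only echoes SQL
# queries to the console while DEBUG=True, which is handy in development and silent in
# production-style runs.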
| 24.13245
| 91
| 0.656696
|
25bde7ce2b0617c1e396504acf7db319db900202
| 3,040
|
py
|
Python
|
scenes/flip03_gen.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | null | null | null |
scenes/flip03_gen.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | null | null | null |
scenes/flip03_gen.py
|
BrianKmdy/mantaflow
|
273d6c148374316e4d04cae4f46fed56a630e183
|
[
"Apache-2.0"
] | null | null | null |
#
# Flip surface mesh creation scene, note - saves & loads (ie plays back) meshes in UI mode
#
from manta import *
import os
mantaMsg( "\nNote - this scene reads in particle data generated by, e.g., flip02_surface.py (set saveParts=True there). It does not perform any fluid simulation, only generate a nicer surface.\n" )
# surfacing method: 0=simple union, 1=averaged, 2=improved averaging
method = 2
# === surface generation parameters ===
# input file
partfile = 'flipParts_%04d.uni'
startFrame = 1
endFrame = 1000
interval = 1
# how much larger?
upres = 2.0
# output file name so that blender can directly read it...
meshfile = 'fluidsurface_final_%04d.bobj.gz'
# resolution for level set / output mesh
refName = ("ref_" + (partfile % 0) )
gs = getUniFileSize(refName)
if gs.x<=0:
mantaMsg("Warning! File '%s' not found, cannot determine size...\n"%refName, 0)
exit(1)
gs.x = int(gs.x*upres)
gs.y = int(gs.y*upres)
gs.z = int(gs.z*upres)
s = Solver(name='main', gridSize = gs , dim=3)
# kernel radius for surface creation
radiusFactor = 2.5
# triangle scale relative to cell size
#scale = 0.5
# counters
outCnt = 0
frame = startFrame
# prepare grids and particles
flags = s.create(FlagGrid)
phi = s.create(LevelsetGrid)
pp = s.create(BasicParticleSystem)
mesh = s.create(Mesh)
# acceleration data for particle nbs
pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)
# scene setup
flags.initDomain(boundaryWidth=0)
#if 1 and (GUI):
# gui = Gui()
# gui.show()
# #gui.pause()
# main loop
while frame < endFrame:
meshfileCurr = meshfile % outCnt
mantaMsg( "Frame %d " % frame )
phi.setBound(value=0., boundaryWidth=1)
# already exists?
if (os.path.isfile( meshfileCurr )):
mesh.load( meshfileCurr )
else:
# generate mesh; first read input sim particles
if (os.path.isfile( partfile % frame )):
pp.load( partfile % frame );
# create surface
gridParticleIndex( parts=pp , flags=flags, indexSys=pindex, index=gpi )
if method==0:
unionParticleLevelset( pp, pindex, flags, gpi, phi , radiusFactor ) # faster, but not as smooth
elif method==1:
averagedParticleLevelset( pp, pindex, flags, gpi, phi , radiusFactor , 1, 1 )
elif method==2:
improvedParticleLevelset( pp, pindex, flags, gpi, phi , radiusFactor , 1, 1 , 0.4, 3.5)
else:
print("Error - unknown method"); exit(1)
phi.setBound(value=0., boundaryWidth=1)
phi.createMesh(mesh)
# beautify mesh, too slow right now!
#subdivideMesh(mesh=mesh, minAngle=0.01, minLength=scale, maxLength=3*scale, cutTubes=False)
# perform smoothing
#for iters in range(10):
#smoothMesh(mesh=mesh, strength=1e-3, steps=10)
#subdivideMesh(mesh=mesh, minAngle=0.01, minLength=scale, maxLength=3*scale, cutTubes=True)
# write output file:
mesh.save( meshfileCurr )
else:
# stop playback for UI, reset
#if (GUI):
# gui.pause()
# outCnt = 0
#gui.screenshot( 'flip03_%04d.png' % outCnt );
outCnt += 1
frame += interval
s.step()
| 25.546218
| 197
| 0.682566
|
425b910abdd035b89689130c2aa3847998c6e0bf
| 974
|
py
|
Python
|
entity/cards/LETL_030H/LETL_030P4.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 102
|
2021-10-20T09:06:39.000Z
|
2022-03-28T13:35:11.000Z
|
entity/cards/LETL_030H/LETL_030P4.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 98
|
2021-10-19T16:13:27.000Z
|
2022-03-27T13:27:49.000Z
|
entity/cards/LETL_030H/LETL_030P4.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 55
|
2021-10-19T03:56:50.000Z
|
2022-03-25T08:25:26.000Z
|
# -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from hearthstone.enums import GameTag, SpellSchool
from entity.spell_entity import SpellEntity
class LETL_030P4(SpellEntity):
"""
    Hellfire (地狱火)
    Deal 8 damage to all enemies. Fire combo: deal 8 more damage.
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 8
self.combo_damage = 8
self.range = 7
def play(self, game, hero, target):
        # AoE or random effects do not need a specified target
power = game.get_spell_power(self.spell_school, hero.own)
action_list = game.get_action_list(hero.own)
action_list.sort()
combo = game.can_combo(self, SpellSchool.FIRE, hero.own)
combo_damage = (self.combo_damage + power) * combo
hero_list = game.get_hero_list(not hero.own())
for h in hero_list:
h.got_damage(game, (self.damage + combo_damage + power) * self.damage_advantage[self.lettuce_role][
h.lettuce_role])
| 31.419355
| 111
| 0.645791
|
6c2952d486f4337d507c7473de91420df60b5e56
| 6,170
|
py
|
Python
|
tests/scripts/thread-cert/Cert_5_1_13_RouterReset.py
|
JeffreyHayes/openthread
|
0dde90edcb0aafef5f4b5fc3d30e19f756e27ee4
|
[
"BSD-3-Clause"
] | 2
|
2018-08-24T05:14:27.000Z
|
2018-09-25T03:02:36.000Z
|
tests/scripts/thread-cert/Cert_5_1_13_RouterReset.py
|
JeffreyHayes/openthread
|
0dde90edcb0aafef5f4b5fc3d30e19f756e27ee4
|
[
"BSD-3-Clause"
] | 4
|
2016-09-09T17:10:04.000Z
|
2016-09-29T05:18:09.000Z
|
tests/scripts/thread-cert/Cert_5_1_13_RouterReset.py
|
turon/openthread
|
20145cb42fca90d791c4918475db28b7b91290d6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import mle
import node
LEADER = 1
ROUTER = 2
class Cert_5_1_13_RouterReset(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 3):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self._setUpRouter()
def _setUpRouter(self):
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
def tearDown(self):
for n in list(self.nodes.values()):
n.stop()
n.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
rloc16 = self.nodes[ROUTER].get_addr16()
self.nodes[ROUTER].reset()
self._setUpRouter()
self.simulator.go(5)
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER].get_addr16(), rloc16)
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER)
# 1 - All
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
msg = leader_messages.next_mle_message_of_one_of_command_types(
mle.CommandType.LINK_ACCEPT_AND_REQUEST,
mle.CommandType.LINK_ACCEPT,
)
self.assertIsNotNone(msg)
# 2 - Router1 / Leader
msg = router1_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::1")
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Route64)
msg = leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::1")
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Route64)
# 4 - Router1
msg = router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.TlvRequest)
tlv_request = msg.get_mle_message_tlv(mle.TlvRequest)
self.assertIn(mle.TlvType.ROUTE64, tlv_request.tlvs)
self.assertIn(mle.TlvType.ADDRESS16, tlv_request.tlvs)
# 5 - Leader
msg = leader_messages.next_mle_message(mle.CommandType.LINK_ACCEPT)
msg.assertSentToNode(self.nodes[ROUTER])
msg.assertMleMessageContainsTlv(mle.SourceAddress)
msg.assertMleMessageContainsTlv(mle.LeaderData)
msg.assertMleMessageContainsTlv(mle.Response)
msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
msg.assertMleMessageContainsOptionalTlv(mle.MleFrameCounter)
msg.assertMleMessageContainsTlv(mle.Address16)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.Route64)
msg.assertMleMessageContainsOptionalTlv(mle.Challenge)
if __name__ == '__main__':
unittest.main()
| 40.064935
| 78
| 0.718963
|
af3d578205b51bec84e5cea7a331648456ce0fa7
| 17,082
|
py
|
Python
|
snmp_cmds/commands.py
|
alextremblay/snmpcmd
|
77245e6a2dc761397f2cbed0d1b41b8c279a4c82
|
[
"MIT"
] | 7
|
2019-06-12T08:43:54.000Z
|
2021-12-16T15:12:38.000Z
|
snmp_cmds/commands.py
|
alextremblay/snmpcmd
|
77245e6a2dc761397f2cbed0d1b41b8c279a4c82
|
[
"MIT"
] | 3
|
2019-10-22T08:28:33.000Z
|
2021-10-04T21:15:42.000Z
|
snmp_cmds/commands.py
|
alextremblay/snmpcmd
|
77245e6a2dc761397f2cbed0d1b41b8c279a4c82
|
[
"MIT"
] | 5
|
2019-08-13T13:44:09.000Z
|
2021-12-16T15:10:24.000Z
|
"""
This module provides the individual SNMP commands for use in one-off
situations, or situations where you would need to make a single SNMP request
to many devices. If you plan on making multiple calls to the same device, you
might want to check out api.py instead.
"""
# Standard Library imports
import csv
from subprocess import run, PIPE
# imports for type-hinting purposes
from typing import Optional, List, Tuple, Dict
from typing import Union as OneOf
# Internal module imports
from .exceptions import SNMPTableError, SNMPWriteError
from .helpers import validate_ip_address, check_for_timeout, \
handle_unknown_error
def snmpget(ipaddress: str, oid: str, community: str = 'public',
port: OneOf[int, str] = 161, timeout: OneOf[int, str] = 3
) -> Optional[str]:
"""
Wrapper around Net-SNMP's ``snmpget`` command
Runs the equivalent of
'``snmpget -Oqv -Pe -t {timeout} -r 0 -v 2c -c {community} {host} {oid}``'
    and parses the result. If the response from the server is a
``No Such Object`` or a ``No Such Instance`` error, this function returns
:obj:`None`. Otherwise, it returns the value retrieved from the server
:param community: the snmpv2 community string
:param ipaddress: the IP address of the target SNMP server
:param oid: the Object IDentifier to request from the target SNMP server
:param port: the port on which SNMP is running on the target server
:param timeout: the number of seconds to wait for a response from the
SNMP server
:return: the value stored at that OID on the target SNMP server if
successful, :obj:`None` otherwise
:raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
fails to respond
:raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
IP address supplied is not valid or cannot be resolved
:raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
Net-SNMP command produces an unknown or unhandled error
"""
ipaddress = validate_ip_address(ipaddress)
host = '{}:{}'.format(ipaddress, port)
cmdargs = [
'snmpget', '-Oqv', '-Pe', '-t', str(timeout), '-r', '0', '-v', '2c',
'-c', community, host, oid]
cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
# Handle any errors that came up
    if cmd.returncode != 0:
check_for_timeout(cmd.stderr, host)
# if previous check didn't generate an Error, this handler will be
# called as a sort of catch-all
handle_unknown_error(' '.join(cmdargs), cmd.stderr)
# Process results
else:
# subprocess returns stdout from completed command as a single bytes
# string. We'll convert it into a regular python string for easier
# handling
cmdoutput = cmd.stdout.decode('utf-8')
# Check for no such instance
if 'No Such Instance' in cmdoutput:
return None
else:
return cmdoutput
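# Example usage (illustrative only; the address, community string and OID below are
# placeholders, not values from this project):
#
#     sys_name = snmpget('192.0.2.10', '1.3.6.1.2.1.1.5.0', community='public')
#     print(sys_name)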
def snmpgetsome(ipaddress: str, oids: List[str], community: str = 'public',
port: OneOf[str, int] = 161, timeout: OneOf[int, str] = 3
) -> List[Tuple[str, str]]:
"""
    Wrapper around Net-SNMP's ``snmpget`` command for multiple OIDs.

Runs Net-SNMP's 'snmpget' command on a list of OIDs, and returns a list
of tuples of the form (oid, result).
:param community: the snmpv2 community string
:param ipaddress: the IP address of the target SNMP server
:param oids: a list of Object IDentifiers to request from the target
SNMP server
:param port: the port on which SNMP is running on the target server
:param timeout: the number of seconds to wait for a response from the
SNMP server
:return: a list of tuples of the form (oid, result)
:raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP
server
fails to respond
:raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
IP address supplied is not valid or cannot be resolved
:raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
Net-SNMP command produces an unknown or unhandled error
"""
ipaddress = validate_ip_address(ipaddress)
host = '{}:{}'.format(ipaddress, port)
if type(oids) is not list:
oids = [oids]
cmdargs = [
'snmpget', '-OQfn', '-Pe', '-t', str(timeout), '-r', '0', '-v', '2c',
'-c', community, host, *oids
]
cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
# Handle any errors that came up
    if cmd.returncode != 0:
check_for_timeout(cmd.stderr, host)
# if previous check didn't generate an Error, this handler will be
# called as a sort of catch-all
handle_unknown_error(' '.join(cmdargs), cmd.stderr)
# Process results
else:
cmdoutput = cmd.stdout.splitlines()
result = []
for line in cmdoutput:
# subprocess returns stdout from completed command as a bytes
# string. We'll convert each line into a regular python string,
# and separate the OID portion from the result portion
item = line.decode('utf-8').split(' = ', 1)
# there is an unfortunate bug / oversight in the net-snmp
# commands where newline characters within SNMP variables
# returned from a server are not escaped before printing. if you
# do an snmpget for 3 oids you'll get 3 lines of output printed (
# which is what we want), but if one of those 3 variables
# contains, say, 2 new-line chars in it, you'll get 5 lines of
# output :(
# our quick-n-dirty solution is to check each line to see if it
# "looks" like an oid-value pair (meaning it has a " = " in it). if
# it doesn't, we'll assume that this line is part of the last pair's
# value and tack it on accordingly. When we run the .split()
# function above, if the string did not have a " = " to split on,
# the function returns a list with one item: the original string
if len(item) > 1: # This is a normal oid-value pair
# Check for no such instance
if 'No Such Instance' in item[1]:
item[1] = None
# add it to the results
result.append(tuple(item))
else: # This line is a continuation of the last oid-value pair
# make a copy of the last oid-value pair for us to edit
prev_item = list(result[-1])
# append the new line to it
prev_item[1] += '\n' + item[0]
# replace the original with the edited copy
result[-1] = tuple(prev_item)
return result
def snmpwalk(ipaddress: str, oid: str, community: str = 'public',
port: OneOf[str, int] = 161, timeout: int = 3
) -> List[Tuple[str, str]]:
"""
    Runs Net-SNMP's 'snmpwalk' command on a given OID, and returns a list
    of tuples of the form (oid, result).
:param community: the snmpv2 community string
:param ipaddress: the IP address of the target SNMP server
:param oid: the Object IDentifier to request from the target SNMP server
:param port: the port on which SNMP is running on the target server
:param timeout: the number of seconds to wait for a response from the
SNMP server
:return: a list of tuples of the form (oid, result)
:raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
fails to respond
:raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
IP address supplied is not valid or cannot be resolved
:raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
Net-SNMP command produces an unknown or unhandled error
"""
ipaddress = validate_ip_address(ipaddress)
host = '{}:{}'.format(ipaddress, port)
cmdargs = [
'snmpwalk', '-OQfn', '-Pe', '-t', str(timeout), '-r', '0', '-v', '2c',
'-c', community, host, oid
]
cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
# Handle any errors that came up
    if cmd.returncode != 0:
check_for_timeout(cmd.stderr, host)
# if previous check didn't generate an Error, this handler will be
# called as a sort of catch-all
handle_unknown_error(' '.join(cmdargs), cmd.stderr)
# Process results
else:
cmdoutput = cmd.stdout.splitlines()
result = []
for line in cmdoutput:
# subprocess returns stdout from completed command as a bytes
# string. We'll convert each line into a regular python string,
# and separate the OID portion from the result portion
item = line.decode('utf-8').split(' = ', 1)
# there is an unfortunate bug / oversight in the net-snmp
# commands where newline characters within SNMP variables
# returned from a server are not escaped before printing. if you
# do an snmpget for 3 oids you'll get 3 lines of output printed (
# which is what we want), but if one of those 3 variables
# contains, say, 2 new-line chars in it, you'll get 5 lines of
# output :(
# our quick-n-dirty solution is to check each line to see if it
# "looks" like an oid-value pair (meaning it has a " = " in it). if
# it doesn't, we'll assume that this line is part of the last pair's
# value and tack it on accordingly. When we run the .split()
# function above, if the string did not have a " = " to split on,
# the function returns a list with one item: the original string
if len(item) > 1: # This is a normal oid-value pair
# Check for no such instance
if 'No Such Instance' in item[1]:
item[1] = None
# add it to the results
result.append(tuple(item))
else: # This line is a continuation of the last oid-value pair
# make a copy of the last oid-value pair for us to edit
prev_item = list(result[-1])
# append the new line to it
prev_item[1] += '\n' + item[0]
# replace the original with the edited copy
result[-1] = tuple(prev_item)
return result
def snmptable(ipaddress: str, oid: str, community: str = 'public',
port: OneOf[str, int] = 161, timeout: int = 3,
sortkey: Optional[str] = None
) -> OneOf[List[Dict[str, str]], Dict[str, Dict[str, str]]]:
"""
Runs Net-SNMP's 'snmptable' command on a given OID, converts the results
into a list of dictionaries, and optionally sorts the list by a given key.
:param community: the snmpv2 community string
:param ipaddress: the IP address of the target SNMP server
:param oid: the Object IDentifier to request from the target SNMP server
:param port: the port on which SNMP is running on the target server
:param sortkey: the key within each dict upon which to sort the list of
results
:param timeout: the number of seconds to wait for a response from the
SNMP server
:return: a list of dicts, one for each row of the table. The keys of the
dicts correspond to the column names of the table.
:raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
fails to respond
:raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
IP address supplied is not valid or cannot be resolved
:raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
Net-SNMP command produces an unknown or unhandled error
:raises `~snmp_cmds.exceptions.SNMPTableError`: if the requested OID
is not a valid table
"""
# We want our delimiter to be something that would never show up in the
# wild, so we'll use the non-printable ascii character RS (Record Separator)
delimiter = '\x1E'
ipaddress = validate_ip_address(ipaddress)
host = '{}:{}'.format(ipaddress, port)
cmdargs = [
'snmptable', '-m', 'ALL', '-Pe', '-t', str(timeout), '-r', '0', '-v',
'2c', '-Cif', delimiter, '-c', community, host, oid
]
cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
# Handle any errors that came up
    if cmd.returncode != 0:
check_for_timeout(cmd.stderr, host)
if b'Was that a table?' in cmd.stderr:
raise SNMPTableError(oid)
else:
handle_unknown_error(' '.join(cmdargs), cmd.stderr)
# Process results
else:
# subprocess returns stdout from completed command as a single bytes
# string. we'll split it into a list of bytes strings, and convert
# each into a standard python string which the csv reader can handle
cmdoutput = cmd.stdout.splitlines()
cmdoutput = [item.decode('utf-8') for item in cmdoutput]
# Strip the table name and the blank line following it from the output,
# so all that remains is the table itself
cmdoutput = cmdoutput[2:]
table_parser = csv.DictReader(cmdoutput, delimiter=delimiter)
results = [element for element in table_parser]
if sortkey:
results.sort(key=lambda i: i[sortkey])
return results
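# Example usage (illustrative only; the address, community string, table OID and column
# names below are placeholders): walk the standard IF-MIB interface table and sort it.
#
#     rows = snmptable('192.0.2.10', 'IF-MIB::ifTable', community='public',
#                      sortkey='ifIndex')
#     for row in rows:
#         print(row['ifDescr'], row['ifOperStatus'])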
def snmpset(ipaddress: str, oid: str, value_type: str, value: str,
community: str = 'private', port: OneOf[int, str] = 161,
timeout: OneOf[int, str] = 3
) -> str:
"""
Runs Net-SNMP's 'snmpset' command on a given OID, and returns the result
if successful.
:param community: the snmpv2 community string
:param ipaddress: the IP address of the target SNMP server
:param oid: the Object IDentifier to request from the target SNMP server
:param value_type: the SNMP value type to set. can be one of
(i/u/t/a/o/s/x/d/b)
:param value: the value to set
:param port: the port on which SNMP is running on the target server
:param timeout: the number of seconds to wait for a response from the
SNMP server
:return: the value that was set on the SNMP target
:raises `~snmp_cmds.exceptions.SNMPTimeout`: if the target SNMP server
fails to respond
:raises `~snmp_cmds.exceptions.SNMPInvalidAddress`: if the hostname or
IP address supplied is not valid or cannot be resolved
:raises `~snmp_cmds.exceptions.SNMPError`: if the underlying
Net-SNMP command produces an unknown or unhandled error
:raises `~snmp_cmds.exceptions.SNMPWriteError`: if the snmpset
operation failed for a known reason. The message associated with this
error should always contain information regarding the reason for the
error.
"""
ipaddress = validate_ip_address(ipaddress)
host = '{}:{}'.format(ipaddress, port)
# snmpset type checking
valid_types = ['i', 'u', 't', 'a', 'o', 's', 'x', 'd', 'b']
for type_code in valid_types:
if value_type == type_code:
# the type argument is one of snmpset's accepted type codes
break
else:
# type didn't match any type code accepted by snmpset
raise SNMPWriteError(
"The type value you specified does not match one of the accepted "
"type codes.\nValid type codes are one of ({})"
.format("|".join(valid_types))
)
cmdargs = [
'snmpset', '-OQfn', '-t', str(timeout), '-r', '0', '-v', '2c', '-c',
community, host, oid, value_type, value]
cmd = run(cmdargs, stdout=PIPE, stderr=PIPE)
# Handle any errors that came up
    if cmd.returncode != 0:
check_for_timeout(cmd.stderr, host)
# Check for write errors
for errormsg in [b'Bad variable type', b'Value out of range']:
if errormsg in cmd.stderr:
raise SNMPWriteError(cmd.stderr)
# if previous check didn't generate an Error, this handler will be
# called as a sort of catch-all
handle_unknown_error(' '.join(cmdargs), cmd.stderr)
# Process results
else:
# subprocess returns stdout from completed command as a single bytes
# string. We'll convert it into a regular python string for easier
# handling
cmdoutput = cmd.stdout.decode('utf-8')
# Check for no such instance
if 'No Such Instance' in cmdoutput:
raise SNMPWriteError(
"We've received a 'No Such Instance' error from the server. "
"This can be caused by a number of things. Most likely, your "
"SNMP write community is incorrect, the OID you specified "
"doesn't exist on your target device, or your target device "
"doesn't support writing to this writeable field")
else:
return cmdoutput
| 43.8
| 80
| 0.629552
|
3574d5ffd6b021573f43735e5a66e0680ecc0450
| 1,406
|
py
|
Python
|
active_campaign_api/resources/custom_field_value.py
|
selfdecode/active-campaign
|
1f085b304d9014ebba69d85164c9a5036b32f501
|
[
"MIT"
] | null | null | null |
active_campaign_api/resources/custom_field_value.py
|
selfdecode/active-campaign
|
1f085b304d9014ebba69d85164c9a5036b32f501
|
[
"MIT"
] | null | null | null |
active_campaign_api/resources/custom_field_value.py
|
selfdecode/active-campaign
|
1f085b304d9014ebba69d85164c9a5036b32f501
|
[
"MIT"
] | 1
|
2021-06-03T12:00:25.000Z
|
2021-06-03T12:00:25.000Z
|
"""FieldValue resource for Active Campaign """
import typing
from ..base_resource import Resource
class CustomFieldValue(Resource):
"""An ActiveCampaign CustomFieldValue. Allows to:
- Create a CustomFieldValue
- Update a CustomFieldValue
- Delete a CustomFieldValue
"""
def __init__(
self,
contact_id: str,
custom_field_id: str,
value,
**kwargs: typing.Dict,
) -> None:
"""Initialize the CustomFieldValue.
Args:
contact_id: int
ID of the contact whose field value you're updating
custom_field_id: int
ID of the custom field whose value you're updating for the contact
value: str
Value for the field that you're updating. For multi-select options
this needs to be in the format of ||option1||option2||
"""
super().__init__(**kwargs)
self.contact_id = contact_id
self.field_id = custom_field_id
self.value = value
@staticmethod
def resource_name() -> str:
"""Get the resource name."""
return "fieldValues"
@staticmethod
def map_field_name_to_attribute() -> typing.Dict:
"""Serialize the CustomFieldValue."""
return {
"contact": "contact_id",
"field": "field_id",
"value": "value",
}
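# Illustrative usage (not part of the original module); the IDs and the multi-select
# value format below are examples only:
#
#     fv = CustomFieldValue(contact_id="101", custom_field_id="7",
#                           value="||option1||option2||")
#     CustomFieldValue.resource_name()                             # -> "fieldValues"
#     CustomFieldValue.map_field_name_to_attribute()["contact"]    # -> "contact_id"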
| 27.038462
| 82
| 0.586771
|
3bd64aaa47c817711c5080a5604ea0617bc90397
| 16,760
|
py
|
Python
|
test/functional/test_framework/mininode.py
|
altecoin-altc/altecoin
|
913678a298eb7eb9157cd2be184b927cfbb429a9
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
altecoin-altc/altecoin
|
913678a298eb7eb9157cd2be184b927cfbb429a9
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/mininode.py
|
altecoin-altc/altecoin
|
913678a298eb7eb9157cd2be184b927cfbb429a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
import logging
import socket
import struct
import sys
import threading
from test_framework.messages import *
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
#b"getsporks": msg_generic,
}
MAGIC_BYTES = {
"mainnet": b"\x90\xc4\xfd\xe9", # mainnet
"testnet3": b"\x45\x76\x65\xba", # testnet3
"regtest": b"\xa1\xcf\x7e\xac", # regtest
}
class P2PConnection(asyncore.dispatcher):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# All P2PConnections must be created before starting the NetworkThread.
# assert that the network thread is not running.
assert not network_thread_running()
super().__init__(map=mininode_socket_map)
def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
self.state = "connecting"
self.network = net
self.disconnect = False
logger.info('Connecting to Altecoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
def peer_disconnect(self):
# Connection could have already been closed by other end.
if self.state == "connected":
self.disconnect_node()
# Connection and disconnection methods
def handle_connect(self):
"""asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.on_open()
def handle_close(self):
"""asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.on_close()
def disconnect_node(self):
"""Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
self.disconnect = True
# Socket read methods
def handle_read(self):
"""asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in MESSAGEMAP:
#raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
logger.debug("Command: '" + str(command) + "'")
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def writable(self):
"""asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
"""asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
if (len(self.sendbuf) == 0 and not pushbuf):
try:
sent = self.send(tmsg)
self.sendbuf = tmsg[sent:]
except BlockingIOError:
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
# Class utility methods
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs):
super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
self.verack_received = True
def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.state != "connected"
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
def __init__(self):
super().__init__(name="NetworkThread")
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[obj.handle_close() for obj in disconnected]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
def network_thread_start():
"""Start the network thread."""
# Only one network thread may run at a time
assert not network_thread_running()
NetworkThread().start()
def network_thread_running():
"""Return whether the network thread is running."""
return any([thread.name == "NetworkThread" for thread in threading.enumerate()])
def network_thread_join(timeout=10):
"""Wait timeout seconds for the network thread to terminate.
Throw if the network thread doesn't terminate in timeout seconds."""
network_threads = [thread for thread in threading.enumerate() if thread.name == "NetworkThread"]
assert len(network_threads) <= 1
for thread in network_threads:
thread.join(timeout)
assert not thread.is_alive()
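# Hedged usage sketch (not part of the original file): the typical flow a
# functional test follows with this module. It requires a reachable regtest
# node at the hypothetical address below, so it is illustrative rather than
# standalone-runnable.
def example_handshake(addr='127.0.0.1', port=12345):
    p2p = P2PInterface()
    p2p.peer_connect(addr, port)   # queue the connection and version message
    network_thread_start()         # run the asyncore loop in the background
    p2p.wait_for_verack()          # version/verack handshake completed
    p2p.sync_with_ping()           # round-trip to flush pending messages
    p2p.peer_disconnect()
    network_thread_join()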
| 37.662921
| 182
| 0.633532
|
d4a31b03db3a9f9ef49f6feda829dfbcfcd1e122
| 147
|
py
|
Python
|
controls/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 11
|
2021-01-23T01:09:54.000Z
|
2021-01-25T07:16:30.000Z
|
controls/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 7
|
2021-04-06T18:19:10.000Z
|
2021-09-22T19:45:03.000Z
|
controls/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 3
|
2021-01-23T18:55:32.000Z
|
2021-02-16T17:47:59.000Z
|
from django.contrib import admin
from controls.models import FinancialYear, Period
admin.site.register(FinancialYear)
admin.site.register(Period)
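# Hedged equivalent (not part of the original file): the same registration can
# be written with Django's decorator form when a custom ModelAdmin is wanted.
# The FinancialYearAdmin class below is hypothetical.
#
# @admin.register(FinancialYear)
# class FinancialYearAdmin(admin.ModelAdmin):
#     pass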
| 24.5
| 49
| 0.843537
|
66bfe87e28747d7fcd3b0fc483a7904bee57dd56
| 1,061
|
py
|
Python
|
pymagnitude/third_party/allennlp/tests/modules/attention/bilinear_attention_test.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 1,520
|
2018-03-01T13:37:49.000Z
|
2022-03-25T11:40:20.000Z
|
pymagnitude/third_party/allennlp/tests/modules/attention/bilinear_attention_test.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 87
|
2018-03-03T15:12:50.000Z
|
2022-02-21T15:24:12.000Z
|
pymagnitude/third_party/allennlp/tests/modules/attention/bilinear_attention_test.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 121
|
2018-03-03T08:40:53.000Z
|
2022-03-16T05:19:38.000Z
|
# pylint: disable=no-self-use,invalid-name,protected-access
from __future__ import absolute_import
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from allennlp.common import Params
from allennlp.modules.attention import BilinearAttention
from allennlp.common.testing import AllenNlpTestCase
class TestBilinearAttention(AllenNlpTestCase):
def test_forward_does_a_bilinear_product(self):
params = Params({
u'vector_dim': 2,
u'matrix_dim': 2,
u'normalize': False,
})
bilinear = BilinearAttention.from_params(params)
bilinear._weight_matrix = Parameter(torch.FloatTensor([[-.3, .5], [2.0, -1.0]]))
bilinear._bias = Parameter(torch.FloatTensor([.1]))
a_vectors = torch.FloatTensor([[1, 1]])
b_vectors = torch.FloatTensor([[[1, 0], [0, 1]]])
result = bilinear(a_vectors, b_vectors).detach().numpy()
assert result.shape == (1, 2)
assert_almost_equal(result, [[1.8, -.4]])
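# Hedged cross-check (not part of the original test): the expected values can
# be reproduced by hand as x^T W y + bias for each row of the matrix input.
import numpy as np
W = np.array([[-.3, .5], [2.0, -1.0]])
x = np.array([1.0, 1.0])
ys = np.array([[1.0, 0.0], [0.0, 1.0]])
print(x @ W @ ys.T + 0.1)  # -> [ 1.8 -0.4]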
| 39.296296
| 88
| 0.67295
|
6e3863db078bdb288990742523586afc47cd118f
| 6,725
|
py
|
Python
|
configs/_base_/models/cascade_mask_rcnn_r50_fpn_fashion_spinet.py
|
jireh-father/mmdetection
|
b797e4d5b81c5a3d7d868ee2dc9aa27dbab23e7d
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/models/cascade_mask_rcnn_r50_fpn_fashion_spinet.py
|
jireh-father/mmdetection
|
b797e4d5b81c5a3d7d868ee2dc9aa27dbab23e7d
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/models/cascade_mask_rcnn_r50_fpn_fashion_spinet.py
|
jireh-father/mmdetection
|
b797e4d5b81c5a3d7d868ee2dc9aa27dbab23e7d
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='CascadeRCNN',
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=21,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=21,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=21,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=21,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
# rcnn=dict(
# # score_thr=0.6
# score_thr=0.6,
# nms=dict(type='nms', iou_threshold=0.5),
# max_per_img=100,
# mask_thr_binary=0.5)
rcnn=dict(
# score_thr=0.6
score_thr=0.6,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)
# rcnn=dict(
# # score_thr=0.05
# score_thr=0.5,
# nms=dict(type='nms', iou_threshold=0.3),
# max_per_img=100,
# mask_thr_binary=0.45)
)
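# Hedged usage sketch (not part of the original config): in mmdetection, a
# _base_ model file like this one is pulled in by a top-level config via the
# _base_ mechanism. The companion dataset/schedule/runtime paths below are
# typical defaults and are assumptions here, not taken from this repository.
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn_fashion_spinet.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py',
]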
| 32.645631
| 79
| 0.472862
|
2f1f79dd25fc9a0632afc18a1a954f100d2266b9
| 504
|
py
|
Python
|
angr/angr/procedures/definitions/glibc.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
angr/angr/procedures/definitions/glibc.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
angr/angr/procedures/definitions/glibc.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
from . import SimLibrary
from .. import SIM_PROCEDURES as P
libc = SimLibrary()
libc.set_library_names('libc.so.6', 'libc.so.0', 'libc.so')
libc.add_all_from_dict(P['libc'])
libc.add_all_from_dict(P['posix'])
libc.add_all_from_dict(P['glibc'])
libc.add_alias('abort', '__assert_fail', '__stack_chk_fail')
libc.add_alias('memcpy', 'memmove', 'bcopy')
libc.set_non_returning('exit_group', 'exit', 'abort', 'pthread_exit', '__assert_fail',
'longjmp', 'siglongjmp', '__longjmp_chk', '__siglongjmp_chk')
| 38.769231
| 86
| 0.736111
|
f9e4b3a1e3737dee017683d53c9cd59ba1ecf4dd
| 6,696
|
py
|
Python
|
src/datasets/voc2012.py
|
paulwarkentin/tf-ssd-vgg
|
f48e3ccbb8eb092d3cb82a9d90164c7328880477
|
[
"MIT"
] | 5
|
2021-09-26T07:19:42.000Z
|
2022-03-11T23:25:36.000Z
|
ssd/src/datasets/voc2012.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
ssd/src/datasets/voc2012.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
##
## /src/datasets/voc2012.py
##
## Created by Paul Warkentin <paul@warkentin.email> on 13/07/2018.
## Updated by Paul Warkentin <paul@warkentin.email> on 15/07/2018.
##
import argparse
import os
import sys
import tensorflow as tf
import xml.etree.ElementTree
from datetime import datetime
from tqdm import tqdm
__exec_dir = sys.path[0]
while os.path.basename(__exec_dir) != "src":
__exec_dir = os.path.dirname(__exec_dir)
sys.path.insert(0, __exec_dir)
from datasets.common import DatasetSource, DatasetWriter
from utils.common.logging import logging_info
# wget -c "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
class VOC2012(DatasetSource):
"""Handle the Pascal VOC 2012 dataset.
"""
def __init__(self):
"""Initialize the class.
"""
super().__init__("voc2012")
@property
def labels(self):
return [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor"
]
@property
def selected_labels(self):
# select all labels
return self.labels
def convert(self, split_name):
if split_name not in ["train", "validation"]:
raise NotImplementedError("The dataset split '{}' is currently not supported.".format(split_name))
# open the dataset writer
destination_path = os.path.join(self.paths["processed"], split_name)
writer = DatasetWriter(destination_path, compression_type=self.compression_type)
writer.open()
# convert the split
self.__convert_split(split_name, writer)
# close the dataset writer
writer.close()
def __convert_split(self, split_name, writer):
"""Convert a single dataset split.
Arguments:
split_name: Name of the dataset split.
devkit_path: Path to the VOC devkit.
writer: TFRecords file writer.
"""
devkit_path = os.path.join(self.paths["interim"], "VOCtrainval_11-May-2012", "VOCdevkit", "VOC2012")
# load image ids
if split_name == "train":
imageset_filename = "train.txt"
elif split_name == "validation":
imageset_filename = "val.txt"
path = os.path.join(devkit_path, "ImageSets", "Main", imageset_filename)
with open(path, "r") as file:
image_ids = file.readlines()
image_ids = [image_id.strip() for image_id in image_ids]
# process annotations
logging_info("Processing and writing images ...")
# iterate all images
num_images = 0
annotations_path = os.path.join(devkit_path, "Annotations")
images_path = os.path.join(devkit_path, "JPEGImages")
for image_id in tqdm(image_ids, ascii=True):
# meta information
meta_url = "host.robots.ox.ac.uk"
meta_time = datetime.utcnow().isoformat()
meta_requester = "paul@warkentin.email"
# read xml file
xml_path = os.path.join(annotations_path, "{}.xml".format(image_id))
if not os.path.isfile(xml_path):
continue
xml_root = xml.etree.ElementTree.parse(xml_path).getroot()
# check format of image
filename = xml_root.findtext("filename")
image_format = self.get_image_format_of_file(filename)
if image_format is None:
raise NotImplementedError("The format of the file '{}' is currently not supported.".format(filename))
# read image size
image_height = int(xml_root.find("size").findtext("height"))
image_width = int(xml_root.find("size").findtext("width"))
image_channels = int(xml_root.find("size").findtext("depth"))
# read image
image_path = os.path.join(images_path, filename)
with tf.gfile.FastGFile(image_path, "rb") as file:
image_raw_data = file.read()
# read bounding boxes
labels = []
bboxes = [[], [], [], []]
for sobject in xml_root.findall("object"):
label_name = sobject.find("name").text
if label_name not in self.selected_labels:
continue
labels.append(self.selected_labels.index(label_name) + 1)
bndbox = sobject.find("bndbox")
bboxes[0].append(int(bndbox.find("ymin").text) / image_height)
bboxes[1].append(int(bndbox.find("xmin").text) / image_width)
bboxes[2].append(int(bndbox.find("ymax").text) / image_height)
bboxes[3].append(int(bndbox.find("xmax").text) / image_width)
if len(labels) == 0:
continue
# write sample
writer.write_single_example({
"meta/url": writer.bytes_feature(meta_url),
"meta/requester": writer.bytes_feature(meta_requester),
"meta/time": writer.bytes_feature(meta_time),
"image/filename": writer.bytes_feature(filename),
"image/format": writer.bytes_feature(image_format),
"image/encoded": writer.bytes_feature(image_raw_data),
"image/width": writer.int64_feature(image_width),
"image/height": writer.int64_feature(image_height),
"image/channels": writer.int64_feature(image_channels),
"image/shape": writer.int64_feature((image_height, image_width, image_channels)),
"image/object/bbox/label": writer.int64_feature(labels),
"image/object/bbox/y_min": writer.float_feature(bboxes[0]),
"image/object/bbox/x_min": writer.float_feature(bboxes[1]),
"image/object/bbox/y_max": writer.float_feature(bboxes[2]),
"image/object/bbox/x_max": writer.float_feature(bboxes[3])
})
num_images += 1
logging_info("Successfully written {} image(s) to the TFRecords file.".format(num_images))
if __name__ == "__main__":
# initialize arguments
parser = argparse.ArgumentParser(
description = "PASCAL Visual Object Classes 2012"
)
parser.add_argument(
"--extract",
action = "store_true",
help = "Extract all archives."
)
parser.add_argument(
"--convert",
type = str,
choices = ["train", "validation"],
help = "Convert a dataset split to TFRecords. The data must be extracted."
)
parser.add_argument(
"--rgb-mean",
type = str,
choices = ["train", "validation"],
help = "Calculate the RGB mean of a dataset split. The split must be converted to TFRecords."
)
arguments = parser.parse_args()
if not arguments.extract and arguments.convert is None and arguments.rgb_mean is None:
parser.print_help()
exit()
# initialize dataset
dataset = VOC2012()
# extract dataset
if arguments.extract:
logging_info("Extract all archives.")
dataset.extract(overwrite=True)
# convert dataset
if arguments.convert is not None:
logging_info("Convert dataset split '{}'.".format(arguments.convert))
dataset.convert(arguments.convert)
# calculate rgb mean
if arguments.rgb_mean is not None:
logging_info("Calculate RGB mean of the dataset split '{}'.".format(arguments.rgb_mean))
rgb_mean = dataset.calculate_rgb_mean(arguments.rgb_mean)
logging_info("RGB mean is R = {:.2f}, G = {:.2f}, B = {:.2f}.".format(*rgb_mean))
| 29.76
| 105
| 0.705645
|
422cb700c27ca9942124e02ee61a2c5cd2309c79
| 592
|
py
|
Python
|
intro/part07-02_special_characters/src/special_characters.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
intro/part07-02_special_characters/src/special_characters.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
intro/part07-02_special_characters/src/special_characters.py
|
Hannah-Abi/python-pro-21
|
2ce32c4bf118054329d19afdf83c50561be1ada8
|
[
"MIT"
] | null | null | null |
# Write your solution here
import string
def separate_characters(my_string: str):
ascii_ltr = ""
punc_ltr = ""
others = ""
for i in my_string:
        if i in string.ascii_letters:
            ascii_ltr += i
        elif i in string.punctuation:
            punc_ltr += i
        else:
            others += i
part = (ascii_ltr,punc_ltr,others)
return part
if __name__=="__main__":
parts = separate_characters("Olé!!! Hey, are ümläüts wörking?")
print(parts[0])
print(parts[1])
print(parts[2])
| 28.190476
| 73
| 0.616554
|
872d7df74a21fec2db7d53179d7c966524a81327
| 33,084
|
py
|
Python
|
preql/core/evaluate.py
|
manor/Preql
|
f1c294b7893843ef77f3c81410e315c573b5a5ef
|
[
"MIT"
] | null | null | null |
preql/core/evaluate.py
|
manor/Preql
|
f1c294b7893843ef77f3c81410e315c573b5a5ef
|
[
"MIT"
] | null | null | null |
preql/core/evaluate.py
|
manor/Preql
|
f1c294b7893843ef77f3c81410e315c573b5a5ef
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
import logging
from pathlib import Path
from preql.utils import safezip, dataclass, SafeDict, listgen
from preql import settings
from .interp_common import assert_type, exclude_fields, call_builtin_func, is_global_scope, cast_to_python_string, cast_to_python_int
from .exceptions import InsufficientAccessLevel, ReturnSignal, Signal
from . import exceptions as exc
from . import pql_objects as objects
from . import pql_ast as ast
from . import sql
from .parser import Str
from .interp_common import State, dsp, pyvalue_inst, cast_to_python
from .compiler import compile_to_inst, cast_to_instance
from .pql_types import T, Type, Object, Id
from .types_impl import table_params, table_flat_for_insert, flatten_type, pql_repr, kernel_type
MODULES_PATH = Path(__file__).parent.parent / 'modules'
@dsp
def resolve(state: State, struct_def: ast.StructDef):
members = {str(k):resolve(state, v) for k, v in struct_def.members}
struct = T.struct(members)
state.set_var(struct_def.name, struct)
return struct
@dsp
def resolve(state: State, table_def: ast.TableDef):
name = table_def.name
if is_global_scope(state):
name = state.db.qualified_name(name)
temporary = False
else:
name = '__local_' + state.unique_name(name)
temporary = True
t = T.table({}, name=Id(name), temporary=temporary)
with state.use_scope({table_def.name: t}): # For self-reference
elems = {c.name: resolve(state, c) for c in table_def.columns}
t = t(elems)
if table_def.methods:
methods = evaluate(state, table_def.methods)
t.methods.update({m.userfunc.name:m.userfunc for m in methods})
return t
@dsp
def resolve(state: State, col_def: ast.ColumnDef):
coltype = resolve(state, col_def.type)
query = col_def.query
assert not query
if isinstance(coltype, objects.SelectedColumnInstance):
table = coltype.parent.type
if 'name' not in table.options:
# XXX better test for persistence
raise Signal.make(T.TypeError, col_def.type, "Tables provided as relations must be persistent.")
x = T.t_relation[coltype.type](rel={'table': table, 'column': coltype.name, 'key': False})
return x.replace(_nullable=coltype.type._nullable) # inherit is_nullable (TODO: use sumtypes?)
elif coltype <= T.table:
if 'name' not in coltype.options:
# XXX better test for persistence
raise Signal.make(T.TypeError, col_def.type, "Tables provided as relations must be persistent.")
x = T.t_relation[T.t_id.as_nullable()](rel={'table': coltype, 'column': 'id', 'key': True})
return x.replace(_nullable=coltype._nullable) # inherit is_nullable (TODO: use sumtypes?)
return coltype(default=col_def.default)
@dsp
def resolve(state: State, type_: ast.Type):
t = evaluate(state, type_.type_obj)
if isinstance(t, objects.TableInstance):
t = t.type
if not isinstance(t, (Type, objects.SelectedColumnInstance)):
raise Signal.make(T.TypeError, type_, f"Expected type in column definition. Instead got '{t}'")
if type_.nullable:
t = t.as_nullable()
return t
@dsp
def _execute(state: State, struct_def: ast.StructDef):
resolve(state, struct_def)
def db_query(state: State, sql_code, subqueries=None):
try:
return state.db.query(sql_code, subqueries)
except exc.DatabaseQueryError as e:
raise Signal.make(T.DbQueryError, None, e.args[0]) from e
def drop_table(state, table_type):
name ,= table_type.options['name'].parts
code = sql.compile_drop_table(state, name)
return state.db.query(code, {})
@dsp
def _execute(state: State, table_def: ast.TableDefFromExpr):
expr = cast_to_instance(state, table_def.expr)
name = table_def.name
if is_global_scope(state):
temporary = False
else:
name = '__local_' + state.unique_name(name)
temporary = True
t = new_table_from_expr(state, name, expr, table_def.const, temporary)
state.set_var(table_def.name, t)
@dsp
def _execute(state: State, table_def: ast.TableDef):
if table_def.columns and isinstance(table_def.columns[-1], ast.Ellipsis):
ellipsis = table_def.columns.pop()
else:
ellipsis = None
if any(isinstance(c, ast.Ellipsis) for c in table_def.columns):
# XXX why must it? just ensure it appears once
raise Signal.make(T.SyntaxError, table_def, "Ellipsis must appear at the end")
# Create type and a corresponding table in the database
t = resolve(state, table_def)
db_name = t.options['name']
exists = state.db.table_exists(db_name.repr_name)
if exists:
assert not t.options['temporary']
cur_type = state.db.import_table_type(db_name.repr_name, None if ellipsis else set(t.elems) | {'id'})
if ellipsis:
elems_to_add = {Str(n, ellipsis.text_ref): v for n, v in cur_type.elems.items() if n not in t.elems}
            # TODO what if the primary key isn't included?
t = t({**t.elems, **elems_to_add}, **cur_type.options)
# Auto-add id only if it exists already and not defined by user
if 'id' in cur_type.elems: #and 'id' not in t.elems:
# t = t(dict(id=T.t_id, **t.elems), pk=[['id']])
assert cur_type.elems['id'] <= T.primitive, cur_type.elems['id']
t.elems['id'] = T.t_id
for e_name, e1_type in t.elems.items():
if e_name not in cur_type.elems:
raise Signal.make(T.TypeError, table_def, f"Column '{e_name}' defined, but doesn't exist in database.")
# e2_type = cur_type.elems[e_name]
# XXX use can_cast() instead of hardcoding it
# if not (e1_type <= e2_type or (e1_type <= T.t_id and e2_type <= T.int)):
# raise Signal.make(T.TypeError, table_def, f"Cannot cast column '{e_name}' from type '{e2_type}' to '{e1_type}'")
inst = objects.new_table(t, db_name, select_fields=True)
else:
# Auto-add id by default
elems = dict(t.elems)
if 'id' not in elems:
elems = {'id': T.t_id, **elems}
t = t(elems, pk=[['id']])
inst = objects.new_table(t, db_name)
state.set_var(table_def.name, inst)
if not exists:
sql_code = sql.compile_type_def(state, db_name.repr_name, t)
db_query(state, sql_code)
@dsp
def _set_value(state: State, name: ast.Name, value):
state.set_var(name.name, value)
@dsp
def _set_value(state: State, attr: ast.Attr, value):
raise Signal.make(T.NotImplementedError, attr, f"Cannot set attribute for {attr.expr.repr()}")
@dsp
def _execute(state: State, var_def: ast.SetValue):
res = evaluate(state, var_def.value)
# res = apply_database_rw(state, res)
_set_value(state, var_def.name, res)
return res
def _copy_rows(state: State, target_name: ast.Name, source: objects.TableInstance):
if source is objects.EmptyList: # Nothing to add
return objects.null
target = evaluate(state, target_name)
params = dict(table_params(target.type))
for p in params:
if p not in source.type.elems:
raise Signal.make(T.TypeError, source, f"Missing column '{p}' in {source.type}")
primary_keys, columns = table_flat_for_insert(target.type)
source = exclude_fields(state, source, set(primary_keys) & set(source.type.elems))
code = sql.Insert(target.type.options['name'], columns, source.code)
db_query(state, code, source.subqueries)
return objects.null
@dsp
def _execute(state: State, insert_rows: ast.InsertRows):
if not isinstance(insert_rows.name, ast.Name):
# TODO support Attr
raise Signal.make(T.SyntaxError, insert_rows, "L-value must be table name")
rval = evaluate(state, insert_rows.value)
assert_type(rval.type, T.table, insert_rows, '+=')
return _copy_rows(state, insert_rows.name, rval)
@dsp
def _execute(state: State, func_def: ast.FuncDef):
func = func_def.userfunc
assert isinstance(func, objects.UserFunction)
new_params = []
for p in func.params:
if p.type:
t = evaluate(state, p.type)
p = p.replace(type=t)
new_params.append(p)
state.set_var(func.name, func.replace(params=new_params))
@dsp
def _execute(state: State, p: ast.Print):
# TODO Can be done better. Maybe cast to ReprText?
insts = evaluate(state, p.value)
assert isinstance(insts, list)
for inst in insts:
# inst = evaluate(state, p.value)
if inst.type <= T.string:
repr_ = cast_to_python_string(state, inst)
else:
repr_ = inst.repr()
state.display.print(repr_, end=" ")
state.display.print("")
@dsp
def _execute(state: State, p: ast.Assert):
res = cast_to_python(state, p.cond)
if not res:
# TODO pretty print values
if isinstance(p.cond, ast.Compare):
s = (' %s '%p.cond.op).join(str(evaluate(state, a).repr()) for a in p.cond.args)
else:
s = p.cond.repr()
raise Signal.make(T.AssertError, p.cond, f"Assertion failed: {s}")
@dsp
def _execute(state: State, cb: ast.CodeBlock):
for stmt in cb.statements:
execute(state, stmt)
return objects.null
@dsp
def _execute(state: State, i: ast.If):
cond = cast_to_python(state, i.cond)
if cond:
execute(state, i.then)
elif i.else_:
execute(state, i.else_)
@dsp
def _execute(state: State, w: ast.While):
while cast_to_python(state, w.cond):
execute(state, w.do)
@dsp
def _execute(state: State, f: ast.For):
expr = cast_to_python(state, f.iterable)
for i in expr:
with state.use_scope({f.var: objects.from_python(i)}):
execute(state, f.do)
@dsp
def _execute(state: State, t: ast.Try):
try:
execute(state, t.try_)
except Signal as e:
catch_type = localize(state, evaluate(state, t.catch_expr))
if not isinstance(catch_type, Type):
raise Signal.make(T.TypeError, t.catch_expr, f"Catch expected type, got {t.catch_expr.type}")
if e.type <= catch_type:
scope = {t.catch_name: e} if t.catch_name else {}
with state.use_scope(scope):
execute(state, t.catch_block)
else:
raise
def find_module(module_name):
paths = [MODULES_PATH, Path.cwd()]
for path in paths:
module_path = (path / module_name).with_suffix(".pql")
if module_path.exists():
return module_path
    raise Signal.make(T.ImportError, None, f"Cannot find module {module_name!r}")
def import_module(state, r):
module_path = find_module(r.module_path)
assert state is state.interp.state
i = state.interp.clone(use_core=r.use_core)
state.stacktrace.append(r.text_ref)
try:
i.include(module_path)
finally:
assert state.stacktrace[-1] is r.text_ref
state.stacktrace.pop()
# Inherit module db (in case it called connect())
state.db = i.state.db
ns = i.state.ns
assert len(ns) == 1
return objects.Module(r.module_path, ns._ns[0])
@dsp
def _execute(state: State, r: ast.Import):
module = import_module(state, r)
state.set_var(r.as_name or r.module_path, module)
return module
@dsp
def _execute(state: State, r: ast.Return):
value = evaluate(state, r.value)
raise ReturnSignal(value)
@dsp
def _execute(state: State, t: ast.Throw):
e = evaluate(state, t.value)
if isinstance(e, ast.Ast):
raise exc.InsufficientAccessLevel()
assert isinstance(e, Exception), e
raise e
def execute(state, stmt):
if isinstance(stmt, ast.Statement):
return _execute(state, stmt) or objects.null
return evaluate(state, stmt)
# Simplify performs local operations before any db-specific compilation occurs
# Technically not super useful at the moment, but makes conceptual sense.
@dsp
def simplify(state: State, cb: ast.CodeBlock):
# if len(cb.statements) == 1:
# s ,= cb.statements
# return simplify(state, s)
try:
return _execute(state, cb)
except ReturnSignal as r:
# XXX is this correct?
return r.value
except Signal as e:
# Failed to run it, so try to cast as instance
# XXX order should be other way around!
if e.type <= T.CastError:
return compile_to_inst(state, cb)
raise
except InsufficientAccessLevel:
return cb
@dsp
def simplify(state: State, n: ast.Name):
# XXX what happens to caching if this is a global variable?
return state.get_var(n.name)
@dsp
def simplify(state: State, x):
return x
# @dsp
# def simplify(state: State, ls: list):
# return [simplify(state, i) for i in ls]
# @dsp
# def simplify(state: State, d: objects.ParamDict):
# return d.replace(params={name: evaluate(state, v) for name, v in d.params.items()})
# @dsp
# def simplify(state: State, node: ast.Ast):
# # return _simplify_ast(state, node)
# return node
# def _simplify_ast(state, node):
# resolved = {k:simplify(state, v) for k, v in node
# if isinstance(v, types.PqlObject) or isinstance(v, list) and all(isinstance(i, types.PqlObject) for i in v)}
# return node.replace(**resolved)
# TODO isn't this needed somewhere??
# @dsp
# def simplify(state: State, if_: ast.If):
# if_ = _simplify_ast(state, if_)
# if isinstance(if_.cond, objects.ValueInstance): # XXX a more general test?
# if if_.cond.local_value:
# return if_.then
# else:
# return if_.else_
# return if_
# TODO Optimize these, right now failure to evaluate will lose all work
@dsp
def simplify(state: State, obj: ast.Or):
a, b = evaluate(state, obj.args)
ta = kernel_type(a.type)
tb = kernel_type(b.type)
if ta != tb:
raise Signal.make(T.TypeError, obj, f"'or' operator requires both arguments to be of the same type, but got '{ta}' and '{tb}'.")
try:
if test_nonzero(state, a):
return a
except InsufficientAccessLevel:
return obj
return b
@dsp
def simplify(state: State, obj: ast.And):
a, b = evaluate(state, obj.args)
ta = kernel_type(a.type)
tb = kernel_type(b.type)
if ta != tb:
raise Signal.make(T.TypeError, obj, f"'and' operator requires both arguments to be of the same type, but got '{ta}' and '{tb}'.")
try:
if not test_nonzero(state, a):
return a
except InsufficientAccessLevel:
return obj
return b
@dsp
def simplify(state: State, obj: ast.Not):
inst = evaluate(state, obj.expr)
try:
nz = test_nonzero(state, inst)
except InsufficientAccessLevel:
return obj
return objects.pyvalue_inst(not nz)
@dsp
def simplify(state: State, funccall: ast.FuncCall):
func = evaluate(state, funccall.func)
if isinstance(func, objects.UnknownInstance):
# evaluate(state, [a.value for a in funccall.args])
raise Signal.make(T.TypeError, funccall.func, f"Error: Object of type '{func.type}' is not callable")
args = funccall.args
if isinstance(func, Type):
# Cast to type
args = args + [func]
func = state.get_var('cast')
if not isinstance(func, objects.Function):
raise Signal.make(T.TypeError, funccall.func, f"Error: Object of type '{func.type}' is not callable")
state.stacktrace.append(funccall.text_ref)
try:
res = eval_func_call(state, func, args)
finally:
assert state.stacktrace[-1] is funccall.text_ref
state.stacktrace.pop()
assert isinstance(res, Object), (type(res), res)
return res
def eval_func_call(state, func, args):
assert isinstance(func, objects.Function)
matched_args = func.match_params(state, args)
if isinstance(func, objects.MethodInstance):
args = {'this': func.parent}
# args.update(func.parent.all_attrs())
else:
args = {}
# XXX simplify destroys text_ref, so it harms error messages.
# TODO Can I get rid of it, or make it preserve the text_ref somehow?
# Don't I need an instance to ensure I have type?
for i, (p, a) in enumerate(matched_args):
if not p.name.startswith('$'): # $param means don't evaluate expression, leave it to the function
a = evaluate(state, a)
# TODO cast?
if p.type and not a.type <= p.type:
raise Signal.make(T.TypeError, func, f"Argument #{i} of '{func.name}' is of type '{a.type}', expected '{p.type}'")
args[p.name] = a
if isinstance(func, objects.InternalFunction):
# TODO ensure pure function?
# TODO Ensure correct types
args = list(args.values())
return func.func(state, *args)
# TODO make tests to ensure caching was successful
expr = func.expr
if settings.cache:
params = {name: ast.Parameter(name, value.type) for name, value in args.items()}
sig = (func.name,) + tuple(a.type for a in args.values())
try:
with state.use_scope(params):
if sig in state._cache:
compiled_expr = state._cache[sig]
else:
logging.info(f"Compiling.. {func}")
compiled_expr = _call_expr(state.reduce_access(state.AccessLevels.COMPILE), func.expr)
logging.info("Compiled successfully")
if isinstance(compiled_expr, objects.Instance):
# XXX a little ugly
qb = sql.QueryBuilder(state.db.target, True)
x = compiled_expr.code.compile(qb)
x = x.optimize()
compiled_expr = compiled_expr.replace(code=x)
state._cache[sig] = compiled_expr
expr = ast.ResolveParameters(compiled_expr, args)
except exc.InsufficientAccessLevel:
# Don't cache
pass
with state.use_scope(args):
res = _call_expr(state, expr)
if isinstance(res, ast.ResolveParameters): # XXX A bit of a hack
raise exc.InsufficientAccessLevel()
return res
def _call_expr(state, expr):
try:
return evaluate(state, expr)
except ReturnSignal as r:
return r.value
# TODO fix these once we have proper types
@dsp
def test_nonzero(state: State, table: objects.TableInstance):
count = call_builtin_func(state, "count", [table])
return bool(cast_to_python_int(state, count))
@dsp
def test_nonzero(state: State, inst: objects.Instance):
return bool(cast_to_python(state, inst))
@dsp
def test_nonzero(state: State, inst: Type):
return True
@dsp
def apply_database_rw(state: State, o: ast.One):
# TODO move these to the core/base module
obj = evaluate(state, o.expr)
if obj.type <= T.struct:
if len(obj.attrs) != 1:
raise Signal.make(T.ValueError, o, f"'one' expected a struct with a single attribute, got {len(obj.attrs)}")
x ,= obj.attrs.values()
return x
slice_ast = ast.Slice(obj, ast.Range(None, ast.Const(T.int, 2))).set_text_ref(o.text_ref)
table = evaluate(state, slice_ast)
assert (table.type <= T.table), table
rows = localize(state, table) # Must be 1 row
if len(rows) == 0:
if not o.nullable:
raise Signal.make(T.ValueError, o, "'one' expected a single result, got an empty expression")
return objects.null
elif len(rows) > 1:
raise Signal.make(T.ValueError, o, "'one' expected a single result, got more")
row ,= rows
rowtype = T.row[table.type]
if table.type <= T.list:
return pyvalue_inst(row)
assert table.type <= T.table
assert_type(table.type, T.table, o, 'one')
d = {k: pyvalue_inst(v, table.type.elems[k], True) for k, v in row.items()}
return objects.RowInstance(rowtype, d)
@dsp
def apply_database_rw(state: State, d: ast.Delete):
state.catch_access(state.AccessLevels.WRITE_DB)
# TODO Optimize: Delete on condition, not id, when possible
cond_table = ast.Selection(d.table, d.conds).set_text_ref(d.text_ref)
table = evaluate(state, cond_table)
if not table.type <= T.table:
raise Signal.make(T.TypeError, d.table, f"Expected a table. Got: {table.type}")
if not 'name' in table.type.options:
raise Signal.make(T.ValueError, d.table, "Cannot delete. Table is not persistent")
rows = list(localize(state, table))
if rows:
if 'id' not in rows[0]:
raise Signal.make(T.TypeError, d, "Delete error: Table does not contain id")
ids = [row['id'] for row in rows]
for code in sql.deletes_by_ids(table, ids):
db_query(state, code, table.subqueries)
return evaluate(state, d.table)
@dsp
def apply_database_rw(state: State, u: ast.Update):
state.catch_access(state.AccessLevels.WRITE_DB)
# TODO Optimize: Update on condition, not id, when possible
table = evaluate(state, u.table)
if not table.type <= T.table:
raise Signal.make(T.TypeError, u.table, f"Expected a table. Got: {table.type}")
if not 'name' in table.type.options:
raise Signal.make(T.ValueError, u.table, "Cannot update: Table is not persistent")
for f in u.fields:
if not f.name:
raise Signal.make(T.SyntaxError, f, f"Update requires that all fields have a name")
# TODO verify table is concrete (i.e. lvalue, not a transitory expression)
update_scope = {n:c for n, c in table.all_attrs().items()}
with state.use_scope(update_scope):
proj = {f.name:evaluate(state, f.value) for f in u.fields}
rows = list(localize(state, table))
if rows:
if 'id' not in rows[0]:
raise Signal.make(T.TypeError, u, "Update error: Table does not contain id")
if not set(proj) < set(rows[0]):
raise Signal.make(T.TypeError, u, "Update error: Not all keys exist in table")
ids = [row['id'] for row in rows]
for code in sql.updates_by_ids(table, proj, ids):
db_query(state, code, table.subqueries)
# TODO return by ids to maintain consistency, and skip a possibly long query
return table
@dsp
def apply_database_rw(state: State, new: ast.NewRows):
state.catch_access(state.AccessLevels.WRITE_DB)
obj = state.get_var(new.type)
if len(new.args) > 1:
raise Signal.make(T.NotImplementedError, new, "Not yet implemented") #. Requires column-wise table concat (use join and enum)")
if isinstance(obj, objects.UnknownInstance):
arg ,= new.args
table = evaluate(state, arg.value)
fakerows = [objects.RowInstance(T.row[table], {'id': T.t_id})]
return ast.List_(T.list[T.int], fakerows).set_text_ref(new.text_ref)
if isinstance(obj, objects.TableInstance):
# XXX Is it always TableInstance? Just sometimes? What's the transition here?
obj = obj.type
assert_type(obj, T.table, new, "'new' expected an object of type '%s', instead got '%s'")
arg ,= new.args
# TODO postgres can do it better!
table = evaluate(state, arg.value)
rows = localize(state, table)
# TODO ensure rows are the right type
cons = TableConstructor.make(obj)
# TODO very inefficient, vectorize this
ids = []
for row in rows:
matched = cons.match_params(state, [objects.from_python(v) for v in row.values()])
ids += [_new_row(state, new, obj, matched).primary_key()] # XXX return everything, not just pk?
# XXX find a nicer way - requires a better typesystem, where id(t) < int
return ast.List_(T.list[T.int], ids).set_text_ref(new.text_ref)
@listgen
def _destructure_param_match(state, ast_node, param_match):
# TODO use cast rather than a ad-hoc hardwired destructure
for k, v in param_match:
if isinstance(v, objects.RowInstance):
v = v.primary_key()
v = localize(state, v)
if k.type <= T.struct:
names = [name for name, t in flatten_type(k.orig, [k.name])]
if not isinstance(v, list):
msg = f"Parameter {k.name} received a bad value: {v} (expecting a struct or a list)"
raise Signal.make(T.TypeError, ast_node, msg)
if len(v) != len(names):
msg = f"Parameter {k.name} received a bad value (size of {len(names)})"
raise Signal.make(T.TypeError, ast_node, msg)
yield from safezip(names, v)
else:
yield k.name, v
def _new_value(state, v, type_):
if isinstance(v, list):
return evaluate(state, objects.PythonList(v))
return objects.pyvalue_inst(v, type_=type_)
@dsp
def freeze(state, i: objects.Instance):
return _new_value(state, cast_to_python(state, i), type_=i.type )
@dsp
def freeze(state, i: objects.RowInstance):
return i.replace(attrs={k: freeze(state, v) for k, v in i.attrs.items()})
def _new_row(state, new_ast, table, matched):
matched = [(k, freeze(state, evaluate(state, v))) for k, v in matched]
destructured_pairs = _destructure_param_match(state, new_ast, matched)
keys = [name for (name, _) in destructured_pairs]
values = [sql.make_value(v) for (_,v) in destructured_pairs]
assert keys and values
# XXX use regular insert?
if state.db.target == sql.bigquery:
rowid = db_query(state, sql.FuncCall(T.string, 'GENERATE_UUID', []))
keys += ['id']
values += [sql.make_value(rowid)]
q = sql.InsertConsts(table.options['name'].repr_name, keys, [values])
db_query(state, q)
else:
q = sql.InsertConsts(table.options['name'].repr_name, keys, [values])
# q = sql.InsertConsts(new_ast.type, keys, [values])
db_query(state, q)
rowid = db_query(state, sql.LastRowId())
d = SafeDict({'id': objects.pyvalue_inst(rowid)})
d.update({p.name:v for p, v in matched})
return objects.RowInstance(T.row[table], d)
@dsp
def apply_database_rw(state: State, new: ast.New):
state.catch_access(state.AccessLevels.WRITE_DB)
obj = state.get_var(new.type)
# XXX Assimilate this special case
if isinstance(obj, Type) and obj <= T.Exception:
def create_exception(state, msg):
msg = cast_to_python(state, msg)
assert new.text_ref is state.stacktrace[-1]
return Signal(obj, list(state.stacktrace), msg) # TODO move this to `throw`?
f = objects.InternalFunction(obj.typename, [objects.Param('message')], create_exception)
res = evaluate(state, ast.FuncCall(f, new.args).set_text_ref(new.text_ref))
return res
if not isinstance(obj, objects.TableInstance):
raise Signal.make(T.TypeError, new, f"'new' expects a table or exception, instead got {obj.repr()}")
table = obj
# TODO assert tabletype is a real table and not a query (not transient), otherwise new is meaningless
assert_type(table.type, T.table, new, "'new' expected an object of type '%s', instead got '%s'")
cons = TableConstructor.make(table.type)
matched = cons.match_params(state, new.args)
return _new_row(state, new, table.type, matched)
@dataclass
class TableConstructor(objects.Function):
"Serves as an ad-hoc constructor function for given table, to allow matching params"
params: List[objects.Param]
param_collector: Optional[objects.Param] = None
name = 'new'
@classmethod
def make(cls, table):
return cls([
objects.Param(name, p, p.options.get('default'), orig=p).set_text_ref(getattr(name, 'text_ref', None))
for name, p in table_params(table)
])
def add_as_subquery(state: State, inst: objects.Instance):
code_cls = sql.TableName if (inst.type <= T.table) else sql.Name
name = state.unique_name(inst)
return inst.replace(code=code_cls(inst.code.type, name), subqueries=inst.subqueries.update({name: inst.code}))
@dsp
def resolve_parameters(state: State, x):
return x
@dsp
def resolve_parameters(state: State, p: ast.Parameter):
return state.get_var(p.name)
@dsp
def evaluate(state, obj: list):
return [evaluate(state, item) for item in obj]
@dsp
def evaluate(state, obj_):
assert context.state
# - Generic, non-db related operations
obj = simplify(state, obj_)
assert obj, obj_
if state.access_level < state.AccessLevels.COMPILE:
return obj
# - Compile to instances with db-specific code (sql)
# . Compilation may fail (e.g. due to lack of DB access)
# . Resulting code generic within the same database, and can be cached
# obj = compile_to_inst(state.reduce_access(state.AccessLevels.COMPILE), obj)
obj = compile_to_inst(state, obj)
if state.access_level < state.AccessLevels.EVALUATE:
return obj
# - Resolve parameters to "instantiate" the cached code
# TODO necessary?
obj = resolve_parameters(state, obj)
if state.access_level < state.AccessLevels.READ_DB:
return obj
# - Apply operations that read or write the database (delete, insert, update, one, etc.)
obj = apply_database_rw(state, obj)
assert not isinstance(obj, (ast.ResolveParameters, ast.ParameterizedSqlCode)), obj
return obj
@dsp
def apply_database_rw(state, x):
return x
#
# localize()
# -------------
#
# Return the local value of the expression. Only requires computation if the value is an instance.
#
@dsp
def localize(state, inst: objects.AbsInstance):
raise NotImplementedError(inst)
@dsp
def localize(state, inst: objects.AbsStructInstance):
return {k: localize(state, evaluate(state, v)) for k, v in inst.attrs.items()}
@dsp
def localize(state, inst: objects.Instance):
# TODO This protection doesn't work for unoptimized code
# Cancel unoptimized mode? Or leave this unprotected?
# state.require_access(state.AccessLevels.WRITE_DB)
if inst.code is sql.null:
return None
return db_query(state, inst.code, inst.subqueries)
@dsp
def localize(state, inst: objects.ValueInstance):
return inst.local_value
@dsp
def localize(state, inst: objects.SelectedColumnInstance):
# XXX is this right?
p = evaluate(state, inst.parent)
return p.get_attr(inst.name)
@dsp
def localize(state, x):
return x
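# Create a temporary table from literal Python rows: column types are inferred from
# the first row, an auto 'id' primary key is appended, and the rows are inserted via
# InsertConsts. A minimal usage sketch (column names and row values are hypothetical):
#
#     new_table_from_rows(state, 'people', ['name', 'age'], [['alice', 30], ['bob', 25]])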
def new_table_from_rows(state, name, columns, rows):
# TODO check table doesn't exist
tuples = [
[sql.make_value(i) for i in row]
for row in rows
]
# TODO refactor into function?
    elems = {c: v.type.as_nullable() for c, v in zip(columns, tuples[0])}
elems['id'] = T.t_id
table = T.table(elems, temporary=True, pk=[['id']], name=Id(name))
db_query(state, sql.compile_type_def(state, name, table))
code = sql.InsertConsts(name, columns, tuples)
db_query(state, code)
x = objects.new_table(table)
state.set_var(name, x)
return x
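# Create a table in the database from an existing table expression. Unless `const`
# is set, an auto 'id' primary key is added, and columns that are read-only in the
# new table are excluded from the expression that is copied over with INSERT.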
def new_table_from_expr(state, name, expr, const, temporary):
elems = expr.type.elems
if any(t <= T.unknown for t in elems.values()):
return objects.TableInstance.make(sql.null, expr.type, [])
if 'id' in elems and not const:
msg = "Field 'id' already exists. Rename it, or use 'const table' to copy it as-is."
raise Signal.make(T.NameError, None, msg)
table = T.table(dict(elems), name=Id(name), pk=[] if const else [['id']], temporary=temporary)
if not const:
table.elems['id'] = T.t_id
db_query(state, sql.compile_type_def(state, name, table))
read_only, flat_columns = table_flat_for_insert(table)
expr = exclude_fields(state, expr, set(read_only) & set(elems))
db_query(state, sql.Insert(Id(name), flat_columns, expr.code), expr.subqueries)
return objects.new_table(table)
# cast_to_python - make sure the value is a native python object, not a preql instance
@dsp
def cast_to_python(state, obj):
raise Signal.make(T.TypeError, None, f"Unexpected value: {pql_repr(obj.type, obj)}")
@dsp
def cast_to_python(state, obj: ast.Ast):
inst = cast_to_instance(state, obj)
return cast_to_python(state, inst)
@dsp
def cast_to_python(state, obj: objects.AbsInstance):
# if state.access_level <= state.AccessLevels.QUERY:
if obj.type <= T.projected | T.aggregated:
raise exc.InsufficientAccessLevel(state.access_level)
# raise Signal.make(T.CastError, None, f"Internal error. Cannot cast projected obj: {obj}")
res = localize(state, obj)
if obj.type == T.float:
res = float(res)
elif obj.type == T.int:
res = int(res)
elif obj.type == T.bool:
assert res in (0, 1), res
res = bool(res)
return res
### Added functions
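# The helpers below are attached to existing classes at runtime (monkey-patching):
# Function gains a `_localize_keys` method that converts values to plain Python,
# and Instance gains a `repr` that localizes the value before printing it.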
def function_localize_keys(self, state, struct):
return cast_to_python(state, struct)
objects.Function._localize_keys = function_localize_keys
from preql.context import context
def instance_repr(self):
return pql_repr(self.type, localize(context.state, self))
objects.Instance.repr = instance_repr
| 32.05814 | 137 | 0.654002 |
aefc522314fb7a549eff6a18be9fe6cdaeaaef41 | 35 | py | Python | mcproxy.py | felixbade/minecraft-proxy | 3b040cda4f06a1c7711fd7aea1158b33ca8640f7 | ["Artistic-2.0"] | 1 | 2015-02-17T08:43:12.000Z | 2015-02-17T08:43:12.000Z | mcproxy.py | felixbade/minecraft-proxy | 3b040cda4f06a1c7711fd7aea1158b33ca8640f7 | ["Artistic-2.0"] | null | null | null | mcproxy.py | felixbade/minecraft-proxy | 3b040cda4f06a1c7711fd7aea1158b33ca8640f7 | ["Artistic-2.0"] | null | null | null |
from app import proxy
proxy.run()
| 8.75 | 21 | 0.742857 |
43ff7fa597f1bc570bff198e6443b39de6ab886f | 110,899 | py | Python | python/ccxt/bybit.py | red-eagle-eye/ccxt | 56260c9f00d8eadbb23c9505c224d7f82a0d4f35 | ["MIT"] | 2 | 2021-10-08T23:05:57.000Z | 2022-01-22T15:03:40.000Z | python/ccxt/bybit.py | ReWard0101/ccxt | be730006aeef31ff5b533ad93a8f098645665d6d | ["MIT"] | null | null | null | python/ccxt/bybit.py | ReWard0101/ccxt | be730006aeef31ff5b533ad93a8f098645665d6d | ["MIT"] | 3 | 2021-08-14T18:53:01.000Z | 2021-12-21T22:36:42.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # defined in Python 2
except NameError:
    basestring = str  # Python 3 fallback
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bybit(Exchange):
def describe(self):
return self.deep_extend(super(bybit, self).describe(), {
'id': 'bybit',
'name': 'Bybit',
'countries': ['VG'], # British Virgin Islands
'version': 'v2',
'userAgent': None,
'rateLimit': 100,
'hostname': 'bybit.com', # bybit.com, bytick.com
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawals': True,
'setMarginMode': True,
'setLeverage': True,
},
'timeframes': {
'1m': '1',
'3m': '3',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': 'D',
'1w': 'W',
'1M': 'M',
'1y': 'Y',
},
'urls': {
'test': {
'spot': 'https://api-testnet.{hostname}',
'futures': 'https://api-testnet.{hostname}',
'v2': 'https://api-testnet.{hostname}',
'public': 'https://api-testnet.{hostname}',
'private': 'https://api-testnet.{hostname}',
},
'logo': 'https://user-images.githubusercontent.com/51840849/76547799-daff5b80-649e-11ea-87fb-3be9bac08954.jpg',
'api': {
'spot': 'https://api.{hostname}',
'futures': 'https://api.{hostname}',
'v2': 'https://api.{hostname}',
'public': 'https://api.{hostname}',
'private': 'https://api.{hostname}',
},
'www': 'https://www.bybit.com',
'doc': [
'https://bybit-exchange.github.io/docs/inverse/',
'https://bybit-exchange.github.io/docs/linear/',
'https://github.com/bybit-exchange',
],
'fees': 'https://help.bybit.com/hc/en-us/articles/360039261154',
'referral': 'https://www.bybit.com/app/register?ref=X7Prm',
},
'api': {
'spot': {
'public': {
'get': [
'symbols',
],
},
'quote': {
'get': [
'depth',
'depth/merged',
'trades',
'kline',
'ticker/24hr',
'ticker/price',
'ticker/book_ticker',
],
},
'private': {
'get': [
'order',
'open-orders',
'history-orders',
'myTrades',
'account',
'time',
],
'post': [
'order',
],
'delete': [
'order',
'order/fast',
],
},
'order': {
'delete': [
'batch-cancel',
'batch-fast-cancel',
'batch-cancel-by-ids',
],
},
},
'futures': {
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'v2': {
'public': {
'get': [
'orderBook/L2',
'kline/list',
'tickers',
'trading-records',
'symbols',
'liq-records',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'open-interest',
'big-deal',
'account-ratio',
'time',
'announcement',
'funding/prev-funding-rate',
'risk-limit/list',
],
},
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
'funding/prev-funding-rate',
'funding/prev-funding',
'funding/predicted-funding',
'account/api-key',
'account/lcp',
'wallet/balance',
'wallet/fund/records',
'wallet/withdraw/list',
'exchange-order/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'public': {
'linear': {
'get': [
'kline',
'recent-trading-records',
'funding/prev-funding-rate',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'risk-limit',
],
},
},
'private': {
'linear': {
'get': [
'order/list',
'order/search',
'stop-order/list',
'stop-order/search',
'position/list',
'trade/execution/list',
'trade/closed-pnl/list',
'funding/predicted-funding',
'funding/prev-funding',
],
'post': [
'order/create',
'order/cancel',
'order/cancel-all',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancel-all',
'stop-order/replace',
'position/set-auto-add-margin',
'position/switch-isolated',
'tpsl/switch-mode',
'position/add-margin',
'position/set-leverage',
'position/trading-stop',
'position/set-risk',
],
},
},
},
'httpExceptions': {
'403': RateLimitExceeded, # Forbidden -- You request too many times
},
'exceptions': {
'exact': {
'-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action.
'10001': BadRequest, # parameter error
'10002': InvalidNonce, # request expired, check your timestamp and recv_window
'10003': AuthenticationError, # Invalid apikey
'10004': AuthenticationError, # invalid sign
'10005': PermissionDenied, # permission denied for current apikey
'10006': RateLimitExceeded, # too many requests
'10007': AuthenticationError, # api_key not found in your request parameters
'10010': PermissionDenied, # request ip mismatch
'10017': BadRequest, # request path not found or request method is invalid
'10018': RateLimitExceeded, # exceed ip rate limit
'20001': OrderNotFound, # Order not exists
'20003': InvalidOrder, # missing parameter side
'20004': InvalidOrder, # invalid parameter side
'20005': InvalidOrder, # missing parameter symbol
'20006': InvalidOrder, # invalid parameter symbol
'20007': InvalidOrder, # missing parameter order_type
'20008': InvalidOrder, # invalid parameter order_type
'20009': InvalidOrder, # missing parameter qty
'20010': InvalidOrder, # qty must be greater than 0
'20011': InvalidOrder, # qty must be an integer
'20012': InvalidOrder, # qty must be greater than zero and less than 1 million
'20013': InvalidOrder, # missing parameter price
'20014': InvalidOrder, # price must be greater than 0
'20015': InvalidOrder, # missing parameter time_in_force
'20016': InvalidOrder, # invalid value for parameter time_in_force
'20017': InvalidOrder, # missing parameter order_id
'20018': InvalidOrder, # invalid date format
'20019': InvalidOrder, # missing parameter stop_px
'20020': InvalidOrder, # missing parameter base_price
'20021': InvalidOrder, # missing parameter stop_order_id
'20022': BadRequest, # missing parameter leverage
'20023': BadRequest, # leverage must be a number
'20031': BadRequest, # leverage must be greater than zero
'20070': BadRequest, # missing parameter margin
'20071': BadRequest, # margin must be greater than zero
'20084': BadRequest, # order_id or order_link_id is required
'30001': BadRequest, # order_link_id is repeated
'30003': InvalidOrder, # qty must be more than the minimum allowed
'30004': InvalidOrder, # qty must be less than the maximum allowed
'30005': InvalidOrder, # price exceeds maximum allowed
'30007': InvalidOrder, # price exceeds minimum allowed
'30008': InvalidOrder, # invalid order_type
'30009': ExchangeError, # no position found
'30010': InsufficientFunds, # insufficient wallet balance
'30011': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30012': PermissionDenied, # operation not allowed as position is undergoing ADL
'30013': PermissionDenied, # position is in liq or adl status
'30014': InvalidOrder, # invalid closing order, qty should not greater than size
'30015': InvalidOrder, # invalid closing order, side should be opposite
'30016': ExchangeError, # TS and SL must be cancelled first while closing position
'30017': InvalidOrder, # estimated fill price cannot be lower than current Buy liq_price
'30018': InvalidOrder, # estimated fill price cannot be higher than current Sell liq_price
'30019': InvalidOrder, # cannot attach TP/SL params for non-zero position when placing non-opening position order
'30020': InvalidOrder, # position already has TP/SL params
'30021': InvalidOrder, # cannot afford estimated position_margin
'30022': InvalidOrder, # estimated buy liq_price cannot be higher than current mark_price
'30023': InvalidOrder, # estimated sell liq_price cannot be lower than current mark_price
'30024': InvalidOrder, # cannot set TP/SL/TS for zero-position
'30025': InvalidOrder, # trigger price should bigger than 10% of last price
'30026': InvalidOrder, # price too high
'30027': InvalidOrder, # price set for Take profit should be higher than Last Traded Price
'30028': InvalidOrder, # price set for Stop loss should be between Liquidation price and Last Traded Price
'30029': InvalidOrder, # price set for Stop loss should be between Last Traded Price and Liquidation price
'30030': InvalidOrder, # price set for Take profit should be lower than Last Traded Price
'30031': InsufficientFunds, # insufficient available balance for order cost
'30032': InvalidOrder, # order has been filled or cancelled
'30033': RateLimitExceeded, # The number of stop orders exceeds maximum limit allowed
'30034': OrderNotFound, # no order found
'30035': RateLimitExceeded, # too fast to cancel
'30036': ExchangeError, # the expected position value after order execution exceeds the current risk limit
'30037': InvalidOrder, # order already cancelled
'30041': ExchangeError, # no position found
'30042': InsufficientFunds, # insufficient wallet balance
'30043': InvalidOrder, # operation not allowed as position is undergoing liquidation
'30044': InvalidOrder, # operation not allowed as position is undergoing AD
'30045': InvalidOrder, # operation not allowed as position is not normal status
'30049': InsufficientFunds, # insufficient available balance
'30050': ExchangeError, # any adjustments made will trigger immediate liquidation
'30051': ExchangeError, # due to risk limit, cannot adjust leverage
'30052': ExchangeError, # leverage can not less than 1
'30054': ExchangeError, # position margin is invalid
'30057': ExchangeError, # requested quantity of contracts exceeds risk limit
'30063': ExchangeError, # reduce-only rule not satisfied
'30067': InsufficientFunds, # insufficient available balance
'30068': ExchangeError, # exit value must be positive
'30074': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is raising to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or greater than stop_px, please adjust base_price or stop_px
'30075': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is falling to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or less than stop_px, please adjust base_price or stop_px
'33004': AuthenticationError, # apikey already expired
'34026': ExchangeError, # the limit is no change
},
'broad': {
'unknown orderInfo': OrderNotFound, # {"ret_code":-1,"ret_msg":"unknown orderInfo","ext_code":"","ext_info":"","result":null,"time_now":"1584030414.005545","rate_limit_status":99,"rate_limit_reset_ms":1584030414003,"rate_limit":100}
'invalid api_key': AuthenticationError, # {"ret_code":10003,"ret_msg":"invalid api_key","ext_code":"","ext_info":"","result":null,"time_now":"1599547085.415797"}
},
},
'precisionMode': TICK_SIZE,
'options': {
'marketTypes': {
'BTC/USDT': 'linear',
'ETH/USDT': 'linear',
'BNB/USDT': 'linear',
'ADA/USDT': 'linear',
'DOGE/USDT': 'linear',
'XRP/USDT': 'linear',
'DOT/USDT': 'linear',
'UNI/USDT': 'linear',
'BCH/USDT': 'linear',
'LTC/USDT': 'linear',
'SOL/USDT': 'linear',
'LINK/USDT': 'linear',
'MATIC/USDT': 'linear',
'ETC/USDT': 'linear',
'FIL/USDT': 'linear',
'EOS/USDT': 'linear',
'AAVE/USDT': 'linear',
'XTZ/USDT': 'linear',
'SUSHI/USDT': 'linear',
'XEM/USDT': 'linear',
'BTC/USD': 'inverse',
'ETH/USD': 'inverse',
'EOS/USD': 'inverse',
'XRP/USD': 'inverse',
},
'defaultType': 'linear', # linear, inverse, futures
'code': 'BTC',
'cancelAllOrders': {
# 'method': 'v2PrivatePostOrderCancelAll', # v2PrivatePostStopOrderCancelAll
},
'recvWindow': 5 * 1000, # 5 sec default
'timeDifference': 0, # the difference between system clock and exchange server clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.00075,
'maker': -0.00025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
})
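    # The nonce is the local timestamp in milliseconds, corrected by the clock
    # offset measured against the exchange server in load_time_difference().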
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
def load_time_difference(self, params={}):
serverTime = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
def fetch_time(self, params={}):
response = self.v2PublicGetTime(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {},
# time_now: '1583933682.448826'
# }
#
return self.safe_timestamp(response, 'time_now')
def fetch_markets(self, params={}):
if self.options['adjustForTimeDifference']:
self.load_time_difference()
response = self.v2PublicGetSymbols(params)
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "name":"BTCUSD",
# "alias":"BTCUSD",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# },
# {
# "name":"BTCUSDT",
# "alias":"BTCUSDT",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USDT",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":100,"min_trading_qty":0.001,"qty_step":0.001}
# },
# ],
# "time_now":"1610539664.818033"
# }
#
markets = self.safe_value(response, 'result', [])
options = self.safe_value(self.options, 'fetchMarkets', {})
linearQuoteCurrencies = self.safe_value(options, 'linear', {'USDT': True})
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string_2(market, 'name', 'symbol')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
linear = (quote in linearQuoteCurrencies)
inverse = not linear
symbol = base + '/' + quote
baseQuote = base + quote
type = 'swap'
if baseQuote != id:
symbol = id
type = 'futures'
lotSizeFilter = self.safe_value(market, 'lot_size_filter', {})
priceFilter = self.safe_value(market, 'price_filter', {})
precision = {
'amount': self.safe_number(lotSizeFilter, 'qty_step'),
'price': self.safe_number(priceFilter, 'tick_size'),
}
leverage = self.safe_value(market, 'leverage_filter', {})
status = self.safe_string(market, 'status')
active = None
if status is not None:
active = (status == 'Trading')
spot = (type == 'spot')
swap = (type == 'swap')
futures = (type == 'futures')
option = (type == 'option')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'precision': precision,
'taker': self.safe_number(market, 'taker_fee'),
'maker': self.safe_number(market, 'maker_fee'),
'type': type,
'spot': spot,
'swap': swap,
'futures': futures,
'option': option,
'linear': linear,
'inverse': inverse,
'limits': {
'amount': {
'min': self.safe_number(lotSizeFilter, 'min_trading_qty'),
'max': self.safe_number(lotSizeFilter, 'max_trading_qty'),
},
'price': {
'min': self.safe_number(priceFilter, 'min_price'),
'max': self.safe_number(priceFilter, 'max_price'),
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'max': self.safe_number(leverage, 'max_leverage', 1),
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last_price')
open = self.safe_number(ticker, 'prev_price_24h')
percentage = self.safe_number(ticker, 'price_24h_pcnt')
if percentage is not None:
percentage *= 100
change = None
average = None
if (last is not None) and (open is not None):
change = last - open
average = self.sum(open, last) / 2
baseVolume = self.safe_number(ticker, 'turnover_24h')
quoteVolume = self.safe_number(ticker, 'volume_24h')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_price_24h'),
'low': self.safe_number(ticker, 'low_price_24h'),
'bid': self.safe_number(ticker, 'bid_price'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v2PublicGetTickers(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
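        # the tickers endpoint does not timestamp individual entries, so the
        # response-level time_now is applied to the parsed ticker below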
result = self.safe_value(response, 'result', [])
first = self.safe_value(result, 0)
timestamp = self.safe_timestamp(response, 'time_now')
ticker = self.parse_ticker(first, market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.v2PublicGetTickers(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
tickers = {}
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
symbol = ticker['symbol']
tickers[symbol] = ticker
return self.filter_by_array(tickers, 'symbol', symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# inverse perpetual BTC/USD
#
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# }
#
# linear perpetual BTC/USDT
#
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
#
return [
self.safe_timestamp_2(ohlcv, 'open_time', 'start_at'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number_2(ohlcv, 'turnover', 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
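        # Bybit expects a 'from' timestamp in seconds; when 'since' is not given,
        # derive it by counting 'limit' candles back from the current time,
        # e.g. limit=200 with a '1m' timeframe gives from = now - 200 * 60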
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
else:
request['from'] = now - limit * duration
else:
request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit # max 200, default 200
method = 'v2PublicGetKlineList'
if price == 'mark':
method = 'v2PublicGetMarkPriceKline'
elif price == 'index':
method = 'v2PublicGetIndexPriceKline'
elif price == 'premiumIndex':
method = 'v2PublicGetPremiumIndexKline'
elif market['linear']:
method = 'publicLinearGetKline'
response = getattr(self, method)(self.extend(request, params))
#
# inverse perpetual BTC/USD
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# },
# ],
# time_now: '1583953082.397330'
# }
#
# linear perpetual BTC/USDT
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
# ],
# "time_now":"1587884120.168077"
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'v2PublicGetFundingPrevFundingRate'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "symbol": "BTCUSD",
# "funding_rate": "0.00010000",
# "funding_rate_timestamp": 1577433600
# },
# "ext_info": null,
# "time_now": "1577445586.446797",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577445586454,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result')
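        # funding_rate_timestamp is returned in seconds; Bybit settles funding
        # every 8 hours, so the next funding time is extrapolated from it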
nextFundingRate = self.safe_number(result, 'funding_rate')
previousFundingTime = self.safe_integer(result, 'funding_rate_timestamp') * 1000
nextFundingTime = previousFundingTime + (8 * 3600000)
currentTime = self.milliseconds()
return {
'info': result,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'previousFundingRate': None,
'nextFundingRate': nextFundingRate,
'previousFundingTimestamp': previousFundingTime,
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchMarkOHLCV() requires a since argument or a limit argument')
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchPremiumIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'premiumIndex',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# # the docs say the exec_time field is "abandoned" now
# # the user should use "trade_time_ms"
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1,
# "trade_time_ms": 1577480599000
# }
#
id = self.safe_string_2(trade, 'id', 'exec_id')
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
amountString = self.safe_string_2(trade, 'qty', 'exec_qty')
priceString = self.safe_string_2(trade, 'exec_price', 'price')
cost = self.safe_number(trade, 'exec_value')
amount = self.parse_number(amountString)
price = self.parse_number(priceString)
if cost is None:
cost = self.parse_number(Precise.string_mul(priceString, amountString))
timestamp = self.parse8601(self.safe_string(trade, 'time'))
if timestamp is None:
timestamp = self.safe_integer(trade, 'trade_time_ms')
side = self.safe_string_lower(trade, 'side')
lastLiquidityInd = self.safe_string(trade, 'last_liquidity_ind')
takerOrMaker = 'maker' if (lastLiquidityInd == 'AddedLiquidity') else 'taker'
feeCost = self.safe_number(trade, 'exec_fee')
fee = None
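        # fees for inverse contracts are charged in the base currency,
        # for linear contracts in the quote currency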
if feeCost is not None:
feeCurrencyCode = market['base'] if market['inverse'] else market['quote']
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
'rate': self.safe_number(trade, 'fee_rate'),
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string_lower(trade, 'order_type'),
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 123, # from id
}
if limit is not None:
request['count'] = limit # default 500, max 1000
method = 'publicLinearGetRecentTradingRecords' if market['linear'] else 'v2PublicGetTradingRecords'
response = getattr(self, method)(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# },
# ],
# time_now: '1583954313.393362'
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_trades(result, market, since, limit)
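    # Bybit returns the book as a flat list of price levels, each tagged with a
    # 'side' field, so the default parser is overridden to split bids and asks.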
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='Buy', asksKey='Sell', priceKey='price', amountKey='size'):
bids = []
asks = []
for i in range(0, len(orderbook)):
bidask = orderbook[i]
side = self.safe_string(bidask, 'side')
if side == 'Buy':
bids.append(self.parse_bid_ask(bidask, priceKey, amountKey))
elif side == 'Sell':
asks.append(self.parse_bid_ask(bidask, priceKey, amountKey))
else:
raise ExchangeError(self.id + ' parseOrderBook encountered an unrecognized bidask format: ' + self.json(bidask))
return {
'symbol': symbol,
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.v2PublicGetOrderBookL2(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {symbol: 'BTCUSD', price: '7767.5', size: 677956, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7767', size: 580690, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7766.5', size: 475252, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7768', size: 330847, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7768.5', size: 97159, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7769', size: 6508, side: 'Sell'},
# ],
# time_now: '1583954829.874823'
# }
#
result = self.safe_value(response, 'result', [])
timestamp = self.safe_timestamp(response, 'time_now')
return self.parse_order_book(result, symbol, timestamp, 'Buy', 'Sell', 'price', 'size')
def fetch_balance(self, params={}):
self.load_markets()
request = {}
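        # a single currency can be requested either with the unified 'code'
        # param or with Bybit's native 'coin' param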
coin = self.safe_string(params, 'coin')
code = self.safe_string(params, 'code')
if coin is not None:
request['coin'] = coin
elif code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
response = self.v2PrivateGetWalletBalance(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {
# BTC: {
# equity: 0,
# available_balance: 0,
# used_margin: 0,
# order_margin: 0,
# position_margin: 0,
# occ_closing_fee: 0,
# occ_funding_fee: 0,
# wallet_balance: 0,
# realised_pnl: 0,
# unrealised_pnl: 0,
# cum_realised_pnl: 0,
# given_cash: 0,
# service_cash: 0
# }
# },
# time_now: '1583937810.370020',
# rate_limit_status: 119,
# rate_limit_reset_ms: 1583937810367,
# rate_limit: 120
# }
#
result = {
'info': response,
}
balances = self.safe_value(response, 'result', {})
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available_balance')
account['used'] = self.safe_string(balance, 'used_margin')
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_order_status(self, status):
statuses = {
# basic orders
'Created': 'open',
'Rejected': 'rejected', # order is triggered but failed upon being placed
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Cancelled': 'canceled',
'PendingCancel': 'canceling', # the engine has received the cancellation but there is no guarantee that it will be successful
# conditional orders
'Active': 'open', # order is triggered and placed successfully
'Untriggered': 'open', # order waits to be triggered
'Triggered': 'closed', # order is triggered
# 'Cancelled': 'canceled', # order is cancelled
# 'Rejected': 'rejected', # order is triggered but fail to be placed
'Deactivated': 'canceled', # conditional order was cancelled before triggering
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
'PostOnly': 'PO',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0, # in contracts, where 1 contract = 1 quote currency unit(USD for inverse contracts)
# "cum_exec_value": 0, # in contract's underlying currency(BTC for inverse contracts)
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# }
#
# fetchOrder
#
# {
# "user_id" : 599946,
# "symbol" : "BTCUSD",
# "side" : "Buy",
# "order_type" : "Limit",
# "price" : "7948",
# "qty" : 10,
# "time_in_force" : "GoodTillCancel",
# "order_status" : "Filled",
# "ext_fields" : {
# "o_req_num" : -1600687220498,
# "xreq_type" : "x_create"
# },
# "last_exec_time" : "1588150113.968422",
# "last_exec_price" : "7948",
# "leaves_qty" : 0,
# "leaves_value" : "0",
# "cum_exec_qty" : 10,
# "cum_exec_value" : "0.00125817",
# "cum_exec_fee" : "-0.00000031",
# "reject_reason" : "",
# "cancel_type" : "",
# "order_link_id" : "",
# "created_at" : "2020-04-29T08:45:24.399146Z",
# "updated_at" : "2020-04-29T08:48:33.968422Z",
# "order_id" : "dd2504b9-0157-406a-99e1-efa522373944"
# }
#
# conditional order
#
# {
# "user_id":##,
# "symbol":"BTCUSD",
# "side":"Buy",
# "order_type":"Market",
# "price":0,
# "qty":10,
# "time_in_force":"GoodTillCancel",
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "order_status":"Untriggered",
# "ext_fields":{
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "expected_direction":"Rising",
# "trigger_price":12400,
# "close_on_trigger":true,
# "op_from":"api",
# "remark":"x.x.x.x",
# "o_req_num":0
# },
# "leaves_qty":10,
# "leaves_value":0.00080645,
# "reject_reason":null,
# "cross_seq":-1,
# "created_at":"2020-08-21T09:18:48.000Z",
# "updated_at":"2020-08-21T09:18:48.000Z",
# "trigger_price":12400,
# "stop_order_id":"3f3b54b1-3379-42c7-8510-44f4d9915be0"
# }
#
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
feeCurrency = None
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
id = self.safe_string_2(order, 'order_id', 'stop_order_id')
type = self.safe_string_lower(order, 'order_type')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'average_price')
amount = self.safe_string(order, 'qty')
cost = self.safe_string(order, 'cum_exec_value')
filled = self.safe_string(order, 'cum_exec_qty')
remaining = self.safe_string(order, 'leaves_qty')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
if market is not None:
if marketType == 'linear':
feeCurrency = market['quote']
else:
feeCurrency = market['base']
lastTradeTimestamp = self.safe_timestamp(order, 'last_exec_time')
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
status = self.parse_order_status(self.safe_string_2(order, 'order_status', 'stop_order_status'))
side = self.safe_string_lower(order, 'side')
feeCostString = self.safe_string(order, 'cum_exec_fee')
feeCost = self.parse_number(Precise.string_abs(feeCostString))
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'order_link_id')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
timeInForce = self.parse_time_in_force(self.safe_string(order, 'time_in_force'))
stopPrice = self.safe_number_2(order, 'trigger_price', 'stop_px')
postOnly = (timeInForce == 'PO')
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearGetOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetOrder'
elif market['futures']:
method = 'futuresPrivateGetOrder'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearGetStopOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetStopOrder'
elif market['futures']:
method = 'futuresPrivateGetStopOrder'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Limit",
# "price": "8083",
# "qty": 10,
# "time_in_force": "GoodTillCancel",
# "order_status": "New",
# "ext_fields": {"o_req_num": -308787, "xreq_type": "x_create", "xreq_offset": 4154640},
# "leaves_qty": 10,
# "leaves_value": "0.00123716",
# "cum_exec_qty": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-10-21T07:28:19.396246Z",
# "updated_at": "2019-10-21T07:28:19.396246Z",
# "order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"
# },
# "time_now": "1571651135.291930",
# "rate_limit_status": 99, # The remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": "8000",
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Untriggered",
# "ext_fields": {},
# "leaves_qty": 1,
# "leaves_value": "0.00013333",
# "cum_exec_qty": 0,
# "cum_exec_value": null,
# "cum_exec_fee": null,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-12-27T19:56:24.052194Z",
# "updated_at": "2019-12-27T19:56:24.052194Z",
# "order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"
# },
# "time_now": "1577476584.386958",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request = {
# orders ---------------------------------------------------------
'side': self.capitalize(side),
'symbol': market['id'],
'order_type': self.capitalize(type),
            'qty': qty,  # contracts: integer USD amount for inverse markets, float base-currency amount for linear
# 'price': float(self.price_to_precision(symbol, price)), # required for limit orders
'time_in_force': 'GoodTillCancel', # ImmediateOrCancel, FillOrKill, PostOnly
# 'take_profit': 123.45, # take profit price, only take effect upon opening the position
# 'stop_loss': 123.45, # stop loss price, only take effect upon opening the position
# 'reduce_only': False, # reduce only, required for linear orders
# when creating a closing order, bybit recommends a True value for
# close_on_trigger to avoid failing due to insufficient available margin
            # 'close_on_trigger': False,  # required for linear orders
# 'order_link_id': 'string', # unique client order id, max 36 characters
# conditional orders ---------------------------------------------
# base_price is used to compare with the value of stop_px, to decide
# whether your conditional order will be triggered by crossing trigger
# price from upper side or lower side, mainly used to identify the
# expected direction of the current conditional order
# 'base_price': 123.45, # required for conditional orders
# 'stop_px': 123.45, # trigger price, required for conditional orders
# 'trigger_by': 'LastPrice', # IndexPrice, MarkPrice
}
priceIsRequired = False
if type == 'limit':
priceIsRequired = True
if priceIsRequired:
if price is not None:
request['price'] = float(self.price_to_precision(symbol, price))
else:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for a ' + type + ' order')
clientOrderId = self.safe_string_2(params, 'order_link_id', 'clientOrderId')
if clientOrderId is not None:
request['order_link_id'] = clientOrderId
params = self.omit(params, ['order_link_id', 'clientOrderId'])
stopPx = self.safe_value_2(params, 'stop_px', 'stopPrice')
basePrice = self.safe_value(params, 'base_price')
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCreate'
request['reduce_only'] = False
request['close_on_trigger'] = False
elif market['inverse']:
method = 'v2PrivatePostOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostOrderCreate'
if stopPx is not None:
if basePrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCreate'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCreate'
request['stop_px'] = float(self.price_to_precision(symbol, stopPx))
request['base_price'] = float(self.price_to_precision(symbol, basePrice))
params = self.omit(params, ['stop_px', 'stopPrice', 'base_price'])
elif basePrice is not None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0,
# "cum_exec_value": 0,
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# },
# "time_now": "1575111823.458705",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_status": "Untriggered",
# "ext_fields": {
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "expected_direction": "Rising",
# "trigger_price": 7500,
# "op_from": "api",
# "remark": "127.0.01",
# "o_req_num": 0
# },
# "leaves_qty": 1,
# "leaves_value": 0.00013333,
# "reject_reason": null,
# "cross_seq": -1,
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# "ext_info": null,
# "time_now": "1577450904.327654",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1577450904335,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if symbol is None:
            raise ArgumentsRequired(self.id + ' editOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
# 'order_id': id, # only for non-conditional orders
'symbol': market['id'],
# 'p_r_qty': self.amount_to_precision(symbol, amount), # new order quantity, optional
            # 'p_r_price': self.price_to_precision(symbol, price),  # new order price, optional
# ----------------------------------------------------------------
# conditional orders
# 'stop_order_id': id, # only for conditional orders
# 'p_r_trigger_price': 123.45, # new trigger price also known as stop_px
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostOrderReplace'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is not None:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostStopOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostStopOrderReplace'
request['stop_order_id'] = stopOrderId
params = self.omit(params, ['stop_order_id'])
else:
request['order_id'] = id
if amount is not None:
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request['p_r_qty'] = qty
if price is not None:
request['p_r_price'] = float(self.price_to_precision(symbol, price))
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"},
# "time_now": "1539778407.210858",
# "rate_limit_status": 99, # remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"stop_order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"},
# "ext_info": null,
# "time_now": "1577475760.604942",
# "rate_limit_status": 96,
# "rate_limit_reset_ms": 1577475760612,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result', {})
return {
'info': response,
'id': self.safe_string_2(result, 'order_id', 'stop_order_id'),
'order_id': self.safe_string(result, 'order_id'),
'stop_order_id': self.safe_string(result, 'stop_order_id'),
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostOrderCancel'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCancel'
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
options = self.safe_value(self.options, 'cancelAllOrders', {})
defaultMethod = None
if market['swap']:
if market['linear']:
defaultMethod = 'privateLinearPostOrderCancelAll'
elif market['inverse']:
defaultMethod = 'v2PrivatePostOrderCancelAll'
elif market['futures']:
defaultMethod = 'futuresPrivatePostOrderCancelAll'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'order_id': 'string'
# 'order_link_id': 'string', # unique client order id, max 36 characters
# 'symbol': market['id'], # default BTCUSD
# 'order': 'desc', # asc
# 'page': 1,
# 'limit': 20, # max 50
# 'order_status': 'Created,New'
# conditional orders ---------------------------------------------
# 'stop_order_id': 'string',
# 'stop_order_status': 'Untriggered',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
options = self.safe_value(self.options, 'fetchOrders', {})
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
defaultMethod = None
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
if linear:
defaultMethod = 'privateLinearGetOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetOrderList'
query = params
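        # conditional (stop) orders are served by a separate family of endpoints,
        # selected when stop_order_id or stop_order_status params are present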
if ('stop_order_id' in params) or ('stop_order_status' in params):
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is not None:
if isinstance(stopOrderStatus, list):
stopOrderStatus = ','.join(stopOrderStatus)
request['stop_order_status'] = stopOrderStatus
query = self.omit(params, 'stop_order_status')
if linear:
defaultMethod = 'privateLinearGetStopOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetStopOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetStopOrderList'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, query))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 6,
# "data": [
# {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Market",
# "price": 7074,
# "qty": 2,
# "time_in_force": "ImmediateOrCancel",
# "order_status": "Filled",
# "ext_fields": {
# "close_on_trigger": True,
# "orig_order_type": "BLimit",
# "prior_x_req_price": 5898.5,
# "op_from": "pc",
# "remark": "127.0.0.1",
# "o_req_num": -34799032763,
# "xreq_type": "x_create"
# },
# "last_exec_time": "1577448481.696421",
# "last_exec_price": 7070.5,
# "leaves_qty": 0,
# "leaves_value": 0,
# "cum_exec_qty": 2,
# "cum_exec_value": 0.00028283,
# "cum_exec_fee": 0.00002,
# "reject_reason": "NoError",
# "order_link_id": "",
# "created_at": "2019-12-27T12:08:01.000Z",
# "updated_at": "2019-12-27T12:08:01.000Z",
# "order_id": "f185806b-b801-40ff-adec-52289370ed62"
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577448922.437871",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 1,
# "data": [
# {
# "user_id": 1,
# "stop_order_status": "Untriggered",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_link_id": "",
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# ]
# },
# "ext_info": null,
# "time_now": "1577451658.755468",
# "rate_limit_status": 599,
# "rate_limit_reset_ms": 1577451658762,
# "rate_limit": 600
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Rejected',
'Filled',
'Cancelled',
# conditional orders
# 'Active',
# 'Triggered',
# 'Cancelled',
# 'Rejected',
# 'Deactivated',
]
options = self.safe_value(self.options, 'fetchClosedOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Created',
'New',
'PartiallyFilled',
'PendingCancel',
# conditional orders
# 'Untriggered',
]
options = self.safe_value(self.options, 'fetchOpenOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'order_id': 'f185806b-b801-40ff-adec-52289370ed62', # if not provided will return user's trading records
# 'symbol': market['id'],
# 'start_time': int(since / 1000),
# 'page': 1,
# 'limit' 20, # max 50
}
market = None
if symbol is None:
orderId = self.safe_string(params, 'order_id')
if orderId is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument or an order_id param')
else:
request['order_id'] = orderId
params = self.omit(params, 'order_id')
else:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit # default 20, max 50
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearGetTradeExecutionList'
elif inverse:
method = 'v2PrivateGetExecutionList'
elif futures:
method = 'futuresPrivateGetExecutionList'
response = getattr(self, method)(self.extend(request, params))
#
# inverse
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "order_id": "Abandonednot !", # Abandonednot !
# "trade_list": [
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1
# }
# ]
# },
# "time_now": "1577483699.281488",
# "rate_limit_status": 118,
# "rate_limit_reset_ms": 1577483699244737,
# "rate_limit": 120
# }
#
# linear
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "current_page":1,
# "data":[
# {
# "order_id":"b59418ec-14d4-4ef9-b9f4-721d5d576974",
# "order_link_id":"",
# "side":"Sell",
# "symbol":"BTCUSDT",
# "exec_id":"0327284d-faec-5191-bd89-acc5b4fafda9",
# "price":0.5,
# "order_price":0.5,
# "order_qty":0.01,
# "order_type":"Market",
# "fee_rate":0.00075,
# "exec_price":9709.5,
# "exec_type":"Trade",
# "exec_qty":0.01,
# "exec_fee":0.07282125,
# "exec_value":97.095,
# "leaves_qty":0,
# "closed_size":0.01,
# "last_liquidity_ind":"RemovedLiquidity",
# "trade_time":1591648052,
# "trade_time_ms":1591648052861
# }
# ]
# },
# "time_now":"1591736501.979264",
# "rate_limit_status":119,
# "rate_limit_reset_ms":1591736501974,
# "rate_limit":120
# }
#
result = self.safe_value(response, 'result', {})
trades = self.safe_value_2(result, 'trade_list', 'data', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
'wallet_fund_type': 'Deposit', # Deposit, Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.ymd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'deposit'})
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'status': 'Pending', # ToBeConfirmed, UnderReview, Pending, Success, CancelByUser, Reject, Expire
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.ymd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletWithdrawList(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# },
# ],
# "current_page": 1,
# "last_page": 1
# },
# "ext_info": null,
# "time_now": "1577482295.125488",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577482295132,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'withdrawal'})
def parse_transaction_status(self, status):
statuses = {
'ToBeConfirmed': 'pending',
'UnderReview': 'pending',
'Pending': 'pending',
'Success': 'ok',
'CancelByUser': 'canceled',
'Reject': 'rejected',
'Expire': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# }
#
# fetchDeposits ledger entries
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string_2(transaction, 'submited_at', 'exec_time'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
address = self.safe_string(transaction, 'address')
feeCost = self.safe_number(transaction, 'fee')
type = self.safe_string_lower(transaction, 'type')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'wallet_fund_type': 'Deposit', # Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.ymd(since)
if limit is not None:
request['limit'] = limit
response = self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(item, 'coin')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
after = self.safe_number(item, 'wallet_balance')
direction = 'out' if (amount < 0) else 'in'
before = None
if after is not None and amount is not None:
difference = amount if (direction == 'out') else -amount
before = self.sum(after, difference)
timestamp = self.parse8601(self.safe_string(item, 'exec_time'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
id = self.safe_string(item, 'id')
referenceId = self.safe_string(item, 'tx_id')
return {
'id': id,
'currency': code,
'account': self.safe_string(item, 'wallet_id'),
'referenceAccount': None,
'referenceId': referenceId,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'Deposit': 'transaction',
'Withdraw': 'transaction',
'RealisedPNL': 'trade',
'Commission': 'fee',
'Refund': 'cashback',
'Prize': 'prize', # ?
'ExchangeOrderWithdraw': 'transaction',
'ExchangeOrderDeposit': 'transaction',
}
return self.safe_string(types, type, type)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
request = {}
if isinstance(symbols, list):
length = len(symbols)
if length != 1:
raise ArgumentsRequired(self.id + ' fetchPositions takes an array with exactly one symbol')
request['symbol'] = self.market_id(symbols[0])
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
response = None
if type == 'linear':
response = self.privateLinearGetPositionList(self.extend(request, params))
elif type == 'inverse':
response = self.v2PrivateGetPositionList(self.extend(request, params))
elif type == 'inverseFuture':
response = self.futuresPrivateGetPositionList(self.extend(request, params))
if (isinstance(response, basestring)) and self.is_json_encoded_object(response):
response = json.loads(response)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [] or {} depending on the request
# }
#
return self.safe_value(response, 'result')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
type = self.safe_string(api, 0)
section = self.safe_string(api, 1)
if type == 'spot':
if section == 'public':
section = 'v1'
else:
section += '/v1'
url = self.implode_hostname(self.urls['api'][type])
request = '/' + type + '/' + section + '/' + path
if (type == 'spot') or (type == 'quote'):
if params:
request += '?' + self.rawencode(params)
elif section == 'public':
if params:
request += '?' + self.rawencode(params)
elif type == 'public':
if params:
request += '?' + self.rawencode(params)
else:
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
sortedQuery = self.keysort(query)
auth = self.rawencode(sortedQuery)
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
request += '?' + self.urlencode(sortedQuery) + '&sign=' + signature
url += request
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# {
# ret_code: 10001,
# ret_msg: 'ReadMapCB: expect {or n, but found \u0000, error ' +
# 'found in #0 byte of ...||..., bigger context ' +
# '...||...',
# ext_code: '',
# ext_info: '',
# result: null,
# time_now: '1583934106.590436'
# }
#
errorCode = self.safe_string(response, 'ret_code')
if errorCode != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
def set_margin_mode(self, marginType, symbol=None, params={}):
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": null,
# "ext_info": null,
# "time_now": "1577477968.175013",
# "rate_limit_status": 74,
# "rate_limit_reset_ms": 1577477968183,
# "rate_limit": 75
# }
#
leverage = self.safe_value(params, 'leverage')
if leverage is None:
            raise ArgumentsRequired(self.id + ' setMarginMode() requires a leverage parameter')
marginType = marginType.upper()
if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):
raise BadRequest(self.id + ' marginType must be either isolated or crossed')
self.load_markets()
market = self.market(symbol)
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
if linear:
method = 'privateLinearPostPositionSwitchIsolated'
elif inverse:
method = 'v2PrivatePostPositionSwitchIsolated'
elif futures:
method = 'privateFuturesPostPositionSwitchIsolated'
isIsolated = (marginType == 'ISOLATED')
request = {
'symbol': market['id'],
'is_isolated': isIsolated,
'buy_leverage': leverage,
'sell_leverage': leverage,
}
return getattr(self, method)(self.extend(request, params))
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearPostPositionSetLeverage'
elif inverse:
method = 'v2PrivatePostPositionLeverageSave'
elif futures:
method = 'privateFuturesPostPositionLeverageSave'
buy_leverage = leverage
sell_leverage = leverage
        if ('buy_leverage' in params) and ('sell_leverage' in params) and linear:
buy_leverage = params['buy_leverage']
sell_leverage = params['sell_leverage']
elif not leverage:
if linear:
raise ArgumentsRequired(self.id + ' setLeverage() requires either the parameter leverage or params["buy_leverage"] and params["sell_leverage"] for linear contracts')
else:
raise ArgumentsRequired(self.id + ' setLeverage() requires parameter leverage for inverse and futures contracts')
if (buy_leverage < 1) or (buy_leverage > 100) or (sell_leverage < 1) or (sell_leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
request = {
'symbol': market['id'],
'leverage_only': True,
}
if not linear:
request['leverage'] = buy_leverage
else:
request['buy_leverage'] = buy_leverage
request['sell_leverage'] = sell_leverage
return getattr(self, method)(self.extend(request, params))
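# ---------------------------------------------------------------------------
# Minimal standalone sketch (not part of the exchange class) of the request
# signing performed in sign() above: the query, extended with api_key,
# recv_window and timestamp, is sorted by key, form-encoded and signed with
# HMAC-SHA256 using the API secret. The sign_query helper, key and secret
# below are placeholders; urlencode is only an approximation of rawencode,
# which does not URL-escape values.
if __name__ == '__main__':
    import hashlib
    import hmac as _hmac
    from urllib.parse import urlencode

    def sign_query(params, api_key, secret, timestamp, recv_window=5000):
        # extend, sort and encode the query exactly once
        query = dict(params, api_key=api_key, recv_window=recv_window, timestamp=timestamp)
        auth = urlencode(sorted(query.items()))
        # sign the encoded payload with the account secret
        signature = _hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()
        return auth + '&sign=' + signature

    print(sign_query({'symbol': 'BTCUSD'}, 'my-key', 'my-secret', 1577480000000))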
| 43.781682
| 340
| 0.464928
|
b32aa637a7ea6dcb432b655b25e16f4e94548efe
| 2,682
|
py
|
Python
|
org/geo/utils/Generator.py
|
haiming-97/Conflict1227
|
c40f8fef17b59cbdc372405a6559f18d4f91c32b
|
[
"MIT"
] | null | null | null |
org/geo/utils/Generator.py
|
haiming-97/Conflict1227
|
c40f8fef17b59cbdc372405a6559f18d4f91c32b
|
[
"MIT"
] | null | null | null |
org/geo/utils/Generator.py
|
haiming-97/Conflict1227
|
c40f8fef17b59cbdc372405a6559f18d4f91c32b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding=utf-8
import os
import xlwt
import numpy as np
import random
import xlrd
def satisfied(prob):
return random.random() < prob
def rand():
return random.random()
def select(a, b):
return random.randint(a, b)
def normal(mu, sigma):
return random.normalvariate(mu=mu, sigma=sigma)
def unique_list(max_num, size):
origin_list = [i for i in range(max_num)]
if max_num <= size: return origin_list
indexes = []
new_list = origin_list
count = 0
while count < size:
value = np.random.choice(new_list)
indexes.append(value)
new_list.remove(value)
count += 1
return indexes
def unique_pick_list(origin_list, size):
if len(origin_list) <= size: return origin_list
indexes = []
new_list = origin_list
count = 0
while count < size:
value = np.random.choice(new_list)
indexes.append(value)
new_list.remove(value)
count += 1
return indexes
def check_filename_available(filename):
n = [0]
def check_meta(file_name):
file_name_new = file_name
if os.path.isfile(file_name):
file_name_new = file_name[:file_name.rfind('.')] + '_' + str(n[0]) + file_name[file_name.rfind('.'):]
n[0] += 1
if os.path.isfile(file_name_new):
file_name_new = check_meta(file_name)
return file_name_new
return_name = check_meta(filename)
return return_name
def write_excel(name1, name2, list1, list2, filename):
f = xlwt.Workbook()
sheet1 = f.add_sheet(u'conflict', cell_overwrite_ok=True)
name = [str(name1), str(name2)]
path = 'C:/Users/石海明/Desktop/testexcel20191220/'+str(filename)+'.xls'
for i in range(0, len(name)):
sheet1.write(0, i, name[i])
for i in range(0, len(list1)):
sheet1.write(i+1, 0, list1[i])
for i in range(0, len(list2)):
sheet1.write(i+1, 1, list2[i])
return_path = check_filename_available(path)
f.save(return_path)
def write_list(list1, filename):
f = xlwt.Workbook()
sheet1 = f.add_sheet(u'conflict', cell_overwrite_ok=True)
path = 'C:/Users/石海明/Desktop/testlist/'+str(filename)+'.xls'
for i in range(0, len(list1)):
sheet1.write(i, 0, str(list1[i]))
return_path = check_filename_available(path)
f.save(return_path)
def read_excel(excel_path):
data = xlrd.open_workbook(excel_path)
list = []
table = data.sheets()[0]
    nrows = table.nrows  # number of rows
    ncols = table.ncols  # number of columns
    for i in range(0,nrows):
        rowValues = table.row_values(i)  # values of the i-th row
for item in rowValues:
list.append(int(item))
return list
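# Hypothetical usage sketch (not part of the original module): unique_list()
# draws `size` distinct values from range(max_num), and
# check_filename_available() appends _0, _1, ... to a filename until it no
# longer collides with an existing file.
if __name__ == '__main__':
    picked = unique_list(10, 4)
    assert len(picked) == len(set(picked)) == 4
    print(picked)
    # unchanged if 'report.xls' does not exist in the working directory
    print(check_filename_available('report.xls'))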
| 25.065421
| 113
| 0.63572
|
0bc7c40da8940e30ddda98c1f2cfaf581b50454c
| 4,437
|
py
|
Python
|
pdfSplitter/pdf2htmlEX/test/test_remote_browser.py
|
mackymiro/primo-tpccr
|
a0db409bb7e72911cffc59e771fa0d98c978d3b3
|
[
"MIT"
] | null | null | null |
pdfSplitter/pdf2htmlEX/test/test_remote_browser.py
|
mackymiro/primo-tpccr
|
a0db409bb7e72911cffc59e771fa0d98c978d3b3
|
[
"MIT"
] | null | null | null |
pdfSplitter/pdf2htmlEX/test/test_remote_browser.py
|
mackymiro/primo-tpccr
|
a0db409bb7e72911cffc59e771fa0d98c978d3b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Run browser tests through Sauce Labs
import unittest
import sys
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from sauceclient import SauceClient
from browser_tests import BrowserTests
# Set your own environment variables
USERNAME = os.environ.get('SAUCE_USERNAME')
ACCESS_KEY = os.environ.get('SAUCE_ACCESS_KEY')
# The base url that remote browser will access
# Usually a HTTP server should be set up in the folder containing the test cases
# Also Sauce Connect should be enabled
BASEURL='http://localhost:8000/'
SAUCE_OPTIONS = {
'record-video': 'false',
}
# we want to test the latest stable version
# and 'beta' is usually the best estimation
BROWSER_MATRIX = [
('win_ie', {
'platform': 'Windows 8.1',
'browserName': 'internet explorer',
'version': '11',
}),
('win_firefox', {
'platform': 'Windows 8.1',
'browserName': 'firefox',
'version': 'beta',
}),
('win_chrome', {
'platform': 'Windows 8.1',
'browserName': 'chrome',
'version': 'beta',
}),
('mac_firefox', {
'platform': 'OS X 10.9',
'browserName': 'firefox',
'version': 'beta',
}),
('mac_chrome', {
'platform': 'OS X 10.9',
'browserName': 'chrome',
'version': '40.0', # beta is not supported
}),
('linux_firefox', {
'platform': 'Linux',
'browserName': 'firefox',
'version': 'beta',
}),
('linux_chrome', {
'platform': 'Linux',
'browserName': 'chrome',
'version': 'beta',
}),
]
@unittest.skipIf((not (USERNAME and ACCESS_KEY)), 'Sauce Labs is not available')
class test_remote_browser_base(BrowserTests):
@classmethod
def setUpClass(cls):
super(test_remote_browser_base, cls).setUpClass()
if not cls.GENERATING_MODE:
cls.sauce = SauceClient(USERNAME, ACCESS_KEY)
cls.sauce_url = 'http://%s:%s@ondemand.saucelabs.com:80/wd/hub' % (USERNAME, ACCESS_KEY)
cls.browser = webdriver.Remote(
desired_capabilities=cls.desired_capabilities,
command_executor=cls.sauce_url
)
cls.browser.implicitly_wait(30)
# remote screen may not be large enough for the whole page
cls.browser.set_window_size(cls.BROWSER_WIDTH, cls.BROWSER_HEIGHT)
@classmethod
def tearDownClass(cls):
if not cls.GENERATING_MODE:
cls.browser.quit()
super(test_remote_browser_base, cls).tearDownClass()
def setUp(self):
super(test_remote_browser_base, self).setUp()
sys.exc_clear()
def tearDown(self):
try:
passed = (sys.exc_info() == (None, None, None))
branch = os.environ.get('TRAVIS_BRANCH', 'manual')
pull_request = os.environ.get('TRAVIS_PULL_REQUEST', 'false')
self.sauce.jobs.update_job(self.browser.session_id,
build_num=os.environ.get('TRAVIS_BUILD_NUMBER', '0'),
name='pdf2htmlEX',
passed=passed,
public='public restricted',
tags = [pull_request] if pull_request != 'false' else [branch]
)
except:
raise
pass
def generate_image(self, html_file, png_file, page_must_load=True):
self.browser.get(BASEURL + html_file)
try:
WebDriverWait(self.browser, 5).until(expected_conditions.presence_of_element_located((By.ID, 'page-container')))
except:
if page_must_load:
raise
self.browser.save_screenshot(png_file)
test_classnames = []
def generate_classes():
module = globals()
for browser_name, browser_caps in BROWSER_MATRIX:
d = dict(test_remote_browser_base.__dict__)
caps = SAUCE_OPTIONS.copy()
caps.update(browser_caps)
tunnel_identifier = os.environ.get('TRAVIS_JOB_NUMBER')
if tunnel_identifier:
caps['tunnel-identifier'] = tunnel_identifier
d['desired_capabilities'] = caps
name = "test_remote_%s" % (browser_name, )
module[name] = type(name, (test_remote_browser_base, unittest.TestCase), d)
test_classnames.append(name)
generate_classes()
| 31.920863
| 124
| 0.62407
|
7216fd3970c95b89547005638bec6539830063a2
| 947
|
py
|
Python
|
softhub/rest_api/serializers.py
|
davcri/softhub
|
5a932da36d1393c361b1940283d468692fe57d9d
|
[
"MIT"
] | 1
|
2018-05-05T20:01:15.000Z
|
2018-05-05T20:01:15.000Z
|
softhub/rest_api/serializers.py
|
davcri/softhub
|
5a932da36d1393c361b1940283d468692fe57d9d
|
[
"MIT"
] | 2
|
2021-03-18T20:16:53.000Z
|
2021-06-08T19:09:15.000Z
|
softhub/rest_api/serializers.py
|
davcri/softhub
|
5a932da36d1393c361b1940283d468692fe57d9d
|
[
"MIT"
] | 2
|
2017-05-10T22:33:39.000Z
|
2018-01-30T14:05:06.000Z
|
from rest_framework.serializers import ModelSerializer
from softhub.models.OperatingSystem import OperatingSystem
from softhub.models.Application import Application
from softhub.models.Executable import Executable
from softhub.models.Developer import Developer
from softhub.models.Version import Version
class OperatingSystemSerializer(ModelSerializer):
class Meta:
model = OperatingSystem
#fields = ('name', 'release_date', 'family')
fields = '__all__'
class ApplicationSerializer(ModelSerializer):
class Meta:
model = Application
fields = '__all__'
class VersionSerializer(ModelSerializer):
class Meta:
model = Version
fields = '__all__'
class ExecutableSerializer(ModelSerializer):
class Meta:
model = Executable
fields = '__all__'
class DeveloperSerializer(ModelSerializer):
class Meta:
model = Developer
fields = '__all__'
| 24.282051
| 58
| 0.724393
|
5a2f783d69aca9839b2fb6dda89263f44666c37b
| 1,157
|
py
|
Python
|
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepHypKinFit_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepHypKinFit_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
TopQuarkAnalysis/TopJetCombination/python/TtSemiLepHypKinFit_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
#
# module to make the kinematic fit hypothesis
#
ttSemiLepHypKinFit = cms.EDProducer("TtSemiLepHypKinFit",
## met input
mets = cms.InputTag("patMETs"),
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## kin fit results
match = cms.InputTag("kinFitTtSemiLepEventHypothesis"),
status = cms.InputTag("kinFitTtSemiLepEventHypothesis","Status"),
leptons = cms.InputTag("kinFitTtSemiLepEventHypothesis","Leptons"),
neutrinos = cms.InputTag("kinFitTtSemiLepEventHypothesis","Neutrinos"),
partonsHadP = cms.InputTag("kinFitTtSemiLepEventHypothesis","PartonsHadP"),
partonsHadQ = cms.InputTag("kinFitTtSemiLepEventHypothesis","PartonsHadQ"),
partonsHadB = cms.InputTag("kinFitTtSemiLepEventHypothesis","PartonsHadB"),
partonsLepB = cms.InputTag("kinFitTtSemiLepEventHypothesis","PartonsLepB"),
## number of considered jets
nJetsConsidered = cms.InputTag("kinFitTtSemiLepEventHypothesis","NumberOfConsideredJets")
)
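# Illustrative variant (hypothetical, not part of the original configuration):
# cfi producers are usually customised by cloning and overriding individual
# parameters, e.g. to build the hypothesis from electrons instead of muons.
ttSemiLepHypKinFitElectrons = ttSemiLepHypKinFit.clone(
    leps = cms.InputTag("selectedPatElectrons")
)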
| 42.851852
| 113
| 0.699222
|
2785ad9ea682590b60d84262d4e69687d346f52c
| 1,277
|
py
|
Python
|
app/streamflow/sherlock/sh_Q90_EcoB.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | null | null | null |
app/streamflow/sherlock/sh_Q90_EcoB.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | null | null | null |
app/streamflow/sherlock/sh_Q90_EcoB.py
|
fkwai/geolearn
|
30cb4353d22af5020a48100d07ab04f465a315b0
|
[
"MIT"
] | 2
|
2021-04-04T02:45:59.000Z
|
2022-03-19T09:41:39.000Z
|
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.master import basinFull
varX = gridMET.varLst
varY = ['runoff']
varXC = gageII.lstWaterQuality
varYC = None
dataName = 'Q90'
sd = '1979-01-01'
ed = '2010-01-01'
l3Lst = ['080304',
'050301',
'080401',
'090203',
'080305',
'080203',
'080503',
'090402',
'080301',
'080107',
'080204',
'080402']
subsetLst = list()
for l3 in l3Lst:
subsetLst.append('EcoB'+l3[:2])
subsetLst.append('EcoB'+l3[:4])
subsetLst.append('EcoB'+l3[:6])
subsetLst = list(set(subsetLst))
globalName = '{}-B10'.format(dataName)
caseLst = list()
for subset in subsetLst:
outName = '{}-{}-B10-gs'.format(dataName, subset)
caseName = basinFull.wrapMaster(outName=outName, dataName=dataName, varX=varX,
varY=varY, varXC=varXC, varYC=varYC, sd=sd, ed=ed,
subset=subset, borrowStat=globalName)
caseLst.append(caseName)
cmdP = 'python /home/users/kuaifang/GitHUB/geolearn/hydroDL/master/cmd/basinFull.py -M {}'
for caseName in caseLst:
slurm.submitJobGPU(caseName, cmdP.format(caseName))
| 26.604167
| 90
| 0.614722
|
04a15c8517477127a002f98dd92591b9653a7a6b
| 13,552
|
py
|
Python
|
carp_fastapi/api/routers/services/carp_deployments_datapoints_consents.py
|
almaxdtu/carp.client-fastAPI
|
867394b0d43292abb42edfe38462a8ebe304ac57
|
[
"MIT"
] | 1
|
2022-03-26T04:45:58.000Z
|
2022-03-26T04:45:58.000Z
|
carp_fastapi/api/routers/services/carp_deployments_datapoints_consents.py
|
cph-cachet/carp.client-fastAPI
|
867394b0d43292abb42edfe38462a8ebe304ac57
|
[
"MIT"
] | null | null | null |
carp_fastapi/api/routers/services/carp_deployments_datapoints_consents.py
|
cph-cachet/carp.client-fastAPI
|
867394b0d43292abb42edfe38462a8ebe304ac57
|
[
"MIT"
] | null | null | null |
"""
Copyright 2018 Copenhagen Center for Health Technology (CACHET) at the Technical University of Denmark (DTU).
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ”Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ”AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fastapi import APIRouter, Request
from carp import datapoint_service as datapoint, consent_service as consent, deployment_service as deployment
from carp_fastapi.resources import carp_environment as env
from starlette.config import Config
config = Config(".env")
environment: str = config("ENVIRONMENT", default="local")
router = APIRouter()
"""
DATA-POINTS :: CREATE :: GET :: DELETE
"""
@router.post("/{deployment_id}/data-points")
async def create_data_point(request: Request, deployment_id: str):
"""
Endpoint: [create_data_point]
:param request: The [request] body.
:param deployment_id: The [deployment_id] assigned in the deployment.
    :return: The newly created data point by its [deployment_id].
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await datapoint.create_data_point(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
data_points_body=request_body)
return response
@router.post("/{deployment_id}/data-points/batch")
async def create_many_data_point(request: Request, deployment_id: str):
"""
Endpoint: [create_many_data_points]
:param request: The [request] body.
:param deployment_id: The [deployment_id] assigned in the deployment.
    :return: The newly created data points.
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await datapoint.create_many_data_points(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
data_points_body=request_body)
return response
@router.get('/{deployment_id}/data-points/{data_point_id}')
async def get_one_data_point(request: Request, deployment_id: str, data_point_id: str):
"""
Endpoint: [get_one_data_point]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param data_point_id: The [data_point_id] assigned in the data point.
    :return: The data point by its [deployment_id] and [data_point_id].
"""
response = await datapoint.get_data_point(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
data_point_id=data_point_id)
return response
@router.get('/{deployment_id}/data-points')
async def get_all_data_points(request: Request, deployment_id: str):
"""
Endpoint: [get_all_data_points]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:return: The data points by their [deployment_id].
"""
response = await datapoint.get_all_data_points(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id)
return response
@router.get('/{deployment_id}/data-points/page/{page_id}')
async def get_all_data_points_pageable(request: Request, deployment_id: str, page_id: int = 0):
"""
Endpoint: [get_all_data_points_pageable]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param page_id: The [page_id] of the data point.
:return: The data points by their [deployment_id] and [page_number].
"""
response = await datapoint.get_all_data_points_pageable(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
page=page_id)
return response
@router.get('/{deployment_id}/data-points/sort/{sort_param}')
async def get_all_data_points_sorted(request: Request, deployment_id: str, sort_param: str):
"""
Endpoint: [get_all_data_points_sorted]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param sort_param: The [sort_param] parameter to order the data points (asc, desc).
:return: The data points sorted by their [deployment_id] and the [sort_param] parameter.
"""
response = await datapoint.get_all_data_points_sorted(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
sort=sort_param)
return response
@router.get('/{deployment_id}/data-points/query/{query_param}')
async def get_all_data_points_with_query(request: Request, deployment_id: str, query_param: str):
"""
Endpoint: [get_all_data_points_with_query]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param query_param: The [query_param] parameters to retrieve the data point.
:return: The data points by their [deployment_id] and the [query_param].
"""
response = await datapoint.get_all_nested_query(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
query=query_param)
return response
@router.get('/{deployment_id}/data-points/count/query/{query_param}')
async def get_count_of_data_points(request: Request, deployment_id: str, query_param: str):
"""
    Endpoint: [get_count_of_data_points]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param query_param: The [query_param] parameters to retrieve the data point.
:return: The data points by their [deployment_id] and the [query] parameter.
"""
response = await datapoint.get_count_data_points(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
query=query_param)
return response
@router.delete('/{deployment_id}/data-points/{data_point_id}')
async def delete_data_point(request: Request, deployment_id: str, data_point_id: str):
"""
Endpoint: [delete_data_point]
:param request: The [request] header.
:param deployment_id: The [deployment_id] assigned in the deployment.
:param data_point_id: The [data_point_id] assigned in the data point.
:return: The [data_point_id] of the delete data point.
"""
response = await datapoint.delete_data_point(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
data_point_id=data_point_id)
return response
"""
CONSENT DOCUMENT :: CREATE :: GET :: DELETE
"""
@router.post('/{deployment_id}/consent-documents')
async def create_consent_document(request: Request, deployment_id: str):
"""
Endpoint: [create_consent]
:param request: The [request] body.
:param deployment_id: The [deployment_id] of the consent document.
:return: The newly created consent document by its [deployment_id].
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await consent.create_consent(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
consent_body=request_body)
return response
@router.get('/{deployment_id}/consent-documents/{consent_id}')
async def get_consent_document(request: Request, deployment_id: str, consent_id: str):
"""
Endpoint: [get_consent_document]
:param request: The [request] header.
:param deployment_id: The [deployment_id] of the consent document.
:param consent_id: The [consent_id] of the consent document.
:return: The consent document by its [deployment_id] and [consent_id].
"""
response = await consent.get_consent_document(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
consent_id=consent_id)
return response
@router.get('/{deployment_id}/consent-documents')
async def get_all_consent_documents(request: Request, deployment_id: str):
"""
Endpoint: [get_all_consent_documents]
:param request: The [request] header.
:param deployment_id: The [deployment_id] of the deployment.
:return: The consent documents by its [deployment_id].
"""
response = await consent.get_all_consent_documents(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id)
return response
@router.delete('/{deployment_id}/consent-documents/{consent_id}')
async def delete_consent_document(request: Request, deployment_id: str, consent_id: str):
"""
Endpoint: [delete_consent_document]
:param request: The [request] header.
:param deployment_id: The [deployment_id] of the study deployment.
:param consent_id: The [consent_id] of the consent document.
:return: The deleted deployment.
"""
response = await consent.delete_consent(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_id=deployment_id,
consent_id=consent_id)
return response
"""
DEPLOYMENTS :: CREATE :: GET :: DELETE
"""
@router.post('/deployment-service')
async def deployment_service(request: Request):
"""
Endpoint: [deployment_service]
:param request: The [request] body.
:return: The deployment response.
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await deployment.deployment_service(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_body=request_body)
return response
@router.post('/participation-service')
async def deployment_participation(request: Request):
"""
Endpoint: [deployment_participation]
:param request: The [request] body.
:return: The deployment response.
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await deployment.deployment_participation(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_body=request_body)
return response
@router.post('/participation-service')
async def deployment_statistics(request: Request):
"""
Endpoint: [deployment_statistics]
:return: The deployment statistics response.
"""
body: bytes = await request.body()
request_body: str = bytes.decode(body)
response = await deployment.deployment_statistics(env.BASE_URL[environment],
access_token=request.headers['authorization'],
deployment_body=request_body)
return response
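# Standalone sketch (hypothetical app and route names) of the pattern used by
# the endpoints above: a path parameter plus the raw request body decoded to a
# string before being forwarded. It does not touch the real CARP services and
# only runs when the module is executed directly.
if __name__ == '__main__':
    from fastapi import FastAPI, Request
    from fastapi.testclient import TestClient

    demo_app = FastAPI()

    @demo_app.post('/deployments/{deployment_id}/echo')
    async def echo(request: Request, deployment_id: str):
        body: bytes = await request.body()
        return {'deployment_id': deployment_id, 'body': bytes.decode(body)}

    client = TestClient(demo_app)
    print(client.post('/deployments/abc/echo', json={'x': 1}).json())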
| 47.384615
| 460
| 0.63142
|
91c7c605e2e900ad5999099c615b54bddd48045d
| 177
|
py
|
Python
|
backup_scripts/exceptions.py
|
markliederbach/backup-scripts
|
5c70b786d4c5303e4b78b02ed4649b5972f9ae5d
|
[
"MIT"
] | null | null | null |
backup_scripts/exceptions.py
|
markliederbach/backup-scripts
|
5c70b786d4c5303e4b78b02ed4649b5972f9ae5d
|
[
"MIT"
] | null | null | null |
backup_scripts/exceptions.py
|
markliederbach/backup-scripts
|
5c70b786d4c5303e4b78b02ed4649b5972f9ae5d
|
[
"MIT"
] | null | null | null |
class BackupsException(Exception):
pass
class NextCloudBackupsException(BackupsException):
pass
class NextCloudClientException(NextCloudBackupsException):
pass
| 14.75
| 58
| 0.80226
|
da5f835de5c155e7478cc5ea92b06b2d8e65870b
| 2,542
|
py
|
Python
|
src/isle/evolver/transform/constantShift.py
|
chelseajohn/isle
|
f610b55a1e8b6d2584896eb649092b0524cc1f8c
|
[
"MIT"
] | 2
|
2021-01-14T17:47:01.000Z
|
2021-07-16T22:31:25.000Z
|
src/isle/evolver/transform/constantShift.py
|
chelseajohn/isle
|
f610b55a1e8b6d2584896eb649092b0524cc1f8c
|
[
"MIT"
] | 21
|
2018-06-04T07:09:02.000Z
|
2020-12-11T09:37:08.000Z
|
src/isle/evolver/transform/constantShift.py
|
chelseajohn/isle
|
f610b55a1e8b6d2584896eb649092b0524cc1f8c
|
[
"MIT"
] | 3
|
2021-01-18T19:18:29.000Z
|
2021-03-26T03:27:15.000Z
|
r"""!\file
\ingroup evolvers
Transform to shift configurations by a constant.
"""
import numpy as np
from .transform import Transform
from ... import CDVector
class ConstantShift(Transform):
r"""! \ingroup evolvers
Transform that shifts configurations by a constant.
"""
def __init__(self, shift, action, lattSize=None):
if isinstance(shift, (np.ndarray, CDVector)):
self.shift = CDVector(shift)
else:
if lattSize is None:
raise ValueError("Argument lattSize must not be None if shift is"
" passed as a scalar")
self.shift = CDVector(np.full(lattSize, shift, dtype=complex))
self.action = action
def forward(self, phi, actVal):
r"""!
Shift by +shift.
\param phi Configuration on proposal manifold.
\param actVal Value of the action at phi.
\returns In order:
- Configuration on MC manifold.
- Value of action at configuration on MC manifold.
- \f$\log \det J\f$ where \f$J\f$ is the Jacobian of the transformation.
"""
return phi+self.shift, self.action.eval(phi), 0
def backward(self, phi, jacobian=False):
r"""!
Shift by -shift.
\param phi Configuration on MC manifold.
\returns
- Configuration on proposal manifold
- \f$\log \det J\f$ where \f$J\f$ is the Jacobian of the
*forwards* transformation. `None` if `jacobian==False`.
"""
return phi-self.shift, 0 if jacobian else None
def save(self, h5group, manager):
r"""!
Save the transform to HDF5.
Has to be the inverse of Transform.fromH5().
\param h5group HDF5 group to save to.
\param manager EvolverManager whose purview to save the transform in.
"""
h5group["shift"] = self.shift
@classmethod
def fromH5(cls, h5group, _manager, action, _lattice, _rng):
r"""!
Construct a transform from HDF5.
        Create and initialize a new instance from parameters stored via ConstantShift.save().
\param h5group HDF5 group to load parameters from.
\param _manager EvolverManager responsible for the HDF5 file.
\param action Action to use.
\param _lattice Lattice the simulation runs on.
\param _rng Central random number generator for the run.
        \returns A newly constructed constant shift transform.
"""
return cls(h5group["shift"][()], action)
| 34.821918
| 88
| 0.614477
|
d0aa9899a6ca4f1a53def7ab2771989ffce2b0f6
| 539
|
py
|
Python
|
cython/profiling.py
|
Chiel92/evolutionary-computing
|
12a77a9d0504d55141ac8f8b02213d58a5a39d2b
|
[
"MIT"
] | null | null | null |
cython/profiling.py
|
Chiel92/evolutionary-computing
|
12a77a9d0504d55141ac8f8b02213d58a5a39d2b
|
[
"MIT"
] | null | null | null |
cython/profiling.py
|
Chiel92/evolutionary-computing
|
12a77a9d0504d55141ac8f8b02213d58a5a39d2b
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# filename: profiling.py
"""
Profiling utilities.
"""
# Import profile tools
import pstats, cProfile
def profile(function, filename='Profile.prof'):
"""Evaluate and profile given string."""
cProfile.runctx('print(function())', globals(), locals(), filename)
report = pstats.Stats(filename)
report.strip_dirs().sort_stats('time').print_stats()
def compare(functions, filename='Profile.prof'):
"""Profile a sequence of functions."""
for function in functions:
profile(function, filename)
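# Hypothetical usage sketch: profile a cheap CPU-bound function. The raw
# profile data is written to Profile.prof and a time-sorted report is printed.
if __name__ == '__main__':
    def busy():
        return sum(i * i for i in range(200000))
    compare([busy])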
| 24.5
| 71
| 0.695733
|
8b0efb1a924e24b9f13f5ae09f0ac3edd64a05a5
| 25,044
|
py
|
Python
|
delphin/ace.py
|
arademaker/pydelphin
|
3ebcce48c4edcb4e797fc8e447a80e38e8fef53a
|
[
"MIT"
] | null | null | null |
delphin/ace.py
|
arademaker/pydelphin
|
3ebcce48c4edcb4e797fc8e447a80e38e8fef53a
|
[
"MIT"
] | null | null | null |
delphin/ace.py
|
arademaker/pydelphin
|
3ebcce48c4edcb4e797fc8e447a80e38e8fef53a
|
[
"MIT"
] | null | null | null |
"""
An interface for the ACE processor.
"""
from typing import (
Any, Iterator, Iterable, Mapping, Dict, List, Tuple, Pattern, IO)
import logging
import os
from pathlib import Path
import argparse
import re
from subprocess import (
check_call,
check_output,
CalledProcessError,
Popen,
PIPE
)
from platform import platform # portable system information
from getpass import getuser # portable way to get username
from socket import gethostname # portable way to get host name
from datetime import datetime
import locale
from delphin import interface
from delphin import util
from delphin.exceptions import PyDelphinException
# Default modules need to import the PyDelphin version
from delphin.__about__ import __version__ # noqa: F401
logger = logging.getLogger(__name__)
# do this right away to avoid some encoding issues
locale.setlocale(locale.LC_ALL, '')
encoding = locale.getpreferredencoding(False)
class ACEProcessError(PyDelphinException):
"""Raised when the ACE process has crashed and cannot be recovered."""
class ACEProcess(interface.Processor):
"""
The base class for interfacing ACE.
This manages most subprocess communication with ACE, but does not
interpret the response returned via ACE's stdout. Subclasses
override the :meth:`receive` method to interpret the task-specific
response formats.
Note that not all arguments to this class are used by every
subclass; the documentation for each subclass specifies which are
available.
Args:
grm (str): path to a compiled grammar image
cmdargs (list, optional): a list of command-line arguments
for ACE; note that arguments and their values should be
separate entries, e.g. `['-n', '5']`
executable (str, optional): the path to the ACE binary; if
`None`, ACE is assumed to be callable via `ace`
env (dict): environment variables to pass to the ACE
subprocess
tsdbinfo (bool): if `True` and ACE's version is compatible,
all information ACE reports for [incr tsdb()] processing
is gathered and returned in the response
full_forest (bool): if `True` and *tsdbinfo* is `True`, output
the full chart for each parse result
stderr (file): stream used for ACE's stderr
"""
_cmdargs: List[str] = []
_termini: List[Pattern[str]] = []
def __init__(self,
grm: util.PathLike,
cmdargs: List[str] = None,
executable: util.PathLike = None,
env: Mapping[str, str] = None,
tsdbinfo: bool = True,
full_forest: bool = False,
stderr: IO[Any] = None):
self.grm = str(Path(grm).expanduser())
self.cmdargs = cmdargs or []
# validate the arguments
_ace_argparser.parse_args(self.cmdargs)
self.executable = 'ace'
if executable:
self.executable = str(Path(executable).expanduser())
ace_version = self.ace_version
if ace_version >= (0, 9, 14):
self.cmdargs.append('--tsdb-notes')
if tsdbinfo and ace_version >= (0, 9, 24):
self.cmdargs.extend(['--tsdb-stdout', '--report-labels'])
setattr(self, 'receive', self._tsdb_receive)
if full_forest:
self._cmdargs.append('--itsdb-forest')
else:
setattr(self, 'receive', self._default_receive)
self.env = env or os.environ
self._run_id = -1
self.run_infos: List[Dict[str, Any]] = []
self._stderr = stderr
self._open()
@property
def ace_version(self) -> Tuple[int, ...]:
"""The version of the specified ACE binary."""
return _ace_version(self.executable)
@property
def run_info(self) -> Dict[str, Any]:
"""Contextual information about the the running process."""
return self.run_infos[-1]
def _open(self) -> None:
self._p = Popen(
[self.executable, '-g', self.grm] + self._cmdargs + self.cmdargs,
stdin=PIPE,
stdout=PIPE,
stderr=self._stderr,
env=self.env,
universal_newlines=True
)
self._run_id += 1
self.run_infos.append({
'run-id': self._run_id,
'application': 'ACE {} via PyDelphin v{}'.format(
'.'.join(map(str, self.ace_version)), __version__),
'environment': ' '.join(self.cmdargs),
'user': getuser(),
'host': gethostname(),
'os': platform(),
'start': datetime.now()
})
if self._p.poll() is not None and self._p.returncode != 0:
raise ACEProcessError("ACE process closed on startup")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False # don't try to handle any exceptions
def _result_lines(self, termini: List[Pattern[str]] = None) -> List[str]:
poll = self._p.poll
assert self._p.stdout is not None, 'cannot receive output from ACE'
next_line = self._p.stdout.readline
if termini is None:
termini = self._termini
i, end = 0, len(termini)
cur_terminus = termini[i]
lines = []
while i < end:
s = next_line()
if s == '' and poll() is not None:
logger.info(
'Process closed unexpectedly; giving up.'
)
self.close()
break
# The 'run' note should appear when the process is opened, but
# handle it here to avoid potential deadlocks if it gets buffered
elif s.startswith('NOTE: tsdb run:'):
self._read_run_info(s.rstrip())
# the rest should be normal result lines
else:
lines.append(s.rstrip())
if cur_terminus.search(s):
i += 1
return [line for line in lines if line != '']
def _read_run_info(self, line: str) -> None:
assert line.startswith('NOTE: tsdb run:')
for key, value in _sexpr_data(line[15:].lstrip()):
if key == ':application':
continue # PyDelphin sets 'application'
self.run_info[key.lstrip(':')] = value
def send(self, datum: str) -> None:
"""
Send *datum* (e.g. a sentence or MRS) to ACE.
Warning:
Sending data without reading (e.g., via :meth:`receive`) can
fill the buffer and cause data to be lost. Use the
:meth:`interact` method for most data-processing tasks with
ACE.
"""
assert self._p.stdin is not None, 'cannot send inputs to ACE'
try:
self._p.stdin.write((datum.rstrip() + '\n'))
self._p.stdin.flush()
except (IOError, OSError): # ValueError if file was closed manually
logger.info(
'Attempted to write to a closed process; attempting to reopen'
)
self._open()
self._p.stdin.write((datum.rstrip() + '\n'))
self._p.stdin.flush()
def receive(self) -> interface.Response:
"""
Return the stdout response from ACE.
Warning:
Reading beyond the last line of stdout from ACE can cause
the process to hang while it waits for the next line. Use
the :meth:`interact` method for most data-processing tasks
with ACE.
"""
raise NotImplementedError()
def _default_receive(self) -> interface.Response:
raise NotImplementedError()
def _tsdb_receive(self) -> interface.Response:
lines = self._result_lines()
response, lines = _make_response(lines, self.run_info)
# now it should be safe to reopen a closed process (if necessary)
if self._p.poll() is not None:
logger.info('Attempting to restart ACE.')
self._open()
line = ' '.join(lines) # ACE 0.9.24 on Mac puts superfluous newlines
response = _tsdb_response(response, line)
return response
def interact(self, datum: str) -> interface.Response:
"""
Send *datum* to ACE and return the response.
This is the recommended method for sending and receiving data
to/from an ACE process as it reduces the chances of
over-filling or reading past the end of the buffer. It also
performs a simple validation of the input to help ensure that
one complete item is processed at a time.
If input item identifiers need to be tracked throughout
processing, see :meth:`process_item`.
Args:
datum (str): the input sentence or MRS
Returns:
:class:`~delphin.interface.Response`
"""
validated = self._validate_input(datum)
if validated:
self.send(validated)
result = self.receive()
else:
result, lines = _make_response(
[('NOTE: PyDelphin could not validate the input and '
'refused to send it to ACE'),
f'SKIP: {datum}'],
self.run_info)
result['input'] = datum
return result
def process_item(self,
datum: str,
keys: Dict[str, Any] = None) -> interface.Response:
"""
Send *datum* to ACE and return the response with context.
The *keys* parameter can be used to track item identifiers
through an ACE interaction. If the `task` member is set on
the ACEProcess instance (or one of its subclasses), it is
kept in the response as well.
Args:
datum (str): the input sentence or MRS
keys (dict): a mapping of item identifier names and values
Returns:
:class:`~delphin.interface.Response`
"""
response = self.interact(datum)
if keys is not None:
response['keys'] = keys
if 'task' not in response and self.task is not None:
response['task'] = self.task
return response
def close(self) -> int:
"""
Close the ACE process and return the process's exit code.
"""
self.run_info['end'] = datetime.now()
if self._p.stdin is not None:
self._p.stdin.close()
if self._p.stdout is not None:
for line in self._p.stdout:
if line.startswith('NOTE: tsdb run:'):
self._read_run_info(line)
else:
logger.debug('ACE cleanup: %s', line.rstrip())
retval = self._p.wait()
return retval
def _validate_input(self, datum: str) -> str:
raise NotImplementedError()
class ACEParser(ACEProcess):
"""
A class for managing parse requests with ACE.
See :class:`ACEProcess` for initialization parameters.
"""
task = 'parse'
_termini = [re.compile(r'^$'), re.compile(r'^$')]
def _validate_input(self, datum: str):
# valid input for parsing is non-empty
# (this relies on an empty string evaluating to False)
return isinstance(datum, str) and datum.strip()
def _default_receive(self):
lines = self._result_lines()
response, lines = _make_response(lines, self.run_info)
response['results'] = [
dict(zip(('mrs', 'derivation'), map(str.strip, line.split(' ; '))))
for line in lines
]
return response
class ACETransferer(ACEProcess):
"""
A class for managing transfer requests with ACE.
See :class:`ACEProcess` for initialization parameters.
"""
task = 'transfer'
_termini = [re.compile(r'^$')]
def __init__(self,
grm: util.PathLike,
cmdargs: List[str] = None,
executable: util.PathLike = None,
env: Mapping[str, str] = None,
stderr: IO[Any] = None):
super().__init__(grm, cmdargs=cmdargs, executable=executable, env=env,
tsdbinfo=False, full_forest=False, stderr=stderr)
def _validate_input(self, datum):
return _possible_mrs(datum)
def _default_receive(self):
lines = self._result_lines()
response, lines = _make_response(lines, self.run_info)
response['results'] = [{'mrs': line.strip()} for line in lines]
return response
class ACEGenerator(ACEProcess):
"""
A class for managing realization requests with ACE.
See :class:`ACEProcess` for initialization parameters.
"""
task = 'generate'
_cmdargs = ['-e', '--tsdb-notes']
_termini = [re.compile(r'NOTE: tsdb parse: ')]
def __init__(self,
grm: util.PathLike,
cmdargs: List[str] = None,
executable: util.PathLike = None,
env: Mapping[str, str] = None,
tsdbinfo: bool = True,
stderr: IO[Any] = None):
super().__init__(grm, cmdargs=cmdargs, executable=executable, env=env,
tsdbinfo=tsdbinfo, full_forest=False, stderr=stderr)
def _validate_input(self, datum):
return _possible_mrs(datum)
def _default_receive(self):
show_tree = '--show-realization-trees' in self.cmdargs
show_mrs = '--show-realization-mrses' in self.cmdargs
lines = self._result_lines()
response, lines = _make_response(lines, self.run_info)
i, numlines = 0, len(lines)
results = []
while i < numlines:
result = {'SENT': lines[i].strip()}
i += 1
if show_tree and lines[i].startswith('DTREE = '):
result['derivation'] = lines[i][8:].strip()
i += 1
if show_mrs and lines[i].startswith('MRS = '):
result['mrs'] = lines[i][6:].strip()
i += 1
results.append(result)
response['results'] = results
return response
def _tsdb_receive(self):
# with --tsdb-stdout, the notes line is not printed
lines = self._result_lines(termini=[re.compile(r'\(:results \.')])
response, lines = _make_response(lines, self.run_info)
line = ' '.join(lines) # ACE 0.9.24 on Mac puts superfluous newlines
response = _tsdb_response(response, line)
return response
def compile(cfg_path: util.PathLike,
out_path: util.PathLike,
executable: util.PathLike = None,
env: Mapping[str, str] = None,
stdout: IO[Any] = None,
stderr: IO[Any] = None) -> None:
"""
Use ACE to compile a grammar.
Args:
cfg_path (str): the path to the ACE config file
out_path (str): the path where the compiled grammar will be
written
executable (str, optional): the path to the ACE binary; if
`None`, the `ace` command will be used
env (dict, optional): environment variables to pass to the ACE
subprocess
stdout (file, optional): stream used for ACE's stdout
stderr (file, optional): stream used for ACE's stderr
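    Example:
        A sketch of compiling a grammar image; both paths are
        hypothetical placeholders:
        >>> ace.compile('~/grammars/erg/ace/config.tdl', 'erg.dat')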
"""
cfg_path = str(Path(cfg_path).expanduser())
out_path = str(Path(out_path).expanduser())
try:
check_call(
[(executable or 'ace'), '-g', cfg_path, '-G', out_path],
stdout=stdout, stderr=stderr, close_fds=True,
env=(env or os.environ)
)
except (CalledProcessError, OSError):
logger.error(
'Failed to compile grammar with ACE. See %s',
getattr(stderr, 'name', '<stderr>')
)
raise
def parse_from_iterable(
grm: util.PathLike,
data: Iterable[str],
**kwargs: Any) -> Iterator[interface.Response]:
"""
Parse each sentence in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): the sentences to parse
**kwargs: additional keyword arguments to pass to the ACEParser
Yields:
:class:`~delphin.interface.Response`
Example:
>>> sentences = ['Dogs bark.', 'It rained']
>>> responses = list(ace.parse_from_iterable('erg.dat', sentences))
NOTE: parsed 2 / 2 sentences, avg 723k, time 0.01026s
"""
with ACEParser(grm, **kwargs) as parser:
for datum in data:
yield parser.interact(datum)
def parse(grm: util.PathLike,
datum: str,
**kwargs: Any) -> interface.Response:
"""
Parse sentence *datum* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
datum (str): the sentence to parse
**kwargs: additional keyword arguments to pass to the ACEParser
Returns:
:class:`~delphin.interface.Response`
Example:
>>> response = ace.parse('erg.dat', 'Dogs bark.')
NOTE: parsed 1 / 1 sentences, avg 797k, time 0.00707s
"""
return next(parse_from_iterable(grm, [datum], **kwargs))
def transfer_from_iterable(
grm: util.PathLike,
data: Iterable[str],
**kwargs: Any) -> Iterator[interface.Response]:
"""
Transfer from each MRS in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): source MRSs as SimpleMRS strings
**kwargs: additional keyword arguments to pass to the
ACETransferer
Yields:
:class:`~delphin.interface.Response`
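    Example:
        A sketch only; 'jaen.dat' and *mrs_strings* (an iterable of
        SimpleMRS strings) are hypothetical placeholders:
        >>> responses = list(
        ...     ace.transfer_from_iterable('jaen.dat', mrs_strings))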
"""
with ACETransferer(grm, **kwargs) as transferer:
for datum in data:
yield transferer.interact(datum)
def transfer(grm: util.PathLike,
datum: str,
**kwargs: Any) -> interface.Response:
"""
Transfer from the MRS *datum* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
datum: source MRS as a SimpleMRS string
**kwargs: additional keyword arguments to pass to the
ACETransferer
Returns:
:class:`~delphin.interface.Response`
"""
return next(transfer_from_iterable(grm, [datum], **kwargs))
def generate_from_iterable(
grm: util.PathLike,
data: Iterable[str],
**kwargs: Any) -> Iterator[interface.Response]:
"""
Generate from each MRS in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): MRSs as SimpleMRS strings
**kwargs: additional keyword arguments to pass to the
ACEGenerator
Yields:
:class:`~delphin.interface.Response`
"""
with ACEGenerator(grm, **kwargs) as generator:
for datum in data:
yield generator.interact(datum)
def generate(grm: util.PathLike,
datum: str,
**kwargs: Any) -> interface.Response:
"""
Generate from the MRS *datum* with ACE using *grm*.
Args:
grm (str): path to a compiled grammar image
datum: the SimpleMRS string to generate from
**kwargs: additional keyword arguments to pass to the
ACEGenerator
Returns:
:class:`~delphin.interface.Response`
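    Example:
        A sketch only; 'erg.dat' and *mrs_string* (a SimpleMRS string,
        e.g. taken from a prior parse result) are hypothetical:
        >>> response = ace.generate('erg.dat', mrs_string)
        >>> realizations = response['results']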
"""
return next(generate_from_iterable(grm, [datum], **kwargs))
# The following defines the command-line options available for users to
# specify in ACEProcess tasks. For a description of these options, see:
# http://moin.delph-in.net/AceOptions
# thanks: https://stackoverflow.com/a/14728477/1441112
class _ACEArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError(message)
_ace_argparser = _ACEArgumentParser()
_ace_argparser.add_argument('-n', type=int)
_ace_argparser.add_argument('-1', action='store_const', const=1, dest='n')
_ace_argparser.add_argument('-r')
_ace_argparser.add_argument('-p', action='store_true')
_ace_argparser.add_argument('-X', action='store_true')
_ace_argparser.add_argument('-L', action='store_true')
_ace_argparser.add_argument('-y', action='store_true')
_ace_argparser.add_argument('--max-chart-megabytes', type=int)
_ace_argparser.add_argument('--max-unpack-megabytes', type=int)
_ace_argparser.add_argument('--timeout', type=int)
_ace_argparser.add_argument('--disable-subsumption-test', action='store_true')
_ace_argparser.add_argument('--show-realization-trees', action='store_true')
_ace_argparser.add_argument('--show-realization-mrses', action='store_true')
_ace_argparser.add_argument('--show-probability', action='store_true')
_ace_argparser.add_argument('--disable-generalization', action='store_true')
_ace_argparser.add_argument('--ubertagging', nargs='?', type=float)
_ace_argparser.add_argument('--pcfg', type=argparse.FileType())
_ace_argparser.add_argument('--rooted-derivations', action='store_true')
_ace_argparser.add_argument('--udx', nargs='?', choices=('all',))
_ace_argparser.add_argument('--yy-rules', action='store_true')
_ace_argparser.add_argument('--max-words', type=int)
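# Illustrative note (an assumption for documentation, not part of the module's
# public API): parsing a typical cmdargs list such as
# ['-n', '5', '--timeout', '60'] with _ace_argparser yields a Namespace with
# n=5 and timeout=60, while an unsupported option makes the overridden
# error() method raise ValueError instead of exiting.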
def _ace_version(executable: str) -> Tuple[int, ...]:
# 0.9.0 is the initial public release of ACE
version: Tuple[int, ...] = (0, 9, 0)
try:
out = check_output([executable, '-V'], universal_newlines=True)
except (CalledProcessError, OSError):
logger.error('Failed to get ACE version number.')
raise
else:
match = re.search(r'ACE version ([.0-9]+)', out)
if match is not None:
version = tuple(map(int, match.group(1).split('.')))
return version
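# For illustration (an assumption, not from the module): if `ace -V` prints a
# line like 'ACE version 0.9.31', _ace_version() returns (0, 9, 31); if no
# version string is found in the output, the fallback (0, 9, 0) is returned.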
def _possible_mrs(s: str) -> str:
start, end = -1, -1
depth = 0
for i, c in enumerate(s):
if c == '[':
if depth == 0:
start = i
depth += 1
elif c == ']':
depth -= 1
if depth == 0:
end = i + 1
break
# only valid if neither start nor end is -1
# note: this ignores any secondary MRSs on the same line
if start != -1 and end != -1:
# only log if taking a substring
if start != 0 and end != len(s):
logger.debug('Possible MRS found at <%d:%d>: %s', start, end, s)
s = s[start:end]
return s
else:
return ''
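# For illustration (hypothetical input, not actual ACE data): given the string
# 'derivation of [ LTOP: h0 RELS: < > ] with trailing text', _possible_mrs()
# returns the balanced-bracket span '[ LTOP: h0 RELS: < > ]'; a string without
# a bracketed span yields '' and is therefore never sent to ACE.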
def _make_response(lines, run) -> Tuple[interface.Response, List[str]]:
response = interface.Response({
'NOTES': [],
'WARNINGS': [],
'ERRORS': [],
'run': run,
'input': None,
'surface': None,
'results': []
})
content_lines = []
for line in lines:
if line.startswith('NOTE: '):
response['NOTES'].append(line[6:])
elif line.startswith('WARNING: '):
response['WARNINGS'].append(line[9:])
elif line.startswith('ERROR: '):
response['ERRORS'].append(line[7:])
elif line.startswith('SENT: ') or line.startswith('SKIP: '):
response['surface'] = line[6:]
else:
content_lines.append(line)
return response, content_lines
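# For illustration (hypothetical lines, not actual ACE output): given
# lines = ['NOTE: 1 readings', 'SENT: dogs bark', '<mrs> ; <derivation>'],
# _make_response() files the first line under response['NOTES'], records the
# surface string 'dogs bark', and returns the remaining content line for the
# caller (e.g. a _default_receive method) to turn into results.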
def _sexpr_data(line: str) -> Iterator[Dict[str, Any]]:
while line:
try:
expr = util.SExpr.parse(line)
except IndexError:
expr = util.SExprResult(
(':error', 'incomplete output from ACE'),
'')
if len(expr.data) != 2:
logger.error('Malformed output from ACE: %s', line)
break
line = expr.remainder.lstrip()
yield expr.data
def _tsdb_response(response: interface.Response,
line: str) -> interface.Response:
for key, val in _sexpr_data(line):
if key == ':p-input':
response.setdefault('tokens', {})['initial'] = val.strip()
elif key == ':p-tokens':
response.setdefault('tokens', {})['internal'] = val.strip()
elif key == ':results':
for result in val:
res = {}
for reskey, resval in result:
if reskey == ':derivation':
res['derivation'] = resval.strip()
elif reskey == ':mrs':
res['mrs'] = resval.strip()
elif reskey == ':surface':
res['surface'] = resval.strip()
elif isinstance(resval, str):
res[reskey[1:]] = resval.strip()
else:
res[reskey[1:]] = resval
response['results'].append(res)
elif key == ':chart':
response['chart'] = chart = []
for edge in val:
chart.append({edgekey[1:]: edgeval
for edgekey, edgeval in edge})
elif isinstance(val, str):
response[key[1:]] = val.strip()
else:
response[key[1:]] = val
return response
| 34.880223
| 79
| 0.586807
|
fed168295183d3301861825c3cea75da189a0516
| 441
|
py
|
Python
|
Python Fundamentals/Functions/More Exr/Task02.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | null | null | null |
Python Fundamentals/Functions/More Exr/Task02.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | null | null | null |
Python Fundamentals/Functions/More Exr/Task02.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | 1
|
2021-12-04T12:30:57.000Z
|
2021-12-04T12:30:57.000Z
|
import math
x1 = float(input())
y1 = float(input())
x2 = float(input())
y2 = float(input())
def get_distance(num1, num2):
x = math.pow(num1, 2)
y = math.pow(num2, 2)
return math.sqrt(x + y)
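# get_distance() returns the Euclidean distance of a point from the origin,
# e.g. get_distance(3, 4) == 5.0. The point closer to the origin is printed
# below with its coordinates floored.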
double_distance_1 = get_distance(x1, y1)
double_distance_2 = get_distance(x2, y2)
if double_distance_1 <= double_distance_2:
print(f"({math.floor(x1)}, {math.floor(y1)})")
else:
print(f"({math.floor(x2)}, {math.floor(y2)})")
| 20.045455
| 50
| 0.646259
|
230b9ea7323f2b255f57864bef93e4f5c1a8aef3
| 250
|
py
|
Python
|
pdtable/io/__init__.py
|
startable/pdtable
|
693af4f4d49a27f54c79887a42a0b1a1b68d77a9
|
[
"BSD-3-Clause"
] | 5
|
2020-11-11T10:15:04.000Z
|
2021-08-19T07:45:12.000Z
|
pdtable/io/__init__.py
|
startable/pdtable
|
693af4f4d49a27f54c79887a42a0b1a1b68d77a9
|
[
"BSD-3-Clause"
] | 63
|
2020-09-02T12:40:14.000Z
|
2021-07-14T19:52:33.000Z
|
pdtable/io/__init__.py
|
startable/pdtable
|
693af4f4d49a27f54c79887a42a0b1a1b68d77a9
|
[
"BSD-3-Clause"
] | 4
|
2021-01-04T12:44:45.000Z
|
2022-03-02T00:38:01.000Z
|
# flake8: noqa
from .csv import read_csv, write_csv
from .excel import read_excel, write_excel
from .json import table_to_json_data, json_data_to_table
from .parsers.fixer import ParseFixer
from .parsers.blocks import parse_blocks
from . import load
| 31.25
| 56
| 0.828
|
86ec6b6c179ac48e0eed82400e5041a05ababd77
| 3,102
|
py
|
Python
|
lib/governance_class.py
|
lifetioncoin/sentinel
|
a895cf671424261a60174386939391ea07c3a6d6
|
[
"MIT"
] | null | null | null |
lib/governance_class.py
|
lifetioncoin/sentinel
|
a895cf671424261a60174386939391ea07c3a6d6
|
[
"MIT"
] | null | null | null |
lib/governance_class.py
|
lifetioncoin/sentinel
|
a895cf671424261a60174386939391ea07c3a6d6
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import models
from bitcoinrpc.authproxy import JSONRPCException
import misc
import re
from misc import printdbg
import time
# mixin for GovObj composed classes like proposal and superblock, etc.
class GovernanceClass(object):
only_masternode_can_submit = False
# lazy
@property
def go(self):
return self.governance_object
# pass thru to GovernanceObject#vote
def vote(self, lifetioncoind, signal, outcome):
return self.go.vote(lifetioncoind, signal, outcome)
# pass thru to GovernanceObject#voted_on
def voted_on(self, **kwargs):
return self.go.voted_on(**kwargs)
def vote_validity(self, lifetioncoind):
if self.is_valid():
printdbg("Voting valid! %s: %d" % (self.__class__.__name__, self.id))
self.vote(lifetioncoind, models.VoteSignals.valid, models.VoteOutcomes.yes)
else:
printdbg("Voting INVALID! %s: %d" % (self.__class__.__name__, self.id))
self.vote(lifetioncoind, models.VoteSignals.valid, models.VoteOutcomes.no)
def get_submit_command(self):
obj_data = self.serialise()
# new objects won't have parent_hash, revision, etc...
cmd = ['gobject', 'submit', '0', '1', str(int(time.time())), obj_data]
# some objects don't have a collateral tx to submit
if not self.only_masternode_can_submit:
            cmd.append(self.go.object_fee_tx)
return cmd
def submit(self, lifetioncoind):
# don't attempt to submit a superblock unless a masternode
        # note: this will probably be refactored; it has some code smell
if (self.only_masternode_can_submit and not lifetioncoind.is_masternode()):
print("Not a masternode. Only masternodes may submit these objects")
return
try:
object_hash = lifetioncoind.rpc_command(*self.get_submit_command())
printdbg("Submitted: [%s]" % object_hash)
except JSONRPCException as e:
print("Unable to submit: %s" % e.message)
def serialise(self):
import binascii
import simplejson
return binascii.hexlify(simplejson.dumps(self.get_dict(), sort_keys=True).encode('utf-8')).decode('utf-8')
@classmethod
def serialisable_fields(self):
        # Python is not very elegant here...
pk_column = self._meta.primary_key.db_column
fk_columns = [fk.db_column for fk in self._meta.rel.values()]
do_not_use = [pk_column]
do_not_use.extend(fk_columns)
do_not_use.append('object_hash')
fields_to_serialise = list(self._meta.columns.keys())
for field in do_not_use:
if field in fields_to_serialise:
fields_to_serialise.remove(field)
return fields_to_serialise
def get_dict(self):
dikt = {}
for field_name in self.serialisable_fields():
dikt[field_name] = getattr(self, field_name)
dikt['type'] = getattr(self, 'govobj_type')
return dikt
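# Illustrative usage sketch (assumptions: `proposal` is an instance of a
# models-backed subclass of GovernanceClass, and `lifetioncoind` is a
# connected daemon wrapper; neither is defined in this file):
#
#     proposal.vote_validity(lifetioncoind)   # casts a valid/invalid vote
#     proposal.submit(lifetioncoind)          # runs `gobject submit ...`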
| 33.354839
| 114
| 0.657318
|
f9e43300cbe464e0fe339e2196a8f1de802f049d
| 12,633
|
py
|
Python
|
BaseTools/Source/Python/Common/EdkIIWorkspace.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | 6
|
2020-01-10T05:16:15.000Z
|
2022-01-06T17:41:58.000Z
|
BaseTools/Source/Python/Common/EdkIIWorkspace.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | null | null | null |
BaseTools/Source/Python/Common/EdkIIWorkspace.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | 3
|
2018-04-21T07:59:33.000Z
|
2018-04-23T02:06:01.000Z
|
## @file
# This is the base class for applications that operate on an EDK II Workspace
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os, sys, time
from DataType import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
## EdkIIWorkspace
#
# Collect the workspace directory from the WORKSPACE environment variable, read the Verbose command-line flag, and detect an icon bitmap file.
#
# @var StartTime: Time of build system starting
# @var PrintRunTime: Printable time of build system running
# @var PrintRunStatus: Printable status of build system running
# @var RunStatus: Status of build system running
#
class EdkIIWorkspace:
def __init__(self):
self.StartTime = time.time()
self.PrintRunTime = False
self.PrintRunStatus = False
self.RunStatus = ''
#
        # Check environment variable 'WORKSPACE'
        #
        if os.environ.get('WORKSPACE') is None:
            print 'ERROR: WORKSPACE not defined. Please run EdkSetup from the EDK II install directory.'
            # __init__ cannot return a value, so raise instead of "return False"
            raise EnvironmentError('WORKSPACE is not defined')
self.CurrentWorkingDir = os.getcwd()
self.WorkspaceDir = os.path.realpath(os.environ.get('WORKSPACE'))
(Drive, Path) = os.path.splitdrive(self.WorkspaceDir)
if Drive == '':
(Drive, CwdPath) = os.path.splitdrive(self.CurrentWorkingDir)
if Drive != '':
self.WorkspaceDir = Drive + Path
else:
self.WorkspaceDir = Drive.upper() + Path
self.WorkspaceRelativeWorkingDir = self.WorkspaceRelativePath (self.CurrentWorkingDir)
try:
#
# Load TianoCoreOrgLogo, used for GUI tool
#
self.Icon = wx.Icon(self.WorkspaceFile('tools/Python/TianoCoreOrgLogo.gif'), wx.BITMAP_TYPE_GIF)
except:
self.Icon = None
self.Verbose = False
for Arg in sys.argv:
if Arg.lower() == '-v':
self.Verbose = True
## Close build system
#
# Close build system and print running time and status
#
def Close(self):
if self.PrintRunTime:
Seconds = int(time.time() - self.StartTime)
if Seconds < 60:
print 'Run Time: %d seconds' % (Seconds)
else:
Minutes = Seconds / 60
Seconds = Seconds % 60
if Minutes < 60:
print 'Run Time: %d minutes %d seconds' % (Minutes, Seconds)
else:
Hours = Minutes / 60
Minutes = Minutes % 60
print 'Run Time: %d hours %d minutes %d seconds' % (Hours, Minutes, Seconds)
if self.RunStatus != '':
print self.RunStatus
## Convert to a workspace relative filename
#
# Convert a full path filename to a workspace relative filename.
#
# @param FileName: The filename to be Converted
#
# @retval None Workspace dir is not found in the full path
# @retval string The relative filename
#
def WorkspaceRelativePath(self, FileName):
FileName = os.path.realpath(FileName)
if FileName.find(self.WorkspaceDir) != 0:
return None
return FileName.replace (self.WorkspaceDir, '').strip('\\').strip('/')
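    # For illustration (hypothetical paths): with WorkspaceDir 'C:\Edk2',
    # WorkspaceRelativePath('C:\Edk2\MdePkg\MdePkg.dec') returns
    # 'MdePkg\MdePkg.dec', while a path outside the workspace returns None.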
## Convert to a full path filename
#
# Convert a workspace relative filename to a full path filename.
#
# @param FileName: The filename to be Converted
#
# @retval string The full path filename
#
def WorkspaceFile(self, FileName):
return os.path.realpath(mws.join(self.WorkspaceDir,FileName))
## Convert to a real path filename
#
# Convert ${WORKSPACE} to real path
#
# @param FileName: The filename to be Converted
#
# @retval string The full path filename
#
def WorkspacePathConvert(self, FileName):
return os.path.realpath(FileName.replace(TAB_WORKSPACE, self.WorkspaceDir))
## Convert XML into a DOM
#
# Parse an XML file into a DOM and return the DOM.
#
# @param FileName: The filename to be parsed
#
# @retval XmlParseFile (self.WorkspaceFile(FileName))
#
def XmlParseFile (self, FileName):
if self.Verbose:
print FileName
return XmlParseFile (self.WorkspaceFile(FileName))
## Convert a XML section
#
    # Parse a section of an XML file into a DOM (Document Object Model) and return the DOM.
#
# @param FileName: The filename to be parsed
# @param SectionTag: The tag name of the section to be parsed
#
# @retval XmlParseFileSection (self.WorkspaceFile(FileName), SectionTag)
#
def XmlParseFileSection (self, FileName, SectionTag):
if self.Verbose:
print FileName
return XmlParseFileSection (self.WorkspaceFile(FileName), SectionTag)
## Save a XML file
#
    # Save a DOM (Document Object Model) into an XML file.
#
# @param Dom: The Dom to be saved
# @param FileName: The filename
#
# @retval XmlSaveFile (Dom, self.WorkspaceFile(FileName))
#
def XmlSaveFile (self, Dom, FileName):
if self.Verbose:
print FileName
return XmlSaveFile (Dom, self.WorkspaceFile(FileName))
## Convert Text File To Dictionary
#
# Convert a workspace relative text file to a dictionary of (name:value) pairs.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data
    # @param CommentCharacter: Comment character, used to skip comment lines
    # @param KeySplitCharacter: Character separating a key name from its value; in "Key1 = Value1", '=' is the key split character
    # @param ValueSplitFlag: Flag indicating whether a key may have multiple values
    # @param ValueSplitCharacter: Character separating multiple values; in "Key1 = Value1|Value2", '|' is the value split character
#
# @retval ConvertTextFileToDictionary(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
#
def ConvertTextFileToDictionary(self, FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
if self.Verbose:
print FileName
return ConvertTextFileToDictionary(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
## Convert Dictionary To Text File
#
# Convert a dictionary of (name:value) pairs to a workspace relative text file.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data
    # @param CommentCharacter: Comment character, used to skip comment lines
    # @param KeySplitCharacter: Character separating a key name from its value; in "Key1 = Value1", '=' is the key split character
    # @param ValueSplitFlag: Flag indicating whether a key may have multiple values
    # @param ValueSplitCharacter: Character separating multiple values; in "Key1 = Value1|Value2", '|' is the value split character
#
# @retval ConvertDictionaryToTextFile(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
#
def ConvertDictionaryToTextFile(self, FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
if self.Verbose:
print FileName
return ConvertDictionaryToTextFile(self.WorkspaceFile(FileName), Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter)
## Convert Text File To Dictionary
#
# Convert a text file to a dictionary of (name:value) pairs.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data
# @param CommentCharacter: Comment character, used to skip comment lines
# @param KeySplitCharacter: Character separating a key name from its value; in "Key1 = Value1", '=' is the key split character
# @param ValueSplitFlag: Flag indicating whether a key may have multiple values
# @param ValueSplitCharacter: Character separating multiple values; in "Key1 = Value1|Value2", '|' is the value split character
#
# @retval True Convert successfully
# @retval False Open file failed
#
def ConvertTextFileToDictionary(FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
try:
F = open(FileName, 'r')
except:
return False
Keys = []
for Line in F:
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter and Key[0] not in Keys:
if ValueSplitFlag:
Dictionary[Key[0]] = LineList[1].replace('\\', '/').split(ValueSplitCharacter)
else:
Dictionary[Key[0]] = LineList[1].strip().replace('\\', '/')
Keys += [Key[0]]
F.close()
return True
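## Illustrative example (hypothetical file contents, not part of the module):
#  a line 'TARGET_ARCH = IA32|X64' parsed with KeySplitCharacter='=',
#  ValueSplitFlag=True and ValueSplitCharacter='|' stores the '|'-separated
#  values under Dictionary['TARGET_ARCH']; note that this branch does not
#  strip surrounding whitespace from the individual values.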
## Convert Dictionary To Text File
#
# Convert a dictionary of (name:value) pairs to a text file.
#
# @param FileName: Text filename
# @param Dictionary: Dictionary to store data
# @param CommentCharacter: Comment character, used to skip comment lines
# @param KeySplitCharacter: Character separating a key name from its value; in "Key1 = Value1", '=' is the key split character
# @param ValueSplitFlag: Flag indicating whether a key may have multiple values
# @param ValueSplitCharacter: Character separating multiple values; in "Key1 = Value1|Value2", '|' is the value split character
#
# @retval True Convert successfully
# @retval False Open file failed
#
def ConvertDictionaryToTextFile(FileName, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
try:
F = open(FileName, 'r')
Lines = []
Lines = F.readlines()
F.close()
except:
Lines = []
Keys = Dictionary.keys()
MaxLength = 0
for Key in Keys:
if len(Key) > MaxLength:
MaxLength = len(Key)
Index = 0
for Line in Lines:
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter and Key[0] in Dictionary:
if ValueSplitFlag:
Line = '%-*s %c %s\n' % (MaxLength, Key[0], KeySplitCharacter, ' '.join(Dictionary[Key[0]]))
else:
Line = '%-*s %c %s\n' % (MaxLength, Key[0], KeySplitCharacter, Dictionary[Key[0]])
Lines.pop(Index)
if Key[0] in Keys:
Lines.insert(Index, Line)
Keys.remove(Key[0])
Index += 1
for RemainingKey in Keys:
if ValueSplitFlag:
Line = '%-*s %c %s\n' % (MaxLength, RemainingKey, KeySplitCharacter, ' '.join(Dictionary[RemainingKey]))
else:
Line = '%-*s %c %s\n' % (MaxLength, RemainingKey, KeySplitCharacter, Dictionary[RemainingKey])
Lines.append(Line)
try:
F = open(FileName, 'w')
except:
return False
F.writelines(Lines)
F.close()
return True
## Create a new directory
#
# @param Directory: Directory to be created
#
def CreateDirectory(Directory):
if not os.access(Directory, os.F_OK):
os.makedirs (Directory)
## Create a new file
#
# @param Directory: Directory to be created
# @param FileName: Filename to be created
# @param Mode: The mode used to open the file; the default is 'w'
#
def CreateFile(Directory, FileName, Mode='w'):
CreateDirectory (Directory)
return open(os.path.join(Directory, FileName), Mode)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
# Nothing to do here. Could do some unit tests
pass
| 39.478125
| 158
| 0.643394
|