| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
| hexsha: 849a20daae6edb04cc92fd6d513ec61b99f56fc8 | size: 1,928 | ext: py | lang: Python |
| max_stars: src/websitewatcher.py | OzTamir/WebsiteWatcher | 54e949b72a2c66aed7c6282f17db00735b9966ed | ["MIT"] | count: 1 | 2021-06-03T19:13:05.000Z | 2021-06-03T19:13:05.000Z |
| max_issues: src/websitewatcher.py | OzTamir/WebsiteWatcher | 54e949b72a2c66aed7c6282f17db00735b9966ed | ["MIT"] | count: null | null | null |
| max_forks: src/websitewatcher.py | OzTamir/WebsiteWatcher | 54e949b72a2c66aed7c6282f17db00735b9966ed | ["MIT"] | count: null | null | null |
"""
WebsiteWatcher.py - Watch websites for changes and alert the human about it!
Author: OzTamir
URL: https://github.com/OzTamir/WebsiteWatcher
"""
import json
import logging
from configuration import Configuration
from watcher.watcher_manager import WatcherManager
from twilio_mode import TwilioWatcher
from telegram_bot import Bot
logging.basicConfig(
level=logging.INFO,
format='[WebsiteWatcher][%(levelname)s][%(filename)s:%(funcName)s]: %(message)s')
CONFIG_FILE = 'config.json'
def telegram_mode(watcher: WatcherManager, config: Configuration):
"""Run the telegram bot
Args:
watcher (WatcherManager): A WatcherManager object, used to manage the watching of urls
config (Configuration): A python representation of the config file on the disk
"""
bot = Bot(watcher, config.token, config.password, config.tick_frequency)
bot.run_bot()
def twilio_mode(watcher: WatcherManager, config: Configuration):
"""Run the Twilio loop
Args:
watcher (WatcherManager): A WatcherManager object, used to manage the watching of urls
config (Configuration): A python representation of the config file on the disk
"""
twilio_watcher = TwilioWatcher(watcher, config)
twilio_watcher.run_watcher()
def main():
logging.info('Starting...')
# Setup the configuration and the WatcherManager, both of which are the same in both modes
config = Configuration(CONFIG_FILE)
watcher_manager = WatcherManager(config.watchers_list)
# Run in the configured mode
if config.mode == Configuration.TELEGRAM:
return telegram_mode(watcher_manager, config)
elif config.mode == Configuration.TWILIO:
return twilio_mode(watcher_manager, config)
else:
# This should never happen, as it is checked when the configuration is parsed that one mode is enabled
raise NotImplementedError
if __name__ == '__main__':
main()
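The script only reads `config.json` through the `Configuration` class, so the exact schema is not visible here. The following is a hedged sketch of a plausible file: every key name is inferred from the attributes used above (mode, token, password, tick_frequency, watchers_list) and is therefore an assumption.

```python
# Hypothetical helper that writes a config.json matching the attributes the
# script reads; the key names and mode values are assumptions, not the
# project's documented schema.
import json

example_config = {
    "mode": "telegram",                    # or "twilio" (assumed spelling)
    "token": "<telegram-bot-token>",
    "password": "<bot-password>",
    "tick_frequency": 60,                  # seconds between checks (assumed unit)
    "watchers_list": [
        {"name": "Example", "url": "https://example.com"},
    ],
}

with open("config.json", "w") as fp:
    json.dump(example_config, fp, indent=2)
```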
| avg_line_length: 35.703704 | max_line_length: 110 | alphanum_fraction: 0.734959 |
| hexsha: b2702d560629bae70433d188efa99d4111d7ce00 | size: 2,144 | ext: py | lang: Python |
| max_stars: rastervision2/pytorch_backend/pytorch_learner_backend.py | csaybar/raster-vision | 617ca15f64e3b8a391432306a743f7d0dfff352f | ["Apache-2.0"] | count: 1 | 2020-10-10T12:32:43.000Z | 2020-10-10T12:32:43.000Z |
| max_issues: rastervision2/pytorch_backend/pytorch_learner_backend.py | alvintuitoek/raster-vision | ec6c8309f89c404513862369bb93dd9e6a70b455 | ["Apache-2.0"] | count: null | null | null |
| max_forks: rastervision2/pytorch_backend/pytorch_learner_backend.py | alvintuitoek/raster-vision | ec6c8309f89c404513862369bb93dd9e6a70b455 | ["Apache-2.0"] | count: 1 | 2021-12-02T08:07:21.000Z | 2021-12-02T08:07:21.000Z |
from os.path import join
import tempfile

from rastervision2.pipeline.filesystem import (make_dir, upload_or_copy,
                                               zipdir)
from rastervision2.core.backend import Backend, SampleWriter
from rastervision2.core.data_sample import DataSample


class PyTorchLearnerSampleWriter(SampleWriter):
    def __init__(self, output_uri, class_config, tmp_dir_root):
        self.output_uri = output_uri
        self.class_config = class_config
        self.tmp_dir_root = tmp_dir_root

    def __enter__(self):
        self.tmp_dir_obj = tempfile.TemporaryDirectory(dir=self.tmp_dir_root)
        self.sample_dir = join(self.tmp_dir_obj.name, 'samples')
        make_dir(self.sample_dir)
        self.sample_ind = 0
        return self

    def __exit__(self, type, value, traceback):
        """
        This writes a zip file for a group of scenes at {chip_uri}/{uuid}.zip.

        This method is called once per instance of the chip command.
        A number of instances of the chip command can run simultaneously to
        process chips in parallel. The uuid in the zip path above is what allows
        separate instances to avoid overwriting each other's output.
        """
        output_path = join(self.tmp_dir_obj.name, 'output.zip')
        zipdir(self.sample_dir, output_path)
        upload_or_copy(output_path, self.output_uri)
        self.tmp_dir_obj.cleanup()

    def write_sample(self, sample: DataSample):
        raise NotImplementedError()


class PyTorchLearnerBackend(Backend):
    def __init__(self, pipeline_cfg, learner_cfg, tmp_dir):
        self.pipeline_cfg = pipeline_cfg
        self.learner_cfg = learner_cfg
        self.tmp_dir = tmp_dir
        self.learner = None

    def train(self):
        learner = self.learner_cfg.build(self.tmp_dir)
        learner.main()

    def load_model(self):
        self.learner = self.learner_cfg.build_from_model_bundle(
            self.learner_cfg.get_model_bundle_uri(), self.tmp_dir)

    def get_sample_writer(self):
        raise NotImplementedError()

    def predict(self, chips, windows):
        raise NotImplementedError()
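As a hedged illustration of how `write_sample` could be overridden, here is a minimal sketch that persists each sample into the `sample_dir` created in `__enter__`. The `sample.chip` attribute is an assumption for this sketch, not a confirmed part of the raster-vision `DataSample` API.

```python
# Hypothetical subclass: saves each chip as a .npy file under sample_dir.
import numpy as np
from os.path import join


class NumpySampleWriter(PyTorchLearnerSampleWriter):
    def write_sample(self, sample):
        # Assumes `sample.chip` is a numpy array; adjust to the real DataSample fields.
        out_path = join(self.sample_dir, '{}.npy'.format(self.sample_ind))
        np.save(out_path, sample.chip)
        self.sample_ind += 1
```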
| avg_line_length: 34.580645 | max_line_length: 80 | alphanum_fraction: 0.686567 |
| hexsha: e8ca2029602983315c4ea183dc09ed227b9980e4 | size: 1,020 | ext: py | lang: Python |
| max_stars: qa327_test/conftest.py | KennethWest/ChairNerd | 574d41fef1b784021926527f969ebc2b4c470908 | ["MIT"] | count: null | null | null |
| max_issues: qa327_test/conftest.py | KennethWest/ChairNerd | 574d41fef1b784021926527f969ebc2b4c470908 | ["MIT"] | count: null | null | null |
| max_forks: qa327_test/conftest.py | KennethWest/ChairNerd | 574d41fef1b784021926527f969ebc2b4c470908 | ["MIT"] | count: null | null | null |
import pytest
import subprocess
import os
import signal
import time
import tempfile
from qa327.__main__ import FLASK_PORT
from qa327.__main__ import app
import threading
from werkzeug.serving import make_server

base_url = 'http://localhost:{}'.format(FLASK_PORT)


class ServerThread(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.srv = make_server('127.0.0.1', FLASK_PORT, app)
        self.ctx = app.app_context()
        self.ctx.push()

    def run(self):
        self.srv.serve_forever()

    def shutdown(self):
        self.srv.shutdown()


@pytest.fixture(scope="module", autouse=True)
def server():
    on_win = os.name == 'nt'
    with tempfile.TemporaryDirectory() as tmp_folder:
        # create a live server for testing
        # with a temporary file as database
        db = os.path.join(tmp_folder, 'db.sqlite')
        server = ServerThread()
        server.start()
        time.sleep(5)
        yield
        server.shutdown()
        time.sleep(2)
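A minimal sketch of a test module that relies on this autouse fixture; the `requests` dependency and the `/` endpoint are assumptions for illustration and not part of the original suite.

```python
# Hypothetical qa327_test/test_smoke.py: the autouse `server` fixture above
# starts the app for the module, so a test only needs the base_url.
import requests

from qa327_test.conftest import base_url


def test_server_is_up():
    # Any non-5xx response shows the threaded server answered the request.
    response = requests.get(base_url + '/', timeout=5)
    assert response.status_code < 500
```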
| avg_line_length: 23.181818 | max_line_length: 60 | alphanum_fraction: 0.661765 |
| hexsha: 3f3838cab49b96a289a7d659e6e7883efb02cef3 | size: 6,533 | ext: py | lang: Python |
| max_stars: src/users/views.py | DoubleTakoMeat/pycon.tw | 6a49d890ced9027ddfdc965e8753c35d12da79b1 | ["MIT"] | count: null | null | null |
| max_issues: src/users/views.py | DoubleTakoMeat/pycon.tw | 6a49d890ced9027ddfdc965e8753c35d12da79b1 | ["MIT"] | count: null | null | null |
| max_forks: src/users/views.py | DoubleTakoMeat/pycon.tw | 6a49d890ced9027ddfdc965e8753c35d12da79b1 | ["MIT"] | count: null | null | null |
from django.conf import settings
from django.contrib import messages
from django.contrib import auth
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.http import Http404
from django.shortcuts import redirect, render
from django.utils.translation import gettext, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_POST
from django.template.loader import render_to_string
from .decorators import login_forbidden
from .forms import (
AuthenticationForm, PublicUserCreationForm, UserProfileUpdateForm,
PasswordResetForm, SetPasswordForm, CocAgreementForm,
)
from .models import CocRecord
from reviews.context import proposals_state, reviews_state
from lxml import etree
import lxml.html
User = auth.get_user_model()
@sensitive_post_parameters()
@never_cache
@login_forbidden
def user_signup(request):
if request.method == 'POST':
form = PublicUserCreationForm(data=request.POST)
if form.is_valid():
user = form.save()
user.send_verification_email(request)
auth.login(request, user)
messages.success(request, gettext(
'Sign up successful. You are now logged in.'
))
return redirect('user_dashboard')
else:
form = PublicUserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
@sensitive_post_parameters()
@never_cache
def user_verify(request, verification_key):
try:
user = User.objects.get_with_verification_key(verification_key)
except User.DoesNotExist:
raise Http404
user.verified = True
user.save()
messages.success(request, gettext('Email verification successful.'))
return redirect('user_dashboard')
@never_cache
@login_required
@require_POST
def request_verification(request):
user = request.user
user.send_verification_email(request)
messages.success(
request,
gettext('A verification email has been sent to {email}').format(
email=user.email,
),
)
return redirect('user_dashboard')
@login_required
def user_dashboard(request):
if not request.user.is_valid_speaker():
return redirect('user_profile_update')
logout_next = reverse('login')
return render(request, 'users/user_dashboard.html', {
'logout_next': logout_next,
**proposals_state()._asdict(),
**reviews_state()._asdict(),
})
@login_required
def user_profile_update(request):
logout_next = reverse('index')
if request.method == 'POST':
form = UserProfileUpdateForm(
data=request.POST, files=request.FILES,
instance=request.user,
)
if form.is_valid():
form.save()
messages.success(request, gettext(
'Your profile has been updated successfully.',
))
return redirect('user_dashboard')
else:
form = UserProfileUpdateForm(instance=request.user)
return render(request, 'users/user_profile_update.html', {
'form': form, 'logout_next': logout_next,
**reviews_state()._asdict(),
})
def password_change_done(request):
messages.success(request, gettext(
'Your new password has been applied successfully.'
))
return redirect('user_dashboard')
def password_reset_done(request):
messages.success(request, gettext(
'An email is sent to your email account. Please check your inbox for '
        'further instructions to reset your password.'
))
return redirect('login')
def password_reset_complete(request):
messages.success(request, gettext(
'Password reset successful. You can now login.'
))
return redirect('login')
@login_required
def coc_agree(request):
if request.method == 'POST':
form = CocAgreementForm(data=request.POST)
if form.is_valid():
try:
agreement = CocRecord.objects.get(user=request.user, coc_version=settings.COC_VERSION)
except CocRecord.DoesNotExist:
agreement = CocRecord(user=request.user, coc_version=settings.COC_VERSION)
agreement.save()
            # The query param indicating the redirect target (set up by CocAgreementMixin) can be removed after set_language.
            # Redirect to the dashboard instead if that happens.
redirect_to = request.GET.get('next', reverse('user_dashboard'))
return redirect(redirect_to)
else:
form = CocAgreementForm()
# Get code of conduct
lang = get_language()
content = render_to_string('contents/%s/about/code-of-conduct.html' % lang[:2], {}, request)
tree = lxml.html.document_fromstring(content)
main = tree.xpath('//main')[0]
    # Remove the title. Since the HTML structure has changed, we need to find
    # the direct child of main that contains the h1 as a descendant and remove it.
for h1 in main.xpath('//h1'):
target = h1
parent = h1.getparent()
while parent != main and parent != None:
target = parent
parent = parent.getparent()
if parent == main:
main.remove(target)
coc = etree.tostring(main, encoding='utf-8').decode('utf-8')
return render(request, 'users/coc_agreement.html', {
'form': form,
'coc': coc,
**reviews_state()._asdict(),
})
class PasswordChangeView(auth_views.PasswordChangeView):
    # We cannot merely pass extra_context=reviews_state()._asdict() to
    # auth_views.PasswordChangeView because reviews_state()._asdict() needs to
    # be re-evaluated every time this view is reached.
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(**reviews_state()._asdict())
return context
login = auth_views.LoginView.as_view(authentication_form=AuthenticationForm)
logout = auth_views.LogoutView.as_view()
password_change = PasswordChangeView.as_view()
password_reset = auth_views.PasswordResetView.as_view(form_class=PasswordResetForm,
template_name='registration/password_reset.html',
email_template_name='registration/password_reset_email.txt')
password_reset_confirm = auth_views.PasswordResetConfirmView.as_view(
form_class=SetPasswordForm
)
| avg_line_length: 32.02451 | max_line_length: 120 | alphanum_fraction: 0.691566 |
| hexsha: ebc81295aa1ce251290d89b1f711fdbe9b802b4b | size: 23,544 | ext: py | lang: Python |
| max_stars: tests/pulses/sequence_pulse_template_tests.py | qutech-lab/qc-toolkit | f00e0d0000bdc7a6604ceae2c15b60f4d10c4000 | ["MIT"] | count: 30 | 2018-09-13T02:59:55.000Z | 2022-03-21T04:25:22.000Z |
| max_issues: tests/pulses/sequence_pulse_template_tests.py | qutech-lab/qc-toolkit | f00e0d0000bdc7a6604ceae2c15b60f4d10c4000 | ["MIT"] | count: 319 | 2015-03-10T09:37:20.000Z | 2018-09-06T10:11:32.000Z |
| max_forks: tests/pulses/sequence_pulse_template_tests.py | qutech-lab/qc-toolkit | f00e0d0000bdc7a6604ceae2c15b60f4d10c4000 | ["MIT"] | count: 14 | 2019-01-08T14:42:36.000Z | 2021-05-21T08:53:06.000Z |
import unittest
from unittest import mock
from qupulse.parameter_scope import DictScope
from qupulse.expressions import Expression, ExpressionScalar
from qupulse.pulses.table_pulse_template import TablePulseTemplate
from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate, SequenceWaveform
from qupulse.pulses.mapping_pulse_template import MappingPulseTemplate
from qupulse.pulses.parameters import ParameterConstraint, ParameterConstraintViolation, ParameterNotProvidedException
from qupulse._program._loop import Loop
from tests.pulses.sequencing_dummies import DummyPulseTemplate,\
DummyNoValueParameter, DummyWaveform, MeasurementWindowTestCase
from tests.serialization_dummies import DummySerializer
from tests.serialization_tests import SerializableTests
from tests._program.transformation_tests import TransformationStub
from tests.pulses.pulse_template_tests import get_appending_internal_create_program, PulseTemplateStub
class SequencePulseTemplateTest(unittest.TestCase):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# Setup test data
self.square = TablePulseTemplate({'default': [(0, 0),
('up', 'v', 'hold'),
('down', 0, 'hold'),
('length', 0)]},
measurements=[('mw1', 'up', 'length-up')])
self.mapping1 = {
'up': 'uptime',
'down': 'uptime + length',
'v': 'voltage',
'length': '0.5 * pulse_length'
}
self.window_name_mapping = {'mw1' : 'test_window'}
self.outer_parameters = {'uptime', 'length', 'pulse_length', 'voltage'}
self.parameters = dict()
self.parameters['uptime'] = 5
self.parameters['length'] = 10
self.parameters['pulse_length'] = 100
self.parameters['voltage'] = 10
self.sequence = SequencePulseTemplate(MappingPulseTemplate(self.square,
parameter_mapping=self.mapping1,
measurement_mapping=self.window_name_mapping))
def test_duration(self):
pt = SequencePulseTemplate(DummyPulseTemplate(duration='a'),
DummyPulseTemplate(duration='a'),
DummyPulseTemplate(duration='b'))
self.assertEqual(pt.duration, Expression('a+a+b'))
def test_parameter_names(self) -> None:
pt = SequencePulseTemplate(DummyPulseTemplate(parameter_names={'a'}), DummyPulseTemplate(parameter_names={'b'}),
parameter_constraints=['a==b', 'a<c'], measurements=[('meas', 'd', 1)])
self.assertEqual({'a', 'b', 'c', 'd'}, pt.parameter_names, )
def test_build_waveform(self):
wfs = [DummyWaveform(), DummyWaveform()]
pts = [DummyPulseTemplate(waveform=wf) for wf in wfs]
spt = SequencePulseTemplate(*pts, parameter_constraints=['a < 3'])
with self.assertRaises(ParameterConstraintViolation):
spt.build_waveform(dict(a=4), dict())
parameters = dict(a=2)
channel_mapping = dict()
wf = spt.build_waveform(parameters, channel_mapping=channel_mapping)
for wfi, pt in zip(wfs, pts):
self.assertEqual(pt.build_waveform_calls, [(parameters, dict())])
self.assertIs(pt.build_waveform_calls[0][0], parameters)
self.assertIsInstance(wf, SequenceWaveform)
for wfa, wfb in zip(wf.compare_key, wfs):
self.assertIs(wfa, wfb)
def test_identifier(self) -> None:
identifier = 'some name'
pulse = SequencePulseTemplate(DummyPulseTemplate(), identifier=identifier)
self.assertEqual(identifier, pulse.identifier)
def test_multiple_channels(self) -> None:
dummy = DummyPulseTemplate(parameter_names={'hugo'}, defined_channels={'A', 'B'})
subtemplates = [(dummy, {'hugo': 'foo'}, {}), (dummy, {'hugo': '3'}, {})]
sequence = SequencePulseTemplate(*subtemplates)
self.assertEqual({'A', 'B'}, sequence.defined_channels)
self.assertEqual({'foo'}, sequence.parameter_names)
def test_multiple_channels_mismatch(self) -> None:
with self.assertRaises(ValueError):
SequencePulseTemplate(DummyPulseTemplate(defined_channels={'A'}),
DummyPulseTemplate(defined_channels={'B'}))
with self.assertRaises(ValueError):
SequencePulseTemplate(
DummyPulseTemplate(defined_channels={'A'}), DummyPulseTemplate(defined_channels={'A', 'B'})
)
def test_integral(self) -> None:
dummy1 = DummyPulseTemplate(defined_channels={'A', 'B'},
integrals={'A': ExpressionScalar('k+2*b'), 'B': ExpressionScalar('3')})
dummy2 = DummyPulseTemplate(defined_channels={'A', 'B'},
integrals={'A': ExpressionScalar('7*(b-f)'), 'B': ExpressionScalar('0.24*f-3.0')})
pulse = SequencePulseTemplate(dummy1, dummy2)
self.assertEqual({'A': ExpressionScalar('k+2*b+7*(b-f)'), 'B': ExpressionScalar('0.24*f')}, pulse.integral)
def test_concatenate(self):
a = DummyPulseTemplate(parameter_names={'foo'}, defined_channels={'A'})
b = DummyPulseTemplate(parameter_names={'bar'}, defined_channels={'A'})
spt_anon = SequencePulseTemplate(a, b)
spt_id = SequencePulseTemplate(a, b, identifier='id')
spt_meas = SequencePulseTemplate(a, b, measurements=[('m', 0, 'd')])
spt_constr = SequencePulseTemplate(a, b, parameter_constraints=['a < b'])
merged = SequencePulseTemplate.concatenate(a, spt_anon, b)
self.assertEqual(merged.subtemplates, [a, a, b, b])
result = SequencePulseTemplate.concatenate(a, spt_id, b)
self.assertEqual(result.subtemplates, [a, spt_id, b])
result = SequencePulseTemplate.concatenate(a, spt_meas, b)
self.assertEqual(result.subtemplates, [a, spt_meas, b])
result = SequencePulseTemplate.concatenate(a, spt_constr, b)
self.assertEqual(result.subtemplates, [a, spt_constr, b])
class SequencePulseTemplateSerializationTests(SerializableTests, unittest.TestCase):
@property
def class_to_test(self):
return SequencePulseTemplate
def make_kwargs(self):
return {
'subtemplates': [DummyPulseTemplate(), DummyPulseTemplate()],
'parameter_constraints': [str(ParameterConstraint('a<b'))],
'measurements': [('m', 0, 1)]
}
def make_instance(self, identifier=None, registry=None):
kwargs = self.make_kwargs()
subtemplates = kwargs['subtemplates']
del kwargs['subtemplates']
return self.class_to_test(identifier=identifier, *subtemplates, **kwargs, registry=registry)
def assert_equal_instance_except_id(self, lhs: SequencePulseTemplate, rhs: SequencePulseTemplate):
self.assertIsInstance(lhs, SequencePulseTemplate)
self.assertIsInstance(rhs, SequencePulseTemplate)
self.assertEqual(lhs.subtemplates, rhs.subtemplates)
self.assertEqual(lhs.parameter_constraints, rhs.parameter_constraints)
self.assertEqual(lhs.measurement_declarations, rhs.measurement_declarations)
class SequencePulseTemplateOldSerializationTests(unittest.TestCase):
def setUp(self) -> None:
self.table_foo = TablePulseTemplate({'default': [('hugo', 2),
('albert', 'voltage')]},
parameter_constraints=['albert<9.1'],
measurements=[('mw_foo','hugo','albert')],
identifier='foo',
registry=dict())
self.foo_param_mappings = dict(hugo='ilse', albert='albert', voltage='voltage')
self.foo_meas_mappings = dict(mw_foo='mw_bar')
def test_get_serialization_data_old(self) -> None:
# test for deprecated version during transition period, remove after final switch
with self.assertWarnsRegex(DeprecationWarning, "deprecated",
msg="SequencePT does not issue warning for old serialization routines."):
dummy1 = DummyPulseTemplate()
dummy2 = DummyPulseTemplate()
sequence = SequencePulseTemplate(dummy1, dummy2, parameter_constraints=['a<b'], measurements=[('m', 0, 1)],
registry=dict())
serializer = DummySerializer(serialize_callback=lambda x: str(x))
expected_data = dict(
subtemplates=[str(dummy1), str(dummy2)],
parameter_constraints=['a < b'],
measurements=[('m', 0, 1)]
)
data = sequence.get_serialization_data(serializer)
self.assertEqual(expected_data, data)
def test_deserialize_old(self) -> None:
# test for deprecated version during transition period, remove after final switch
with self.assertWarnsRegex(DeprecationWarning, "deprecated",
msg="SequencePT does not issue warning for old serialization routines."):
dummy1 = DummyPulseTemplate()
dummy2 = DummyPulseTemplate()
serializer = DummySerializer(serialize_callback=lambda x: str(id(x)))
data = dict(
subtemplates=[serializer.dictify(dummy1), serializer.dictify(dummy2)],
identifier='foo',
parameter_constraints=['a < b'],
measurements=[('m', 0, 1)]
)
template = SequencePulseTemplate.deserialize(serializer, **data)
self.assertEqual(template.subtemplates, [dummy1, dummy2])
self.assertEqual(template.parameter_constraints, [ParameterConstraint('a<b')])
self.assertEqual(template.measurement_declarations, [('m', 0, 1)])
class SequencePulseTemplateSequencingTests(MeasurementWindowTestCase):
def test_internal_create_program(self):
sub_templates = PulseTemplateStub(defined_channels={'a'}, duration=ExpressionScalar('t1')),\
PulseTemplateStub(defined_channels={'a'}, duration=ExpressionScalar('t2'))
wfs = DummyWaveform(duration=1), DummyWaveform(duration=2)
spt = SequencePulseTemplate(*sub_templates, measurements=[('m', 'a', 'b')])
kwargs = dict(scope=DictScope.from_kwargs(t1=.4,
t2=.5,
a=.1, b=.2,
irrelevant=42),
measurement_mapping={'m': 'l'},
channel_mapping={'g': 'h'},
global_transformation=TransformationStub(),
to_single_waveform={'to', 'single', 'waveform'})
program = Loop()
expected_program = Loop(children=[Loop(waveform=wfs[0]),
Loop(waveform=wfs[1])],
measurements=[('l', .1, .2)])
with mock.patch.object(spt, 'validate_scope') as validate_scope:
with mock.patch.object(spt, 'get_measurement_windows',
return_value=[('l', .1, .2)]) as get_measurement_windows:
with mock.patch.object(sub_templates[0], '_create_program',
wraps=get_appending_internal_create_program(wfs[0], True)) as create_0,\
mock.patch.object(sub_templates[1], '_create_program',
wraps=get_appending_internal_create_program(wfs[1], True)) as create_1:
spt._internal_create_program(**kwargs, parent_loop=program)
self.assertEqual(expected_program, program)
validate_scope.assert_called_once_with(kwargs['scope'])
get_measurement_windows.assert_called_once_with(kwargs['scope'], kwargs['measurement_mapping'])
create_0.assert_called_once_with(**kwargs, parent_loop=program)
create_1.assert_called_once_with(**kwargs, parent_loop=program)
def test_create_program_internal(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'}, defined_channels={'A'})
scope = DictScope.from_kwargs()
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(scope=scope,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub1.waveform),
Loop(repetition_count=1, waveform=sub2.waveform)],
list(loop.children))
self.assert_measurement_windows_equal({'a': ([0], [1]), 'b': ([1], [2])}, loop.get_measurement_windows())
### test again with inverted sequence
seq = SequencePulseTemplate(sub2, sub1, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(scope=scope,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform),
Loop(repetition_count=1, waveform=sub1.waveform)],
list(loop.children))
self.assert_measurement_windows_equal({'a': ([0], [1]), 'b': ([3], [2])}, loop.get_measurement_windows())
def test_internal_create_program_no_measurement_mapping(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'})
scope = DictScope.from_kwargs()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
children = [Loop(waveform=DummyWaveform())]
loop = Loop(measurements=[], children=children)
with self.assertRaises(KeyError):
seq._internal_create_program(scope=scope,
measurement_mapping=dict(),
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertFalse(sub1.create_program_calls)
self.assertFalse(sub2.create_program_calls)
self.assertEqual(children, list(loop.children))
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assert_measurement_windows_equal({}, loop.get_measurement_windows())
# test for child level measurements (does not guarantee to leave parent_loop unchanged in this case)
with self.assertRaises(KeyError):
seq._internal_create_program(scope=scope,
measurement_mapping=dict(a='a'),
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
def test_internal_create_program_one_child_no_duration(self) -> None:
sub1 = DummyPulseTemplate(duration=0, waveform=None, measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'}, defined_channels={'A'})
scope = DictScope.from_kwargs()
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(scope=scope,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform)],
list(loop.children))
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
# MultiChannelProgram calls cleanup
loop.cleanup()
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
### test again with inverted sequence
seq = SequencePulseTemplate(sub2, sub1, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(scope=scope,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform)],
list(loop.children))
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
# MultiChannelProgram calls cleanup
loop.cleanup()
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
def test_internal_create_program_both_children_no_duration(self) -> None:
sub1 = DummyPulseTemplate(duration=0, waveform=None, measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=0, waveform=None, parameter_names={'foo'}, defined_channels={'A'})
scope = DictScope.from_kwargs()
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop(measurements=None)
seq._internal_create_program(scope=scope,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([], list(loop.children))
self.assertIsNone(loop._measurements)
def test_internal_create_program_parameter_constraint_violations(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'})
scope = DictScope.from_kwargs(foo=7)
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)], parameter_constraints={'foo < 2'})
loop = Loop()
with self.assertRaises(ParameterConstraintViolation):
seq._internal_create_program(scope=scope,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
def test_internal_create_program_parameter_missing(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration='d', waveform=DummyWaveform(duration=2), parameter_names={'foo'})
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 'bar', 1)], parameter_constraints={'foo < 2'})
loop = Loop()
# test parameter from constraints
scope = DictScope.from_kwargs()
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(scope=scope,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
# test parameter from measurements
scope = DictScope.from_mapping({'foo': 1})
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(scope=scope,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
# test parameter from duration
scope = DictScope.from_mapping({'foo': 1, 'bar': 0})
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(scope=scope,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
class SequencePulseTemplateTestProperties(SequencePulseTemplateTest):
def test_measurement_names(self):
d1 = DummyPulseTemplate(measurement_names={'a'})
d2 = DummyPulseTemplate(measurement_names={'b'})
spt = SequencePulseTemplate(d1, d2, measurements=[('c', 0, 1)])
self.assertEqual(spt.measurement_names, {'a', 'b', 'c'})
if __name__ == "__main__":
unittest.main(verbosity=2)
| avg_line_length: 51.745055 | max_line_length: 133 | alphanum_fraction: 0.584947 |
| hexsha: 61c933fed98fb9ca55e5f511c582a408f469a38d | size: 4,484 | ext: py | lang: Python |
| max_stars: models/utils/args.py | mingxuts/multi-center-fed-learning | 9262ddaefb79b14ea44b61ffce200b82d31b0af1 | ["BSD-2-Clause"] | count: 4 | 2021-09-03T02:44:14.000Z | 2022-03-04T03:28:09.000Z |
| max_issues: models/utils/args.py | mingxuts/multi-center-fed-learning | 9262ddaefb79b14ea44b61ffce200b82d31b0af1 | ["BSD-2-Clause"] | count: null | null | null |
| max_forks: models/utils/args.py | mingxuts/multi-center-fed-learning | 9262ddaefb79b14ea44b61ffce200b82d31b0af1 | ["BSD-2-Clause"] | count: 2 | 2021-10-10T11:57:53.000Z | 2022-02-15T20:27:10.000Z |
import argparse
from .constants import DATASETS, SIM_TIMES
def parse_job_args():
parser = argparse.ArgumentParser()
parser.add_argument('-dataset',
help='name of dataset;',
type=str,
choices=DATASETS,
required=True)
parser.add_argument('-experiment',
help='name of experiment to perform;',
type=str,
required=True)
parser.add_argument('-configuration',
help='file that maintains job settings',
type=str,
default='job.yaml',
required=False)
return parser.parse_args()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-dataset',
help='name of dataset;',
type=str,
choices=DATASETS,
required=True)
parser.add_argument('-model',
help='name of model;',
type=str,
required=True)
parser.add_argument('--num-rounds',
help='number of rounds to simulate;',
type=int,
default=-1)
parser.add_argument('--eval-every',
help='evaluate every ____ rounds;',
type=int,
default=-1)
parser.add_argument('--clients-per-round',
help='number of clients trained per round;',
type=int,
default=2)
parser.add_argument('--batch-size',
help='batch size when clients train on data;',
type=int,
default=10)
parser.add_argument('--seed',
help='seed for random client sampling and batch splitting',
type=int,
default=0)
parser.add_argument('--metrics-name',
help='name for metrics file;',
type=str,
default='metrics',
required=False)
parser.add_argument('--metrics-dir',
help='dir for metrics file;',
type=str,
default='metrics',
required=False)
parser.add_argument('--weight_mode',
help='using sample size or not',
type=str,
default='no_size',
required=False)
parser.add_argument('--metric-file',
help='which file to print metric',
type=str,
default='metric_score.tsv',
required=False)
# Minibatch doesn't support num_epochs, so make them mutually exclusive
epoch_capability_group = parser.add_mutually_exclusive_group()
epoch_capability_group.add_argument('--minibatch',
help='None for FedAvg, else fraction;',
type=float,
default=None)
epoch_capability_group.add_argument('--num-epochs',
help='number of epochs when clients train on data;',
type=int,
default=1)
parser.add_argument('-t',
help='simulation time: small, medium, or large;',
type=str,
choices=SIM_TIMES,
default='large')
parser.add_argument('-lr',
help='learning rate for local optimizers;',
type=float,
default=-1,
required=False)
parser.add_argument('--num_clusters',
help='divide selected clients into ___ groups, value from 2 - 11',
type=int,
default=3)
parser.add_argument('--mu',
help='the proximal term for minimizing distance of center and device',
type=float,
default=0.01,
required=False)
parser.add_argument('--update-head-every',
                        help='update the head-model centers every ___ rounds',
type=int,
default=1)
parser.add_argument('--regul-term',
type=float,
default=10.,
required=False)
return parser.parse_args()
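A hedged example of how `parse_args` might be driven from the command line; the dataset and model names below are placeholders, since the accepted values live in `utils.constants.DATASETS` and `SIM_TIMES` and are not shown here.

```python
# Hypothetical entry point, e.g. models/main.py, run as:
#   python main.py -dataset <dataset-name> -model <model-name> \
#       --num-rounds 100 --clients-per-round 10 --num_clusters 3
from utils.args import parse_args


def main():
    args = parse_args()
    print(args.dataset, args.model, args.num_rounds,
          args.clients_per_round, args.num_clusters)


if __name__ == '__main__':
    main()
```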
| avg_line_length: 35.307087 | max_line_length: 90 | alphanum_fraction: 0.476137 |
| hexsha: 601e02e48722d92eaa796d98eaaa18e5fe007afa | size: 469 | ext: py | lang: Python |
| max_stars: tests/test_news.py | zecollokaris/News-Highlights | cedd0087f45e7a8e634401d726216efecda2764b | ["Unlicense"] | count: null | null | null |
| max_issues: tests/test_news.py | zecollokaris/News-Highlights | cedd0087f45e7a8e634401d726216efecda2764b | ["Unlicense"] | count: null | null | null |
| max_forks: tests/test_news.py | zecollokaris/News-Highlights | cedd0087f45e7a8e634401d726216efecda2764b | ["Unlicense"] | count: null | null | null |
import unittest

from app.models import News


class NewsTest(unittest.TestCase):
    '''Test Class to test the behaviour of the News class'''

    def setUp(self):
        '''Set up method that will run before every Test'''
        self.new_news = News(1234, 'Python Must Be Crazy', 'A thrilling new Python Series', 'sdsdsfsdf', 8.5, 129993)

    def test_instance(self):
        self.assertTrue(isinstance(self.new_news, News))


if __name__ == '__main__':
    unittest.main()
| avg_line_length: 33.5 | max_line_length: 112 | alphanum_fraction: 0.692964 |
| hexsha: 6cd9e848d4226a3ce345dff5a06c2d8dd0734b99 | size: 1,568 | ext: py | lang: Python |
| max_stars: operators/functions/unsorted_segment_sum.py | YuhangSong/RBP | 68a230053198de1b689e262974947c4186ee1c49 | ["MIT"] | count: 316 | 2019-01-03T19:54:44.000Z | 2022-03-11T21:25:08.000Z |
| max_issues: operators/functions/unsorted_segment_sum.py | YuhangSong/RBP | 68a230053198de1b689e262974947c4186ee1c49 | ["MIT"] | count: 10 | 2019-01-17T20:43:27.000Z | 2020-05-30T23:07:50.000Z |
| max_forks: operators/functions/unsorted_segment_sum.py | YuhangSong/RBP | 68a230053198de1b689e262974947c4186ee1c49 | ["MIT"] | count: 63 | 2019-01-03T20:18:38.000Z | 2022-03-11T21:58:17.000Z |
import math
import numpy as np
import torch
from torch.autograd import Function, Variable

from operators._ext import segment_reduction


class UnsortedSegmentSumFunction(Function):
    @staticmethod
    def forward(ctx, data, segment_index, num_segments):
        # data's shape should be (batch, dim1, dim2), and segment reduction will be performed over dim1
        ctx.save_for_backward(data, segment_index)
        # data = data.contiguous()
        # segment_index = segment_index.contiguous()
        if not data.is_cuda:
            output = torch.FloatTensor(data.size(0), num_segments,
                                       data.size(2)).zero_()
            segment_reduction.unsorted_segment_sum_forward(data, segment_index,
                                                           data.size(), output)
        else:
            output = torch.cuda.FloatTensor(data.size(0), num_segments,
                                            data.size(2)).zero_()
            segment_reduction.unsorted_segment_sum_forward_gpu(data, segment_index,
                                                               data.size(), output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        data, segment_index = ctx.saved_tensors
        grad_data = data.new().resize_as_(data).zero_()
        if not data.is_cuda:
            segment_reduction.unsorted_segment_sum_backward(
                grad_output.data, segment_index, grad_data.size(), grad_data)
        else:
            segment_reduction.unsorted_segment_sum_backward_gpu(
                grad_output.data, segment_index, grad_data.size(), grad_data)
        return Variable(grad_data), None, None
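For readers without the compiled `segment_reduction` extension, here is a minimal pure-PyTorch sketch of the same reduction (summing rows of dim1 into `num_segments` buckets). It illustrates the semantics only and is not the repository's implementation.

```python
import torch


def unsorted_segment_sum_reference(data, segment_index, num_segments):
    """Sum data[:, i, :] into output[:, segment_index[i], :]."""
    batch, _, dim2 = data.shape
    output = data.new_zeros(batch, num_segments, dim2)
    # index_add_ accumulates rows of `data` (along dim=1) into the slots named
    # by `segment_index`, matching the CPU/GPU kernels called above.
    output.index_add_(1, segment_index, data)
    return output


# Example: 1 batch, 4 rows reduced into 2 segments.
data = torch.arange(8, dtype=torch.float32).reshape(1, 4, 2)
segment_index = torch.tensor([0, 1, 0, 1])
print(unsorted_segment_sum_reference(data, segment_index, 2))
```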
| avg_line_length: 34.844444 | max_line_length: 99 | alphanum_fraction: 0.656888 |
| hexsha: 5a112a1028f5cdac6dd2bdd994d5a20d112682de | size: 1,361 | ext: py | lang: Python |
| max_stars: lang/py/pylib/code/zipimport/zipimport_load_module.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | ["MIT"] | count: 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z |
| max_issues: lang/py/pylib/code/zipimport/zipimport_load_module.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | ["MIT"] | count: 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z |
| max_forks: lang/py/pylib/code/zipimport/zipimport_load_module.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | ["MIT"] | count: 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z |
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Retrieving the code for a module within a zip archive.
"""
#end_pymotw_header
import zipimport
importer = zipimport.zipimporter('zipimport_example.zip')
module = importer.load_module('zipimport_get_code')
print 'Name :', module.__name__
print 'Loader :', module.__loader__
print 'Code :', module.code
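The listing above is the original Python 2 PyMOTW example. A hedged Python 3 rendering of the same steps is sketched below; it still needs the `zipimport_example.zip` archive from the original sample to run, and the `module.code` line is dropped because that attribute belongs to the archived example module rather than to zipimport itself.

```python
# Python 3 sketch of the same zipimport example.
import zipimport

importer = zipimport.zipimporter('zipimport_example.zip')
module = importer.load_module('zipimport_get_code')
print('Name   :', module.__name__)
print('Loader :', module.__loader__)
```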
| avg_line_length: 34.025 | max_line_length: 70 | alphanum_fraction: 0.766348 |
| hexsha: cf30488d1e2a2d20ed23fd56f9b73c6df5102e83 | size: 1,789 | ext: py | lang: Python |
| max_stars: Megatron-LM-v1.1.5-ZeRO3/seelog.py | zhuzilin/DeepSpeedExamples | c86ec361d16f940b93f2fbb17c49733b2a71297c | ["MIT"] | count: null | null | null |
| max_issues: Megatron-LM-v1.1.5-ZeRO3/seelog.py | zhuzilin/DeepSpeedExamples | c86ec361d16f940b93f2fbb17c49733b2a71297c | ["MIT"] | count: null | null | null |
| max_forks: Megatron-LM-v1.1.5-ZeRO3/seelog.py | zhuzilin/DeepSpeedExamples | c86ec361d16f940b93f2fbb17c49733b2a71297c | ["MIT"] | count: null | null | null |
import os
import sys


class MyResDict(object):
    def __init__(self):
        self._data = []
        print("here")

    def insert(self, model_name, gpu, mp, bs, perf):
        self._data.append(((model_name, gpu, mp, bs), perf))

    def showme(self):
        self._data = sorted(self._data, key=lambda elem: elem[0])
        print(("model_name", "gpu", "mp", "bs", "perf"))
        for elem in self._data:
            print(elem)


def extract_info_from_file(path, file, res_dict):
    # Default values in case a field is missing from the file name.
    model_name = ""
    gpu = 0
    mp = 0
    bs = 0
    # If the log file does not exist, training was not executed, so skip it.
    if not os.path.isfile(path + "/" + file):
        return
    f = open(path + "/" + file)
    if not os.path.isdir(file):
        fn_list = file.split(".")[1].split("_")
        # log.model_6B_bs_16_gpu_8_mp_1
        for i in range(len(fn_list)):
            if "model" in fn_list[i]:
                model_name = fn_list[i + 1]
            elif "bs" == fn_list[i]:
                bs = fn_list[i + 1]
            elif "gpu" == fn_list[i]:
                gpu = fn_list[i + 1]
            elif "mp" == fn_list[i]:
                mp = fn_list[i + 1]
        iter_f = iter(f)
        best_perf = 0
        for line in iter_f:
            if "GPU:" in line:
                sline = line.split()
                get_index = sline.index('GPU:')
                perf = float(sline[get_index + 1])
                best_perf = perf
        if best_perf != 0:
            res_dict.insert(model_name, gpu, mp, bs, best_perf)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        PATH = str(sys.argv[1])
    else:
        PATH = "./logs"
    files = os.listdir(PATH)
    res_dict = MyResDict()
    for f in files:
        extract_info_from_file(PATH, f, res_dict)
    res_dict.showme()
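Based only on the filename pattern given in the comment above (`log.model_6B_bs_16_gpu_8_mp_1`), a short sketch of what the parser extracts; the sample values come from that comment, and the `GPU:` line format is assumed from the parsing code.

```python
# Hypothetical walk-through of the filename parsing done in extract_info_from_file.
fname = "log.model_6B_bs_16_gpu_8_mp_1"
fn_list = fname.split(".")[1].split("_")
print(fn_list)  # ['model', '6B', 'bs', '16', 'gpu', '8', 'mp', '1']
# The loop above would record model_name='6B', bs='16', gpu='8', mp='1' and pair
# them with the last "GPU: <value>" number found in the log body.
```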
| avg_line_length: 28.396825 | max_line_length: 67 | alphanum_fraction: 0.515931 |
| hexsha: 15ed054ef881ac10d238c2a13c57232f61ae4e50 | size: 3,372 | ext: py | lang: Python |
| max_stars: melodic/src/rqt_bag/rqt_bag/src/rqt_bag/plugins/message_view.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | ["BSD-3-Clause"] | count: 2 | 2021-07-14T12:33:55.000Z | 2021-11-21T07:14:13.000Z |
| max_issues: melodic/src/rqt_bag/rqt_bag/src/rqt_bag/plugins/message_view.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | ["BSD-3-Clause"] | count: null | null | null |
| max_forks: melodic/src/rqt_bag/rqt_bag/src/rqt_bag/plugins/message_view.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | ["BSD-3-Clause"] | count: null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QObject


class MessageView(QObject):
    """
    A message details renderer. When registered with rqt_bag, a MessageView is called
    whenever the timeline playhead moves.
    """
    name = 'Untitled'

    def __init__(self, timeline, topic):
        super(MessageView, self).__init__()
        self.timeline = timeline
        self.topic = topic

    def message_viewed(self, bag, msg_details):
        """
        View the message.

        @param bag: the bag file the message is contained in
        @type bag: rosbag.Bag
        @param msg_details: the details of the message to be viewed
        @type msg_details: tuple (topic, msg, time)
        @param topic: the message topic
        @type topic: str
        @param msg: the message
        @param t: the message timestamp
        @type t: rospy.Time
        """
        pass

    def message_cleared(self):
        """
        Clear the currently viewed message (if any).
        """
        pass

    def timeline_changed(self):
        """
        Called when the messages in a timeline change, e.g. if a new message is
        recorded or a bag file is added.
        """
        pass

    def close(self):
        """
        Close the message view, releasing any resources.
        """
        pass

    # NOTE: event function should not be changed in subclasses
    def event(self, event):
        """
        This function will be called to process events posted by post_event;
        it will call message_cleared or message_viewed with the relevant data.
        """
        bag, msg_data = event.data
        if msg_data:
            self.message_viewed(bag, msg_data)
        else:
            self.message_cleared()
        return True
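A minimal sketch of a hypothetical subclass, showing how the hooks above are meant to be overridden; the printing behaviour is invented for illustration and is not a plugin that ships with rqt_bag.

```python
# Hypothetical plugin: prints whatever message the playhead is currently over.
class PrintingMessageView(MessageView):
    name = 'Print'

    def message_viewed(self, bag, msg_details):
        # msg_details is the (topic, msg, time) tuple described in the base class.
        topic, msg, t = msg_details
        print('[%s] message on %s at %s' % (self.topic, topic, t))

    def message_cleared(self):
        print('[%s] playhead left the message' % self.topic)
```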
| avg_line_length: 35.125 | max_line_length: 92 | alphanum_fraction: 0.678826 |
| hexsha: 132a743de740fffe73cde54d80e503a7cc9dbdbe | size: 15,692 | ext: py | lang: Python |
| max_stars: Scripts/plot_MSFigure_3_squeeze.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | ["MIT"] | count: 2 | 2022-01-20T20:20:04.000Z | 2022-02-21T12:33:37.000Z |
| max_issues: Scripts/plot_MSFigure_3_squeeze.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | ["MIT"] | count: null | null | null |
| max_forks: Scripts/plot_MSFigure_3_squeeze.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | ["MIT"] | count: 3 | 2022-01-19T16:25:37.000Z | 2022-03-22T13:25:00.000Z |
"""
Make figure of LRP maps for manuscript
Author : Zachary M. Labe
Date : 7 October 2021
Version : 2
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import cmasher as cmr
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
from sklearn.metrics import accuracy_score
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Hyperparameters for files of the ANN model
rm_ensemble_mean = True
if rm_ensemble_mean == False:
vari_predict = ['OHC100']
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [20,20]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.05
actFun = 'relu'
fractWeight = 0.5
yearsall = np.arange(1990,2090+1,1)
elif rm_ensemble_mean == True:
vari_predict = ['OHC100']
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = 0.5
yearsall = np.arange(1990,2090+1,1)
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
savename = 'ANNv2_'+vari_predict[0]+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
### Directories to save files
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/MS-Figures_v1/'
###############################################################################
###############################################################################
###############################################################################
### Read in LRP data for testing and obs
nametypetest = 'Testing'
filenametest = directorydata + 'LRPMap' + nametypetest + '_' + savename + '.nc'
datatest = Dataset(filenametest,'r')
lat = datatest.variables['lat'][:]
lon = datatest.variables['lon'][:]
lrp_test = datatest.variables['LRP'][:]
datatest.close()
###############################################################################
###############################################################################
###############################################################################
### Read in data for testing predictions and actual hiatuses
testindices = np.genfromtxt(directorydata + 'testingEnsIndices_' + savename + '.txt')
actual_test = np.genfromtxt(directorydata + 'testingTrueLabels_' + savename + '.txt')
predict_test = np.genfromtxt(directorydata + 'testingPredictedLabels_' + savename+ '.txt')
###############################################################################
###############################################################################
###############################################################################
### Read in OHC
data = Dataset(directorydata + 'OHC_comp/OHCcomp_' + 'hiatus_True' + '.nc')
ohc_hiatus = data.variables['OHC100'][:]
data.close()
data = Dataset(directorydata + 'OHC_comp/OHCcomp_' + 'climatechange_True' + '.nc')
ohc_cc = data.variables['OHC100'][:]
data.close()
##############################################################################
##############################################################################
##############################################################################
### Composite hiatuses based on accuracy or not
def LRPcomp(accurateH,accurateR):
lrp_hiatus = []
for i in range(lrp_test.shape[0]):
if accurateH == False:
if predict_test[i] == 1:
lrp_hiatus.append(lrp_test[i])
elif accurateH == True:
if (predict_test[i]) == 1 and (actual_test[i] == 1):
lrp_hiatus.append(lrp_test[i])
elif accurateH == 'WRONG':
if (predict_test[i]) == 1 and (actual_test[i] == 0):
lrp_hiatus.append(lrp_test[i])
else:
print(ValueError('WRONG COMPOSITE METRIC!!'))
sys.exit()
lrp_hiatus = np.asarray(lrp_hiatus)
##############################################################################
##############################################################################
##############################################################################
### Composite regular period based on accuracy or not
lrp_regular = []
for i in range(lrp_test.shape[0]):
if accurateR == False:
if predict_test[i] == 0:
lrp_regular.append(lrp_test[i])
elif accurateR == True:
if (predict_test[i]) == 0 and (actual_test[i] == 0):
lrp_regular.append(lrp_test[i])
elif accurateR == 'WRONG':
if (predict_test[i]) == 0 and (actual_test[i] == 1):
lrp_regular.append(lrp_test[i])
else:
print(ValueError('WRONG COMPOSITE METRIC!!'))
sys.exit()
lrp_regular = np.asarray(lrp_regular)
return lrp_hiatus,lrp_regular
correct_hia,correct_reg = LRPcomp(True,True)
wrong_hia,wrong_reg = LRPcomp('WRONG','WRONG')
##############################################################################
##############################################################################
##############################################################################
### Average across hiatus and regular period
hiatus_correct = np.nanmean(correct_hia,axis=0)
hiatus_wrong = np.nanmean(wrong_hia,axis=0)
regular_correct = np.nanmean(correct_reg,axis=0)
regular_wrong = np.nanmean(wrong_reg,axis=0)
###############################################################################
###############################################################################
###############################################################################
### Plot subplot of observations
letters = ["b","c","d","e","f","g","h","i","j","k","l","m","n"]
limit = np.arange(0,0.81,0.005)
barlim = np.round(np.arange(0,0.81,0.1),2)
cmap = cm.cubehelix2_16.mpl_colormap
label = r'\textbf{LRP [Relevance]}'
limitd = np.arange(-1.5,1.6,0.02)
barlimd = np.round(np.arange(-1.5,1.6,0.5),2)
cmapd = cmocean.cm.balance
labeld = r'\textbf{OHC100 [Normalized]}'
fig = plt.figure(figsize=(10,8))
###############################################################################
ax1 = plt.subplot(221)
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
hiatus_correctz = hiatus_correct/np.max(hiatus_correct)
lon = np.where(lon >180,lon-360,lon)
x, y = np.meshgrid(lon,lat)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,hiatus_correctz ,limit,extend='max',latlon=True)
cs1.set_cmap(cmap)
### Box 1
la1 = 25
la2 = 45
lo1 = 140
lo2 = 180+(180-145)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
### Box 2
la1 = -10
la2 = 10
lo1 = 170
lo2 = 180+(180-90)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
### Box 3
la1 = -50
la2 = -15
lo1 = 150
lo2 = 180+(180-160)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
plt.title(r'\textbf{CORRECT SLOWDOWN PREDICTIONS}',fontsize=13,color='dimgrey')
ax1.annotate(r'\textbf{[%s]}' % letters[0],xy=(0,0),xytext=(0.98,0.84),
textcoords='axes fraction',color='k',fontsize=9,
rotation=0,ha='center',va='center')
###############################################################################
###############################################################################
###############################################################################
ax2 = plt.subplot(223)
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
regular_correctz = regular_correct/np.max(regular_correct)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs2 = m.contourf(x,y,regular_correctz,limit,extend='max',latlon=True)
cs2.set_cmap(cmap)
### Box 1
la1 = 25
la2 = 45
lo1 = 140
lo2 = 180+(180-145)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
### Box 2
la1 = -10
la2 = 10
lo1 = 170
lo2 = 180+(180-90)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
### Box 3
la1 = -50
la2 = -15
lo1 = 150
lo2 = 180+(180-160)
lonsslice = np.linspace(lo1,lo2,lo2-lo1+1)
latsslice = np.ones(len(lonsslice))*la2
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
latsslice = np.ones(len(lonsslice))*la1
m.plot(lonsslice, latsslice, color='aqua', linewidth=1.5, latlon=True,zorder=4)
m.drawgreatcircle(lo1, la1, lo1, la2,linewidth=1.5,color='aqua',zorder=4)
m.drawgreatcircle(lo2, la2, lo2, la1,linewidth=1.5,color='aqua',zorder=4)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
plt.title(r'\textbf{CORRECT \textit{NO} SLOWDOWN PREDICTIONS}',fontsize=13,color='dimgrey')
ax2.annotate(r'\textbf{[%s]}' % letters[2],xy=(0,0),xytext=(0.98,0.84),
textcoords='axes fraction',color='k',fontsize=9,
rotation=0,ha='center',va='center')
###############################################################################
###############################################################################
###############################################################################
ax2 = plt.subplot(222)
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs2 = m.contourf(x,y,ohc_hiatus,limitd,extend='max',latlon=True)
cs2.set_cmap(cmapd)
csc = m.contour(x,y,hiatus_correctz,np.arange(0.2,1.1,0.2),linestyles='-',latlon=True,
colors='gold',linewidths=0.7)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
plt.title(r'\textbf{SLOWDOWN COMPOSITE}',fontsize=13,color='dimgrey')
ax2.annotate(r'\textbf{[%s]}' % letters[1],xy=(0,0),xytext=(0.98,0.84),
textcoords='axes fraction',color='k',fontsize=9,
rotation=0,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.155,0.1,0.2,0.025])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='max',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=6,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=4)
cbar1.outline.set_edgecolor('dimgrey')
###############################################################################
###############################################################################
###############################################################################
ax2 = plt.subplot(224)
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs4 = m.contourf(x,y,ohc_cc,limitd,extend='both',latlon=True)
cs4.set_cmap(cmapd)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
plt.title(r'\textbf{\textit{NO} SLOWDOWN COMPOSITE}',fontsize=13,color='dimgrey')
ax2.annotate(r'\textbf{[%s]}' % letters[3],xy=(0,0),xytext=(0.98,0.84),
textcoords='axes fraction',color='k',fontsize=9,
rotation=0,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.155,0.1,0.2,0.025])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='max',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=10,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=7)
cbar1.outline.set_edgecolor('dimgrey')
###############################################################################
cbar_axd1 = fig.add_axes([0.65,0.1,0.2,0.025])
cbard1 = fig.colorbar(cs4,cax=cbar_axd1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbard1.set_label(labeld,fontsize=10,color='dimgrey',labelpad=1.4)
cbard1.set_ticks(barlimd)
cbard1.set_ticklabels(list(map(str,barlimd)))
cbard1.ax.tick_params(axis='x', size=.01,labelsize=7)
cbard1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(hspace=-0.4)
plt.text(2.6,50.5,r'\textbf{-- LRP: Relevance}',fontsize=8,color='gold')
plt.savefig(directoryfigure + 'Figure_3_squeeze.png',dpi=600)
| 41.734043
| 287
| 0.571947
|
4edfc145fe3ee3774cbf04016c2524d2ed4cf7dd
| 14,886
|
py
|
Python
|
reid/models/resnet_fusion.py
|
johnzhang1999/Spatial-Attention
|
9e8e90ba624e52dcccba47c7289bb305765f5da6
|
[
"MIT"
] | 228
|
2018-12-04T10:32:46.000Z
|
2020-07-19T07:32:19.000Z
|
reid/models/resnet_fusion.py
|
YUE-FAN/Spatial-Attention
|
71cf324f0fb0829355e5ca322058ebbb9d8be610
|
[
"MIT"
] | 13
|
2018-12-23T02:06:54.000Z
|
2020-05-27T06:47:42.000Z
|
reid/models/resnet_fusion.py
|
YUE-FAN/Spatial-Attention
|
71cf324f0fb0829355e5ca322058ebbb9d8be610
|
[
"MIT"
] | 32
|
2018-12-05T13:46:48.000Z
|
2020-07-01T04:35:49.000Z
|
from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
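# Illustrative note (added; not in the original file): with expansion = 4, a stride-1
# Bottleneck(inplanes=256, planes=64) maps an input of shape (N, 256, H, W) back to
# (N, 256, H, W); a stride-2 block paired with a downsample branch also halves H and W.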
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self,block,layers, depth, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=0, FCN=False, radius=1., thresh=0.5):
super(ResNet, self).__init__()
self.depth = depth
self.pretrained = pretrained
self.cut_at_pooling = cut_at_pooling
self.FCN = FCN
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
# ==========================add dilation=============================#
if self.FCN:
self.num_features = num_features
self.num_classes = 751 # num_classes
self.dropout = dropout
# out_planes = self.base.fc.in_features
self.local_conv1 = nn.Conv2d(256, self.num_features, kernel_size=1, padding=0, bias=False)
self.local_conv2 = nn.Conv2d(512, self.num_features, kernel_size=1, padding=0, bias=False)
self.local_conv3 = nn.Conv2d(1024, self.num_features, kernel_size=1, padding=0, bias=False)
self.local_conv4 = nn.Conv2d(2048, self.num_features, kernel_size=1, padding=0, bias=False)
init.kaiming_normal(self.local_conv1.weight, mode='fan_out')
init.kaiming_normal(self.local_conv2.weight, mode='fan_out')
init.kaiming_normal(self.local_conv3.weight, mode='fan_out')
init.kaiming_normal(self.local_conv4.weight, mode='fan_out')
# init.constant(self.local_conv.bias,0)
self.feat_bn2d = nn.BatchNorm2d(self.num_features) # may not be used, not working on caffe
init.constant(self.feat_bn2d.weight, 1) # initialize BN, may not be used
            init.constant(self.feat_bn2d.bias, 0)  # initialize BN, may not be used
# self.offset = ConvOffset2D(256)
            ##---------------------------stripe1----------------------------------------------#
            self.instance0 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance0.weight, std=0.001)
            init.constant(self.instance0.bias, 0)
            ##---------------------------stripe2----------------------------------------------#
            self.instance1 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance1.weight, std=0.001)
            init.constant(self.instance1.bias, 0)
            ##---------------------------stripe3----------------------------------------------#
            self.instance2 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance2.weight, std=0.001)
            init.constant(self.instance2.bias, 0)
            ##---------------------------stripe4----------------------------------------------#
            self.instance3 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance3.weight, std=0.001)
            init.constant(self.instance3.bias, 0)
            ##---------------------------stripe5----------------------------------------------#
            self.instance4 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance4.weight, std=0.001)
            init.constant(self.instance4.bias, 0)
            ##---------------------------stripe6----------------------------------------------#
            self.instance5 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance5.weight, std=0.001)
            init.constant(self.instance5.bias, 0)
            ##---------------------------stripe7----------------------------------------------#
            self.instance6 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance6.weight, std=0.001)
            init.constant(self.instance6.bias, 0)
self.drop = nn.Dropout(self.dropout)
if not self.pretrained:
self.reset_params()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
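    # Illustrative note (added; not in the original file): for example,
    # self._make_layer(Bottleneck, 64, 3) stacks three Bottleneck blocks and leaves
    # self.inplanes at 64 * 4 = 256, so the next _make_layer call chains on the
    # expanded channel count.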
def forward(self, x):
# for name, module in self.base._modules.items():
# if name == 'avgpool':
# break
# x = module(x)
#
# if self.cut_at_pooling:
# return x
# =======================FCN===============================#
if self.FCN:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
stage1 = self.layer1(x)
stage2 = self.layer2(stage1)
stage3 = self.layer3(stage2)
stage4 = self.layer4(stage3)
            print(stage1.size())
            print(stage2.size())
            print(stage3.size())
            print(stage4.size())
# z = stage4[:, :, 5:17, :]
# z = self.drop(z)
# z = self.local_conv(z)
# z = self.feat_bn2d(z)
# z = F.relu(z) # relu for local_conv feature
# x6 = F.avg_pool2d(z, kernel_size=(12, 8), stride=(1, 1))
# x6 = x6.contiguous().view(x6.size(0), -1)
# # print x6.size()
# c6 = self.instance6(x6)
z = stage4
            sx = z.size(2) // 6
kx = z.size(2) - sx * 5
z = F.avg_pool2d(z, kernel_size=(kx, z.size(3)), stride=(sx, z.size(3))) # H4 W8
out0 = z/z.norm(2,1).unsqueeze(1).expand_as(z)
stages = [stage1,stage2,stage3,stage4]
losses = []
num = 0
for stage in stages:
x = stage
                sx = x.size(2) // 6
kx = x.size(2) - sx * 5
x = F.avg_pool2d(x, kernel_size=(kx, x.size(3)),
stride=(sx, x.size(3))) # H4 W8 #torch.Size([64, 2048, 6, 1])
# print sx, kx, x.size()
# ========================================================================#
x = self.drop(x)
# x = self.local_conv(x)
# x = self.local_convs[num](x)
num+=1
if num == 1:
x = self.local_conv1(x)
elif num == 2:
x = self.local_conv2(x)
elif num == 3:
x = self.local_conv3(x)
elif num == 4:
x = self.local_conv4(x)
x = self.feat_bn2d(x)
x = F.relu(x) # relu for local_conv feature
x = x.chunk(6, 2)
# print x[0].size()
x0 = x[0].contiguous().view(x[0].size(0), -1)
x1 = x[1].contiguous().view(x[1].size(0), -1)
x2 = x[2].contiguous().view(x[2].size(0), -1)
x3 = x[3].contiguous().view(x[3].size(0), -1)
x4 = x[4].contiguous().view(x[4].size(0), -1)
x5 = x[5].contiguous().view(x[5].size(0), -1)
# print x0.size()
c0 = self.instance0(x0)
# print c0.size()
c1 = self.instance1(x1)
c2 = self.instance2(x2)
c3 = self.instance3(x3)
c4 = self.instance4(x4)
c5 = self.instance5(x5)
# print c6.size()
losses.append(c0)
losses.append(c1)
losses.append(c2)
losses.append(c3)
losses.append(c4)
losses.append(c5)
# losses.append(out0)
return out0,losses
# ==========================================================#
# =======================DCN===============================#
# if self.FCN:
# y = x.unsqueeze(1)
# y = F.avg_pool3d(x, (16, 1, 1)).squeeze(1)
# sx = x.size(2) / 6
# kx = x.size(2) - sx * 5
# x = F.avg_pool2d(x, kernel_size=(kx, x.size(3)), stride=(sx, x.size(3))) # H4 W8
# # print sx,kx,x.size()
# # ========================================================================#
#
# out0 = x.view(x.size(0), -1)
# out0 = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
# x = self.drop(x)
# x = self.local_conv(x)
# x = self.offset(x)
# out1 = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
# x = self.feat_bn2d(x)
# x = F.relu(x) # relu for local_conv feature
#
# x = x.chunk(6, 2)
# x0 = x[0].contiguous().view(x[0].size(0), -1)
# x1 = x[1].contiguous().view(x[1].size(0), -1)
# x2 = x[2].contiguous().view(x[2].size(0), -1)
# x3 = x[3].contiguous().view(x[3].size(0), -1)
# x4 = x[4].contiguous().view(x[4].size(0), -1)
# x5 = x[5].contiguous().view(x[5].size(0), -1)
# c0 = self.instance0(x0)
# c1 = self.instance1(x1)
# c2 = self.instance2(x2)
# c3 = self.instance3(x3)
# c4 = self.instance4(x4)
# c5 = self.instance5(x5)
# return out0, (c0, c1, c2, c3, c4, c5)
# ==========================================================#
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(x.size(0), -1)
out1 = x.view(x.size(0), -1)
center = out1.mean(0).unsqueeze(0).expand_as(out1)
out2 = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
if self.has_embedding:
x = self.feat(x)
out3 = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
x = self.feat_bn(x)
if self.norm:
x = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
elif self.has_embedding: # adding relu after fc, not used in softmax but in tripletloss
x = F.relu(x)
out4 = x / x.norm(2, 1).unsqueeze(1).expand_as(x)
if self.dropout > 0:
x = self.drop(x)
if self.num_classes > 0:
x = self.classifier(x)
return out2, x, out2, out2
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=0.001)
if m.bias is not None:
init.constant(m.bias, 0)
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
def resnet50(**kwargs):
# return ResNet(Bottleneck, [3, 4, 6, 3],50, **kwargs)
model = ResNet(Bottleneck, [3, 4, 6, 3],50, **kwargs)
print('load resnet50 ok')
model_dict = model.state_dict()
params = model_zoo.load_url(model_urls['resnet50'])
params = {k: v for k, v in params.items() if k in model_dict}
model_dict.update(params)
model.load_state_dict(model_dict)
return model
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
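# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Only resnet50() matches the modified constructor above; the other factory functions
# still pass the old positional arguments. Input size, num_features and dropout below
# are assumptions picked for a person re-ID style input, and the sketch assumes a
# PyTorch build where the deprecated init.normal/init.constant/init.kaiming_normal
# aliases used throughout this file are still available.
if __name__ == '__main__':
    net = ResNet(Bottleneck, [3, 4, 6, 3], 50, pretrained=False,
                 num_features=128, dropout=0.5, FCN=True)
    dummy = torch.randn(2, 3, 384, 128)  # (batch, channels, height, width)
    pooled, logits = net(dummy)
    # pooled: L2-normalised 6-stripe pooling of stage4, shape (2, 2048, 6, 1)
    # logits: 4 stages x 6 stripes = 24 classifier outputs, each of shape (2, 751)
    print(pooled.size(), len(logits))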
| 42.169972
| 105
| 0.469636
|
f25c302798289c29557bfd42fbf827f186adf320
| 254
|
py
|
Python
|
21-07/Starters7/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
21-07/Starters7/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
21-07/Starters7/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
# https://www.codechef.com/START7C/problems/CHSFORMT
for T in range(int(input())):
a,b=map(int,input().split())
if((a+b)<3): print(1)
elif((a+b) in range(3,11)): print(2)
elif((a+b) in range(11,61)): print(3)
elif((a+b)>60): print(4)
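# Hedged sample walk-through (inputs invented here, not taken from the problem page):
# a=1, b=1  -> a+b=2  -> prints 1
# a=5, b=10 -> a+b=15 -> prints 3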
| 31.75
| 52
| 0.586614
|
6185ca727a546bc14cd1cb818ff3024b71b15ef2
| 12,086
|
py
|
Python
|
neutron/tests/unit/db/metering/test_db_metering.py
|
hughsaunders/neutron
|
8e600ec5b1f0f4ed99b9bf8c72226ca3dab3efad
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/db/metering/test_db_metering.py
|
hughsaunders/neutron
|
8e600ec5b1f0f4ed99b9bf8c72226ca3dab3efad
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/db/metering/test_db_metering.py
|
hughsaunders/neutron
|
8e600ec5b1f0f4ed99b9bf8c72226ca3dab3efad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import webob.exc
from neutron.api import extensions
from neutron.common import config
from neutron import context
import neutron.extensions
from neutron.extensions import metering
from neutron.plugins.common import constants
from neutron.services.metering import metering_plugin
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)
DB_METERING_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
extensions_path = ':'.join(neutron.extensions.__path__)
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'description': description}}
req = self.new_create_request('metering-labels', data,
fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id'],
is_admin=kwargs.get('is_admin', True)))
return req.get_response(self.ext_api)
def _make_metering_label(self, fmt, name, description, **kwargs):
res = self._create_metering_label(fmt, name, description, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _create_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
data = {'metering_label_rule':
{'metering_label_id': metering_label_id,
'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
'remote_ip_prefix': remote_ip_prefix}}
req = self.new_create_request('metering-label-rules',
data, fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return req.get_response(self.ext_api)
def _make_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
res = self._create_metering_label_rule(fmt, metering_label_id,
direction, remote_ip_prefix,
excluded, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def metering_label(self, name='label', description='desc',
fmt=None, no_delete=False, **kwargs):
if not fmt:
fmt = self.fmt
metering_label = self._make_metering_label(fmt, name,
description, **kwargs)
yield metering_label
if not no_delete:
self._delete('metering-labels',
metering_label['metering_label']['id'])
@contextlib.contextmanager
def metering_label_rule(self, metering_label_id=None, direction='ingress',
remote_ip_prefix='10.0.0.0/24',
excluded='false', fmt=None, no_delete=False):
if not fmt:
fmt = self.fmt
metering_label_rule = self._make_metering_label_rule(fmt,
metering_label_id,
direction,
remote_ip_prefix,
excluded)
yield metering_label_rule
if not no_delete:
self._delete('metering-label-rules',
metering_label_rule['metering_label_rule']['id'])
class MeteringPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase,
MeteringPluginDbTestCaseMixin):
fmt = 'json'
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self, plugin=None):
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
super(MeteringPluginDbTestCase, self).setUp(
plugin=plugin,
service_plugins=service_plugins
)
self.plugin = metering_plugin.MeteringPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.METERING: self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
def test_create_metering_label(self):
name = 'my label'
description = 'my metering label'
keys = [('name', name,), ('description', description)]
with self.metering_label(name, description) as metering_label:
for k, v, in keys:
self.assertEqual(metering_label['metering_label'][k], v)
def test_delete_metering_label(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description,
no_delete=True) as metering_label:
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
def test_list_metering_label(self):
name = 'my label'
description = 'my metering label'
with contextlib.nested(
self.metering_label(name, description),
self.metering_label(name, description)) as metering_label:
self._test_list_resources('metering-label', metering_label)
def test_create_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
keys = [('metering_label_id', metering_label_id),
('direction', direction),
('excluded', excluded),
('remote_ip_prefix', remote_ip_prefix)]
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as label_rule:
for k, v, in keys:
self.assertEqual(label_rule['metering_label_rule'][k], v)
def test_delete_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded,
no_delete=True) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._delete('metering-label-rules', rule_id, 204)
def test_list_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id,
'ingress',
remote_ip_prefix,
excluded)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_metering_label_rules(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id,
direction,
'0.0.0.0/0',
False)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_metering_label_rule_two_labels(self):
name1 = 'my label 1'
name2 = 'my label 2'
description = 'my metering label'
with self.metering_label(name1, description) as metering_label1:
metering_label_id1 = metering_label1['metering_label']['id']
with self.metering_label(name2, description) as metering_label2:
metering_label_id2 = metering_label2['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id1,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id2,
direction,
remote_ip_prefix,
excluded)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
class TestMeteringDbXML(MeteringPluginDbTestCase):
fmt = 'xml'
| 41.390411
| 79
| 0.554443
|
f4ec40fcb150319966c787a85389671714c5dd14
| 21,934
|
py
|
Python
|
tests/components/zha/test_channels.py
|
GuyKh/core
|
859bcb6eb4dbb9a8b87b6e4e888e074502db5df1
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/zha/test_channels.py
|
GuyKh/core
|
859bcb6eb4dbb9a8b87b6e4e888e074502db5df1
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/zha/test_channels.py
|
marecabo/home-assistant
|
e33774a61e7fcc88aff752dfa4618dd26a746872
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Test ZHA Core channels."""
import asyncio
import math
from unittest import mock
from unittest.mock import AsyncMock, patch
import pytest
import zigpy.profiles.zha
import zigpy.types as t
import zigpy.zcl.clusters
import homeassistant.components.zha.core.channels as zha_channels
import homeassistant.components.zha.core.channels.base as base_channels
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.registries as registries
from .common import get_zha_gateway, make_zcl_header
from .conftest import SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_TYPE
from tests.common import async_capture_events
@pytest.fixture
def ieee():
"""IEEE fixture."""
return t.EUI64.deserialize(b"ieeeaddr")[0]
@pytest.fixture
def nwk():
"""NWK fixture."""
return t.NWK(0xBEEF)
@pytest.fixture
async def zha_gateway(hass, setup_zha):
"""Return ZhaGateway fixture."""
await setup_zha()
return get_zha_gateway(hass)
@pytest.fixture
def zigpy_coordinator_device(zigpy_device_mock):
"""Coordinator device fixture."""
coordinator = zigpy_device_mock(
{1: {SIG_EP_INPUT: [0x1000], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
with patch.object(coordinator, "add_to_group", AsyncMock(return_value=[0])):
yield coordinator
@pytest.fixture
def channel_pool(zigpy_coordinator_device):
"""Endpoint Channels fixture."""
ch_pool_mock = mock.MagicMock(spec_set=zha_channels.ChannelPool)
ch_pool_mock.endpoint.device.application.get_device.return_value = (
zigpy_coordinator_device
)
type(ch_pool_mock).skip_configuration = mock.PropertyMock(return_value=False)
ch_pool_mock.id = 1
return ch_pool_mock
@pytest.fixture
def poll_control_ch(channel_pool, zigpy_device_mock):
"""Poll control channel fixture."""
cluster_id = zigpy.zcl.clusters.general.PollControl.cluster_id
zigpy_dev = zigpy_device_mock(
{1: {SIG_EP_INPUT: [cluster_id], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].in_clusters[cluster_id]
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(cluster_id)
return channel_class(cluster, channel_pool)
@pytest.fixture
async def poll_control_device(zha_device_restored, zigpy_device_mock):
"""Poll control device fixture."""
cluster_id = zigpy.zcl.clusters.general.PollControl.cluster_id
zigpy_dev = zigpy_device_mock(
{1: {SIG_EP_INPUT: [cluster_id], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
zha_device = await zha_device_restored(zigpy_dev)
return zha_device
@pytest.mark.parametrize(
"cluster_id, bind_count, attrs",
[
(0x0000, 0, {}),
(0x0001, 1, {"battery_voltage", "battery_percentage_remaining"}),
(0x0003, 0, {}),
(0x0004, 0, {}),
(0x0005, 1, {}),
(0x0006, 1, {"on_off"}),
(0x0007, 1, {}),
(0x0008, 1, {"current_level"}),
(0x0009, 1, {}),
(0x000C, 1, {"present_value"}),
(0x000D, 1, {"present_value"}),
(0x000E, 1, {"present_value"}),
(0x000D, 1, {"present_value"}),
(0x0010, 1, {"present_value"}),
(0x0011, 1, {"present_value"}),
(0x0012, 1, {"present_value"}),
(0x0013, 1, {"present_value"}),
(0x0014, 1, {"present_value"}),
(0x0015, 1, {}),
(0x0016, 1, {}),
(0x0019, 0, {}),
(0x001A, 1, {}),
(0x001B, 1, {}),
(0x0020, 1, {}),
(0x0021, 0, {}),
(0x0101, 1, {"lock_state"}),
(
0x0201,
1,
{
"local_temp",
"occupied_cooling_setpoint",
"occupied_heating_setpoint",
"unoccupied_cooling_setpoint",
"unoccupied_heating_setpoint",
"running_mode",
"running_state",
"system_mode",
"occupancy",
"pi_cooling_demand",
"pi_heating_demand",
},
),
(0x0202, 1, {"fan_mode"}),
(0x0300, 1, {"current_x", "current_y", "color_temperature"}),
(0x0400, 1, {"measured_value"}),
(0x0401, 1, {"level_status"}),
(0x0402, 1, {"measured_value"}),
(0x0403, 1, {"measured_value"}),
(0x0404, 1, {"measured_value"}),
(0x0405, 1, {"measured_value"}),
(0x0406, 1, {"occupancy"}),
(0x0702, 1, {"instantaneous_demand"}),
(
0x0B04,
1,
{
"active_power",
"active_power_max",
"apparent_power",
"rms_current",
"rms_current_max",
"rms_voltage",
"rms_voltage_max",
},
),
],
)
async def test_in_channel_config(
cluster_id, bind_count, attrs, channel_pool, zigpy_device_mock, zha_gateway
):
"""Test ZHA core channel configuration for input clusters."""
zigpy_dev = zigpy_device_mock(
{1: {SIG_EP_INPUT: [cluster_id], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].in_clusters[cluster_id]
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base_channels.ZigbeeChannel
)
channel = channel_class(cluster, channel_pool)
await channel.async_configure()
assert cluster.bind.call_count == bind_count
assert cluster.configure_reporting.call_count == 0
assert cluster.configure_reporting_multiple.call_count == math.ceil(len(attrs) / 3)
reported_attrs = {
a
for a in attrs
for attr in cluster.configure_reporting_multiple.call_args_list
for attrs in attr[0][0]
}
assert set(attrs) == reported_attrs
@pytest.mark.parametrize(
"cluster_id, bind_count",
[
(0x0000, 0),
(0x0001, 1),
(0x0003, 0),
(0x0004, 0),
(0x0005, 1),
(0x0006, 1),
(0x0007, 1),
(0x0008, 1),
(0x0009, 1),
(0x0015, 1),
(0x0016, 1),
(0x0019, 0),
(0x001A, 1),
(0x001B, 1),
(0x0020, 1),
(0x0021, 0),
(0x0101, 1),
(0x0202, 1),
(0x0300, 1),
(0x0400, 1),
(0x0402, 1),
(0x0403, 1),
(0x0405, 1),
(0x0406, 1),
(0x0702, 1),
(0x0B04, 1),
],
)
async def test_out_channel_config(
cluster_id, bind_count, channel_pool, zigpy_device_mock, zha_gateway
):
"""Test ZHA core channel configuration for output clusters."""
zigpy_dev = zigpy_device_mock(
{1: {SIG_EP_OUTPUT: [cluster_id], SIG_EP_INPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].out_clusters[cluster_id]
cluster.bind_only = True
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base_channels.ZigbeeChannel
)
channel = channel_class(cluster, channel_pool)
await channel.async_configure()
assert cluster.bind.call_count == bind_count
assert cluster.configure_reporting.call_count == 0
def test_channel_registry():
"""Test ZIGBEE Channel Registry."""
for (cluster_id, channel) in registries.ZIGBEE_CHANNEL_REGISTRY.items():
assert isinstance(cluster_id, int)
assert 0 <= cluster_id <= 0xFFFF
assert issubclass(channel, base_channels.ZigbeeChannel)
def test_epch_unclaimed_channels(channel):
"""Test unclaimed channels."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ep_channels = zha_channels.ChannelPool(
mock.MagicMock(spec_set=zha_channels.Channels), mock.sentinel.ep
)
all_channels = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
with mock.patch.dict(ep_channels.all_channels, all_channels, clear=True):
available = ep_channels.unclaimed_channels()
assert ch_1 in available
assert ch_2 in available
assert ch_3 in available
ep_channels.claimed_channels[ch_2.id] = ch_2
available = ep_channels.unclaimed_channels()
assert ch_1 in available
assert ch_2 not in available
assert ch_3 in available
ep_channels.claimed_channels[ch_1.id] = ch_1
available = ep_channels.unclaimed_channels()
assert ch_1 not in available
assert ch_2 not in available
assert ch_3 in available
ep_channels.claimed_channels[ch_3.id] = ch_3
available = ep_channels.unclaimed_channels()
assert ch_1 not in available
assert ch_2 not in available
assert ch_3 not in available
def test_epch_claim_channels(channel):
"""Test channel claiming."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ep_channels = zha_channels.ChannelPool(
mock.MagicMock(spec_set=zha_channels.Channels), mock.sentinel.ep
)
all_channels = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
with mock.patch.dict(ep_channels.all_channels, all_channels, clear=True):
assert ch_1.id not in ep_channels.claimed_channels
assert ch_2.id not in ep_channels.claimed_channels
assert ch_3.id not in ep_channels.claimed_channels
ep_channels.claim_channels([ch_2])
assert ch_1.id not in ep_channels.claimed_channels
assert ch_2.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_2.id] is ch_2
assert ch_3.id not in ep_channels.claimed_channels
ep_channels.claim_channels([ch_3, ch_1])
assert ch_1.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_1.id] is ch_1
assert ch_2.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_2.id] is ch_2
assert ch_3.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_3.id] is ch_3
assert "1:0x0300" in ep_channels.claimed_channels
@mock.patch(
"homeassistant.components.zha.core.channels.ChannelPool.add_client_channels"
)
@mock.patch(
"homeassistant.components.zha.core.discovery.PROBE.discover_entities",
mock.MagicMock(),
)
def test_ep_channels_all_channels(m1, zha_device_mock):
"""Test EndpointChannels adding all channels."""
zha_device = zha_device_mock(
{
1: {
SIG_EP_INPUT: [0, 1, 6, 8],
SIG_EP_OUTPUT: [],
SIG_EP_TYPE: zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
},
2: {
SIG_EP_INPUT: [0, 1, 6, 8, 768],
SIG_EP_OUTPUT: [],
SIG_EP_TYPE: 0x0000,
},
}
)
channels = zha_channels.Channels(zha_device)
ep_channels = zha_channels.ChannelPool.new(channels, 1)
assert "1:0x0000" in ep_channels.all_channels
assert "1:0x0001" in ep_channels.all_channels
assert "1:0x0006" in ep_channels.all_channels
assert "1:0x0008" in ep_channels.all_channels
assert "1:0x0300" not in ep_channels.all_channels
assert "2:0x0000" not in ep_channels.all_channels
assert "2:0x0001" not in ep_channels.all_channels
assert "2:0x0006" not in ep_channels.all_channels
assert "2:0x0008" not in ep_channels.all_channels
assert "2:0x0300" not in ep_channels.all_channels
channels = zha_channels.Channels(zha_device)
ep_channels = zha_channels.ChannelPool.new(channels, 2)
assert "1:0x0000" not in ep_channels.all_channels
assert "1:0x0001" not in ep_channels.all_channels
assert "1:0x0006" not in ep_channels.all_channels
assert "1:0x0008" not in ep_channels.all_channels
assert "1:0x0300" not in ep_channels.all_channels
assert "2:0x0000" in ep_channels.all_channels
assert "2:0x0001" in ep_channels.all_channels
assert "2:0x0006" in ep_channels.all_channels
assert "2:0x0008" in ep_channels.all_channels
assert "2:0x0300" in ep_channels.all_channels
@mock.patch(
"homeassistant.components.zha.core.channels.ChannelPool.add_client_channels"
)
@mock.patch(
"homeassistant.components.zha.core.discovery.PROBE.discover_entities",
mock.MagicMock(),
)
def test_channel_power_config(m1, zha_device_mock):
"""Test that channels only get a single power channel."""
in_clusters = [0, 1, 6, 8]
zha_device = zha_device_mock(
{
1: {SIG_EP_INPUT: in_clusters, SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x0000},
2: {
SIG_EP_INPUT: [*in_clusters, 768],
SIG_EP_OUTPUT: [],
SIG_EP_TYPE: 0x0000,
},
}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "1:0x0000" in pools[1].all_channels
assert "1:0x0001" in pools[1].all_channels
assert "1:0x0006" in pools[1].all_channels
assert "1:0x0008" in pools[1].all_channels
assert "1:0x0300" not in pools[1].all_channels
assert "2:0x0000" in pools[2].all_channels
assert "2:0x0001" not in pools[2].all_channels
assert "2:0x0006" in pools[2].all_channels
assert "2:0x0008" in pools[2].all_channels
assert "2:0x0300" in pools[2].all_channels
zha_device = zha_device_mock(
{
1: {SIG_EP_INPUT: [], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x0000},
2: {SIG_EP_INPUT: in_clusters, SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x0000},
}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "1:0x0001" not in pools[1].all_channels
assert "2:0x0001" in pools[2].all_channels
zha_device = zha_device_mock(
{2: {SIG_EP_INPUT: in_clusters, SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x0000}}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "2:0x0001" in pools[2].all_channels
async def test_ep_channels_configure(channel):
"""Test unclaimed channels."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ch_3.async_configure = AsyncMock(side_effect=asyncio.TimeoutError)
ch_3.async_initialize = AsyncMock(side_effect=asyncio.TimeoutError)
ch_4 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_5 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_5.async_configure = AsyncMock(side_effect=asyncio.TimeoutError)
ch_5.async_initialize = AsyncMock(side_effect=asyncio.TimeoutError)
channels = mock.MagicMock(spec_set=zha_channels.Channels)
type(channels).semaphore = mock.PropertyMock(return_value=asyncio.Semaphore(3))
ep_channels = zha_channels.ChannelPool(channels, mock.sentinel.ep)
claimed = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
client_chans = {ch_4.id: ch_4, ch_5.id: ch_5}
with mock.patch.dict(
ep_channels.claimed_channels, claimed, clear=True
), mock.patch.dict(ep_channels.client_channels, client_chans, clear=True):
await ep_channels.async_configure()
await ep_channels.async_initialize(mock.sentinel.from_cache)
for ch in [*claimed.values(), *client_chans.values()]:
assert ch.async_initialize.call_count == 1
assert ch.async_initialize.await_count == 1
assert ch.async_initialize.call_args[0][0] is mock.sentinel.from_cache
assert ch.async_configure.call_count == 1
assert ch.async_configure.await_count == 1
assert ch_3.warning.call_count == 2
assert ch_5.warning.call_count == 2
async def test_poll_control_configure(poll_control_ch):
"""Test poll control channel configuration."""
await poll_control_ch.async_configure()
assert poll_control_ch.cluster.write_attributes.call_count == 1
assert poll_control_ch.cluster.write_attributes.call_args[0][0] == {
"checkin_interval": poll_control_ch.CHECKIN_INTERVAL
}
async def test_poll_control_checkin_response(poll_control_ch):
"""Test poll control channel checkin response."""
rsp_mock = AsyncMock()
set_interval_mock = AsyncMock()
fast_poll_mock = AsyncMock()
cluster = poll_control_ch.cluster
patch_1 = mock.patch.object(cluster, "checkin_response", rsp_mock)
patch_2 = mock.patch.object(cluster, "set_long_poll_interval", set_interval_mock)
patch_3 = mock.patch.object(cluster, "fast_poll_stop", fast_poll_mock)
with patch_1, patch_2, patch_3:
await poll_control_ch.check_in_response(33)
assert rsp_mock.call_count == 1
assert set_interval_mock.call_count == 1
assert fast_poll_mock.call_count == 1
await poll_control_ch.check_in_response(33)
assert cluster.endpoint.request.call_count == 3
assert cluster.endpoint.request.await_count == 3
assert cluster.endpoint.request.call_args_list[0][0][1] == 33
assert cluster.endpoint.request.call_args_list[0][0][0] == 0x0020
assert cluster.endpoint.request.call_args_list[1][0][0] == 0x0020
async def test_poll_control_cluster_command(hass, poll_control_device):
"""Test poll control channel response to cluster command."""
checkin_mock = AsyncMock()
poll_control_ch = poll_control_device.channels.pools[0].all_channels["1:0x0020"]
cluster = poll_control_ch.cluster
events = async_capture_events(hass, "zha_event")
with mock.patch.object(poll_control_ch, "check_in_response", checkin_mock):
tsn = 22
hdr = make_zcl_header(0, global_command=False, tsn=tsn)
assert not events
cluster.handle_message(
hdr, [mock.sentinel.args, mock.sentinel.args2, mock.sentinel.args3]
)
await hass.async_block_till_done()
assert checkin_mock.call_count == 1
assert checkin_mock.await_count == 1
assert checkin_mock.await_args[0][0] == tsn
assert len(events) == 1
data = events[0].data
assert data["command"] == "checkin"
assert data["args"][0] is mock.sentinel.args
assert data["args"][1] is mock.sentinel.args2
assert data["args"][2] is mock.sentinel.args3
assert data["unique_id"] == "00:11:22:33:44:55:66:77:1:0x0020"
assert data["device_id"] == poll_control_device.device_id
async def test_poll_control_ignore_list(hass, poll_control_device):
"""Test poll control channel ignore list."""
set_long_poll_mock = AsyncMock()
poll_control_ch = poll_control_device.channels.pools[0].all_channels["1:0x0020"]
cluster = poll_control_ch.cluster
with mock.patch.object(cluster, "set_long_poll_interval", set_long_poll_mock):
await poll_control_ch.check_in_response(33)
assert set_long_poll_mock.call_count == 1
set_long_poll_mock.reset_mock()
poll_control_ch.skip_manufacturer_id(4151)
with mock.patch.object(cluster, "set_long_poll_interval", set_long_poll_mock):
await poll_control_ch.check_in_response(33)
assert set_long_poll_mock.call_count == 0
async def test_poll_control_ikea(hass, poll_control_device):
"""Test poll control channel ignore list for ikea."""
set_long_poll_mock = AsyncMock()
poll_control_ch = poll_control_device.channels.pools[0].all_channels["1:0x0020"]
cluster = poll_control_ch.cluster
poll_control_device.device.node_desc.manufacturer_code = 4476
with mock.patch.object(cluster, "set_long_poll_interval", set_long_poll_mock):
await poll_control_ch.check_in_response(33)
assert set_long_poll_mock.call_count == 0
@pytest.fixture
def zigpy_zll_device(zigpy_device_mock):
"""ZLL device fixture."""
return zigpy_device_mock(
{1: {SIG_EP_INPUT: [0x1000], SIG_EP_OUTPUT: [], SIG_EP_TYPE: 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
async def test_zll_device_groups(
zigpy_zll_device, channel_pool, zigpy_coordinator_device
):
"""Test adding coordinator to ZLL groups."""
cluster = zigpy_zll_device.endpoints[1].lightlink
channel = zha_channels.lightlink.LightLink(cluster, channel_pool)
with patch.object(
cluster, "command", AsyncMock(return_value=[1, 0, []])
) as cmd_mock:
await channel.async_configure()
assert cmd_mock.await_count == 1
assert (
cluster.server_commands[cmd_mock.await_args[0][0]][0]
== "get_group_identifiers"
)
assert cluster.bind.call_count == 0
assert zigpy_coordinator_device.add_to_group.await_count == 1
assert zigpy_coordinator_device.add_to_group.await_args[0][0] == 0x0000
zigpy_coordinator_device.add_to_group.reset_mock()
group_1 = zigpy.zcl.clusters.lightlink.GroupInfoRecord(0xABCD, 0x00)
group_2 = zigpy.zcl.clusters.lightlink.GroupInfoRecord(0xAABB, 0x00)
with patch.object(
cluster, "command", AsyncMock(return_value=[1, 0, [group_1, group_2]])
) as cmd_mock:
await channel.async_configure()
assert cmd_mock.await_count == 1
assert (
cluster.server_commands[cmd_mock.await_args[0][0]][0]
== "get_group_identifiers"
)
assert cluster.bind.call_count == 0
assert zigpy_coordinator_device.add_to_group.await_count == 2
assert (
zigpy_coordinator_device.add_to_group.await_args_list[0][0][0]
== group_1.group_id
)
assert (
zigpy_coordinator_device.add_to_group.await_args_list[1][0][0]
== group_2.group_id
)
| 35.263666
| 87
| 0.669463
|
d00a6a78d554d3b551d847fba8c32d0052666e61
| 47,645
|
py
|
Python
|
geowombat/core/io.py
|
jgrss/geowombat
|
e691102a3dcce13b272810b43bc9586681ae6934
|
[
"MIT"
] | 38
|
2020-01-13T22:45:18.000Z
|
2022-03-31T05:44:35.000Z
|
geowombat/core/io.py
|
jgrss/geowombat
|
e691102a3dcce13b272810b43bc9586681ae6934
|
[
"MIT"
] | 41
|
2020-01-27T00:57:58.000Z
|
2022-03-10T19:52:20.000Z
|
geowombat/core/io.py
|
jgrss/geowombat
|
e691102a3dcce13b272810b43bc9586681ae6934
|
[
"MIT"
] | 4
|
2020-01-14T19:27:38.000Z
|
2021-08-23T03:08:37.000Z
|
import os
from pathlib import Path
import shutil
import itertools
import ctypes
import concurrent.futures
import multiprocessing as multi
import threading
import random
import string
import logging
from ..handler import add_handler
from ..backends.rasterio_ import to_gtiff, WriteDaskArray
from .windows import get_window_offsets
from .base import _client_dummy, _cluster_dummy
try:
from ..backends.zarr_ import to_zarr
import zarr
ZARR_INSTALLED = True
except ImportError:
ZARR_INSTALLED = False
import numpy as np
from osgeo import gdal
import xarray as xr
import dask.array as da
from dask import is_dask_collection
from dask.diagnostics import ProgressBar
from dask.distributed import Client, LocalCluster
import rasterio as rio
from rasterio.windows import Window
from rasterio.vrt import WarpedVRT
from rasterio.enums import Resampling
from rasterio import shutil as rio_shutil
from affine import Affine
from tqdm import tqdm
try:
MKL_LIB = ctypes.CDLL('libmkl_rt.so')
except OSError:
MKL_LIB = None
logger = logging.getLogger(__name__)
logger = add_handler(logger)
def get_norm_indices(n_bands, window_slice, indexes_multi):
# Prepend the band position index to the window slice
if n_bands == 1:
window_slice = tuple([slice(0, 1)] + list(window_slice))
indexes = 1
else:
window_slice = tuple([slice(0, n_bands)] + list(window_slice))
indexes = indexes_multi
return window_slice, indexes
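# For example (illustrative values only): get_norm_indices(3, (slice(0, 256), slice(0, 256)), [1, 2, 3])
# returns ((slice(0, 3), slice(0, 256), slice(0, 256)), [1, 2, 3]), while a single-band call
# prepends slice(0, 1) and returns a band index of 1.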
def _window_worker(w):
"""
Helper to return window slice
"""
return slice(w.row_off, w.row_off + w.height), slice(w.col_off, w.col_off + w.width)
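# For example (illustrative values only): a Window(col_off=256, row_off=512, width=256, height=256)
# maps to (slice(512, 768), slice(256, 512)), i.e. (row slice, column slice).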
def _window_worker_time(w, n_bands, tidx, n_time):
"""
Helper to return window slice
"""
window_slice = (slice(w.row_off, w.row_off + w.height), slice(w.col_off, w.col_off + w.width))
# Prepend the band position index to the window slice
if n_bands == 1:
window_slice = tuple([slice(tidx, n_time)] + [slice(0, 1)] + list(window_slice))
else:
window_slice = tuple([slice(tidx, n_time)] + [slice(0, n_bands)] + list(window_slice))
return window_slice
# def _compressor(*args):
#
# w_, b_, f_, o_ = list(itertools.chain(*args))
#
# with rio.open(f_, mode='r+', sharing=False) as dst_:
#
# dst_.write(np.squeeze(b_),
# window=w_,
# indexes=o_)
# def _block_write_func(*args):
#
# ofn_, fn_, g_, t_ = list(itertools.chain(*args))
#
# if t_ == 'zarr':
#
# group_node = zarr.open(fn_, mode='r')[g_]
#
# w_ = Window(row_off=group_node.attrs['row_off'],
# col_off=group_node.attrs['col_off'],
# height=group_node.attrs['height'],
# width=group_node.attrs['width'])
#
# out_data_ = np.squeeze(group_node['data'][:])
#
# else:
#
# w_ = Window(row_off=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-4][1:]),
# col_off=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-3][1:]),
# height=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-2][1:]),
# width=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-1][1:]))
#
# with rio.open(fn_) as src_:
# out_data_ = np.squeeze(src_.read(window=w_))
#
# out_indexes_ = 1 if len(out_data_.shape) == 2 else list(range(1, out_data_.shape[0]+1))
#
# with rio.open(ofn_, mode='r+', sharing=False) as dst_:
#
# dst_.write(out_data_,
# window=w_,
# indexes=out_indexes_)
def _block_read_func(fn_, g_, t_):
"""
Function for block writing with ``concurrent.futures``
"""
# fn_, g_, t_ = list(itertools.chain(*args))
if t_ == 'zarr':
group_node = zarr.open(fn_, mode='r')[g_]
w_ = Window(row_off=group_node.attrs['row_off'],
col_off=group_node.attrs['col_off'],
height=group_node.attrs['height'],
width=group_node.attrs['width'])
out_data_ = np.squeeze(group_node['data'][:])
else:
w_ = Window(row_off=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-4][1:]),
col_off=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-3][1:]),
height=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-2][1:]),
width=int(os.path.splitext(os.path.basename(fn_))[0].split('_')[-1][1:]))
out_data_ = np.squeeze(rio.open(fn_).read(window=w_))
out_indexes_ = 1 if len(out_data_.shape) == 2 else list(range(1, out_data_.shape[0]+1))
return w_, out_indexes_, out_data_
def _check_offsets(block, out_data_, window_, oleft, otop, ocols, orows, left_, top_):
# Check if the data were read at larger
# extents than the write bounds.
obottom = otop - (orows * abs(block.gw.celly))
oright = oleft + (ocols * abs(block.gw.cellx))
bottom_ = top_ - (window_.height * abs(block.gw.celly))
    right_ = left_ + (window_.width * abs(block.gw.cellx))
left_diff = 0
right_diff = 0
top_diff = 0
bottom_diff = 0
if left_ < oleft:
left_diff = int(abs(oleft - left_) / abs(block.gw.cellx))
right_diff = out_data_.shape[-1]
elif right_ > oright:
left_diff = 0
right_diff = int(abs(oright - right_) / abs(block.gw.cellx))
if bottom_ < obottom:
bottom_diff = int(abs(obottom - bottom_) / abs(block.gw.celly))
top_diff = 0
elif top_ > otop:
bottom_diff = out_data_.shape[-2]
top_diff = int(abs(otop - top_) / abs(block.gw.celly))
if (left_diff != 0) or (top_diff != 0) or (bottom_diff != 0) or (right_diff != 0):
dshape = out_data_.shape
if len(dshape) == 2:
out_data_ = out_data_[top_diff:bottom_diff, left_diff:right_diff]
elif len(dshape) == 3:
out_data_ = out_data_[:, top_diff:bottom_diff, left_diff:right_diff]
elif len(dshape) == 4:
out_data_ = out_data_[:, :, top_diff:bottom_diff, left_diff:right_diff]
window_ = Window(col_off=window_.col_off,
row_off=window_.row_off,
width=out_data_.shape[-1],
height=out_data_.shape[-2])
return out_data_, window_
def _compute_block(block, wid, window_, padded_window_, n_workers, num_workers, oleft, otop, ocols, orows):
"""
Computes a DataArray window block of data
Args:
block (DataArray): The ``xarray.DataArray`` to compute.
wid (int): The window id.
window_ (namedtuple): The window ``rasterio.windows.Window`` object.
padded_window_ (namedtuple): A padded window ``rasterio.windows.Window`` object.
n_workers (int): The number of parallel workers for chunks.
num_workers (int): The number of parallel workers for ``dask.compute``.
oleft (float): The output image left coordinate.
otop (float): The output image top coordinate.
ocols (int): The output image columns.
orows (int): The output image rows.
Returns:
``numpy.ndarray``, ``rasterio.windows.Window``, ``int`` | ``list``
"""
# The geo-transform is needed on the block
# left_, top_ = Affine(*block.transform) * (window_.col_off, window_.row_off)
out_data_ = None
if 'apply' in block.attrs:
attrs = block.attrs.copy()
# Update the block transform
attrs['transform'] = Affine(*block.gw.transform)
attrs['window_id'] = wid
block = block.assign_attrs(**attrs)
if ('apply' in block.attrs) and hasattr(block.attrs['apply'], 'wombat_func_'):
if padded_window_:
logger.warning(' Padding is not supported with lazy functions.')
if block.attrs['apply'].wombat_func_:
# Add the data to the keyword arguments
block.attrs['apply_kwargs']['data'] = block
out_data_ = block.attrs['apply'](**block.attrs['apply_kwargs'])
if n_workers == 1:
out_data_ = out_data_.data.compute(scheduler='threads', num_workers=num_workers)
else:
with threading.Lock():
out_data_ = out_data_.data.compute(scheduler='threads', num_workers=num_workers)
else:
logger.exception(' The lazy wombat function is turned off.')
else:
###############################
# Get the data as a NumPy array
###############################
if n_workers == 1:
out_data_ = block.data.compute(scheduler='threads', num_workers=num_workers)
else:
with threading.Lock():
out_data_ = block.data.compute(scheduler='threads', num_workers=num_workers)
if ('apply' in block.attrs) and not hasattr(block.attrs['apply'], 'wombat_func_'):
if padded_window_:
# Add extra padding on the image borders
rspad = padded_window_.height - window_.height if window_.row_off == 0 else 0
cspad = padded_window_.width - window_.width if window_.col_off == 0 else 0
repad = padded_window_.height - window_.height if (window_.row_off != 0) and (window_.height < block.gw.row_chunks) else 0
cepad = padded_window_.width - window_.width if (window_.col_off != 0) and (window_.width < block.gw.col_chunks) else 0
dshape = out_data_.shape
if (rspad > 0) or (cspad > 0) or (repad > 0) or (cepad > 0):
if len(dshape) == 2:
out_data_ = np.pad(out_data_, ((rspad, repad), (cspad, cepad)), mode='reflect')
elif len(dshape) == 3:
out_data_ = np.pad(out_data_, ((0, 0), (rspad, repad), (cspad, cepad)), mode='reflect')
elif len(dshape) == 4:
out_data_ = np.pad(out_data_, ((0, 0), (0, 0), (rspad, repad), (cspad, cepad)), mode='reflect')
# Apply the user function
if ('apply_args' in block.attrs) and ('apply_kwargs' in block.attrs):
out_data_ = block.attrs['apply'](out_data_, *block.attrs['apply_args'], **block.attrs['apply_kwargs'])
elif ('apply_args' in block.attrs) and ('apply_kwargs' not in block.attrs):
out_data_ = block.attrs['apply'](out_data_, *block.attrs['apply_args'])
elif ('apply_args' not in block.attrs) and ('apply_kwargs' in block.attrs):
out_data_ = block.attrs['apply'](out_data_, **block.attrs['apply_kwargs'])
else:
out_data_ = block.attrs['apply'](out_data_)
if padded_window_:
##########################
# Remove the extra padding
##########################
dshape = out_data_.shape
if len(dshape) == 2:
out_data_ = out_data_[rspad:rspad+padded_window_.height, cspad:cspad+padded_window_.width]
elif len(dshape) == 3:
out_data_ = out_data_[:, rspad:rspad+padded_window_.height, cspad:cspad+padded_window_.width]
elif len(dshape) == 4:
out_data_ = out_data_[:, :, rspad:rspad+padded_window_.height, cspad:cspad+padded_window_.width]
dshape = out_data_.shape
####################
# Remove the padding
####################
# Get the non-padded array slice
row_diff = abs(window_.row_off - padded_window_.row_off)
col_diff = abs(window_.col_off - padded_window_.col_off)
if len(dshape) == 2:
out_data_ = out_data_[row_diff:row_diff+window_.height, col_diff:col_diff+window_.width]
elif len(dshape) == 3:
out_data_ = out_data_[:, row_diff:row_diff+window_.height, col_diff:col_diff+window_.width]
elif len(dshape) == 4:
out_data_ = out_data_[:, :, row_diff:row_diff+window_.height, col_diff:col_diff+window_.width]
else:
if padded_window_:
logger.warning(' Padding is only supported with user functions.')
if not isinstance(out_data_, np.ndarray):
logger.exception(' The data were not computed properly for block {:,d}'.format(wid))
dshape = out_data_.shape
if len(dshape) > 2:
out_data_ = out_data_.squeeze()
if len(dshape) == 2:
indexes_ = 1
else:
indexes_ = 1 if dshape[0] == 1 else list(range(1, dshape[0]+1))
return out_data_, indexes_, window_
def _write_xarray(*args):
"""
Writes a DataArray to file
Args:
args (iterable): A tuple from the window generator.
Reference:
https://github.com/dask/dask/issues/3600
Returns:
``str`` | None
"""
zarr_file = None
block, filename, wid, block_window, padded_window, n_workers, n_threads, separate, chunks, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs = list(itertools.chain(*args))
output, out_indexes, block_window = _compute_block(block, wid, block_window, padded_window, n_workers, n_threads, oleft, otop, ocols, orows)
if separate and (out_block_type.lower() == 'zarr'):
zarr_file = to_zarr(filename, output, block_window, chunks, root=root)
else:
to_gtiff(filename, output, block_window, out_indexes, block.gw.transform, n_workers, separate, tags, kwargs)
return zarr_file
def to_vrt(data,
filename,
overwrite=False,
resampling=None,
nodata=None,
init_dest_nodata=True,
warp_mem_limit=128):
"""
    Writes a DataArray to a VRT file
Args:
data (DataArray): The ``xarray.DataArray`` to write.
filename (str): The output file name to write to.
overwrite (Optional[bool]): Whether to overwrite an existing VRT file.
resampling (Optional[object]): The resampling algorithm for ``rasterio.vrt.WarpedVRT``. Default is 'nearest'.
nodata (Optional[float or int]): The 'no data' value for ``rasterio.vrt.WarpedVRT``.
init_dest_nodata (Optional[bool]): Whether or not to initialize output to ``nodata`` for ``rasterio.vrt.WarpedVRT``.
warp_mem_limit (Optional[int]): The GDAL memory limit for ``rasterio.vrt.WarpedVRT``.
Example:
>>> import geowombat as gw
>>> from rasterio.enums import Resampling
>>>
>>> # Transform a CRS and save to VRT
>>> with gw.config.update(ref_crs=102033):
>>> with gw.open('image.tif') as src:
>>> gw.to_vrt(src,
>>> 'output.vrt',
>>> resampling=Resampling.cubic,
>>> warp_mem_limit=256)
>>>
>>> # Load multiple files set to a common geographic extent
>>> bounds = (left, bottom, right, top)
>>> with gw.config.update(ref_bounds=bounds):
>>> with gw.open(['image1.tif', 'image2.tif'], mosaic=True) as src:
>>> gw.to_vrt(src, 'output.vrt')
"""
if Path(filename).is_file():
if overwrite:
Path(filename).unlink()
else:
logger.warning(f' The VRT file {filename} already exists.')
return
if not resampling:
resampling = Resampling.nearest
if isinstance(data.attrs['filename'], str) or isinstance(data.attrs['filename'], Path):
# Open the input file on disk
with rio.open(data.attrs['filename']) as src:
with WarpedVRT(src,
src_crs=src.crs, # the original CRS
crs=data.crs, # the transformed CRS
src_transform=src.gw.transform, # the original transform
transform=data.gw.transform, # the new transform
dtype=data.dtype,
resampling=resampling,
nodata=nodata,
init_dest_nodata=init_dest_nodata,
warp_mem_limit=warp_mem_limit) as vrt:
rio_shutil.copy(vrt, filename, driver='VRT')
else:
if not data.gw.filenames:
logger.exception(' The data filenames attribute is empty. Use gw.open(..., persist_filenames=True).')
raise KeyError
separate = True if data.gw.data_are_separate and data.gw.data_are_stacked else False
vrt_options = gdal.BuildVRTOptions(outputBounds=data.gw.bounds,
xRes=data.gw.cellx,
yRes=data.gw.celly,
separate=separate,
outputSRS=data.crs)
ds = gdal.BuildVRT(filename, data.gw.filenames, options=vrt_options)
ds = None
def to_netcdf(data, filename, *args, **kwargs):
"""
Writes an Xarray DataArray to a NetCDF file
Args:
data (DataArray): The ``xarray.DataArray`` to write.
filename (str): The output file name to write to.
args (DataArray): Additional ``DataArrays`` to stack.
kwargs (dict): Encoding arguments.
Example:
>>> import geowombat as gw
>>> import xarray as xr
>>>
>>> # Write a single DataArray to a .nc file
>>> with gw.config.update(sensor='l7'):
>>> with gw.open('LC08_L1TP_225078_20200219_20200225_01_T1.tif') as src:
>>> gw.to_netcdf(src, 'filename.nc', zlib=True, complevel=5)
>>>
>>> # Add extra layers
>>> with gw.config.update(sensor='l7'):
>>> with gw.open('LC08_L1TP_225078_20200219_20200225_01_T1.tif') as src, \
>>> gw.open('LC08_L1TP_225078_20200219_20200225_01_T1_angles.tif', band_names=['zenith', 'azimuth']) as ang:
>>>
>>> src = xr.where(src == 0, -32768, src)\
>>> .astype('int16')\
>>> .assign_attrs(**src.attrs)
>>>
>>> gw.to_netcdf(src, 'filename.nc', ang.astype('int16'), zlib=True, complevel=5)
>>>
>>> # Open the data and convert to a DataArray
>>> with xr.open_dataset('filename.nc', engine='h5netcdf', chunks=256) as ds:
>>> src = ds.to_array(dim='band')
"""
encodings = {}
for band_name in data.band.values.tolist():
encode_dict = {'chunksizes': (data.gw.row_chunks, data.gw.col_chunks),
'dtype': data.dtype}
encode_dict.update(**kwargs)
encodings[band_name] = encode_dict
res = data
for other_data in args:
for band_name in other_data.band.values.tolist():
encode_dict = {'chunksizes': (other_data.gw.row_chunks, other_data.gw.col_chunks),
'dtype': other_data.dtype}
encode_dict.update(**kwargs)
encodings[band_name] = encode_dict
res = xr.concat((res, other_data), dim='band')
res.to_dataset(dim='band')\
.assign_attrs(**data.attrs)\
.to_netcdf(path=filename,
mode='w',
format='NETCDF4',
engine='h5netcdf',
encoding=encodings,
compute=True)
def to_raster(data,
filename,
readxsize=None,
readysize=None,
use_dask_store=False,
separate=False,
out_block_type='gtiff',
keep_blocks=False,
verbose=0,
overwrite=False,
gdal_cache=512,
scheduler='mpool',
n_jobs=1,
n_workers=None,
n_threads=None,
n_chunks=None,
use_client=False,
address=None,
total_memory=24,
processes=False,
padding=None,
tags=None,
**kwargs):
"""
Writes a ``dask`` array to a raster file
Args:
data (DataArray): The ``xarray.DataArray`` to write.
filename (str): The output file name to write to.
readxsize (Optional[int]): The size of column chunks to read. If not given, ``readxsize`` defaults to Dask
chunk size.
readysize (Optional[int]): The size of row chunks to read. If not given, ``readysize`` defaults to Dask
chunk size.
separate (Optional[bool]): Whether to write blocks as separate files. Otherwise, write to a single file.
use_dask_store (Optional[bool]): Whether to use ``dask.array.store`` to save with Dask task graphs.
out_block_type (Optional[str]): The output block type. Choices are ['gtiff', 'zarr'].
Only used if ``separate`` = ``True``.
keep_blocks (Optional[bool]): Whether to keep the blocks stored on disk. Only used if ``separate`` = ``True``.
verbose (Optional[int]): The verbosity level.
overwrite (Optional[bool]): Whether to overwrite an existing file.
gdal_cache (Optional[int]): The ``GDAL`` cache size (in MB).
scheduler (Optional[str]): The parallel task scheduler to use. Choices are ['processes', 'threads', 'mpool'].
mpool: process pool of workers using ``multiprocessing.Pool``
processes: process pool of workers using ``concurrent.futures``
threads: thread pool of workers using ``concurrent.futures``
n_jobs (Optional[int]): The total number of parallel jobs.
n_workers (Optional[int]): The number of process workers.
n_threads (Optional[int]): The number of thread workers.
n_chunks (Optional[int]): The chunk size of windows. If not given, equal to ``n_workers`` x 50.
overviews (Optional[bool or list]): Whether to build overview layers.
resampling (Optional[str]): The resampling method for overviews when ``overviews`` is ``True`` or a ``list``.
Choices are ['average', 'bilinear', 'cubic', 'cubic_spline', 'gauss', 'lanczos', 'max', 'med', 'min', 'mode', 'nearest'].
use_client (Optional[bool]): Whether to use a ``dask`` client.
address (Optional[str]): A cluster address to pass to client. Only used when ``use_client`` = ``True``.
total_memory (Optional[int]): The total memory (in GB) required when ``use_client`` = ``True``.
processes (Optional[bool]): Whether to use process workers with the ``dask.distributed`` client. Only applies
when ``use_client`` = ``True``.
padding (Optional[tuple]): Padding for each window. ``padding`` should be given as a tuple
of (left pad, bottom pad, right pad, top pad). If ``padding`` is given, the returned list will contain
a tuple of ``rasterio.windows.Window`` objects as (w1, w2), where w1 contains the normal window offsets
and w2 contains the padded window offsets.
tags (Optional[dict]): Image tags to write to file.
kwargs (Optional[dict]): Additional keyword arguments to pass to ``rasterio.write``.
Returns:
``dask.delayed`` object
Examples:
>>> import geowombat as gw
>>>
>>> # Use 8 parallel workers
>>> with gw.open('input.tif') as ds:
>>> gw.to_raster(ds, 'output.tif', n_jobs=8)
>>>
>>> # Use 4 process workers and 2 thread workers
>>> with gw.open('input.tif') as ds:
>>> gw.to_raster(ds, 'output.tif', n_workers=4, n_threads=2)
>>>
>>> # Control the window chunks passed to concurrent.futures
>>> with gw.open('input.tif') as ds:
>>> gw.to_raster(ds, 'output.tif', n_workers=4, n_threads=2, n_chunks=16)
>>>
>>> # Compress the output and build overviews
>>> with gw.open('input.tif') as ds:
>>> gw.to_raster(ds, 'output.tif', n_jobs=8, overviews=True, compress='lzw')
"""
if MKL_LIB:
__ = MKL_LIB.MKL_Set_Num_Threads(n_threads)
if separate and not ZARR_INSTALLED and (out_block_type.lower() == 'zarr'):
logger.exception(' zarr must be installed to write separate blocks.')
raise ImportError
pfile = Path(filename)
if scheduler.lower() == 'mpool':
pool_executor = multi.Pool
else:
pool_executor = concurrent.futures.ProcessPoolExecutor if scheduler.lower() == 'processes' else concurrent.futures.ThreadPoolExecutor
if overwrite:
if pfile.is_file():
pfile.unlink()
if pfile.is_file():
logger.warning(' The output file already exists.')
return
if not is_dask_collection(data.data):
logger.exception(' The data should be a dask array.')
if use_client:
if address:
cluster_object = _cluster_dummy
else:
cluster_object = LocalCluster
client_object = Client
else:
cluster_object = _cluster_dummy
client_object = _client_dummy
if isinstance(n_workers, int) and isinstance(n_threads, int):
n_jobs = n_workers * n_threads
else:
n_workers = n_jobs
n_threads = 1
mem_per_core = int(total_memory / n_workers)
if not isinstance(n_chunks, int):
n_chunks = n_workers * 50
if not isinstance(readxsize, int):
readxsize = data.gw.col_chunks
if not isinstance(readysize, int):
readysize = data.gw.row_chunks
chunksize = (data.gw.row_chunks, data.gw.col_chunks)
# Force tiled outputs with no file sharing
kwargs['sharing'] = False
if data.gw.tiled:
kwargs['tiled'] = True
if 'compress' in kwargs:
# 'compress' may be given as boolean True or as a compression name string (e.g., 'lzw')
if kwargs['compress']:
if isinstance(kwargs['compress'], str) and kwargs['compress'].lower() == 'none':
compress = False
else:
if 'num_threads' in kwargs:
if use_dask_store:
compress = True
else:
compress = False
else:
compress = True
if compress:
# Store the compression type because
# it is removed in concurrent writing
compress_type = kwargs['compress']
del kwargs['compress']
else:
compress = False
elif isinstance(data.gw.compress, str) and (data.gw.compress.lower() in ['lzw', 'deflate']):
compress = True
compress_type = data.gw.compress
else:
compress = False
if 'nodata' not in kwargs:
if isinstance(data.gw.nodata, int) or isinstance(data.gw.nodata, float):
kwargs['nodata'] = data.gw.nodata
if 'blockxsize' not in kwargs:
kwargs['blockxsize'] = data.gw.col_chunks
if 'blockysize' not in kwargs:
kwargs['blockysize'] = data.gw.row_chunks
if 'bigtiff' not in kwargs:
kwargs['bigtiff'] = data.gw.bigtiff
if 'driver' not in kwargs:
kwargs['driver'] = data.gw.driver
if 'count' not in kwargs:
kwargs['count'] = data.gw.nbands
if 'width' not in kwargs:
kwargs['width'] = data.gw.ncols
if 'height' not in kwargs:
kwargs['height'] = data.gw.nrows
if 'crs' not in kwargs:
kwargs['crs'] = data.crs
if 'transform' not in kwargs:
kwargs['transform'] = data.gw.transform
if 'num_threads' in kwargs:
if isinstance(kwargs['num_threads'], str):
kwargs['num_threads'] = 'all_cpus'
root = None
if separate and (out_block_type.lower() == 'zarr'):
d_name = pfile.parent
sub_dir = d_name.joinpath('sub_tmp_')
sub_dir.mkdir(parents=True, exist_ok=True)
zarr_file = str(sub_dir.joinpath('data.zarr'))
root = zarr.open(zarr_file, mode='w')
else:
if not separate:
if verbose > 0:
logger.info(' Creating the file ...\n')
with rio.open(filename, mode='w', **kwargs) as rio_dst:
if tags:
rio_dst.update_tags(**tags)
if verbose > 0:
logger.info(' Writing data to file ...\n')
with rio.Env(GDAL_CACHEMAX=gdal_cache):
if not use_dask_store:
windows = get_window_offsets(data.gw.nrows,
data.gw.ncols,
readysize,
readxsize,
return_as='list',
padding=padding)
n_windows = len(windows)
oleft, otop = kwargs['transform'][2], kwargs['transform'][5]
ocols, orows = kwargs['width'], kwargs['height']
# Iterate over the windows in chunks
for wchunk in range(0, n_windows, n_chunks):
window_slice = windows[wchunk:wchunk+n_chunks]
n_windows_slice = len(window_slice)
if verbose > 0:
logger.info(' Windows {:,d}--{:,d} of {:,d} ...'.format(wchunk+1,
wchunk+n_windows_slice,
n_windows))
if padding:
# Read the padded window
if len(data.shape) == 2:
data_gen = ((data[w[1].row_off:w[1].row_off + w[1].height, w[1].col_off:w[1].col_off + w[1].width],
filename, widx+wchunk, w[0], w[1], n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
elif len(data.shape) == 3:
data_gen = ((data[:, w[1].row_off:w[1].row_off + w[1].height, w[1].col_off:w[1].col_off + w[1].width],
filename, widx+wchunk, w[0], w[1], n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
else:
data_gen = ((data[:, :, w[1].row_off:w[1].row_off + w[1].height, w[1].col_off:w[1].col_off + w[1].width],
filename, widx+wchunk, w[0], w[1], n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
else:
if len(data.shape) == 2:
data_gen = ((data[w.row_off:w.row_off + w.height, w.col_off:w.col_off + w.width],
filename, widx+wchunk, w, None, n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
elif len(data.shape) == 3:
data_gen = ((data[:, w.row_off:w.row_off + w.height, w.col_off:w.col_off + w.width],
filename, widx+wchunk, w, None, n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
else:
data_gen = ((data[:, :, w.row_off:w.row_off + w.height, w.col_off:w.col_off + w.width],
filename, widx+wchunk, w, None, n_workers, n_threads, separate, chunksize, root, out_block_type, tags, oleft, otop, ocols, orows, kwargs) for widx, w in enumerate(window_slice))
if n_workers == 1:
for __ in tqdm(map(_write_xarray, data_gen), total=n_windows_slice):
pass
else:
with pool_executor(n_workers) as executor:
if scheduler == 'mpool':
for __ in tqdm(executor.imap_unordered(_write_xarray, data_gen), total=n_windows_slice):
pass
else:
for __ in tqdm(executor.map(_write_xarray, data_gen), total=n_windows_slice):
pass
# if overviews:
#
# if not isinstance(overviews, list):
# overviews = [2, 4, 8, 16]
#
# if resampling not in ['average', 'bilinear', 'cubic', 'cubic_spline',
# 'gauss', 'lanczos', 'max', 'med', 'min', 'mode', 'nearest']:
#
# logger.warning(" The resampling method is not supported by rasterio. Setting to 'nearest'")
#
# resampling = 'nearest'
#
# if verbose > 0:
# logger.info(' Building pyramid overviews ...')
#
# rio_dst.build_overviews(overviews, getattr(Resampling, resampling))
# rio_dst.update_tags(ns='overviews', resampling=resampling)
else:
with cluster_object(n_workers=n_workers,
threads_per_worker=n_threads,
scheduler_port=0,
processes=processes,
memory_limit=f'{mem_per_core}GB') as cluster:
cluster_address = address if address else cluster
with client_object(address=cluster_address) as client:
with WriteDaskArray(filename,
overwrite=overwrite,
separate=separate,
out_block_type=out_block_type,
keep_blocks=keep_blocks,
gdal_cache=gdal_cache,
**kwargs) as dst:
# Store the data and return a lazy evaluator
res = da.store(da.squeeze(data.data),
dst,
lock=False,
compute=False)
if verbose > 0:
logger.info(' Writing data to file ...')
# Send the data to file
#
# *Note that the progress bar will
# not work with a client.
if use_client:
res.compute(num_workers=n_jobs)
else:
with ProgressBar():
res.compute(num_workers=n_jobs)
if verbose > 0:
logger.info(' Finished writing data to file.')
out_block_type = dst.out_block_type
keep_blocks = dst.keep_blocks
zarr_file = dst.zarr_file
sub_dir = dst.sub_dir
if compress:
if separate:
if out_block_type.lower() == 'zarr':
group_keys = list(root.group_keys())
n_groups = len(group_keys)
if out_block_type.lower() == 'zarr':
open_file = zarr_file
kwargs['compress'] = compress_type
n_windows = len(group_keys)
# Compress into one file
with rio.open(filename, mode='w', **kwargs) as dst_:
if tags:
dst_.update_tags(**tags)
# Iterate over the windows in chunks
for wchunk in range(0, n_groups, n_chunks):
group_keys_slice = group_keys[wchunk:wchunk + n_chunks]
n_windows_slice = len(group_keys_slice)
if verbose > 0:
logger.info(' Windows {:,d}--{:,d} of {:,d} ...'.format(wchunk + 1,
wchunk + n_windows_slice,
n_windows))
################################################
data_gen = ((open_file, group, 'zarr') for group in group_keys_slice)
# for f in tqdm(executor.map(_compressor, data_gen), total=n_windows_slice):
# pass
#
# futures = [executor.submit(_compress_dummy, iter_[0], iter_[1], None) for iter_ in data_gen]
#
# for f in tqdm(concurrent.futures.as_completed(futures), total=n_windows_slice):
#
# out_window, out_block = f.result()
#
# dst_.write(np.squeeze(out_block),
# window=out_window,
# indexes=out_indexes_)
################################################
# data_gen = ((root, group, 'zarr') for group in group_keys_slice)
# for f, g, t in tqdm(data_gen, total=n_windows_slice):
#
# out_window, out_indexes, out_block = _block_read_func(f, g, t)
# executor.map(_block_write_func, data_gen)
with concurrent.futures.ProcessPoolExecutor(max_workers=n_workers) as executor:
# Submit all of the tasks as futures
futures = [executor.submit(_block_read_func, f, g, t) for f, g, t in data_gen]
for f in tqdm(concurrent.futures.as_completed(futures), total=n_windows_slice):
out_window, out_indexes, out_block = f.result()
dst_.write(out_block,
window=out_window,
indexes=out_indexes)
futures = None
if not keep_blocks:
shutil.rmtree(sub_dir)
else:
if verbose > 0:
logger.info(' Compressing output file ...')
p = Path(filename)
d_name = p.parent
f_base, f_ext = os.path.splitext(p.name)
ld = string.ascii_letters + string.digits
rstr = ''.join(random.choice(ld) for i in range(0, 9))
temp_file = d_name.joinpath('{f_base}_temp_{rstr}{f_ext}'.format(f_base=f_base,
rstr=rstr,
f_ext=f_ext))
compress_raster(filename,
str(temp_file),
n_jobs=n_jobs,
gdal_cache=gdal_cache,
compress=compress_type,
tags=tags)
temp_file.rename(filename)
if verbose > 0:
logger.info(' Finished compressing')
if verbose > 0:
logger.info('\nFinished writing the data.')
def _arg_gen(arg_, iter_):
for i_ in iter_:
yield arg_
def apply(infile,
outfile,
block_func,
args=None,
count=1,
scheduler='processes',
gdal_cache=512,
n_jobs=4,
overwrite=False,
tags=None,
**kwargs):
"""
Applies a function and writes results to file
Args:
infile (str): The input file to process.
outfile (str): The output file.
block_func (func): The user function to apply to each block. The function should accept the window,
the block data, and at least one argument, and return the window and the processed data. The block
data inside the function will be a 2d array if the input image has 1 band, otherwise a 3d array.
args (Optional[tuple]): Additional arguments to pass to ``block_func``.
count (Optional[int]): The band count for the output file.
scheduler (Optional[str]): The ``concurrent.futures`` scheduler to use. Choices are ['threads', 'processes'].
gdal_cache (Optional[int]): The ``GDAL`` cache size (in MB).
n_jobs (Optional[int]): The number of blocks to process in parallel.
overwrite (Optional[bool]): Whether to overwrite an existing output file.
tags (Optional[dict]): Image tags to write to file.
kwargs (Optional[dict]): Additional keyword arguments to pass to ``rasterio.open``.
Returns:
None
Examples:
>>> import geowombat as gw
>>>
>>> # Here is a function with no arguments
>>> def my_func0(w, block, arg):
>>> return w, block
>>>
>>> gw.apply('input.tif',
>>> 'output.tif',
>>> my_func0,
>>> n_jobs=8)
>>>
>>> # Here is a function with 1 argument
>>> def my_func1(w, block, arg):
>>> return w, block * arg
>>>
>>> gw.apply('input.tif',
>>> 'output.tif',
>>> my_func1,
>>> args=(10.0,),
>>> n_jobs=8)
"""
if not args:
args = (None,)
if overwrite:
if os.path.isfile(outfile):
os.remove(outfile)
kwargs['sharing'] = False
kwargs['tiled'] = True
io_mode = 'r+' if os.path.isfile(outfile) else 'w'
out_indexes = 1 if count == 1 else list(range(1, count+1))
futures_executor = concurrent.futures.ThreadPoolExecutor if scheduler == 'threads' else concurrent.futures.ProcessPoolExecutor
with rio.Env(GDAL_CACHEMAX=gdal_cache):
with rio.open(infile) as src:
profile = src.profile.copy()
if 'dtype' not in kwargs:
kwargs['dtype'] = profile['dtype']
if 'nodata' not in kwargs:
kwargs['nodata'] = profile['nodata']
if 'blockxsize' not in kwargs:
kwargs['blockxsize'] = profile['blockxsize']
if 'blockysize' not in kwargs:
kwargs['blockysize'] = profile['blockysize']
# Create a destination dataset based on source params. The
# destination will be tiled, and we'll process the tiles
# concurrently.
profile.update(count=count,
**kwargs)
with rio.open(outfile, io_mode, **profile) as dst:
if tags:
dst.update_tags(**tags)
# Materialize a list of destination block windows
# that we will use in several statements below.
# windows = get_window_offsets(src.height,
# src.width,
# blockysize,
# blockxsize, return_as='list')
# This generator comprehension gives us raster data
# arrays for each window. Later we will zip a mapping
# of it with the windows list to get (window, result)
# pairs.
# if nbands == 1:
data_gen = (src.read(window=w, out_dtype=profile['dtype']) for ij, w in src.block_windows(1))
# else:
#
# data_gen = (np.array([rio.open(fn).read(window=w,
# out_dtype=dtype) for fn in infile], dtype=dtype)
# for ij, w in src.block_windows(1))
if args:
args = [_arg_gen(arg, src.block_windows(1)) for arg in args]
with futures_executor(max_workers=n_jobs) as executor:
# Submit all of the tasks as futures
futures = [executor.submit(block_func,
iter_[0][1], # window object
*iter_[1:]) # other arguments
for iter_ in zip(list(src.block_windows(1)), data_gen, *args)]
for f in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
out_window, out_block = f.result()
dst.write(np.squeeze(out_block),
window=out_window,
indexes=out_indexes)
# We map the block_func() function over the raster
# data generator, zip the resulting iterator with
# the windows list, and as pairs come back we
# write data to the destination dataset.
# for window_tuple, result in tqdm(zip(list(src.block_windows(1)),
# executor.map(block_func,
# data_gen,
# *args)),
# total=n_windows):
#
# dst.write(result,
# window=window_tuple[1],
# indexes=out_indexes)
def _compress_dummy(w, block, dummy):
"""
Dummy function to pass to concurrent writing
"""
return w, block
def compress_raster(infile, outfile, n_jobs=1, gdal_cache=512, compress='lzw', tags=None):
"""
Compresses a raster file
Args:
infile (str): The file to compress.
outfile (str): The output file.
n_jobs (Optional[int]): The number of concurrent blocks to write.
gdal_cache (Optional[int]): The ``GDAL`` cache size (in MB).
compress (Optional[str]): The compression method.
tags (Optional[dict]): Image tags to write to file.
Returns:
None
"""
with rio.open(infile) as src:
profile = src.profile.copy()
profile.update(compress=compress)
apply(infile,
outfile,
_compress_dummy,
scheduler='processes',
args=(None,),
gdal_cache=gdal_cache,
n_jobs=n_jobs,
tags=tags,
count=src.count,
dtype=src.profile['dtype'],
nodata=src.profile['nodata'],
tiled=src.profile['tiled'],
blockxsize=src.profile['blockxsize'],
blockysize=src.profile['blockysize'],
compress=compress)
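# Usage sketch (added for illustration; the file names below are placeholders and are not
# part of the original module):
#
# compress_raster('image.tif',
#                 'image_lzw.tif',
#                 n_jobs=4,
#                 gdal_cache=512,
#                 compress='lzw')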
| 37.048989 | 217 | 0.530129 |
ee01a7e4fc91e3027e3504f16ddf27372a05f9c9 | 8,164 | py | Python | sitepackages/django/db/backends/mysql/operations.py | bitcpf/djangoage | f116860cbfa799eb6c47306a72d742b63c970dce | ["Apache-2.0"] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | sitepackages/django/db/backends/mysql/operations.py | bitcpf/djangoage | f116860cbfa799eb6c47306a72d742b63c970dce | ["Apache-2.0"] | 4 | 2020-02-12T02:53:08.000Z | 2021-06-10T21:37:06.000Z | sitepackages/django/db/backends/mysql/operations.py | bitcpf/djangoage | f116860cbfa799eb6c47306a72d742b63c970dce | ["Apache-2.0"] | 2 | 2021-03-24T12:11:48.000Z | 2021-06-10T19:56:03.000Z |
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
PositiveSmallIntegerField=(0, 65535),
PositiveIntegerField=(0, 4294967295),
)
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
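# Added note (hypothetical column name): date_trunc_sql('month', 'created') returns
# "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)"; the doubled percent
# signs collapse to single ones when the query is later interpolated with its parameters.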
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
sql = "DAYOFWEEK(%s)" % field_name
else:
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql, params
def date_interval_sql(self, timedelta):
return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
timedelta.days, timedelta.seconds, timedelta.microseconds), []
def format_for_duration_arithmetic(self, sql):
if self.connection.features.supports_microsecond_precision:
return 'INTERVAL %s MICROSECOND' % sql
else:
return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
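# Added note: the compiler renders this return value as a literal "ORDER BY NULL", so a
# grouped query is emitted as, e.g., "SELECT ... GROUP BY col ORDER BY NULL".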
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return six.text_type(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def combine_expression(self, connector, sub_expressions):
"""
MySQL requires special cases for ^ operators in query expressions
"""
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
if internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
return converters
def convert_booleanfield_value(self, value, expression, connection, context):
if value in (0, 1):
value = bool(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def convert_textfield_value(self, value, expression, connection, context):
if value is not None:
value = force_text(value)
return value
| 39.439614 | 113 | 0.607178 |
604f7e300947d8a668b1a5a12ff4d02f5e3e5b2c | 2,447 | py | Python | twitoff/app.py | cicbeast/TwitOff | 1b040036a4f0698f32ee559ea5f172e5e65893fc | ["MIT"] | null | null | null | twitoff/app.py | cicbeast/TwitOff | 1b040036a4f0698f32ee559ea5f172e5e65893fc | ["MIT"] | 2 | 2021-06-02T00:59:42.000Z | 2021-06-08T20:34:25.000Z | twitoff/app.py | cicbeast/TwitOff | 1b040036a4f0698f32ee559ea5f172e5e65893fc | ["MIT"] | null | null | null |
from decouple import config
from dotenv import load_dotenv
from flask import Flask, render_template, request
from .model import DB, User
from .predict import predict_user
from .twitter import add_or_update_user, update_all_users, add_default_users
load_dotenv()
def create_app():
'''Create and configure an instance of the Flask application.'''
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/')
def root():
return render_template('base.html', title='TwitOff', users=User.query.all())
@app.route('/user', methods=['POST'])
@app.route('/user/<name>', methods=['GET'])
def user(name=None, message=''):
name = name or request.values['user_name']
try:
if request.method == 'POST':
add_or_update_user(name)
message = "User {} successfully added!".format(name)
tweets = User.query.filter(User.username == name).one().tweets
except Exception as e:
message = 'Error adding {}: {}'.format(name, e)
tweets = []
return render_template('user.html', title=name, tweets=tweets, message=message)
@app.route('/compare', methods=['POST'])
def compare(message=''):
user1 = request.values['user1']
user2 = request.values['user2']
tweet_text = request.values['tweet_text']
if user1 == user2:
message = 'Cannot compare a user to themselves!'
else:
prediction = predict_user(user1, user2, tweet_text)
message = "'{}' is more likely to be said by {} than {}".format(
request.values['tweet_text'], user1 if prediction else user2,
user2 if prediction else user1)
return render_template('prediction.html', title='Prediction', message=message)
@app.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='Reset database!')
@app.route('/update')
def update():
update_all_users()
return render_template('base.html', users=User.query.all(), title='All Tweets updated!')
@app.route('/add_default')
def add_default():
add_default_users()
return render_template('base.html', users=User.query.all(), title='Default users added!')
return app
| 35.985294 | 96 | 0.630568 |
8b426cdd92be0771821dc9b14d2e9b557cbd9e13 | 286 | py | Python | examples/example_pydantic.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | ["MIT"] | 2 | 2022-03-26T15:27:31.000Z | 2022-03-28T22:00:32.000Z | examples/example_pydantic.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | ["MIT"] | null | null | null | examples/example_pydantic.py | katunilya/mona | 8f44a9e06910466afbc9b2bcfb42144dcd25ed5a | ["MIT"] | null | null | null |
from pydantic import BaseModel
from moona import asgi, http
class User(BaseModel):
username: str
name: str
email: str
age: int
user = User(username="jdoe", name="John Doe", email="john_doe@example.org", age=33)
app = asgi.create(http_handler=http.negotiate(user))
| 17.875 | 83 | 0.702797 |
816087262936cfca997be614b4f7179d14ec71ec | 7,733 | py | Python | tools/symsrc/source_index.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | 1 | 2015-10-12T09:14:22.000Z | 2015-10-12T09:14:22.000Z | tools/symsrc/source_index.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | null | null | null | tools/symsrc/source_index.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z |
#!/usr/bin/env python
# Copyright (c) 2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: <win-path-to-pdb.pdb>
This tool will take a PDB on the command line, extract the source files that
were used in building the PDB, query SVN for which repository and revision
these files are at, and then finally write this information back into the PDB
in a format that the debugging tools understand. This allows for automatic
source debugging, as all of the information is contained in the PDB, and the
debugger can go out and fetch the source files via SVN.
You most likely want to run these immediately after a build, since the source
input files need to match the generated PDB, and we want the correct SVN
revision information for the exact files that were used for the build.
The following files from a windbg + source server installation are expected
to reside in the same directory as this python script:
dbghelp.dll
pdbstr.exe
srctool.exe
NOTE: Expected to run under a native win32 python, NOT cygwin. All paths are
dealt with as win32 paths, since we have to interact with the Microsoft tools.
"""
import sys
import os
import time
import subprocess
import tempfile
# This serves two purposes. First, it acts as a whitelist, and only files
# from repositories listed here will be source indexed. Second, it allows us
# to map from one SVN URL to another, so we can map to external SVN servers.
REPO_MAP = {
"svn://chrome-svn/chrome": "http://src.chromium.org/svn",
"svn://chrome-svn.corp.google.com/chrome": "http://src.chromium.org/svn",
"http://v8.googlecode.com/svn": None,
"http://google-breakpad.googlecode.com/svn": None,
"http://googletest.googlecode.com/svn": None,
"http://open-vcdiff.googlecode.com/svn": None,
"http://google-url.googlecode.com/svn": None,
}
def FindFile(filename):
"""Return the full windows path to a file in the same dir as this code."""
thisdir = os.path.dirname(os.path.join(os.path.curdir, __file__))
return os.path.abspath(os.path.join(thisdir, filename))
def ExtractSourceFiles(pdb_filename):
"""Extract a list of local paths of the source files from a PDB."""
srctool = subprocess.Popen([FindFile('srctool.exe'), '-r', pdb_filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
filelist = srctool.stdout.read()
res = srctool.wait()
if res != 0 or filelist.startswith("srctool: "):
raise "srctool failed: " + filelist
return [x for x in filelist.split('\r\n') if len(x) != 0]
def ReadSourceStream(pdb_filename):
"""Read the contents of the source information stream from a PDB."""
srctool = subprocess.Popen([FindFile('pdbstr.exe'),
'-r', '-s:srcsrv',
'-p:%s' % pdb_filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
data = srctool.stdout.read()
res = srctool.wait()
if (res != 0 and res != -1) or data.startswith("pdbstr: "):
raise "pdbstr failed: " + data
return data
def WriteSourceStream(pdb_filename, data):
"""Write the contents of the source information stream to a PDB."""
# Write out the data to a temporary filename that we can pass to pdbstr.
(f, fname) = tempfile.mkstemp()
f = os.fdopen(f, "wb")
f.write(data)
f.close()
srctool = subprocess.Popen([FindFile('pdbstr.exe'),
'-w', '-s:srcsrv',
'-i:%s' % fname,
'-p:%s' % pdb_filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
data = srctool.stdout.read()
res = srctool.wait()
if (res != 0 and res != -1) or data.startswith("pdbstr: "):
raise "pdbstr failed: " + data
os.unlink(fname)
# TODO for performance, we should probably work in directories instead of
# files. I'm scared of DEPS and generated files, so for now we query each
# individual file, and don't make assumptions that all files in the same
# directory are part of the same repository or at the same revision number.
def ExtractSvnInfo(local_filename):
"""Calls svn info to extract the repository, path, and revision."""
# We call svn.bat to make sure and get the depot tools SVN and not cygwin.
srctool = subprocess.Popen(['svn.bat', 'info', local_filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
info = srctool.stdout.read()
res = srctool.wait()
if res != 0:
return None
# Hack up into a dictionary of the fields printed by svn info.
vals = dict((y.split(': ', 2) for y in info.split('\r\n') if y))
root = vals['Repository Root']
if not vals['URL'].startswith(root):
raise "URL is not inside of the repository root?!?"
path = vals['URL'][len(root):]
rev = int(vals['Revision'])
return [root, path, rev]
def UpdatePDB(pdb_filename, verbose=False):
"""Update a pdb file with source information."""
dir_blacklist = { }
# TODO(deanm) look into "compressing" our output, by making use of vars
# and other things, so we don't need to duplicate the repo path and revs.
lines = [
'SRCSRV: ini ------------------------------------------------',
'VERSION=1',
'INDEXVERSION=2',
'VERCTRL=Subversion',
'DATETIME=%s' % time.asctime(),
'SRCSRV: variables ------------------------------------------',
'SVN_EXTRACT_TARGET=%targ%\%fnbksl%(%var3%)\%var4%\%fnfile%(%var1%)',
'SVN_EXTRACT_CMD=cmd /c svn cat "%var2%%var3%@%var4%" --non-interactive > "%svn_extract_target%"',
'SRCSRVTRG=%SVN_extract_target%',
'SRCSRVCMD=%SVN_extract_cmd%',
'SRCSRV: source files ---------------------------------------',
]
if ReadSourceStream(pdb_filename):
raise "PDB already has source indexing information!"
filelist = ExtractSourceFiles(pdb_filename)
for filename in filelist:
filedir = os.path.dirname(filename)
if verbose:
print "Processing: %s" % filename
# This directory is blacklisted, either because it's not part of the SVN
# repository, or from one we're not interested in indexing.
if dir_blacklist.get(filedir, False):
if verbose:
print " skipping, directory is blacklisted."
continue
info = ExtractSvnInfo(filename)
# Skip the file if it's not under an svn repository. To avoid constantly
# querying SVN for files outside of SVN control (for example, the CRT
# sources), check if the directory is outside of SVN and blacklist it.
if not info:
if not ExtractSvnInfo(filedir):
dir_blacklist[filedir] = True
if verbose:
print " skipping, file is not in an SVN repository"
continue
root = info[0]
path = info[1]
rev = info[2]
# Check if file was from a svn repository we don't know about, or don't
# want to index. Blacklist the entire directory.
if not REPO_MAP.has_key(info[0]):
if verbose:
print " skipping, file is from an unknown SVN repository %s" % root
dir_blacklist[filedir] = True
continue
# We might want to map an internal repository URL to an external repository.
if REPO_MAP[root]:
root = REPO_MAP[root]
lines.append('%s*%s*%s*%s' % (filename, root, path, rev))
if verbose:
print " indexed file."
lines.append('SRCSRV: end ------------------------------------------------')
WriteSourceStream(pdb_filename, '\r\n'.join(lines))
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print "usage: file.pdb [-v]"
sys.exit(1)
verbose = False
if len(sys.argv) == 3:
verbose = (sys.argv[2] == '-v')
UpdatePDB(sys.argv[1], verbose=verbose)
| 38.093596 | 102 | 0.654856 |
8b5f92e2831788e4e5d354bd9414187479487f39 | 215 | py | Python | src/clock.py | ohjelmistotekniikka-hy/pygame-sokoban | 4b9937166bbcd3adc7dc6522fcf898b90400926a | ["MIT"] | null | null | null | src/clock.py | ohjelmistotekniikka-hy/pygame-sokoban | 4b9937166bbcd3adc7dc6522fcf898b90400926a | ["MIT"] | null | null | null | src/clock.py | ohjelmistotekniikka-hy/pygame-sokoban | 4b9937166bbcd3adc7dc6522fcf898b90400926a | ["MIT"] | 2 | 2021-05-16T20:32:04.000Z | 2021-12-07T17:26:30.000Z |
import pygame
class Clock:
def __init__(self):
self._clock = pygame.time.Clock()
def tick(self, fps):
self._clock.tick(fps)
def get_ticks(self):
return pygame.time.get_ticks()
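# Usage sketch (added; 'running' is a hypothetical loop flag, not part of this module):
#
# clock = Clock()
# while running:
#     clock.tick(60)                   # cap the loop at 60 frames per second
#     elapsed_ms = clock.get_ticks()   # milliseconds since pygame.init()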
| 17.916667 | 41 | 0.623256 |
de58366f4a46d853cc93958128aded4de3b4ee8e | 803 | py | Python | OCRTTS.py | dys0763/ocr-tts | 0dc8642ab0d2f030823210b5d829e0ace44ded09 | ["MIT"] | null | null | null | OCRTTS.py | dys0763/ocr-tts | 0dc8642ab0d2f030823210b5d829e0ace44ded09 | ["MIT"] | null | null | null | OCRTTS.py | dys0763/ocr-tts | 0dc8642ab0d2f030823210b5d829e0ace44ded09 | ["MIT"] | null | null | null |
import pyttsx3
from pytesseract import *
def ocr(img, lang='kor'):
# recognizes the text in a given image
# returns the recognized text
print('Letter Recognizing Started.')
text = image_to_string(img, lang=lang)
return text
def tts(text, lang='kor+kor100', ADD_VOLUME=0, RATE=150):
#transform a given text into speech
#outputs the voice signal through a connected audio device
print('Speech Synthesis Started.')
engine = pyttsx3.init()
volume = engine.getProperty('volume')
engine.setProperty('volume', volume + ADD_VOLUME) #set volume
engine.setProperty('rate', RATE) #set voice rate(speed)
if lang == 'eng':
engine.setProperty('voice', 'f1')
else:
engine.setProperty('voice', 'fk')
engine.say(text)
engine.runAndWait()
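# Usage sketch (added; assumes Pillow is installed and that 'sample_page.png' is a local
# image containing Korean text; both are assumptions, not part of this module):
#
# from PIL import Image
#
# page = Image.open('sample_page.png')
# recognized = ocr(page, lang='kor')
# tts(recognized)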
| 27.689655 | 65 | 0.678705 |
ef01b69296a50a6896325d3e990b868de2adf91a | 3,977 | py | Python | examples/adwords/v201509/basic_operations/add_campaigns.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | ["Apache-2.0"] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/adwords/v201509/basic_operations/add_campaigns.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | ["Apache-2.0"] | null | null | null | examples/adwords/v201509/basic_operations/add_campaigns.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | ["Apache-2.0"] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds campaigns.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
import uuid
from googleads import adwords
def main(client):
# Initialize appropriate services.
campaign_service = client.GetService('CampaignService', version='v201509')
budget_service = client.GetService('BudgetService', version='v201509')
# Create a budget, which can be shared by multiple campaigns.
budget = {
'name': 'Interplanetary budget #%s' % uuid.uuid4(),
'amount': {
'microAmount': '50000000'
},
'deliveryMethod': 'STANDARD',
'period': 'DAILY'
}
budget_operations = [{
'operator': 'ADD',
'operand': budget
}]
# Add the budget.
budget_id = budget_service.mutate(budget_operations)['value'][0][
'budgetId']
# Construct operations and add campaigns.
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Interplanetary Cruise #%s' % uuid.uuid4(),
'status': 'PAUSED',
'advertisingChannelType': 'SEARCH',
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC',
},
'endDate': (datetime.datetime.now() +
datetime.timedelta(365)).strftime('%Y%m%d'),
# Note that only the budgetId is required
'budget': {
'budgetId': budget_id
},
'networkSetting': {
'targetGoogleSearch': 'true',
'targetSearchNetwork': 'true',
'targetContentNetwork': 'false',
'targetPartnerSearchNetwork': 'false'
},
# Optional fields
'startDate': (datetime.datetime.now() +
datetime.timedelta(1)).strftime('%Y%m%d'),
'adServingOptimizationStatus': 'ROTATE',
'frequencyCap': {
'impressions': '5',
'timeUnit': 'DAY',
'level': 'ADGROUP'
},
'settings': [
{
'xsi_type': 'GeoTargetTypeSetting',
'positiveGeoTargetType': 'DONT_CARE',
'negativeGeoTargetType': 'DONT_CARE'
}
]
}
}, {
'operator': 'ADD',
'operand': {
'name': 'Interplanetary Cruise banner #%s' % uuid.uuid4(),
'status': 'PAUSED',
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC'
},
'endDate': (datetime.datetime.now() +
datetime.timedelta(365)).strftime('%Y%m%d'),
# Note that only the budgetId is required
'budget': {
'budgetId': budget_id
},
'advertisingChannelType': 'DISPLAY'
}
}]
campaigns = campaign_service.mutate(operations)
# Display results.
for campaign in campaigns['value']:
print ('Campaign with name \'%s\' and id \'%s\' was added.'
% (campaign['name'], campaign['id']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| 31.314961 | 77 | 0.597184 |
f47eb63e8b30f8fa46c943d06a4f70c203c1ad5c | 646 | py | Python | CLV-dataset-weekly-training-and-prediction/config.py | mbrummerstedt/CLV_Production_Google_Cloud | 7e34567503b070b273a1baebae1c0892bd16d5af | ["Apache-2.0"] | 1 | 2021-09-07T18:45:48.000Z | 2021-09-07T18:45:48.000Z | CLV-dataset-weekly-training-and-prediction/config.py | mbrummerstedt/CLV_Production_Google_Cloud | 7e34567503b070b273a1baebae1c0892bd16d5af | ["Apache-2.0"] | null | null | null | CLV-dataset-weekly-training-and-prediction/config.py | mbrummerstedt/CLV_Production_Google_Cloud | 7e34567503b070b273a1baebae1c0892bd16d5af | ["Apache-2.0"] | 1 | 2021-02-25T03:30:25.000Z | 2021-02-25T03:30:25.000Z |
config_vars = {
# Set Variables that will be used in script
'PENALIZER_COEF': 0.03,
'DISCOUNT_RATE': 0.01,
'PREDICTION_LENGTH_IN_MONTHS': 6,
'MODEL_TYPE':'BGNBD',
'FREQUENZY':'M',
'GCS_BUCKET_MODELS': 'your_company_trained_ml_models_production',
'GCS_BUCKET_PREDICTIONS': 'your_company_ml_models_predictions',
'LOCAL_STORAGE_FOLDER': '/tmp/',
'TRAINING_DATA_QUERY': 'CLV-dataset-weekly-training-and-prediction.sql',
'ACTUAL_CUSTOMER_VALUE_QUERY': 'CLV-dataset-weekly-training-and-prediction-customer-summary.sql',
'UPDATE_BIGQUERY_RESULT_TABLE': 'CLV-weekly-update-result-bigquery-table.sql'
}
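# Usage sketch (added; the importing module name 'config' is inferred from this file name
# and the consuming code below is illustrative only):
#
# from config import config_vars
#
# penalizer_coef = config_vars['PENALIZER_COEF']
# models_bucket = config_vars['GCS_BUCKET_MODELS']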
| 43.066667 | 101 | 0.733746 |
899df1e1496ff34b2e49013400691a267f60a3e2 | 3,675 | py | Python | batch-norm/bn/bn/train_vgg.py | vbursztyn/deep-learning-for-practitioners | 7b88473f32e6114053068ff90de00398eaefa128 | ["MIT"] | 6 | 2020-04-12T20:05:36.000Z | 2022-02-23T03:03:20.000Z | batch-norm/bn/bn/train_vgg.py | vbursztyn/deep-learning-for-practitioners | 7b88473f32e6114053068ff90de00398eaefa128 | ["MIT"] | 2 | 2020-05-03T23:59:22.000Z | 2020-05-08T18:05:24.000Z | batch-norm/bn/bn/train_vgg.py | vbursztyn/deep-learning-for-practitioners | 7b88473f32e6114053068ff90de00398eaefa128 | ["MIT"] | 20 | 2020-04-20T21:05:50.000Z | 2021-03-31T09:53:47.000Z |
from enum import Enum
from datetime import datetime
import importlib
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from bn import VGGToy, VGG11
class ModelType(str, Enum):
VGG11 = 'VGG11'
VGG_Toy = 'VGGToy'
def test_loop(model, testloader, writer, device, loss_fn, n_iter):
model.eval()
correct = 0
total = 0
running_loss = 0
with torch.no_grad():
for data, labels in testloader:
data = data.to(device)
labels = labels.to(device)
outputs = model(data)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
running_loss += loss_fn(outputs, labels)
accuracy = (correct / total) * 100.
loss = (running_loss / total)
writer.add_scalar('testing/loss', loss, n_iter)
writer.add_scalar('testing/accuracy', accuracy, n_iter)
print('Accuracy: %f' % accuracy)
def train_vgg(model_type=ModelType.VGG_Toy, batch_size=4, batch_norm=False, noise=False, learning_rate=0.01, num_epochs=2):
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print('training on %s' % device)
module = importlib.import_module("bn")
class_ = getattr(module, model_type.value)
model = class_(device=device, num_classes=10, init_weights=True, batch_norm=batch_norm, noise_injection=noise)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
model.to(device)
model_id = ''
if batch_norm:
model_id += 'batch_norm'
if noise:
model_id += 'noise'
writer = SummaryWriter(log_dir='runs/%s_%s_%s' % (model_type.value, model_id, datetime.now().strftime("%H:%M:%S")))
for epoch in range(num_epochs):
model.train()
running_loss = 0.0
print(trainloader)
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
n_iter = (epoch * len(trainloader)) + i
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
writer.add_scalar('training/loss', loss.item(), n_iter)
for inps_idx, inps in enumerate(model.layer_inputs):
inps = inps.cpu().numpy()
writer.add_scalar('inputs/layer%i/mean' % (inps_idx + 1), inps.mean(), n_iter)
writer.add_histogram('inputs/layer%i/dist' % (inps_idx + 1), inps, n_iter)
running_loss += loss.item()
if i % 2000 == 1999:
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
test_loop(model, testloader, writer, device, criterion, n_iter)
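# Minimal invocation sketch. The module defines no entry point, so this guard and the
# hyperparameter values below are additions for illustration only.
if __name__ == '__main__':
    train_vgg(model_type=ModelType.VGG11,
              batch_size=64,
              batch_norm=True,
              noise=False,
              learning_rate=0.01,
              num_epochs=2)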
| 33.108108 | 123 | 0.623129 |
f2507060f24225590a88d3c1930b94c043e18d72 | 1,121 | py | Python | reactdjango/users/tests/test_forms.py | nimacgit/reactdjango | b76f1d4ed5f504ee9f27d9cfae11942160a65140 | ["Apache-2.0"] | null | null | null | reactdjango/users/tests/test_forms.py | nimacgit/reactdjango | b76f1d4ed5f504ee9f27d9cfae11942160a65140 | ["Apache-2.0"] | null | null | null | reactdjango/users/tests/test_forms.py | nimacgit/reactdjango | b76f1d4ed5f504ee9f27d9cfae11942160a65140 | ["Apache-2.0"] | null | null | null |
import pytest
from reactdjango.users.forms import UserCreationForm
from reactdjango.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| 27.341463 | 59 | 0.595897 |
0b285e4b2792fbfaaaf0b342b457f06d1ced78fa | 6,182 | py | Python | App/src/smtp_engine.py | JiJiU33C43I/UCI-Schedule-Assistant | ed7d8cd92da816a3909f34d0eb5e2ef3ee507a7d | ["MIT"] | null | null | null | App/src/smtp_engine.py | JiJiU33C43I/UCI-Schedule-Assistant | ed7d8cd92da816a3909f34d0eb5e2ef3ee507a7d | ["MIT"] | 13 | 2018-12-15T11:43:54.000Z | 2022-03-11T23:42:57.000Z | App/src/smtp_engine.py | JiJiU33C43I/UCI-Schedule-Assistant | ed7d8cd92da816a3909f34d0eb5e2ef3ee507a7d | ["MIT"] | null | null | null |
'''
MIT License
Copyright (c) 2019 JiJiU33C43I Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
##### smtp_engine.py #####
# This is a python module that is able to use the SMTP protocol to send message to a specified email account
#=======================================
#== IMPORTS MODULE ==
#=======================================
import smtplib, ssl;
import getpass;
#=======================================
#== GLOBAL CONSTANTS ==
#=======================================
DEFAULT_SERVER_DOMAIN = "smtp.gmail.com";
SSL_PORT = 465;
TLS_PORT = 587;
#=======================================
#== Source Code ==
#=======================================
class SETUP_SMTP_CONNECTION_ERROR(Exception):
pass;
class FAILED_TO_SENDMAIL(Exception):
pass;
class LOGIN_FAILURE(Exception):
pass;
class Email_Engine:
default_server_domain = DEFAULT_SERVER_DOMAIN;
ssl_port = SSL_PORT;
tls_port = TLS_PORT;
def __init__(self):
self.ssl_context = ssl.create_default_context(); # This may not be needed!!!
self.connection = None;
def __del__(self):
try:
if self.connection != None:
self.connection.quit();
self.connection = None;
except Exception:
print('WARNINGS: Failure Detected during SMTP connection Closure!!');
print('SMTP Connection Object is DELETED!!');
print('Please RE-SETUP your SMTP Connection!!');
@staticmethod
def console_login():
return (input("Please Enter Your Email Accout: "),input("Please Enter Your Password: "));
def login(self, account_info: tuple):
if self.connection != None:
try:
self.connection.login(account_info[0], account_info[1]);
except:
print()
print("-------------------------- IMPORTANT!!!! --------------------------");
print("If you are atttempting to send mails to a gmail-likewise account, ")
print(" YOU MUST TURN ON THE FOLLOWING PERMISSION INSIDE YOUR EMAIL ACCOUNT:")
print(" <Let less secure apps access your account> ")
print("-------------------------------------------------------------------")
print("------------------------ FAILED TO LOGIN --------------------------")
print(" !!!! PLEASE RECHECK YOUR ACCOUNT NAME AND PASSWORD !!!! ")
print("-------------------------------------------------------------------")
self.__del__();
raise LOGIN_FAILURE();
else:
raise LOGIN_FAILURE("Connection is not established");
def sendmail(self, sender_email:str, recv_email:str, msg:str):
if self.connection != None:
try:
self.connection.sendmail(sender_email, recv_email, msg);
except:
print()
print("-------------------------- IMPORTANT!!!! --------------------------");
print("If you are atttempting to send mails to a gmail-likewise account, ")
print(" YOU MUST TURN ON THE FOLLOWING PERMISSION INSIDE YOUR EMAIL ACCOUNT:")
print(" <Let less secure apps access your account> ")
print("-------------------------------------------------------------------")
print()
self.__del__();
raise FAILED_TO_SENDMAIL();
else:
raise FAILED_TO_SENDMAIL("Connection is not established");
def setup_tls_connection(self, server_domain = default_server_domain, port = tls_port):
if self.connection == None:
try:
self.connection = smtplib.SMTP(server_domain, port);
self.connection.ehlo();
self.connection.starttls();
except:
raise SETUP_SMTP_CONNECTION_ERROR("Failed to establish TLS connection: Connection to server failed")
else:
raise SETUP_SMTP_CONNECTION_ERROR("Failed to establish TLS connection: Detected already established connection");
def setup_ssl_connection(self, server_domain = default_server_domain, port = ssl_port):
if self.connection == None:
try:
self.connection = smtplib.SMTP_SSL(server_domain, port, context= self.ssl_context);
except:
raise SETUP_SMTP_CONNECTION_ERROR("Failed to establish TLS connection: Connection to server failed")
else:
raise SETUP_SMTP_CONNECTION_ERROR("Failed to establish TLS connection: Detected already established connection ");
#=======================================
#== DEBUGGING AND TESTING ==
#=======================================
if __name__ == "__main__":
EE = Email_Engine();
EE.setup_tls_connection();
EE.login(Email_Engine.console_login());
EE.sendmail("tezktenr@gmail.com", "tezktenr@gmail.com", "Hi, This is an Educational Practice to use SMTP protocol to send emails");
EE.__del__();
| 40.671053
| 135
| 0.560984
|
bc8c4d7974cb7c9a4c3197ca6701e37add8e7e12
| 1,642
|
py
|
Python
|
datalad/tests/test__main__.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | 1
|
2021-06-11T19:54:19.000Z
|
2021-06-11T19:54:19.000Z
|
datalad/tests/test__main__.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | 21
|
2015-03-12T11:48:18.000Z
|
2020-08-31T09:28:05.000Z
|
datalad/tests/test__main__.py
|
christinerogers/datalad
|
8b91f3767b45371e213aa7ade146a290a13c00f2
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import sys
from unittest.mock import patch
from io import StringIO
from tempfile import NamedTemporaryFile
from datalad.tests.utils import (
assert_equal,
assert_raises,
)
from .. import __main__, __version__
from ..auto import AutomagicIO
from datalad.tests.utils import known_failure_githubci_win
@patch('sys.stdout', new_callable=StringIO)
def test_main_help(stdout):
assert_raises(SystemExit, __main__.main, ['__main__.py', '--help'])
assert(
stdout.getvalue().startswith(
"Usage: %s -m datalad [OPTIONS] <file> [ARGS]\n" % sys.executable
))
@patch('sys.stdout', new_callable=StringIO)
def test_main_version(stdout):
assert_raises(SystemExit, __main__.main, ['__main__.py', '--version'])
assert_equal(stdout.getvalue().rstrip(), "datalad %s" % __version__)
@known_failure_githubci_win
@patch.object(AutomagicIO, 'activate')
@patch('sys.stdout', new_callable=StringIO)
def test_main_run_a_script(stdout, mock_activate):
f = NamedTemporaryFile()
f.write('print("Running the script")\n'.encode()); f.flush()
__main__.main(['__main__.py', f.name])
assert_equal(stdout.getvalue().rstrip(), "Running the script")
# And we have "activated"
mock_activate.assert_called_once_with()
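# A minimal illustration (not part of the original test module) of the stdout-capture
# pattern the tests above rely on: patching sys.stdout with a StringIO lets a test
# read whatever the code under test printed.
@patch('sys.stdout', new_callable=StringIO)
def test_stdout_capture_pattern(stdout):
    print("captured")
    assert_equal(stdout.getvalue().rstrip(), "captured")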
| 33.510204
| 87
| 0.652253
|
437b6cc7af0503ba8d9c2e7c4d616ce2bed6ca91
| 9,478
|
py
|
Python
|
dopamine/continuous_domains/run_experiment.py
|
crawlingcub/dopamine
|
0d155c12f96606188a97a001e02189bdd3723d4d
|
[
"Apache-2.0"
] | null | null | null |
dopamine/continuous_domains/run_experiment.py
|
crawlingcub/dopamine
|
0d155c12f96606188a97a001e02189bdd3723d4d
|
[
"Apache-2.0"
] | null | null | null |
dopamine/continuous_domains/run_experiment.py
|
crawlingcub/dopamine
|
0d155c12f96606188a97a001e02189bdd3723d4d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining classes and helper methods for general agents."""
from typing import Optional
from absl import logging
from dopamine.discrete_domains import gym_lib
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import run_experiment as base_run_experiment
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.jax.agents.sac import sac_agent
from flax.metrics import tensorboard
import gin
from gym import spaces
load_gin_configs = base_run_experiment.load_gin_configs
@gin.configurable
def create_continuous_agent(
environment: gym_lib.GymPreprocessing,
agent_name: str,
summary_writer: Optional[tensorboard.SummaryWriter] = None
) -> dqn_agent.JaxDQNAgent:
"""Creates an agent.
Args:
environment: A gym environment.
agent_name: str, name of the agent to create.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
Returns:
An RL agent.
Raises:
ValueError: If `agent_name` is not in supported list.
"""
assert agent_name is not None
if agent_name == 'sac':
assert isinstance(environment.action_space, spaces.Box)
assert isinstance(environment.observation_space, spaces.Box)
return sac_agent.SACAgent(
action_shape=environment.action_space.shape,
action_limits=(environment.action_space.low,
environment.action_space.high),
observation_shape=environment.observation_space.shape,
action_dtype=environment.action_space.dtype,
observation_dtype=environment.observation_space.dtype,
summary_writer=summary_writer)
else:
raise ValueError(f'Unknown agent: {agent_name}')
@gin.configurable
def create_continuous_runner(base_dir, schedule='continuous_train_and_eval'):
"""Creates an experiment Runner.
Args:
base_dir: str, base directory for hosting all subdirectories.
schedule: string, which type of Runner to use.
Returns:
runner: A `Runner` like object.
Raises:
ValueError: When an unknown schedule is encountered.
"""
assert base_dir is not None
# Continuously runs training and evaluation until max num_iterations is hit.
if schedule == 'continuous_train_and_eval':
return ContinuousRunner(base_dir, create_continuous_agent)
# Continuously runs training until max num_iterations is hit.
elif schedule == 'continuous_train':
return ContinuousTrainRunner(base_dir, create_continuous_agent)
else:
raise ValueError('Unknown schedule: {}'.format(schedule))
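# A minimal usage sketch (not part of the original module): how the two helpers above
# are typically wired together. The gin file path is a placeholder and must bind a
# supported agent (currently only 'sac') plus any runner/agent parameters, since both
# helpers are gin-configurable.
def _run_sac_example(base_dir, gin_file):
  load_gin_configs([gin_file], [])             # re-exported from discrete_domains above
  runner = create_continuous_runner(base_dir)  # defaults to 'continuous_train_and_eval'
  runner.run_experiment()                      # inherited from the base Runner class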
@gin.configurable
class ContinuousRunner(base_run_experiment.Runner):
"""Object that handles running Dopamine experiments.
This is mostly the same as discrete_domains.Runner, but is written solely for
JAX/Flax agents.
"""
def __init__(self,
base_dir,
create_agent_fn,
create_environment_fn=gym_lib.create_gym_environment,
checkpoint_file_prefix='ckpt',
logging_file_prefix='log',
log_every_n=1,
num_iterations=200,
training_steps=250000,
evaluation_steps=125000,
max_steps_per_episode=1000,
clip_rewards=False):
"""Initialize the Runner object in charge of running a full experiment.
Args:
base_dir: str, the base directory to host all required sub-directories.
create_agent_fn: A function that takes as argument an environment, and
returns an agent.
create_environment_fn: A function which receives a problem name and
creates a Gym environment for that problem (e.g. an Atari 2600 game).
checkpoint_file_prefix: str, the prefix to use for checkpoint files.
logging_file_prefix: str, prefix to use for the log files.
log_every_n: int, the frequency for writing logs.
num_iterations: int, the iteration number threshold (must be greater than
start_iteration).
training_steps: int, the number of training steps to perform.
evaluation_steps: int, the number of evaluation steps to perform.
max_steps_per_episode: int, maximum number of steps after which an episode
terminates.
clip_rewards: bool, whether to clip rewards in [-1, 1].
This constructor will take the following actions:
- Initialize an environment.
- Initialize a logger.
- Initialize an agent.
- Reload from the latest checkpoint, if available, and initialize the
Checkpointer object.
"""
assert base_dir is not None
self._logging_file_prefix = logging_file_prefix
self._log_every_n = log_every_n
self._num_iterations = num_iterations
self._training_steps = training_steps
self._evaluation_steps = evaluation_steps
self._max_steps_per_episode = max_steps_per_episode
self._base_dir = base_dir
self._clip_rewards = clip_rewards
self._create_directories()
self._summary_writer = tensorboard.SummaryWriter(base_dir)
self._environment = create_environment_fn()
self._agent = create_agent_fn(self._environment,
summary_writer=self._summary_writer)
self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)
def _save_tensorboard_summaries(self, iteration,
num_episodes_train,
average_reward_train,
num_episodes_eval,
average_reward_eval,
average_steps_per_second):
"""Save statistics as tensorboard summaries.
Args:
iteration: int, The current iteration number.
num_episodes_train: int, number of training episodes run.
average_reward_train: float, The average training reward.
num_episodes_eval: int, number of evaluation episodes run.
average_reward_eval: float, The average evaluation reward.
average_steps_per_second: float, The average number of steps per second.
"""
metrics = [('Train/NumEpisodes', num_episodes_train),
('Train/AverageReturns', average_reward_train),
('Train/AverageStepsPerSecond', average_steps_per_second),
('Eval/NumEpisodes', num_episodes_eval),
('Eval/AverageReturns', average_reward_eval)]
for name, value in metrics:
self._summary_writer.scalar(name, value, iteration)
self._summary_writer.flush()
@gin.configurable
class ContinuousTrainRunner(ContinuousRunner):
"""Object that handles running experiments.
This is mostly the same as discrete_domains.TrainRunner, but is written solely
for JAX/Flax agents.
"""
def __init__(self, base_dir, create_agent_fn,
create_environment_fn=gym_lib.create_gym_environment):
"""Initialize the TrainRunner object in charge of running a full experiment.
Args:
base_dir: str, the base directory to host all required sub-directories.
      create_agent_fn: A function that takes as argument an environment, and
        returns an agent.
create_environment_fn: A function which receives a problem name and
creates a Gym environment for that problem (e.g. an Atari 2600 game).
"""
logging.info('Creating ContinuousTrainRunner ...')
super().__init__(base_dir, create_agent_fn, create_environment_fn)
self._agent.eval_mode = False
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained. This method differs from the `_run_one_iteration` method
in the base `Runner` class in that it only runs the train phase.
Args:
iteration: int, current iteration number, used as a global_step for saving
Tensorboard summaries.
Returns:
A dict containing summary statistics for this iteration.
"""
statistics = iteration_statistics.IterationStatistics()
num_episodes_train, average_reward_train, average_steps_per_second = (
self._run_train_phase(statistics))
self._save_tensorboard_summaries(iteration, num_episodes_train,
average_reward_train,
average_steps_per_second)
return statistics.data_lists
def _save_tensorboard_summaries(self, iteration, num_episodes,
average_reward, average_steps_per_second):
"""Save statistics as tensorboard summaries."""
metrics = [('Train/NumEpisodes', num_episodes),
('Train/AverageReturns', average_reward),
('Train/AverageStepsPerSecond', average_steps_per_second)]
for name, value in metrics:
self._summary_writer.scalar(name, value, iteration)
self._summary_writer.flush()
| 39.823529
| 80
| 0.715235
|
16a0941f4e6f5ba700521cfd30e33eebd0c4fcef
| 1,544
|
py
|
Python
|
projects/flow.sklearn.init-ParameterGrid/init.py
|
phmalek/signac-examples
|
d48def3106afcde70e58f07940ab0de3779bad4b
|
[
"BSD-3-Clause"
] | 1
|
2020-03-12T19:27:46.000Z
|
2020-03-12T19:27:46.000Z
|
projects/flow.sklearn.init-ParameterGrid/init.py
|
phmalek/signac-examples
|
d48def3106afcde70e58f07940ab0de3779bad4b
|
[
"BSD-3-Clause"
] | null | null | null |
projects/flow.sklearn.init-ParameterGrid/init.py
|
phmalek/signac-examples
|
d48def3106afcde70e58f07940ab0de3779bad4b
|
[
"BSD-3-Clause"
] | null | null | null |
"""This example performs grid search, saving the results of each evaluated
parameter set into a signac data space.
See also:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
"""
import joblib
import signac
from sklearn import datasets, svm
from sklearn.model_selection import ParameterGrid, train_test_split
if __name__ == '__main__':
# Load sample data
dataset = datasets.load_digits()
X = dataset.data
y = dataset.target
class_names = dataset.target_names.tolist()
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Initialize the signac project and save the training/testing data into the
# project's HDF5 data store
project = signac.init_project('gridsearch')
with project.data:
project.data.X_train = X_train
project.data.X_test = X_test
project.data.y_train = y_train
project.data.y_test = y_test
# Class names are non-numeric so they go into the project document.
project.doc.class_names = class_names
param_grid = {
'kernel': ('linear', 'rbf'),
'C': (0.1, 1, 10, 100),
'gamma': ('scale',)
}
# Create the jobs for each estimator
for params in ParameterGrid(param_grid):
print('Creating job for', params)
job = project.open_job(params).init()
estimator = svm.SVC(**params)
joblib.dump(estimator, job.fn('estimator.joblib'))
| 32.166667
| 97
| 0.685233
|
bae55349cea202d3ccfe436423aa243a6c3ead33
| 3,228
|
py
|
Python
|
region_count_prediction/data_setup.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
region_count_prediction/data_setup.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
region_count_prediction/data_setup.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import pandas as pd
from sklearn.model_selection import train_test_split
from data.data_helpers import get_original_data
from root import ROOT_DIR
from weighted_mean_prediction.data_setup import one_hot_encode_df, split_data
REGIONS = [f"R{i}" for i in range(1, 9)]
def get_normalised_region_counts(df: pd.DataFrame = None) -> pd.DataFrame:
df = get_original_data() if df is None else df
df = df.copy()
df[REGIONS] = df[REGIONS].div(df[REGIONS].sum(axis=1), axis=0)
return df
def save_encoded_data(X_train, X_val, X_test, y_train, y_val, y_test) -> None:
X_train.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_train.csv", index=False)
X_val.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_val.csv", index=False)
X_test.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_test.csv", index=False)
y_train.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_train.csv", index=False)
y_val.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_val.csv", index=False)
y_test.to_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_test.csv", index=False)
def get_encoded_split_data():
try:
X_train = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_train.csv", )
X_val = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_val.csv", )
X_test = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/X_test.csv", )
y_train = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_train.csv", )
y_val = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_val.csv", )
y_test = pd.read_csv(f"{ROOT_DIR}/region_count_prediction/model_data/y_test.csv", )
return X_train, X_val, X_test, y_train, y_val, y_test
except FileNotFoundError:
data = split_data()
save_encoded_data(*data)
return data
def split_data(df: pd.DataFrame = None, train_size=0.8, val_size=0.1, random_state=0) \
-> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
df = get_original_data() if df is None else df
X = df.drop(REGIONS, axis=1)
y = df[REGIONS]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 - train_size,
shuffle=True, random_state=random_state)
test_ratio = 1 - train_size - val_size
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test,
test_size=test_ratio / (test_ratio + val_size),
random_state=random_state)
return X_train, X_val, X_test, y_train, y_val, y_test
if __name__ == "__main__":
df = get_normalised_region_counts()
df = one_hot_encode_df(df)
X_train, X_val, X_test, y_train, y_val, y_test = split_data(df)
print(f"{len(X_train)} Training Sequences ({len(X_train) / 260510})")
print(f"{len(X_val)} Validation Sequences ({len(X_val) / 260510})")
print(f"{len(X_test)} Testing Sequences ({len(X_test) / 260510})")
save_encoded_data(X_train, X_val, X_test, y_train, y_val, y_test)
| 43.04
| 101
| 0.694238
|
66ad2872aed877a8b3bba2a3776bd95125587f83
| 562
|
py
|
Python
|
tests/DistributedAverager/Sensor.py
|
timkrentz/riaps-pycom
|
5b4d9b3c80f9191ec9f680264e6d0ccb14f171ba
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2019-02-24T10:14:46.000Z
|
2020-07-08T16:22:17.000Z
|
tests/DistributedAverager/Sensor.py
|
timkrentz/riaps-pycom
|
5b4d9b3c80f9191ec9f680264e6d0ccb14f171ba
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2018-10-16T04:39:54.000Z
|
2022-02-09T17:38:05.000Z
|
tests/DistributedAverager/Sensor.py
|
timkrentz/riaps-pycom
|
5b4d9b3c80f9191ec9f680264e6d0ccb14f171ba
|
[
"ECL-2.0",
"Apache-2.0"
] | 8
|
2018-10-23T22:09:18.000Z
|
2021-04-23T02:53:24.000Z
|
# import riaps
from riaps.run.comp import Component
import logging
import random
class Sensor(Component):
def __init__(self,value):
super(Sensor, self).__init__()
if value == 0.0:
self.myValue = (10.0 * random.random()) - 5.0
else:
self.myValue = value
def on_clock(self):
now = self.clock.recv_pyobj() # Receive time (as float)
# self.logger.info('on_clock():%s',msg)
msg = (now,self.myValue) # Send (timestamp,value)
self.sensorReady.send_pyobj(msg)
| 28.1
| 65
| 0.588968
|
c866ff62f7349b4bd29b73f13532c693fd143ca1
| 48,902
|
py
|
Python
|
python/ccxt/probit.py
|
TheTexasFarmer/ccxt
|
38a9d3a8ec14bf8aaa0a7d1dfb60f1b57c8df402
|
[
"MIT"
] | 2
|
2021-07-19T06:59:47.000Z
|
2021-12-17T21:01:08.000Z
|
python/ccxt/probit.py
|
TheTexasFarmer/ccxt
|
38a9d3a8ec14bf8aaa0a7d1dfb60f1b57c8df402
|
[
"MIT"
] | null | null | null |
python/ccxt/probit.py
|
TheTexasFarmer/ccxt
|
38a9d3a8ec14bf8aaa0a7d1dfb60f1b57c8df402
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class probit(Exchange):
def describe(self):
return self.deep_extend(super(probit, self).describe(), {
'id': 'probit',
'name': 'ProBit',
'countries': ['SC', 'KR'], # Seychelles, South Korea
'rateLimit': 250, # ms
'has': {
'CORS': True,
'fetchTime': True,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchTickers': True,
'fetchTicker': True,
'fetchOHLCV': True,
'fetchOrderBook': True,
'fetchTrades': True,
'fetchBalance': True,
'createOrder': True,
'createMarketOrder': True,
'cancelOrder': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchDepositAddress': True,
'withdraw': True,
'signIn': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'10m': '10m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '1W',
'1M': '1M',
},
'version': 'v1',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/79268032-c4379480-7ea2-11ea-80b3-dd96bb29fd0d.jpg',
'api': {
'accounts': 'https://accounts.probit.com',
'public': 'https://api.probit.com/api/exchange',
'private': 'https://api.probit.com/api/exchange',
},
'www': 'https://www.probit.com',
'doc': [
'https://docs-en.probit.com',
'https://docs-ko.probit.com',
],
'fees': 'https://support.probit.com/hc/en-us/articles/360020968611-Trading-Fees',
'referral': 'https://www.probit.com/r/34608773',
},
'api': {
'public': {
'get': [
'market',
'currency',
'currency_with_platform',
'time',
'ticker',
'order_book',
'trade',
'candle',
],
},
'private': {
'post': [
'new_order',
'cancel_order',
'withdrawal',
],
'get': [
'balance',
'order',
'open_order',
'order_history',
'trade_history',
'deposit_address',
],
},
'accounts': {
'post': [
'token',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
},
'exceptions': {
'exact': {
'UNAUTHORIZED': AuthenticationError,
'INVALID_ARGUMENT': BadRequest, # Parameters are not a valid format, parameters are empty, or out of range, or a parameter was sent when not required.
'TRADING_UNAVAILABLE': ExchangeNotAvailable,
'NOT_ENOUGH_BALANCE': InsufficientFunds,
'NOT_ALLOWED_COMBINATION': BadRequest,
'INVALID_ORDER': InvalidOrder, # Requested order does not exist, or it is not your order
'RATE_LIMIT_EXCEEDED': RateLimitExceeded, # You are sending requests too frequently. Please try it later.
'MARKET_UNAVAILABLE': ExchangeNotAvailable, # Market is closed today
                    'INVALID_MARKET': BadSymbol,  # Requested market does not exist
'MARKET_CLOSED': BadSymbol, # {"errorCode":"MARKET_CLOSED"}
'MARKET_NOT_FOUND': BadSymbol, # {"errorCode":"MARKET_NOT_FOUND","message":"8e2b8496-0a1e-5beb-b990-a205b902eabe","details":{}}
                    'INVALID_CURRENCY': BadRequest,  # Requested currency does not exist on the ProBit system
'TOO_MANY_OPEN_ORDERS': DDoSProtection, # Too many open orders
'DUPLICATE_ADDRESS': InvalidAddress, # Address already exists in withdrawal address list
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'precisionMode': TICK_SIZE,
'options': {
'createMarketBuyOrderRequiresPrice': True,
'timeInForce': {
'limit': 'gtc',
'market': 'ioc',
},
},
'commonCurrencies': {
'AUTO': 'Cube',
'BCC': 'BCC',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'CBC': 'CryptoBharatCoin',
'EPS': 'Epanus', # conflict with EPS Ellipsis https://github.com/ccxt/ccxt/issues/8909
'HBC': 'Hybrid Bank Cash',
'ORC': 'Oracle System',
'SOC': 'Soda Coin',
'UNI': 'UNICORN Token',
'UNISWAP': 'UNI',
},
})
def fetch_markets(self, params={}):
response = self.publicGetMarket(params)
#
# {
# "data":[
# {
# "id":"MONA-USDT",
# "base_currency_id":"MONA",
# "quote_currency_id":"USDT",
# "min_price":"0.001",
# "max_price":"9999999999999999",
# "price_increment":"0.001",
# "min_quantity":"0.0001",
# "max_quantity":"9999999999999999",
# "quantity_precision":4,
# "min_cost":"1",
# "max_cost":"9999999999999999",
# "cost_precision":8,
# "taker_fee_rate":"0.2",
# "maker_fee_rate":"0.2",
# "show_in_ui":true,
# "closed":false
# },
# ]
# }
#
markets = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency_id')
quoteId = self.safe_string(market, 'quote_currency_id')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
closed = self.safe_value(market, 'closed', False)
active = not closed
amountPrecision = self.safe_string(market, 'quantity_precision')
costPrecision = self.safe_string(market, 'cost_precision')
amountTickSize = self.parse_precision(amountPrecision)
costTickSize = self.parse_precision(costPrecision)
precision = {
'amount': self.parse_number(amountTickSize),
'price': self.safe_number(market, 'price_increment'),
'cost': self.parse_number(costTickSize),
}
takerFeeRate = self.safe_string(market, 'taker_fee_rate')
taker = Precise.string_div(takerFeeRate, '100')
makerFeeRate = self.safe_string(market, 'maker_fee_rate')
maker = Precise.string_div(makerFeeRate, '100')
result.append({
'id': id,
'info': market,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'taker': self.parse_number(taker),
'maker': self.parse_number(maker),
'limits': {
'amount': {
'min': self.safe_number(market, 'min_quantity'),
'max': self.safe_number(market, 'max_quantity'),
},
'price': {
'min': self.safe_number(market, 'min_price'),
'max': self.safe_number(market, 'max_price'),
},
'cost': {
'min': self.safe_number(market, 'min_cost'),
'max': self.safe_number(market, 'max_cost'),
},
},
})
return result
def fetch_currencies(self, params={}):
response = self.publicGetCurrencyWithPlatform(params)
#
# {
# "data":[
# {
# "id":"USDT",
# "display_name":{"ko-kr":"테더","en-us":"Tether"},
# "show_in_ui":true,
# "platform":[
# {
# "id":"ETH",
# "priority":1,
# "deposit":true,
# "withdrawal":true,
# "currency_id":"USDT",
# "precision":6,
# "min_confirmation_count":15,
# "require_destination_tag":false,
# "display_name":{"name":{"ko-kr":"ERC-20","en-us":"ERC-20"}},
# "min_deposit_amount":"0",
# "min_withdrawal_amount":"1",
# "withdrawal_fee":[
# {"amount":"0.01","priority":2,"currency_id":"ETH"},
# {"amount":"1.5","priority":1,"currency_id":"USDT"},
# ],
# "deposit_fee":{},
# "suspended_reason":"",
# "deposit_suspended":false,
# "withdrawal_suspended":false
# },
# {
# "id":"OMNI",
# "priority":2,
# "deposit":true,
# "withdrawal":true,
# "currency_id":"USDT",
# "precision":6,
# "min_confirmation_count":3,
# "require_destination_tag":false,
# "display_name":{"name":{"ko-kr":"OMNI","en-us":"OMNI"}},
# "min_deposit_amount":"0",
# "min_withdrawal_amount":"5",
# "withdrawal_fee":[{"amount":"5","priority":1,"currency_id":"USDT"}],
# "deposit_fee":{},
# "suspended_reason":"wallet_maintenance",
# "deposit_suspended":false,
# "withdrawal_suspended":false
# }
# ],
# "stakeable":false,
# "unstakeable":false,
# "auto_stake":false,
# "auto_stake_amount":"0"
# }
# ]
# }
#
currencies = self.safe_value(response, 'data')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'id')
code = self.safe_currency_code(id)
displayName = self.safe_value(currency, 'display_name')
name = self.safe_string(displayName, 'en-us')
platforms = self.safe_value(currency, 'platform', [])
platformsByPriority = self.sort_by(platforms, 'priority')
platform = self.safe_value(platformsByPriority, 0, {})
precision = self.safe_integer(platform, 'precision')
depositSuspended = self.safe_value(platform, 'deposit_suspended')
withdrawalSuspended = self.safe_value(platform, 'withdrawal_suspended')
active = not (depositSuspended and withdrawalSuspended)
withdrawalFees = self.safe_value(platform, 'withdrawal_fee', {})
fees = []
# sometimes the withdrawal fee is an empty object
# [{'amount': '0.015', 'priority': 1, 'currency_id': 'ETH'}, {}]
for j in range(0, len(withdrawalFees)):
withdrawalFee = withdrawalFees[j]
amount = self.safe_number(withdrawalFee, 'amount')
priority = self.safe_integer(withdrawalFee, 'priority')
if (amount is not None) and (priority is not None):
fees.append(withdrawalFee)
withdrawalFeesByPriority = self.sort_by(fees, 'priority')
withdrawalFee = self.safe_value(withdrawalFeesByPriority, 0, {})
fee = self.safe_number(withdrawalFee, 'amount')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_number(platform, 'min_deposit_amount'),
'max': None,
},
'withdraw': {
'min': self.safe_number(platform, 'min_withdrawal_amount'),
'max': None,
},
},
}
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# data: [
# {
# "currency_id":"XRP",
# "total":"100",
# "available":"0",
# }
# ]
# }
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
data = self.safe_value(response, 'data')
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency_id')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'total')
account['free'] = self.safe_string(balance, 'available')
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
response = self.publicGetOrderBook(self.extend(request, params))
#
# {
# data: [
# {side: 'buy', price: '0.000031', quantity: '10'},
# {side: 'buy', price: '0.00356007', quantity: '4.92156877'},
# {side: 'sell', price: '0.1857', quantity: '0.17'},
# ]
# }
#
data = self.safe_value(response, 'data', [])
dataBySide = self.group_by(data, 'side')
return self.parse_order_book(dataBySide, symbol, None, 'buy', 'sell', 'price', 'quantity')
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
marketIds = self.market_ids(symbols)
request['market_ids'] = ','.join(marketIds)
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_tickers(data, symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_ids': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
#
# {
# "data":[
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
ticker = self.safe_value(data, 0)
if ticker is None:
raise BadResponse(self.id + ' fetchTicker() returned an empty response')
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
#
# {
# "last":"0.022902",
# "low":"0.021693",
# "high":"0.024093",
# "change":"-0.000047",
# "base_volume":"15681.986",
# "quote_volume":"360.514403624",
# "market_id":"ETH-BTC",
# "time":"2020-04-12T18:43:38.000Z"
# }
#
timestamp = self.parse8601(self.safe_string(ticker, 'time'))
marketId = self.safe_string(ticker, 'market_id')
symbol = self.safe_symbol(marketId, market, '-')
close = self.safe_number(ticker, 'last')
change = self.safe_number(ticker, 'change')
percentage = None
open = None
if change is not None:
if close is not None:
open = close - change
percentage = (change / open) * 100
baseVolume = self.safe_number(ticker, 'base_volume')
quoteVolume = self.safe_number(ticker, 'quote_volume')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None, # previous day close
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'limit': 100,
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
}
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.privateGetTradeHistory(self.extend(request, params))
#
# {
# data: [
# {
# "id":"BTC-USDT:183566",
# "order_id":"17209376",
# "side":"sell",
# "fee_amount":"0.657396569175",
# "fee_currency_id":"USDT",
# "status":"settled",
# "price":"6573.96569175",
# "quantity":"0.1",
# "cost":"657.396569175",
# "time":"2018-08-10T06:06:46.000Z",
# "market_id":"BTC-USDT"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'limit': 100,
'start_time': '1970-01-01T00:00:00.000Z',
'end_time': self.iso8601(self.milliseconds()),
}
if since is not None:
request['start_time'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# {
# "data":[
# {
# "id":"ETH-BTC:3331886",
# "price":"0.022981",
# "quantity":"12.337",
# "time":"2020-04-12T20:55:42.371Z",
# "side":"sell",
# "tick_direction":"down"
# },
# {
# "id":"ETH-BTC:3331885",
# "price":"0.022982",
# "quantity":"6.472",
# "time":"2020-04-12T20:55:39.652Z",
# "side":"sell",
# "tick_direction":"down"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"ETH-BTC:3331886",
# "price":"0.022981",
# "quantity":"12.337",
# "time":"2020-04-12T20:55:42.371Z",
# "side":"sell",
# "tick_direction":"down"
# }
#
# fetchMyTrades(private)
#
# {
# "id":"BTC-USDT:183566",
# "order_id":"17209376",
# "side":"sell",
# "fee_amount":"0.657396569175",
# "fee_currency_id":"USDT",
# "status":"settled",
# "price":"6573.96569175",
# "quantity":"0.1",
# "cost":"657.396569175",
# "time":"2018-08-10T06:06:46.000Z",
# "market_id":"BTC-USDT"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'time'))
id = self.safe_string(trade, 'id')
marketId = None
if id is not None:
parts = id.split(':')
marketId = self.safe_string(parts, 0)
marketId = self.safe_string(trade, 'market_id', marketId)
symbol = self.safe_symbol(marketId, market, '-')
side = self.safe_string(trade, 'side')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'quantity')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
orderId = self.safe_string(trade, 'order_id')
feeCost = self.safe_number(trade, 'fee_amount')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fee_currency_id')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {"data":"2020-04-12T18:54:25.390Z"}
#
timestamp = self.parse8601(self.safe_string(response, 'data'))
return timestamp
def normalize_ohlcv_timestamp(self, timestamp, timeframe, after=False):
duration = self.parse_timeframe(timeframe)
if timeframe == '1M':
iso8601 = self.iso8601(timestamp)
parts = iso8601.split('-')
year = self.safe_string(parts, 0)
month = self.safe_integer(parts, 1)
if after:
month = self.sum(month, 1)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
return year + '-' + month + '-01T00:00:00.000Z'
elif timeframe == '1w':
timestamp = int(timestamp / 1000)
firstSunday = 259200 # 1970-01-04T00:00:00.000Z
difference = timestamp - firstSunday
numWeeks = int(math.floor(difference / duration))
previousSunday = self.sum(firstSunday, numWeeks * duration)
if after:
previousSunday = self.sum(previousSunday, duration)
return self.iso8601(previousSunday * 1000)
else:
timestamp = int(timestamp / 1000)
timestamp = duration * int(timestamp / duration)
if after:
timestamp = self.sum(timestamp, duration)
return self.iso8601(timestamp * 1000)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
interval = self.timeframes[timeframe]
limit = 100 if (limit is None) else limit
requestLimit = self.sum(limit, 1)
requestLimit = min(1000, requestLimit) # max 1000
request = {
'market_ids': market['id'],
'interval': interval,
'sort': 'asc', # 'asc' will always include the start_time, 'desc' will always include end_time
'limit': requestLimit, # max 1000
}
now = self.milliseconds()
duration = self.parse_timeframe(timeframe)
startTime = since
endTime = now
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires either a since argument or a limit argument')
else:
startTime = now - limit * duration * 1000
else:
if limit is None:
endTime = now
else:
endTime = self.sum(since, self.sum(limit, 1) * duration * 1000)
startTimeNormalized = self.normalize_ohlcv_timestamp(startTime, timeframe)
endTimeNormalized = self.normalize_ohlcv_timestamp(endTime, timeframe, True)
request['start_time'] = startTimeNormalized
request['end_time'] = endTimeNormalized
response = self.publicGetCandle(self.extend(request, params))
#
# {
# "data":[
# {
# "market_id":"ETH-BTC",
# "open":"0.02811",
# "close":"0.02811",
# "low":"0.02811",
# "high":"0.02811",
# "base_volume":"0.0005",
# "quote_volume":"0.000014055",
# "start_time":"2018-11-30T18:19:00.000Z",
# "end_time":"2018-11-30T18:20:00.000Z"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "market_id":"ETH-BTC",
# "open":"0.02811",
# "close":"0.02811",
# "low":"0.02811",
# "high":"0.02811",
# "base_volume":"0.0005",
# "quote_volume":"0.000014055",
# "start_time":"2018-11-30T18:19:00.000Z",
# "end_time":"2018-11-30T18:20:00.000Z"
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'start_time')),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'base_volume'),
]
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
since = self.parse8601(since)
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
response = self.privateGetOpenOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'start_time': self.iso8601(0),
'end_time': self.iso8601(self.milliseconds()),
'limit': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market_id'] = market['id']
if since:
request['start_time'] = self.iso8601(since)
if limit:
request['limit'] = limit
response = self.privateGetOrderHistory(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_orders(data, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
else:
request['order_id'] = id
query = self.omit(params, ['clientOrderId', 'client_order_id'])
response = self.privateGetOrder(self.extend(request, query))
data = self.safe_value(response, 'data', [])
order = self.safe_value(data, 0)
return self.parse_order(order, market)
def parse_order_status(self, status):
statuses = {
'open': 'open',
'cancelled': 'canceled',
'filled': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# id: string,
# user_id: string,
# market_id: string,
# type: 'orderType',
# side: 'side',
# quantity: string,
# limit_price: string,
# time_in_force: 'timeInForce',
# filled_cost: string,
# filled_quantity: string,
# open_quantity: string,
# cancelled_quantity: string,
# status: 'orderStatus',
# time: 'date',
# client_order_id: string,
# }
#
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
marketId = self.safe_string(order, 'market_id')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.parse8601(self.safe_string(order, 'time'))
price = self.safe_number(order, 'limit_price')
filled = self.safe_number(order, 'filled_quantity')
remaining = self.safe_number(order, 'open_quantity')
canceledAmount = self.safe_number(order, 'cancelled_quantity')
if canceledAmount is not None:
remaining = self.sum(remaining, canceledAmount)
amount = self.safe_number(order, 'quantity', self.sum(filled, remaining))
cost = self.safe_number_2(order, 'filled_cost', 'cost')
if type == 'market':
price = None
clientOrderId = self.safe_string(order, 'client_order_id')
if clientOrderId == '':
clientOrderId = None
timeInForce = self.safe_string_upper(order, 'time_in_force')
return self.safe_order({
'id': id,
'info': order,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'side': side,
'status': status,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'average': None,
'cost': cost,
'fee': None,
'trades': None,
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
options = self.safe_value(self.options, 'timeInForce')
defaultTimeInForce = self.safe_value(options, type)
timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force', defaultTimeInForce)
request = {
'market_id': market['id'],
'type': type,
'side': side,
'time_in_force': timeInForce,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
costToPrecision = None
if type == 'limit':
request['limit_price'] = self.price_to_precision(symbol, price)
request['quantity'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
cost = self.safe_number(params, 'cost')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if cost is None:
cost = amount * price
elif cost is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument for market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'cost' extra parameter(the exchange-specific behaviour)")
else:
cost = amount if (cost is None) else cost
costToPrecision = self.cost_to_precision(symbol, cost)
request['cost'] = costToPrecision
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
query = self.omit(params, ['timeInForce', 'time_in_force', 'clientOrderId', 'client_order_id'])
response = self.privatePostNewOrder(self.extend(request, query))
#
# {
# data: {
# id: string,
# user_id: string,
# market_id: string,
# type: 'orderType',
# side: 'side',
# quantity: string,
# limit_price: string,
# time_in_force: 'timeInForce',
# filled_cost: string,
# filled_quantity: string,
# open_quantity: string,
# cancelled_quantity: string,
# status: 'orderStatus',
# time: 'date',
# client_order_id: string,
# }
# }
#
data = self.safe_value(response, 'data')
order = self.parse_order(data, market)
# a workaround for incorrect huge amounts
# returned by the exchange on market buys
if (type == 'market') and (side == 'buy'):
order['amount'] = None
order['cost'] = float(costToPrecision)
order['remaining'] = None
return order
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
'order_id': id,
}
response = self.privatePostCancelOrder(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_order(data)
def parse_deposit_address(self, depositAddress, currency=None):
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'destination_tag')
currencyId = self.safe_string(depositAddress, 'currency_id')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency_id': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
#
# {
# "data":[
# {
# "currency_id":"ETH",
# "address":"0x12e2caf3c4051ba1146e612f532901a423a9898a",
# "destination_tag":null
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
firstAddress = self.safe_value(data, 0)
if firstAddress is None:
raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response')
return self.parse_deposit_address(firstAddress, currency)
def fetch_deposit_addresses(self, codes=None, params={}):
self.load_markets()
request = {}
if codes:
currencyIds = []
for i in range(0, len(codes)):
currency = self.currency(codes[i])
currencyIds.append(currency['id'])
request['currency_id'] = ','.join(codes)
response = self.privateGetDepositAddress(self.extend(request, params))
data = self.safe_value(response, 'data', [])
return self.parse_deposit_addresses(data)
def withdraw(self, code, amount, address, tag=None, params={}):
        # In order to use this method
        # you need to allow API withdrawal from the API Settings Page,
        # and register the list of withdrawal addresses and destination tags on the API Settings page
# you can only withdraw to the registered addresses using the API
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag is None:
tag = ''
request = {
'currency_id': currency['id'],
# 'platform_id': 'ETH', # if omitted it will use the default platform for the currency
'address': address,
'destination_tag': tag,
'amount': self.number_to_string(amount),
# which currency to pay the withdrawal fees
# only applicable for currencies that accepts multiple withdrawal fee options
# 'fee_currency_id': 'ETH', # if omitted it will use the default fee policy for each currency
# whether the amount field includes fees
# 'include_fee': False, # makes sense only when fee_currency_id is equal to currency_id
}
response = self.privatePostWithdrawal(self.extend(request, params))
data = self.safe_value(response, 'data')
return self.parse_transaction(data, currency)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'id')
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'destination_tag')
txid = self.safe_string(transaction, 'hash')
timestamp = self.parse8601(self.safe_string(transaction, 'time'))
type = self.safe_string(transaction, 'type')
currencyId = self.safe_string(transaction, 'currency_id')
code = self.safe_currency_code(currencyId)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
feeCost = self.safe_number(transaction, 'fee')
fee = None
if feeCost is not None and feeCost != 0:
fee = {
'currency': code,
'cost': feeCost,
}
return {
'id': id,
'currency': code,
'amount': amount,
'addressFrom': None,
'address': address,
'addressTo': address,
'tagFrom': None,
'tag': tag,
'tagTo': tag,
'status': status,
'type': type,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
'info': transaction,
}
def parse_transaction_status(self, status):
statuses = {
'requested': 'pending',
'pending': 'pending',
'confirming': 'pending',
'confirmed': 'pending',
'applying': 'pending',
'done': 'ok',
'cancelled': 'canceled',
'cancelling': 'canceled',
}
return self.safe_string(statuses, status, status)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/'
query = self.omit(params, self.extract_params(path))
if api == 'accounts':
self.check_required_credentials()
url += self.implode_params(path, params)
auth = self.apiKey + ':' + self.secret
auth64 = self.string_to_base64(auth)
headers = {
'Authorization': 'Basic ' + self.decode(auth64),
'Content-Type': 'application/json',
}
if query:
body = self.json(query)
else:
url += self.version + '/'
if api == 'public':
url += self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
now = self.milliseconds()
self.check_required_credentials()
expires = self.safe_integer(self.options, 'expires')
if (expires is None) or (expires < now):
raise AuthenticationError(self.id + ' access token expired, call signIn() method')
accessToken = self.safe_string(self.options, 'accessToken')
headers = {
'Authorization': 'Bearer ' + accessToken,
}
url += self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
headers['Content-Type'] = 'application/json'
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def sign_in(self, params={}):
self.check_required_credentials()
request = {
'grant_type': 'client_credentials', # the only supported value
}
response = self.accountsPostToken(self.extend(request, params))
#
# {
# access_token: '0ttDv/2hTTn3bLi8GP1gKaneiEQ6+0hOBenPrxNQt2s=',
# token_type: 'bearer',
# expires_in: 900
# }
#
expiresIn = self.safe_integer(response, 'expires_in')
accessToken = self.safe_string(response, 'access_token')
self.options['accessToken'] = accessToken
self.options['expires'] = self.sum(self.milliseconds(), expiresIn * 1000)
return response
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'errorCode' in response:
errorCode = self.safe_string(response, 'errorCode')
message = self.safe_string(response, 'message')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback)
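# A minimal usage sketch (not part of the generated file): public endpoints need no
# credentials, while private endpoints additionally require apiKey/secret and a
# signIn() call because ProBit issues short-lived bearer tokens (see sign_in above).
# The symbol is just an example market.
def _probit_public_demo():
    exchange = probit({'enableRateLimit': True})
    ticker = exchange.fetch_ticker('BTC/USDT')        # loads markets on first use
    orderbook = exchange.fetch_order_book('BTC/USDT')
    return ticker, orderbook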
| 41.337278
| 512
| 0.486176
|
ddbc34cca175cb340353d9c9c36085f929c84419
| 23,936
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/virtual_wans_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/virtual_wans_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/virtual_wans_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualWansOperations(object):
"""VirtualWansOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-10-01"
self.config = config
def get(
self, resource_group_name, virtual_wan_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the details of a VirtualWAN.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being retrieved.
:type virtual_wan_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualWAN or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_10_01.models.VirtualWAN or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualWAN', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}
def _create_or_update_initial(
self, resource_group_name, virtual_wan_name, wan_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(wan_parameters, 'VirtualWAN')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualWAN', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualWAN', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_wan_name, wan_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a VirtualWAN resource if it doesn't exist else updates the
existing VirtualWAN.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being created or
updated.
:type virtual_wan_name: str
:param wan_parameters: Parameters supplied to create or update
VirtualWAN.
:type wan_parameters:
~azure.mgmt.network.v2018_10_01.models.VirtualWAN
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualWAN or
ClientRawResponse<VirtualWAN> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
wan_parameters=wan_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualWAN', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}
def _update_tags_initial(
self, resource_group_name, virtual_wan_name, tags=None, custom_headers=None, raw=False, **operation_config):
wan_parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(wan_parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualWAN', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualWAN', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_wan_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a VirtualWAN tags.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being updated.
:type virtual_wan_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualWAN or
ClientRawResponse<VirtualWAN> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualWAN', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}
def _delete_initial(
self, resource_group_name, virtual_wan_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_wan_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a VirtualWAN.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being deleted.
:type virtual_wan_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the VirtualWANs in a resource group.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualWAN
:rtype:
~azure.mgmt.network.v2018_10_01.models.VirtualWANPaged[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.VirtualWANPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualWANPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the VirtualWANs in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualWAN
:rtype:
~azure.mgmt.network.v2018_10_01.models.VirtualWANPaged[~azure.mgmt.network.v2018_10_01.models.VirtualWAN]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.VirtualWANPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualWANPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualWans'}
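# --- Illustrative usage (not part of the generated SDK file) ---
# A minimal sketch, assuming the azure-mgmt-network package wires this
# VirtualWansOperations class into NetworkManagementClient for api-version
# 2018-10-01. The credentials, subscription id, resource group and VirtualWAN
# name below are placeholders, not values taken from the original file.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient

credentials = ServicePrincipalCredentials(
    client_id='<app-id>', secret='<app-secret>', tenant='<tenant-id>')
client = NetworkManagementClient(credentials, '<subscription-id>')

# get() is synchronous and returns a deserialized VirtualWAN model.
wan = client.virtual_wans.get('<resource-group>', '<virtual-wan-name>')

# create_or_update(), update_tags() and delete() are long-running operations:
# they return an LROPoller, and .result() blocks until ARM reports completion.
poller = client.virtual_wans.update_tags(
    '<resource-group>', '<virtual-wan-name>', tags={'env': 'test'})
updated_wan = poller.result()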
| 46.387597
| 166
| 0.672543
|
78a1ba7d74557f0cd64ea97a5e4ecd39de551725
| 458
|
py
|
Python
|
plotly/validators/histogram2dcontour/_hoverinfosrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/histogram2dcontour/_hoverinfosrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/histogram2dcontour/_hoverinfosrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='hoverinfosrc',
parent_name='histogram2dcontour',
**kwargs
):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='none',
role='info',
**kwargs
)
| 24.105263
| 71
| 0.611354
|
dd09657b269332f7febe917db709a47bbc98d9f1
| 12,698
|
py
|
Python
|
localstack/services/dynamodb/dynamodb_listener.py
|
jrideout/localstack
|
5ad9414b9aa0446efaccc4a537ef8c0f0cc822a0
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/dynamodb/dynamodb_listener.py
|
jrideout/localstack
|
5ad9414b9aa0446efaccc4a537ef8c0f0cc822a0
|
[
"Apache-2.0"
] | null | null | null |
localstack/services/dynamodb/dynamodb_listener.py
|
jrideout/localstack
|
5ad9414b9aa0446efaccc4a537ef8c0f0cc822a0
|
[
"Apache-2.0"
] | 1
|
2020-03-07T18:21:17.000Z
|
2020-03-07T18:21:17.000Z
|
import re
import json
import random
import logging
import threading
from binascii import crc32
from requests.models import Response
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_bytes, to_str, clone
from localstack.utils.analytics import event_publisher
from localstack.constants import DEFAULT_REGION
from localstack.services.awslambda import lambda_api
from localstack.services.dynamodbstreams import dynamodbstreams_api
from localstack.services.generic_proxy import ProxyListener
# cache table definitions - used for testing
TABLE_DEFINITIONS = {}
# action header prefix
ACTION_PREFIX = 'DynamoDB_20120810'
# set up logger
LOGGER = logging.getLogger(__name__)
class ProxyListenerDynamoDB(ProxyListener):
thread_local = threading.local()
def __init__(self):
self._table_ttl_map = {}
def forward_request(self, method, path, data, headers):
data = json.loads(to_str(data))
if random.random() < config.DYNAMODB_ERROR_PROBABILITY:
return error_response_throughput()
action = headers.get('X-Amz-Target')
if action in ('%s.PutItem' % ACTION_PREFIX, '%s.UpdateItem' % ACTION_PREFIX, '%s.DeleteItem' % ACTION_PREFIX):
# find an existing item and store it in a thread-local, so we can access it in return_response,
# in order to determine whether an item already existed (MODIFY) or not (INSERT)
ProxyListenerDynamoDB.thread_local.existing_item = find_existing_item(data)
elif action == '%s.UpdateTimeToLive' % ACTION_PREFIX:
# TODO: TTL status is maintained/mocked but no real expiry is happening for items
response = Response()
response.status_code = 200
self._table_ttl_map[data['TableName']] = {
'AttributeName': data['TimeToLiveSpecification']['AttributeName'],
'Status': data['TimeToLiveSpecification']['Enabled']
}
response._content = json.dumps({'TimeToLiveSpecification': data['TimeToLiveSpecification']})
fix_headers_for_updated_response(response)
return response
elif action == '%s.DescribeTimeToLive' % ACTION_PREFIX:
response = Response()
response.status_code = 200
if data['TableName'] in self._table_ttl_map:
if self._table_ttl_map[data['TableName']]['Status']:
ttl_status = 'ENABLED'
else:
ttl_status = 'DISABLED'
response._content = json.dumps({
'TimeToLiveDescription': {
'AttributeName': self._table_ttl_map[data['TableName']]['AttributeName'],
'TimeToLiveStatus': ttl_status
}
})
else: # TTL for dynamodb table not set
response._content = json.dumps({'TimeToLiveDescription': {'TimeToLiveStatus': 'DISABLED'}})
fix_headers_for_updated_response(response)
return response
elif action == '%s.TagResource' % ACTION_PREFIX or action == '%s.UntagResource' % ACTION_PREFIX:
response = Response()
response.status_code = 200
response._content = '' # returns an empty body on success.
fix_headers_for_updated_response(response)
return response
elif action == '%s.ListTagsOfResource' % ACTION_PREFIX:
response = Response()
response.status_code = 200
response._content = json.dumps({'Tags': []}) # TODO: mocked and returns an empty list of tags for now.
fix_headers_for_updated_response(response)
return response
return True
def return_response(self, method, path, data, headers, response):
data = json.loads(to_str(data))
# update table definitions
if data and 'TableName' in data and 'KeySchema' in data:
TABLE_DEFINITIONS[data['TableName']] = data
if response._content:
# fix the table ARN (DynamoDBLocal hardcodes "ddblocal" as the region)
content_replaced = re.sub(r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
r'"TableArn": "arn:aws:dynamodb:%s:\1"' % aws_stack.get_local_region(), to_str(response._content))
if content_replaced != response._content:
response._content = content_replaced
fix_headers_for_updated_response(response)
action = headers.get('X-Amz-Target')
if not action:
return
record = {
'eventID': '1',
'eventVersion': '1.0',
'dynamodb': {
'StreamViewType': 'NEW_AND_OLD_IMAGES',
'SizeBytes': -1
},
'awsRegion': DEFAULT_REGION,
'eventSource': 'aws:dynamodb'
}
records = [record]
if action == '%s.UpdateItem' % ACTION_PREFIX:
updated_item = find_existing_item(data)
if not updated_item:
return
record['eventName'] = 'MODIFY'
record['dynamodb']['Keys'] = data['Key']
record['dynamodb']['OldImage'] = ProxyListenerDynamoDB.thread_local.existing_item
record['dynamodb']['NewImage'] = updated_item
record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
records = []
for table_name, requests in data['RequestItems'].items():
for request in requests:
put_request = request.get('PutRequest')
if put_request:
keys = dynamodb_extract_keys(item=put_request['Item'], table_name=table_name)
if isinstance(keys, Response):
return keys
new_record = clone(record)
new_record['eventName'] = 'INSERT'
new_record['dynamodb']['Keys'] = keys
new_record['dynamodb']['NewImage'] = put_request['Item']
new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
records.append(new_record)
elif action == '%s.PutItem' % ACTION_PREFIX:
existing_item = ProxyListenerDynamoDB.thread_local.existing_item
ProxyListenerDynamoDB.thread_local.existing_item = None
record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
keys = dynamodb_extract_keys(item=data['Item'], table_name=data['TableName'])
if isinstance(keys, Response):
return keys
record['dynamodb']['Keys'] = keys
record['dynamodb']['NewImage'] = data['Item']
record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
elif action == '%s.GetItem' % ACTION_PREFIX:
if response.status_code == 200:
content = json.loads(to_str(response.content))
# make sure we append 'ConsumedCapacity', which is properly
# returned by dynalite, but not by AWS's DynamoDBLocal
if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
content['ConsumedCapacity'] = {
'CapacityUnits': 0.5, # TODO hardcoded
'TableName': data['TableName']
}
response._content = json.dumps(content)
fix_headers_for_updated_response(response)
elif action == '%s.DeleteItem' % ACTION_PREFIX:
old_item = ProxyListenerDynamoDB.thread_local.existing_item
record['eventName'] = 'REMOVE'
record['dynamodb']['Keys'] = data['Key']
record['dynamodb']['OldImage'] = old_item
elif action == '%s.CreateTable' % ACTION_PREFIX:
if 'StreamSpecification' in data:
create_dynamodb_stream(data)
event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
payload={'n': event_publisher.get_hash(data['TableName'])})
return
elif action == '%s.DeleteTable' % ACTION_PREFIX:
event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
payload={'n': event_publisher.get_hash(data['TableName'])})
return
elif action == '%s.UpdateTable' % ACTION_PREFIX:
if 'StreamSpecification' in data:
create_dynamodb_stream(data)
return
else:
# nothing to do
return
if len(records) > 0 and 'eventName' in records[0]:
if 'TableName' in data:
records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(data['TableName'])
forward_to_lambda(records)
forward_to_ddb_stream(records)
# instantiate listener
UPDATE_DYNAMODB = ProxyListenerDynamoDB()
def find_existing_item(put_item):
table_name = put_item['TableName']
ddb_client = aws_stack.connect_to_service('dynamodb')
search_key = {}
if 'Key' in put_item:
search_key = put_item['Key']
else:
schema = ddb_client.describe_table(TableName=table_name)
schemas = [schema['Table']['KeySchema']]
for index in schema['Table'].get('GlobalSecondaryIndexes', []):
# schemas.append(index['KeySchema'])
pass
for schema in schemas:
for key in schema:
key_name = key['AttributeName']
search_key[key_name] = put_item['Item'][key_name]
if not search_key:
return
req = {'TableName': table_name, 'Key': search_key}
existing_item = aws_stack.dynamodb_get_item_raw(req)
if 'Item' not in existing_item:
if 'message' in existing_item:
table_names = ddb_client.list_tables()['TableNames']
msg = ('Unable to get item from DynamoDB (existing tables: %s): %s' %
(table_names, existing_item['message']))
LOGGER.warning(msg)
return
return existing_item.get('Item')
def fix_headers_for_updated_response(response):
response.headers['content-length'] = len(to_bytes(response.content))
response.headers['x-amz-crc32'] = calculate_crc32(response)
def calculate_crc32(response):
return crc32(to_bytes(response.content)) & 0xffffffff
def create_dynamodb_stream(data):
stream = data['StreamSpecification']
enabled = stream.get('StreamEnabled')
if enabled not in [False, 'False']:
table_name = data['TableName']
view_type = stream['StreamViewType']
dynamodbstreams_api.add_dynamodb_stream(table_name=table_name,
view_type=view_type, enabled=enabled)
def forward_to_lambda(records):
for record in records:
sources = lambda_api.get_event_sources(source_arn=record['eventSourceARN'])
event = {
'Records': [record]
}
for src in sources:
lambda_api.run_lambda(event=event, context={}, func_arn=src['FunctionArn'])
def forward_to_ddb_stream(records):
dynamodbstreams_api.forward_events(records)
def dynamodb_extract_keys(item, table_name):
result = {}
if table_name not in TABLE_DEFINITIONS:
LOGGER.warning('Unknown table: %s not found in %s' % (table_name, TABLE_DEFINITIONS))
return None
for key in TABLE_DEFINITIONS[table_name]['KeySchema']:
attr_name = key['AttributeName']
if attr_name not in item:
return error_response(error_type='ValidationException',
message='One of the required keys was not given a value')
result[attr_name] = item[attr_name]
return result
def error_response(message=None, error_type=None, code=400):
if not message:
message = 'Unknown error'
if not error_type:
error_type = 'UnknownError'
if 'com.amazonaws.dynamodb' not in error_type:
error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type
response = Response()
response.status_code = code
content = {
'message': message,
'__type': error_type
}
response._content = json.dumps(content)
return response
def error_response_throughput():
message = ('The level of configured provisioned throughput for the table was exceeded. ' +
'Consider increasing your provisioning level with the UpdateTable API')
error_type = 'ProvisionedThroughputExceededException'
return error_response(message, error_type)
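# --- Illustrative check (not part of the original listener) ---
# A small, self-contained sketch of why fix_headers_for_updated_response()
# recomputes the 'x-amz-crc32' header: DynamoDB clients (botocore, for example)
# verify that header against the CRC32 of the response body, masked to an
# unsigned 32-bit value. The payload below is made up for illustration.
if __name__ == '__main__':
    sample_body = to_bytes(json.dumps(
        {'TimeToLiveDescription': {'TimeToLiveStatus': 'DISABLED'}}))
    sample_headers = {
        'content-length': len(sample_body),
        'x-amz-crc32': crc32(sample_body) & 0xffffffff,
    }
    print(sample_headers)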
| 42.186047
| 118
| 0.621121
|
786c064bbef4bff3805e114ab466bfa525e874ac
| 9,078
|
py
|
Python
|
code/showcases/recursive-voronoi.py
|
geo7/scientific-visualization-book
|
71f6bac4db7ee2f26e88052fe7faa800303d8b00
|
[
"BSD-2-Clause"
] | 2
|
2021-11-17T15:10:09.000Z
|
2021-12-24T13:31:10.000Z
|
code/showcases/recursive-voronoi.py
|
WuShichao/scientific-visualization-book
|
389766215aa6b234ed1cf560a3768437d41d1d37
|
[
"BSD-2-Clause"
] | 1
|
2021-12-12T11:37:48.000Z
|
2021-12-12T11:39:00.000Z
|
code/showcases/recursive-voronoi.py
|
WuShichao/scientific-visualization-book
|
389766215aa6b234ed1cf560a3768437d41d1d37
|
[
"BSD-2-Clause"
] | 2
|
2021-12-30T12:20:07.000Z
|
2022-02-24T06:36:41.000Z
|
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import matplotlib.path as mpath
from shapely.geometry import Polygon
from matplotlib.collections import PolyCollection
from math import sqrt, ceil, floor, pi, cos, sin
def blue_noise(shape, radius, k=30, seed=None):
"""
Generate blue noise over a two-dimensional rectangle of size (width,height)
Parameters
----------
shape : tuple
Two-dimensional domain (width x height)
radius : float
Minimum distance between samples
k : int, optional
Limit of samples to choose before rejection (typically k = 30)
seed : int, optional
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
References
----------
.. [1] Fast Poisson Disk Sampling in Arbitrary Dimensions, Robert Bridson,
Siggraph, 2007. :DOI:`10.1145/1278780.1278807`
"""
def sqdist(a, b):
""" Squared Euclidean distance """
dx, dy = a[0] - b[0], a[1] - b[1]
return dx * dx + dy * dy
def grid_coords(p):
""" Return index of cell grid corresponding to p """
return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize))
def fits(p, radius):
""" Check whether p can be added to the queue """
radius2 = radius * radius
gx, gy = grid_coords(p)
for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):
for y in range(max(gy - 2, 0), min(gy + 3, grid_height)):
g = grid[x + y * grid_width]
if g is None:
continue
if sqdist(p, g) <= radius2:
return False
return True
# When given a seed, we use a private random generator in order to not
# disturb the default global random generator
if seed is not None:
from numpy.random.mtrand import RandomState
rng = RandomState(seed=seed)
else:
rng = np.random
width, height = shape
cellsize = radius / sqrt(2)
grid_width = int(ceil(width / cellsize))
grid_height = int(ceil(height / cellsize))
grid = [None] * (grid_width * grid_height)
p = rng.uniform(0, shape, 2)
queue = [p]
grid_x, grid_y = grid_coords(p)
grid[grid_x + grid_y * grid_width] = p
while queue:
qi = rng.randint(len(queue))
qx, qy = queue[qi]
queue[qi] = queue[-1]
queue.pop()
for _ in range(k):
theta = rng.uniform(0, 2 * pi)
r = radius * np.sqrt(rng.uniform(1, 4))
p = qx + r * cos(theta), qy + r * sin(theta)
if not (0 <= p[0] < width and 0 <= p[1] < height) or not fits(p, radius):
continue
queue.append(p)
gx, gy = grid_coords(p)
grid[gx + gy * grid_width] = p
return np.array([p for p in grid if p is not None])
def bounded_voronoi(points):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite regions.
Parameters
----------
    points : array_like
        Coordinates of the input points from which the Voronoi diagram is computed
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
"""
vor = scipy.spatial.Voronoi(points)
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
radius = vor.points.ptp().max() * 2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
def poly_random_points_safe(V, n=10):
def random_point_inside_triangle(A, B, C):
r1 = np.sqrt(np.random.uniform(0, 1))
r2 = np.random.uniform(0, 1)
return (1 - r1) * A + r1 * (1 - r2) * B + r1 * r2 * C
def triangle_area(A, B, C):
return 0.5 * np.abs(
(B[0] - A[0]) * (C[1] - A[1]) - (C[0] - A[0]) * (B[1] - A[1])
)
C = V.mean(axis=0)
T = [(C, V[i], V[i + 1]) for i in range(len(V) - 1)]
A = np.array([triangle_area(*t) for t in T])
A /= A.sum()
points = [C]
for i in np.random.choice(len(A), size=n - 1, p=A):
points.append(random_point_inside_triangle(*T[i]))
return points
def poly_random_points(V, n=10):
path = mpath.Path(V)
xmin, xmax = V[:, 0].min(), V[:, 0].max()
ymin, ymax = V[:, 1].min(), V[:, 1].max()
xscale, yscale = xmax - xmin, ymax - ymin
if xscale > yscale:
xscale, yscale = 1, yscale / xscale
else:
xscale, yscale = xscale / yscale, 1
radius = 0.85 * np.sqrt(2 * xscale * yscale / (n * np.pi))
points = blue_noise((xscale, yscale), radius)
points = [xmin, ymin] + points * [xmax - xmin, ymax - ymin]
inside = path.contains_points(points)
P = points[inside]
if len(P) < 5:
return poly_random_points_safe(V, n)
np.random.shuffle(P)
return P[:n]
def voronoi(V, npoints, level, maxlevel, color=None):
if level == maxlevel:
return []
linewidths = [1.50, 1.00, 0.75, 0.50, 0.25, 0.10]
edgecolors = [
(0, 0, 0, 1.00),
(0, 0, 0, 0.50),
(0, 0, 0, 0.25),
(0, 0, 0, 0.10),
(0, 0, 0, 0.10),
(0, 0, 0, 0.10),
]
if level == 1:
color = np.random.uniform(0, 1, 4)
color[3] = 0.5
points = poly_random_points(V, npoints - level)
regions, vertices = bounded_voronoi(points)
clip = Polygon(V)
cells = []
for region in regions:
polygon = Polygon(vertices[region]).intersection(clip)
polygon = np.array([point for point in polygon.exterior.coords])
linewidth = linewidths[level]
edgecolor = edgecolors[level]
facecolor = "none"
if level > 1:
alpha = color[3] + (1 / (level + 1)) * 0.25 * np.random.uniform(-1, 0.5)
color = color[0], color[1], color[2], min(max(alpha, 0.1), 1)
if level == maxlevel - 1:
facecolor = color
zorder = -level
cells.append((polygon, linewidth, edgecolor, facecolor, zorder))
cells.extend(voronoi(polygon, npoints, level + 1, maxlevel, color))
return cells
np.random.seed(12345)
T = np.linspace(0, 2 * np.pi, 100, endpoint=False)
R = 100
X, Y = R * np.cos(T), R * np.sin(T)
V = np.c_[X, Y]
V = 100 * np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]])
fig = plt.figure(figsize=(8, 8))
d = R - 1
ax = fig.add_axes([0, 0, 1, 1], aspect=1, xlim=[-d, d], ylim=[-d, d])
ax.axis("off")
cells = voronoi(V, 11, level=0, maxlevel=5)
zorder = [cell[-1] for cell in cells]
cells = [cells[i] for i in np.argsort(zorder)]
polygons = [cell[0] for cell in cells]
linewidths = [cell[1] for cell in cells]
edgecolors = [cell[2] for cell in cells]
facecolors = [cell[3] for cell in cells]
collection = PolyCollection(
polygons, linewidth=linewidths, edgecolor=edgecolors, facecolor=facecolors
)
ax.add_collection(collection)
plt.savefig("../../figures/showcases/recursive-voronoi.pdf")
plt.savefig("../../figures/showcases/recursive-voronoi.png", dpi=600)
plt.show()
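# --- Illustrative check (not part of the original script) ---
# A quick sketch exercising blue_noise() on its own: with Bridson's rejection
# rule above, every accepted pair of samples should be farther apart than
# `radius`. The shape, radius and seed below are arbitrary demo values.
samples = blue_noise((1.0, 1.0), radius=0.05, seed=1)
pairwise = np.sqrt(((samples[:, None, :] - samples[None, :, :]) ** 2).sum(axis=-1))
np.fill_diagonal(pairwise, np.inf)
print(len(samples), "samples, minimum pairwise distance:", pairwise.min())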
| 31.852632
| 85
| 0.564772
|
10bcdfc38a07a0250d6f7c82b6914700baaaa5cf
| 3,708
|
py
|
Python
|
app/lex_interface.py
|
rwdavis513/bible-verse-lookup
|
643146fdf77d701c050f4f36526b974136094c9a
|
[
"MIT"
] | null | null | null |
app/lex_interface.py
|
rwdavis513/bible-verse-lookup
|
643146fdf77d701c050f4f36526b974136094c9a
|
[
"MIT"
] | 1
|
2021-06-01T22:12:06.000Z
|
2021-06-01T22:12:06.000Z
|
app/lex_interface.py
|
rwdavis513/bible-verse-lookup
|
643146fdf77d701c050f4f36526b974136094c9a
|
[
"MIT"
] | null | null | null |
import datetime
import dateutil.parser
# --- Helpers that build all of the responses ---
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message,
response_card=None):
if type(message) != dict:
message = {'contentType': 'PlainText',
'content': message
}
if not response_card:
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'intentName': intent_name,
'slots': slots,
'slotToElicit': slot_to_elicit,
'message': message
}
}
else:
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'intentName': intent_name,
'slots': slots,
'slotToElicit': slot_to_elicit,
'message': message,
'responseCard': response_card
}
}
def confirm_intent(session_attributes, intent_name, slots, message):
if type(message) != dict:
message = {'contentType': 'PlainText',
'content': message
}
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ConfirmIntent',
'intentName': intent_name,
'slots': slots,
'message': message
}
}
def elicit_intent(session_attributes, intent_name, message):
return {
'sessionAttributes': session_attributes,
"dialogAction": {
"type": "ElicitIntent",
"message": message
}
}
def close(session_attributes, fulfillment_state, message):
if type(message) != dict:
message = {'contentType': 'PlainText',
'content': message
}
response = {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
return response
def delegate(session_attributes, slots):
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Delegate',
'slots': slots
}
}
# --- Helper Functions ---
def safe_int(n):
"""
Safely convert n value to int.
"""
if n is not None:
return int(n)
return n
def try_ex(func):
"""
Call passed in function in try block. If KeyError is encountered return None.
    This function is intended to be used to safely access dictionary keys.
    Note that this approach would have a negative impact on performance.
"""
try:
return func()
except KeyError:
return None
def isvalid_date(date):
try:
dateutil.parser.parse(date)
return True
except ValueError:
return False
def get_day_difference(later_date, earlier_date):
later_datetime = dateutil.parser.parse(later_date).date()
earlier_datetime = dateutil.parser.parse(earlier_date).date()
return abs(later_datetime - earlier_datetime).days
def add_days(date, number_of_days):
new_date = dateutil.parser.parse(date).date()
new_date += datetime.timedelta(days=number_of_days)
return new_date.strftime('%Y-%m-%d')
def build_validation_result(isvalid, violated_slot, message_content):
return {
'isValid': isvalid,
'violatedSlot': violated_slot,
'message': {'contentType': 'PlainText', 'content': message_content}
}
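# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of what the builders above hand back to Amazon Lex: the
# fulfillment response must carry exactly this dialogAction shape. The session
# attribute and message text are placeholders for illustration.
if __name__ == '__main__':
    example = close(
        session_attributes={'lastIntent': 'LookupVerse'},
        fulfillment_state='Fulfilled',
        message='John 3:16 - For God so loved the world...')
    # -> {'sessionAttributes': {...},
    #     'dialogAction': {'type': 'Close', 'fulfillmentState': 'Fulfilled',
    #                      'message': {'contentType': 'PlainText', 'content': '...'}}}
    print(example)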
| 25.93007
| 81
| 0.565804
|
ddaf6317f8f8438f882f9f96ef8a57588d45ce1f
| 908
|
py
|
Python
|
tensorflow1/dataset_benchmark.py
|
Alwaysproblem/explore-ipu
|
7f039768d40e1f3163e0941e2f8246f11ab953c2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow1/dataset_benchmark.py
|
Alwaysproblem/explore-ipu
|
7f039768d40e1f3163e0941e2f8246f11ab953c2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow1/dataset_benchmark.py
|
Alwaysproblem/explore-ipu
|
7f039768d40e1f3163e0941e2f8246f11ab953c2
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow
import json
# from tensorflow.python.ipu import (ipu_compiler, ipu_infeed_queue, ipu_outfeed_queue, loops, scopes, utils)
# import tensorflow.compat.v1 as tf
import tensorflow_core._api.v1.compat.v1 as tf
from tensorflow_core.python import ipu
from tensorflow_core.python.ipu import (ipu_compiler, ipu_infeed_queue, ipu_outfeed_queue, loops, scopes, utils)
from tensorflow_core.python.ipu.scopes import ipu_scope
from tensorflow_core.python.ipu.utils import (create_ipu_config, set_ipu_model_options, auto_select_ipus, configure_ipu_system)
from tensorflow_core.python import keras
tf.disable_v2_behavior()
ds = tf.data.Dataset.from_tensors(tf.random.normal(shape=[2, 20]))
ds = ds.repeat()
ds = ds.prefetch(1000)
benchmark_op = ipu.dataset_benchmark.dataset_benchmark(ds, 10, 512)
with tf.Session() as sess:
json_string = sess.run(benchmark_op)
print(json_string[0].decode())
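# --- Illustrative post-processing (not part of the original script) ---
# dataset_benchmark() returns its report as a JSON document (bytes). This sketch
# only re-parses and pretty-prints that report; no particular field layout is
# assumed here.
report = json.loads(json_string[0].decode())
print(json.dumps(report, indent=2, sort_keys=True))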
| 37.833333
| 127
| 0.80837
|
5f3de83522daddd7f3b016f3f44b305b60b25a8f
| 4,125
|
py
|
Python
|
train_with_your_data/scripts/cpmg/automated_metabolite_quantification/model_utils.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | 1
|
2021-12-11T20:06:39.000Z
|
2021-12-11T20:06:39.000Z
|
train_with_your_data/scripts/eretic_cpmg/automated_metabolite_quantification/model_utils.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | null | null | null |
train_with_your_data/scripts/eretic_cpmg/automated_metabolite_quantification/model_utils.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | 2
|
2021-12-15T18:17:17.000Z
|
2021-12-16T12:08:30.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.modules.module import _addindent
from torch.utils.data import Dataset
import numpy as np
from collections import Counter
import pdb
import matplotlib.pyplot as plt
import os
# summarize Pytorch model
def summary(model, show_weights=True, show_parameters=True):
"""Summarizes torch model by showing trainable parameters and weights."""
tmpstr = model.__class__.__name__ + ' (\n'
for key, module in model._modules.items():
# if it contains layers let call it recursively to get params and weights
if type(module) in [
torch.nn.modules.container.Container,
torch.nn.modules.container.Sequential
]:
            modstr = summary(module, show_weights, show_parameters)
else:
modstr = module.__repr__()
modstr = _addindent(modstr, 2)
params = sum([np.prod(p.size()) for p in module.parameters()])
weights = tuple([tuple(p.size()) for p in module.parameters()])
tmpstr += ' (' + key + '): ' + modstr
if show_weights:
tmpstr += ', weights={}'.format(weights)
if show_parameters:
tmpstr += ', parameters={}'.format(params)
tmpstr += '\n'
tmpstr = tmpstr + ')'
return tmpstr
# index every value of a dictionary with the same index and combine the results into a new dictionary.
def create_data_variables(dct, idx):
result_dct = {}
for x in dct.keys():
result_dct[x] = dct[x][idx]
return result_dct
# Quantification dataset for all data
class HRMASDataset_Type_A(Dataset):
def __init__(self, dct):
super(HRMASDataset_Type_A, self).__init__()
self.data_dct = dct
def __len__(self):
return self.data_dct["spectra"].shape[0]
def __getitem__(self, idx):
return (self.data_dct["spectra"][idx], self.data_dct["ppm_spectra"][idx], self.data_dct["quant"][idx], self.data_dct["quant_availability"][idx], self.data_dct["class_labels"][idx])
# Quantification dataset for all data
class HRMASDataset_Type_B(Dataset):
def __init__(self, dct):
super(HRMASDataset_Type_B, self).__init__()
self.data_dct = dct
def __len__(self):
return self.data_dct["spectra"].shape[0]
def __getitem__(self, idx):
return (self.data_dct["spectra"][idx], self.data_dct["ppm_spectra"][idx], self.data_dct["quant"][idx], self.data_dct["class_labels"][idx])
# changed and used from a MIT licensed repo on github
# reference: https://github.com/Bjarten/early-stopping-pytorch/blob/master/pytorchtools.py
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss):
score = -val_loss
if self.best_score is None:
self.best_score = score
elif score < self.best_score + self.delta:
self.counter += 1
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.counter = 0
# absolute percentage error per prediction and ground truth level (no aggregation applied).
def absolute_percentage_error(y_true, y_pred):
return np.abs(y_true - y_pred) / np.abs(y_true)
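# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of driving the EarlyStopping helper above from a validation
# loop. The loss values are fabricated; only the stopping logic comes from this
# module.
if __name__ == '__main__':
    stopper = EarlyStopping(patience=3, delta=0.0)
    fake_val_losses = [1.0, 0.9, 0.85, 0.86, 0.87, 0.88, 0.89]
    for epoch, val_loss in enumerate(fake_val_losses):
        stopper(val_loss)
        print("epoch %d: val_loss=%.2f early_stop=%s" % (epoch, val_loss, stopper.early_stop))
        if stopper.early_stop:
            break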
| 38.915094
| 189
| 0.624727
|
5551cbb05754e4131257cbcc8d205fde41f44f1c
| 125
|
py
|
Python
|
ozone/message/__init__.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | null | null | null |
ozone/message/__init__.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | 8
|
2018-03-24T01:44:42.000Z
|
2018-08-25T06:43:49.000Z
|
ozone/message/__init__.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
message_page = Blueprint("message", __name__, template_folder="templates")
from . import views
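# --- Illustrative registration (not part of the original package) ---
# A blueprint only becomes routable once an application registers it. A minimal
# sketch, assuming the usual Flask pattern; the url_prefix is an illustrative
# choice, not something defined by this package.
def create_example_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(message_page, url_prefix="/message")
    return app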
| 20.833333
| 74
| 0.792
|
1fae64af4a40b962011e2c09986ef492bf48bc13
| 16,300
|
py
|
Python
|
finrl/env/env_stocktrading_cashpenalty.py
|
kura52/FinRL-Library
|
238f8fd85e936ab3cf77144f9a4588a84cb41c9c
|
[
"MIT"
] | null | null | null |
finrl/env/env_stocktrading_cashpenalty.py
|
kura52/FinRL-Library
|
238f8fd85e936ab3cf77144f9a4588a84cb41c9c
|
[
"MIT"
] | null | null | null |
finrl/env/env_stocktrading_cashpenalty.py
|
kura52/FinRL-Library
|
238f8fd85e936ab3cf77144f9a4588a84cb41c9c
|
[
"MIT"
] | null | null | null |
import logging
import numpy as np
import pandas as pd
import random
from copy import deepcopy
import gym
import time
from gym.utils import seeding
from gym import spaces
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common import logger
class StockTradingEnvCashpenalty(gym.Env):
"""
A stock trading environment for OpenAI gym
This environment penalizes the model for not maintaining a reserve of cash.
This enables the model to manage cash reserves in addition to performing trading procedures.
Reward at any step is given as follows
r_i = (sum(cash, asset_value) - initial_cash - max(0, sum(cash, asset_value)*cash_penalty_proportion-cash))/(days_elapsed)
This reward function takes into account a liquidity requirement, as well as long-term accrued rewards.
Parameters:
df (pandas.DataFrame): Dataframe containing data
buy_cost_pct (float): cost for buying shares
sell_cost_pct (float): cost for selling shares
        hmax (int, array): maximum cash to be traded in each trade per asset. If an array is provided, each index corresponds to one asset
        discrete_actions (bool): option to choose whether to discretize the action space or not
        shares_increment (int): multiple of shares that can be bought in each trade. Only applicable if discrete_actions=True
turbulence_threshold (float): Maximum turbulence allowed in market for purchases to occur. If exceeded, positions are liquidated
        print_verbosity (int): When iterating (step), how often to print stats about the state of the env
        initial_amount (int, float): Amount of cash initially available
daily_information_columns (list(str)): Columns to use when building state space from the dataframe. It could be OHLC columns or any other variables such as technical indicators and turbulence index
cash_penalty_proportion (int, float): Penalty to apply if the algorithm runs out of cash
        patient (bool): option to choose whether to end the episode when we run out of cash, or to simply skip buying until additional cash is available
RL Inputs and Outputs
action space: [<n_assets>,] in range {-1, 1}
        state space: {start_cash, [shares_i for i in assets], [[indicator_j for j in indicators] for i in assets]}
TODO:
Organize functions
Write README
Document tests
"""
metadata = {"render.modes": ["human"]}
def __init__(
self,
df,
buy_cost_pct=3e-3,
sell_cost_pct=3e-3,
date_col_name="date",
hmax=10,
discrete_actions=False,
shares_increment=1,
turbulence_threshold=None,
print_verbosity=10,
initial_amount=1e6,
daily_information_cols=["open", "close", "high", "low", "volume"],
cache_indicator_data=True,
cash_penalty_proportion=0.1,
random_start=True,
patient=False,
currency="$",
):
self.df = df
self.stock_col = "tic"
self.assets = df[self.stock_col].unique()
self.dates = df[date_col_name].sort_values().unique()
self.random_start = random_start
self.discrete_actions = discrete_actions
self.patient = patient
self.currency = currency
self.df = self.df.set_index(date_col_name)
self.shares_increment = shares_increment
self.hmax = hmax
self.initial_amount = initial_amount
self.print_verbosity = print_verbosity
self.buy_cost_pct = buy_cost_pct
self.sell_cost_pct = sell_cost_pct
self.turbulence_threshold = turbulence_threshold
self.daily_information_cols = daily_information_cols
self.state_space = (
1 + len(self.assets) + len(self.assets) * len(self.daily_information_cols)
)
self.action_space = spaces.Box(low=-1, high=1, shape=(len(self.assets),))
self.observation_space = spaces.Box(
low=-np.inf, high=np.inf, shape=(self.state_space,)
)
self.turbulence = 0
self.episode = -1 # initialize so we can call reset
self.episode_history = []
self.printed_header = False
self.cache_indicator_data = cache_indicator_data
self.cached_data = None
self.cash_penalty_proportion = cash_penalty_proportion
if self.cache_indicator_data:
logging.info("caching data")
self.cached_data = [
self.get_date_vector(i) for i, _ in enumerate(self.dates)
]
logging.info("data cached!")
def seed(self, seed=None):
if seed is None:
seed = int(round(time.time() * 1000))
random.seed(seed)
@property
def current_step(self):
return self.date_index - self.starting_point
@property
def cash_on_hand(self):
# amount of cash held at current timestep
return self.state_memory[-1][0]
@property
def holdings(self):
# Quantity of shares held at current timestep
return self.state_memory[-1][1 : len(self.assets) + 1]
@property
def closings(self):
return np.array(self.get_date_vector(self.date_index, cols=["close"]))
def reset(self):
self.seed()
self.sum_trades = 0
if self.random_start:
starting_point = random.choice(range(int(len(self.dates) * 0.5)))
self.starting_point = starting_point
else:
self.starting_point = 0
self.date_index = self.starting_point
self.turbulence = 0
self.episode += 1
self.actions_memory = []
self.transaction_memory = []
self.state_memory = []
self.account_information = {
"cash": [],
"asset_value": [],
"total_assets": [],
"reward": [],
}
init_state = np.array(
[self.initial_amount]
+ [0] * len(self.assets)
+ self.get_date_vector(self.date_index)
)
self.state_memory.append(init_state)
return init_state
def get_date_vector(self, date, cols=None):
if (cols is None) and (self.cached_data is not None):
return self.cached_data[date]
else:
date = self.dates[date]
if cols is None:
cols = self.daily_information_cols
trunc_df = self.df.loc[date]
v = []
for a in self.assets:
subset = trunc_df[trunc_df[self.stock_col] == a]
v += subset.loc[date, cols].tolist()
assert len(v) == len(self.assets) * len(cols)
return v
def return_terminal(self, reason="Last Date", reward=0):
state = self.state_memory[-1]
self.log_step(reason=reason, terminal_reward=reward)
# Add outputs to logger interface
gl_pct = self.account_information["total_assets"][-1] / self.initial_amount
logger.record("environment/GainLoss_pct", (gl_pct - 1) * 100)
logger.record(
"environment/total_assets",
int(self.account_information["total_assets"][-1]),
)
reward_pct = self.account_information["total_assets"][-1] / self.initial_amount
logger.record("environment/total_reward_pct", (reward_pct - 1) * 100)
logger.record("environment/total_trades", self.sum_trades)
logger.record(
"environment/avg_daily_trades",
self.sum_trades / (self.current_step),
)
logger.record(
"environment/avg_daily_trades_per_asset",
self.sum_trades / (self.current_step) / len(self.assets),
)
logger.record("environment/completed_steps", self.current_step)
logger.record(
"environment/sum_rewards", np.sum(self.account_information["reward"])
)
logger.record(
"environment/cash_proportion",
self.account_information["cash"][-1]
/ self.account_information["total_assets"][-1],
)
return state, reward, True, {}
def log_step(self, reason, terminal_reward=None):
if terminal_reward is None:
terminal_reward = self.account_information["reward"][-1]
cash_pct = (
self.account_information["cash"][-1]
/ self.account_information["total_assets"][-1]
)
gl_pct = self.account_information["total_assets"][-1] / self.initial_amount
rec = [
self.episode,
self.date_index - self.starting_point,
reason,
f"{self.currency}{'{:0,.0f}'.format(float(self.account_information['cash'][-1]))}",
f"{self.currency}{'{:0,.0f}'.format(float(self.account_information['total_assets'][-1]))}",
f"{terminal_reward*100:0.5f}%",
f"{(gl_pct - 1)*100:0.5f}%",
f"{cash_pct*100:0.2f}%",
]
self.episode_history.append(rec)
logging.info(self.template.format(*rec))
def log_header(self):
if self.printed_header is False:
            self.template = "{0:4}|{1:4}|{2:15}|{3:15}|{4:15}|{5:10}|{6:10}|{7:10}"  # column widths: 4, 4, 15, 15, 15, 10, 10, 10
logging.info(
self.template.format(
"EPISODE",
"STEPS",
"TERMINAL_REASON",
"CASH",
"TOT_ASSETS",
"TERMINAL_REWARD_unsc",
"GAINLOSS_PCT",
"CASH_PROPORTION",
)
)
self.printed_header = True
def get_reward(self):
if self.current_step == 0:
return 0
else:
assets = self.account_information["total_assets"][-1]
cash = self.account_information["cash"][-1]
cash_penalty = max(0, (assets * self.cash_penalty_proportion - cash))
assets -= cash_penalty
reward = (assets / self.initial_amount) - 1
reward /= self.current_step
return reward
def get_transactions(self, actions):
"""
This function takes in a raw 'action' from the model and makes it into realistic transactions
This function includes logic for discretizing
It also includes turbulence logic.
"""
# record actions of the model
self.actions_memory.append(actions)
# multiply actions by the hmax value
actions = actions * self.hmax
# Do nothing for shares with zero value
actions = np.where(self.closings > 0, actions, 0)
# discretize optionally
if self.discrete_actions:
# convert into integer because we can't buy fraction of shares
actions = actions // self.closings
actions = actions.astype(int)
# round down actions to the nearest multiplies of shares_increment
actions = np.where(
actions >= 0,
(actions // self.shares_increment) * self.shares_increment,
((actions + self.shares_increment) // self.shares_increment)
* self.shares_increment,
)
else:
actions = actions / self.closings
# can't sell more than we have
actions = np.maximum(actions, -np.array(self.holdings))
# deal with turbulence
if self.turbulence_threshold is not None:
# if turbulence goes over threshold, just clear out all positions
if self.turbulence >= self.turbulence_threshold:
actions = -(np.array(self.holdings))
self.log_step(reason="TURBULENCE")
return actions
def step(self, actions):
# let's just log what we're doing in terms of max actions at each step.
self.sum_trades += np.sum(np.abs(actions))
self.log_header()
# print if it's time.
if (self.current_step + 1) % self.print_verbosity == 0:
self.log_step(reason="update")
# if we're at the end
if self.date_index == len(self.dates) - 1:
# if we hit the end, set reward to total gains (or losses)
return self.return_terminal(reward=self.get_reward())
else:
"""
First, we need to compute values of holdings, save these, and log everything.
Then we can reward our model for its earnings.
"""
# compute value of cash + assets
begin_cash = self.cash_on_hand
assert min(self.holdings) >= 0
asset_value = np.dot(self.holdings, self.closings)
# log the values of cash, assets, and total assets
self.account_information["cash"].append(begin_cash)
self.account_information["asset_value"].append(asset_value)
self.account_information["total_assets"].append(begin_cash + asset_value)
# compute reward once we've computed the value of things!
reward = self.get_reward()
self.account_information["reward"].append(reward)
"""
            Now convert the model's actions into transactions and settle them.
"""
transactions = self.get_transactions(actions)
# compute our proceeds from sells, and add to cash
sells = -np.clip(transactions, -np.inf, 0)
proceeds = np.dot(sells, self.closings)
costs = proceeds * self.sell_cost_pct
coh = begin_cash + proceeds
# compute the cost of our buys
buys = np.clip(transactions, 0, np.inf)
spend = np.dot(buys, self.closings)
costs += spend * self.buy_cost_pct
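            # Illustrative settlement example (assumed numbers): selling 10
            # shares at 20 yields proceeds = 200 plus sell costs of
            # 200 * sell_cost_pct; buying 5 shares at 30 adds spend = 150 and
            # buy costs of 150 * buy_cost_pct; the purchase only goes through
            # if spend + total costs fit within begin_cash + proceeds.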
# if we run out of cash...
if (spend + costs) > coh:
if self.patient:
                        # ... just don't buy anything until we get additional cash
self.log_step(reason="CASH SHORTAGE")
transactions = np.where(transactions > 0, 0, transactions)
spend = 0
costs = 0
else:
# ... end the cycle and penalize
return self.return_terminal(
reason="CASH SHORTAGE", reward=self.get_reward()
)
self.transaction_memory.append(
transactions
            )  # record the transactions that will actually be executed
# verify we didn't do anything impossible here
assert (spend + costs) <= coh
# update our holdings
coh = coh - spend - costs
holdings_updated = self.holdings + transactions
self.date_index += 1
if self.turbulence_threshold is not None:
self.turbulence = self.get_date_vector(
self.date_index, cols=["turbulence"]
)[0]
# Update State
state = (
[coh] + list(holdings_updated) + self.get_date_vector(self.date_index)
)
self.state_memory.append(state)
return state, reward, False, {}
def get_sb_env(self):
def get_self():
return deepcopy(self)
e = DummyVecEnv([get_self])
obs = e.reset()
return e, obs
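    # Usage sketch (illustrative; constructor arguments omitted and the
    # instance name `env_instance` is hypothetical):
    #     vec_env, obs = env_instance.get_sb_env()
    # This wraps a deep copy of the environment in a DummyVecEnv so it can be
    # passed to a Stable-Baselines model.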
def get_multiproc_env(self, n=10):
def get_self():
return deepcopy(self)
e = SubprocVecEnv([get_self for _ in range(n)], start_method="fork")
obs = e.reset()
return e, obs
def save_asset_memory(self):
if self.current_step == 0:
return None
else:
self.account_information["date"] = self.dates[
-len(self.account_information["cash"]) :
]
return pd.DataFrame(self.account_information)
def save_action_memory(self):
if self.current_step == 0:
return None
else:
return pd.DataFrame(
{
"date": self.dates[-len(self.account_information["cash"]) :],
"actions": self.actions_memory,
"transactions": self.transaction_memory,
}
)
| 39.659367
| 205
| 0.592883
|
af530cfa7e8fadb091dca66d7947a5723157f366
| 276
|
py
|
Python
|
UnityEngine/ParticleSystemSubEmitterType/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/ParticleSystemSubEmitterType/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/ParticleSystemSubEmitterType/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class ParticleSystemSubEmitterType:
def __new__(cls, arg1=None):
'''
:returns: ParticleSystemSubEmitterType
:rtype: UnityEngine.ParticleSystemSubEmitterType
'''
pass
| 23
| 56
| 0.692029
|
77cbedd875b2297903582a0286d744fa40b64bb6
| 39,625
|
py
|
Python
|
syncplay/messages_de.py
|
muaz742/syncplay
|
36239ce01c2ce9ebc8e83b163ae38916ea7a5226
|
[
"Apache-2.0"
] | null | null | null |
syncplay/messages_de.py
|
muaz742/syncplay
|
36239ce01c2ce9ebc8e83b163ae38916ea7a5226
|
[
"Apache-2.0"
] | null | null | null |
syncplay/messages_de.py
|
muaz742/syncplay
|
36239ce01c2ce9ebc8e83b163ae38916ea7a5226
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf8
"""Deutsch dictionary"""
de = {
"LANGUAGE": "Deutsch", # (German)
# Client notifications
"config-cleared-notification": "Einstellungen gelöscht. Änderungen werden gespeichert, wenn du eine gültige Konfiguration speicherst.",
"relative-config-notification": "Relative Konfigurationsdatei(en) geladen: {}",
"connection-attempt-notification": "Verbinde mit {}:{}", # Port, IP
"reconnection-attempt-notification": "Verbindung zum Server verloren, versuche erneut",
"disconnection-notification": "Verbindung zum Server beendet",
"connection-failed-notification": "Verbindung zum Server fehlgeschlagen",
"connected-successful-notification": "Erfolgreich mit Server verbunden",
"retrying-notification": "%s, versuche erneut in %d Sekunden...", # Seconds
"reachout-successful-notification": "{} ({}) erfolgreich erreicht",
"rewind-notification": "Zurückgespult wegen Zeitdifferenz mit {}", # User
"fastforward-notification": "Vorgespult wegen Zeitdifferenz mit {}", # User
"slowdown-notification": "Verlangsamt wegen Zeitdifferenz mit {}", # User
"revert-notification": "Normalgeschwindigkeit",
"pause-notification": "{} pausierte", # User
"unpause-notification": "{} startete", # User
"seek-notification": "{} sprang von {} nach {}", # User, from time, to time
"current-offset-notification": "Aktueller Offset: {} Sekunden", # Offset
"media-directory-list-updated-notification": "Syncplay-Medienverzeichnisse wurden aktualisiert.",
"room-join-notification": "{} hat den Raum „{}“ betreten", # User
"left-notification": "{} ist gegangen", # User
"left-paused-notification": "{} ist gegangen, {} pausierte", # User who left, User who paused
"playing-notification": "{} spielt „{}“ ({})", # User, file, duration
"playing-notification/room-addendum": " in Raum: „{}“", # Room
"not-all-ready": "Noch nicht bereit: {}", # Usernames
"all-users-ready": "Alle sind bereit ({} Nutzer)", # Number of ready users
"ready-to-unpause-notification": "Du bist bereit - noch einmal fortsetzen klicken zum abspielen",
"set-as-ready-notification": "Du bist bereit",
"set-as-not-ready-notification": "Du bist nicht bereit",
"autoplaying-notification": "Starte in {}...", # Number of seconds until playback will start
"identifying-as-controller-notification": "Identifiziere als Raumleiter mit Passwort „{}“...", # TODO: find a better translation to "room operator"
"failed-to-identify-as-controller-notification": "{} konnte sich nicht als Raumleiter identifizieren.",
"authenticated-as-controller-notification": "{} authentifizierte sich als Raumleiter",
"created-controlled-room-notification": "Gesteuerten Raum „{}“ mit Passwort „{}“ erstellt. Bitte diese Informationen für die Zukunft aufheben! \n\nIn managed rooms everyone is kept in sync with the room operator(s) who are the only ones who can pause, unpause, seek, and change the playlist.\n\nYou should ask regular viewers to join the room '{}' but the room operators can join the room '{}' to automatically authenticate themselves.", # RoomName, operatorPassword, roomName, roomName:operatorPassword # TODO: Translate
"file-different-notification": "Deine Datei scheint sich von {}s zu unterscheiden", # User
"file-differences-notification": "Deine Datei unterscheidet sich auf folgende Art: {}",
"room-file-differences": "Unterschiedlich in: {}", # File differences (filename, size, and/or duration)
"file-difference-filename": "Name",
"file-difference-filesize": "Größe",
"file-difference-duration": "Dauer",
"alone-in-the-room": "Du bist alleine im Raum",
"different-filesize-notification": " (ihre Dateigröße ist anders als deine!)",
"userlist-playing-notification": "{} spielt:", # Username
"file-played-by-notification": "Datei: {} wird gespielt von:", # File
"no-file-played-notification": "{} spielt keine Datei ab", # Username
"notplaying-notification": "Personen im Raum, die keine Dateien spielen:",
"userlist-room-notification": "In Raum „{}“:", # Room
"userlist-file-notification": "Datei",
"controller-userlist-userflag": "Raumleiter",
"ready-userlist-userflag": "Bereit",
"update-check-failed-notification": "Konnte nicht automatisch prüfen, ob Syncplay {} aktuell ist. Soll https://syncplay.pl/ geöffnet werden, um manuell nach Updates zu suchen?", # Syncplay version
"syncplay-uptodate-notification": "Syncplay ist aktuell",
"syncplay-updateavailable-notification": "Eine neuere Version von Syncplay ist verfügbar. Soll die Download-Seite geöffnet werden?",
"mplayer-file-required-notification": "Syncplay für mplayer benötigt eine Dateiangabe beim Start",
"mplayer-file-required-notification/example": "Anwendungsbeispiel: syncplay [optionen] [url|pfad/]Dateiname",
"mplayer2-required": "Syncplay ist inkompatibel zu MPlayer 1.x, bitte nutze MPlayer2 oder mpv",
"unrecognized-command-notification": "Unbekannter Befehl",
"commandlist-notification": "Verfügbare Befehle:",
"commandlist-notification/room": "\tr [Name] - Raum ändern",
"commandlist-notification/list": "\tl - Nutzerliste anzeigen",
"commandlist-notification/undo": "\tu - Letzter Zeitsprung rückgängig",
"commandlist-notification/pause": "\tp - Pausieren / weiter",
"commandlist-notification/seek": "\t[s][+-]Zeit - zu einer bestimmten Zeit spulen, ohne + oder - wird als absolute Zeit gewertet; Angabe in Sekunden oder Minuten:Sekunden",
"commandlist-notification/help": "\th - Diese Hilfe",
"commandlist-notification/toggle": "\tt - Bereitschaftsanzeige umschalten",
"commandlist-notification/create": "\tc [name] - erstelle zentral gesteuerten Raum mit dem aktuellen Raumnamen",
"commandlist-notification/auth": "\ta [password] - authentifiziere als Raumleiter mit Passwort",
"commandlist-notification/chat": "\tch [message] - Chatnachricht an einem Raum senden",
"commandList-notification/queue": "\tqa [file/url] - add file or url to bottom of playlist", # TO DO: Translate
"commandList-notification/playlist": "\tql - show the current playlist", # TO DO: Translate
"commandList-notification/select": "\tqs [index] - select given entry in the playlist", # TO DO: Translate
"commandList-notification/delete": "\tqd [index] - delete the given entry from the playlist", # TO DO: Translate
"syncplay-version-notification": "Syncplay Version: {}", # syncplay.version
"more-info-notification": "Weitere Informationen auf: {}", # projectURL
"gui-data-cleared-notification": "Syncplay hat die Pfad und Fensterdaten der Syncplay-GUI zurückgesetzt.",
"language-changed-msgbox-label": "Die Sprache wird geändert, wenn du Syncplay neu startest.",
"promptforupdate-label": "Soll Syncplay regelmäßig nach Updates suchen?",
"media-player-latency-warning": "Warnung: Der Mediaplayer brauchte {} Sekunden zum Antworten. Wenn Probleme bei der Synchronisation auftreten, schließe bitte andere Anwendungen, um Ressourcen freizugeben. Sollte das nicht funktionieren, versuche es mit einem anderen Media-Player.", # Seconds to respond
"mpv-unresponsive-error": "MPV hat für {} Sekunden nicht geantwortet und scheint abgestürzt zu sein. Bitte starte Syncplay neu.", # Seconds to respond
# Client prompts
"enter-to-exit-prompt": "Enter drücken zum Beenden\n",
# Client errors
"missing-arguments-error": "Notwendige Argumente fehlen, siehe --help",
"server-timeout-error": "Timeout: Verbindung zum Server fehlgeschlagen",
"mpc-slave-error": "Kann MPC nicht im Slave-Modus starten!",
"mpc-version-insufficient-error": "MPC-Version nicht ausreichend, bitte nutze `mpc-hc` >= `{}`",
"mpc-be-version-insufficient-error": "MPC-Version nicht ausreichend, bitte nutze `mpc-be` >= `{}`",
"mpv-version-error": "Syncplay ist nicht kompatibel mit dieser Version von mpv. Bitte benutze eine andere Version (z.B. Git HEAD).",
"mpv-failed-advice": "The reason mpv cannot start may be due to the use of unsupported command line arguments or an unsupported version of mpv.", # TODO: Translate
"player-file-open-error": "Fehler beim Öffnen der Datei durch den Player",
"player-path-error": "Ungültiger Player-Pfad. Unterstützte Player sind: mpv, mpv.net, VLC, MPC-HC, MPC-BE und mplayer2",
"hostname-empty-error": "Hostname darf nicht leer sein",
"empty-error": "{} darf nicht leer sein", # Configuration
"media-player-error": "Player-Fehler: \"{}\"", # Error line
"unable-import-gui-error": "Konnte die GUI-Bibliotheken nicht importieren. PySide muss installiert sein, damit die grafische Oberfläche funktioniert.",
"unable-import-twisted-error": "Twisted konnte nicht importiert werden. Bitte installiere Twisted v16.4.0 oder höher",
"arguments-missing-error": "Notwendige Argumente fehlen, siehe --help",
"unable-to-start-client-error": "Client kann nicht gestartet werden",
"player-path-config-error": "Player-Pfad ist nicht ordnungsgemäß gesetzt. Unterstützte Player sind: mpv, mpv.net, VLC, MPC-HC, MPC-BE und mplayer2",
"no-file-path-config-error": "Es muss eine Datei ausgewählt werden, bevor der Player gestartet wird.",
"no-hostname-config-error": "Hostname darf nicht leer sein",
"invalid-port-config-error": "Port muss gültig sein",
"empty-value-config-error": "{} darf nicht leer sein", # Config option
"not-json-error": "Kein JSON-String\n",
"hello-arguments-error": "Zu wenige Hello-Argumente\n",
"version-mismatch-error": "Verschiedene Versionen auf Client und Server\n",
"vlc-failed-connection": "Kann nicht zu VLC verbinden. Wenn du syncplay.lua nicht installiert hast, findest du auf https://syncplay.pl/LUA/ [Englisch] eine Anleitung. Syncplay and VLC 4 are not currently compatible, so either use VLC 3 or an alternative such as mpv.", # TO DO: TRANSLATE
"vlc-failed-noscript": "Laut VLC ist das syncplay.lua Interface-Skript nicht installiert. Auf https://syncplay.pl/LUA/ [Englisch] findest du eine Anleitung.",
"vlc-failed-versioncheck": "Diese VLC-Version wird von Syncplay nicht unterstützt. Bitte nutze VLC 2.0",
"vlc-initial-warning": 'VLC does not always provide accurate position information to Syncplay, especially for .mp4 and .avi files. If you experience problems with erroneous seeking then please try an alternative media player such as <a href="https://mpv.io/">mpv</a> (or <a href="https://github.com/stax76/mpv.net/">mpv.net</a> for Windows users).', # TODO: Translate
"feature-sharedPlaylists": "Geteilte Playlists", # used for not-supported-by-server-error
"feature-chat": "Chat", # used for not-supported-by-server-error
"feature-readiness": "Bereitschaftsstatus", # used for not-supported-by-server-error
"feature-managedRooms": "Zentral gesteuerte Räume", # used for not-supported-by-server-error
"not-supported-by-server-error": "Diese Funktion wird vom Server nicht unterstützt. Es wird ein Server mit Syncplay Version {}+ benötigt, aktuell verwendet wird jedoch Version {}.", # minVersion, serverVersion
"shared-playlists-not-supported-by-server-error": "Die Geteilte-Playlists-Funktion wird von diesem Server eventuell nicht unterstützt. Um ein korrektes Funktionieren sicherzustellen wird ein Server mit Syncplay Version {}+ benötigt, aktuell verwendet wird jedoch Version {}.", # minVersion, serverVersion
"shared-playlists-disabled-by-server-error": "Die Geteilte-Playlists-Funktion wurde in der Serverkonfiguration deaktiviert. Um diese Funktion zu verwenden, musst du dich mit einem anderen Server verbinden.",
"invalid-seek-value": "Ungültige Zeitangabe",
"invalid-offset-value": "Ungültiger Offset-Wert",
"switch-file-not-found-error": "Konnte nicht zur Datei „{0}“ wechseln. Syncplay sucht im Verzeichnis der aktuellen Datei und angegebenen Medienverzeichnissen.", # File not found, folder it was not found in
"folder-search-timeout-error": "Die Suche nach Medien in den Medienverzeichnissen wurde abgebrochen, weil es zu lange gedauert hat, „{}“ zu durchsuchen. Das kann passieren, wenn du in deiner Liste der Medienverzeichnisse ein Verzeichnis mit zu vielen Unterverzeichnissen auswhälst. Damit der automatische Dateiwechsel wieder funktioniert, wähle Datei->Medienverzeichnisse auswählen in der Menüleiste und entferne dieses Verzeichnis oder ersetze es mit einem geeigneten Unterverzeichnis. Wenn das Verzeichnis in Ordnung ist, kannst du es reaktivieren, indem du Datei->Medienverzeichnisse auswählen wählst und „OK“ drückst.", # Folder
"folder-search-first-file-timeout-error": "Die Suche nach Medien in den Medienverzeichnissen wurde abgebrochen, weil es zu lange gedauert hat, auf „{}“ zuzugreifen. Das kann passieren, wenn es sich dabei um ein Netzwerkgerät handelt und du eingestellt hast, dass es sich nach Inaktivität ausschaltet. Damit der automatische Dateiwechsel wieder funktioniert, wähle Datei->Medienverzeichnisse auswählen in der Menüleiste und entferne dieses Verzeichnis oder löse das Problem (z.B. indem du die Energiespareinstellungen anpasst).", # Folder
"added-file-not-in-media-directory-error": "Du hast eine Datei in im Verzeichnis „{}“ geladeden, welches kein bekanntes Medienverzeichnis ist. Du kannst es als Medienverzeichnis hinzufügen, indem du Datei->Medienverzeichnisse auswählen in der Menüleiste wählst.", # Folder
"no-media-directories-error": "Es wurden keine Medienverzeichnisse ausgewählt. Damit geteilte Playlists und Dateiwechsel korrekt funktionieren, wähle Datei->Medienverzeichnisse auswählen in der Menüleiste und gib an, wo Syncplay nach Mediendateien suchen soll.",
"cannot-find-directory-error": "Das Medienverzeichnis „{}“ konnte nicht gefunden werden. Um deine Liste an Medienverzeichnissen anzupassen, wähle Datei->Medienverzeichnisse auswählen in der Menüleiste und gib an, wo Syncplay nach Mediendateien suchen soll.",
"failed-to-load-server-list-error": "Konnte die Liste der öffentlichen Server nicht laden. Bitte besuche https://www.syncplay.pl/ [Englisch] mit deinem Browser.",
# Client arguments
"argument-description": 'Syncplay ist eine Anwendung um mehrere MPlayer, MPC-HC, MPC-BE und VLC-Instanzen über das Internet zu synchronisieren.',
"argument-epilog": 'Wenn keine Optionen angegeben sind, werden die _config-Werte verwendet',
"nogui-argument": 'Keine GUI anzeigen',
"host-argument": 'Server-Adresse',
"name-argument": 'Gewünschter Nutzername',
"debug-argument": 'Debug-Modus',
"force-gui-prompt-argument": 'Einstellungsfenster anzeigen',
"no-store-argument": 'keine Werte in .syncplay speichern',
"room-argument": 'Standard-Raum',
"password-argument": 'Server-Passwort',
"player-path-argument": 'Pfad zum Player',
"file-argument": 'Abzuspielende Datei',
"args-argument": 'Player-Einstellungen; Wenn du Einstellungen, die mit - beginnen, nutzen willst, stelle ein einzelnes \'--\'-Argument davor',
"clear-gui-data-argument": 'Setzt die Pfad- und GUI-Fenster-Daten die in den QSettings gespeichert sind zurück',
"language-argument": 'Sprache für Syncplay-Nachrichten (de/en/ru/it/es/pt_BR/pt_PT/tr)',
"version-argument": 'gibt die aktuelle Version aus',
"version-message": "Du verwendest Syncplay v. {} ({})",
"load-playlist-from-file-argument": "lädt eine Playlist aus einer Textdatei (ein Eintrag pro Zeile)",
# Client labels
"config-window-title": "Syncplay-Konfiguration",
"connection-group-title": "Verbindungseinstellungen",
"host-label": "Server-Adresse:",
"name-label": "Benutzername (optional):",
"password-label": "Server-Passwort (falls nötig):",
"room-label": "Standard-Raum:",
"roomlist-msgbox-label": "Edit room list (one per line)", # TODO: Translate
"media-setting-title": "Media-Player Einstellungen",
"executable-path-label": "Pfad zum Media-Player:",
"media-path-label": "Pfad zum Video (optional):",
"player-arguments-label": "Playerparameter:",
"browse-label": "Durchsuchen",
"update-server-list-label": "Liste aktualisieren",
"more-title": "Mehr Einstellungen zeigen",
"never-rewind-value": "Niemals",
"seconds-suffix": " sek",
"privacy-sendraw-option": "Klartext senden",
"privacy-sendhashed-option": "Hash senden",
"privacy-dontsend-option": "Nicht senden",
"filename-privacy-label": "Dateiname:",
"filesize-privacy-label": "Dateigröße:",
"checkforupdatesautomatically-label": "Automatisch nach Updates suchen",
"slowondesync-label": "Verlangsamen wenn nicht synchron (nicht unterstützt mit MPC-HC/BE)",
"dontslowdownwithme-label": "Nie verlangsamen oder andere zurückspulen (Experimentell)",
"pausing-title": "Pausiere",
"pauseonleave-label": "Pausieren wenn ein Benutzer austritt",
"readiness-title": "Anfänglicher Bereitschaftsstatus",
"readyatstart-label": "Standardmäßig auf „Bereit“ stellen",
"forceguiprompt-label": "Diesen Dialog nicht mehr anzeigen",
"showosd-label": "OSD-Nachrichten anzeigen",
"showosdwarnings-label": "Zeige Warnungen (z.B. wenn Dateien verschieden)",
"showsameroomosd-label": "Zeige Ereignisse in deinem Raum",
"shownoncontrollerosd-label": "Zeige Ereignisse von nicht geführten Räumen in geführten Räumen.",
"showdifferentroomosd-label": "Zeige Ereignisse in anderen Räumen",
"showslowdownosd-label": "Zeige Verlangsamungs/Zurücksetzungs-Benachrichtigung",
"language-label": "Sprache:",
"automatic-language": "Automatisch ({})", # Default language
"showdurationnotification-label": "Zeige Warnung wegen unterschiedlicher Dauer",
"basics-label": "Grundlagen",
"readiness-label": "Play/Pause",
"misc-label": "Diverse",
"core-behaviour-title": "Verhalten des Raumes",
"syncplay-internals-title": "Syncplay intern",
"syncplay-mediasearchdirectories-title": "In diesen Verzeichnissen nach Medien suchen", # needs to be checked
"syncplay-mediasearchdirectories-label": "In diesen Verzeichnissen nach Medien suchen (ein Pfad pro Zeile)",
"sync-label": "Synchronisation",
"sync-otherslagging-title": "Wenn andere laggen...",
"sync-youlaggging-title": "Wenn du laggst...",
"messages-label": "Nachrichten",
"messages-osd-title": "OSD-(OnScreenDisplay)-Einstellungen",
"messages-other-title": "Weitere Display-Einstellungen",
"chat-label": "Chat",
"privacy-label": "Privatsphäre",
"privacy-title": "Privatsphäreneinstellungen",
"unpause-title": "Wenn du Play drückst, auf Bereit setzen und:",
"unpause-ifalreadyready-option": "Wiedergeben wenn bereits als Bereit gesetzt",
"unpause-ifothersready-option": "Wiedergeben wenn bereits als Bereit gesetzt oder alle anderen bereit sind (Standard)",
"unpause-ifminusersready-option": "Wiedergeben wenn bereits als Bereit gesetzt oder die minimale Anzahl anderer Nutzer bereit ist",
"unpause-always": "Immer wiedergeben",
"syncplay-trusteddomains-title": "Vertrauenswürdige Domains (für Streamingdienste und gehostete Inhalte)",
"chat-title": "Chatnachrichten-Eingabe",
"chatinputenabled-label": "Chateingabe via mpv erlauben (mit der Entertaste)",
"chatdirectinput-label": "Sofotige Chateingabe erlauben (ohne die Entertaste zu drücken)",
"chatinputfont-label": "Chateingabe-Schriftart",
"chatfont-label": "Schriftart wählen",
"chatcolour-label": "Farbe wählen",
"chatinputposition-label": "Position des Nachrichteneingabe-Felds in mpv",
"chat-top-option": "Oben",
"chat-middle-option": "Mitte",
"chat-bottom-option": "Unten",
"chatoutputheader-label": "Chatnachrichten-Eingabe",
"chatoutputfont-label": "Chateingabe-Schriftart",
"chatoutputenabled-label": "Chatausgabe im Medienplayer aktivieren (bisher nur mpv)",
"chatoutputposition-label": "Ausgabemodus",
"chat-chatroom-option": "Chatroom-Stil",
"chat-scrolling-option": "Scrolling-Stil",
"mpv-key-tab-hint": "[TAB] um Zugriff auf die Buchstabentastenkürzel ein-/auszuschalten.",
"mpv-key-hint": "[ENTER] um eine Nachricht zu senden. [ESC] um den Chatmodus zu verlassen.",
"alphakey-mode-warning-first-line": "Du kannst vorübergehend die alten mpv-Tastaturkürzel mit den a–z-Tasten verwenden.",
"alphakey-mode-warning-second-line": "Drücke [TAB], um in den Syncplay-Chatmodus zurückzukehren.",
"help-label": "Hilfe",
"reset-label": "Auf Standardwerte zurücksetzen",
"run-label": "Syncplay starten",
"storeandrun-label": "Konfiguration speichern und Syncplay starten",
"contact-label": "Du hast eine Idee, einen Bug gefunden oder möchtest Feedback geben? Sende eine E-Mail an <a href=\"mailto:dev@syncplay.pl\">dev@syncplay.pl</a>, chatte auf dem <a href=\"https://webchat.freenode.net/?channels=#syncplay\">#Syncplay IRC-Kanal</a> auf irc.freenode.net oder <a href=\"https://github.com/Uriziel/syncplay/issues\">öffne eine Fehlermeldung auf GitHub</a>. Außerdem findest du auf <a href=\"https://syncplay.pl/\">https://syncplay.pl/</a> weitere Informationen, Hilfestellungen und Updates. Chatnachrichten sind nicht verschlüsselt, also verwende Syncplay nicht, um sensible Daten zu verschicken.",
"joinroom-label": "Raum beitreten",
"joinroom-menu-label": "Raum beitreten {}", # TODO: Might want to fix this
"seektime-menu-label": "Spule zu Zeit",
"undoseek-menu-label": "Rückgängig",
"play-menu-label": "Wiedergabe",
"pause-menu-label": "Pause",
"playbackbuttons-menu-label": "Wiedergabesteuerung anzeigen",
"autoplay-menu-label": "Auto-Play-Knopf anzeigen",
"autoplay-guipushbuttonlabel": "Automatisch abspielen wenn alle bereit sind",
"autoplay-minimum-label": "Minimum an Nutzern:",
"sendmessage-label": "Senden",
"ready-guipushbuttonlabel": "Ich bin bereit zum Gucken!",
"roomuser-heading-label": "Raum / Benutzer",
"size-heading-label": "Größe",
"duration-heading-label": "Länge",
"filename-heading-label": "Dateiname",
"notifications-heading-label": "Benachrichtigungen",
"userlist-heading-label": "Liste der gespielten Dateien",
"browseformedia-label": "Nach Mediendateien durchsuchen",
"file-menu-label": "&Datei", # & precedes shortcut key
"openmedia-menu-label": "&Mediendatei öffnen...",
"openstreamurl-menu-label": "&Stream URL öffnen",
"setmediadirectories-menu-label": "Medienverzeichnisse &auswählen",
"loadplaylistfromfile-menu-label": "&Lade Playlist aus Datei",
"saveplaylisttofile-menu-label": "&Speichere Playlist in Datei",
"exit-menu-label": "&Beenden",
"advanced-menu-label": "&Erweitert",
"window-menu-label": "&Fenster",
"setoffset-menu-label": "&Offset einstellen",
"createcontrolledroom-menu-label": "&Zentral gesteuerten Raum erstellen",
"identifyascontroller-menu-label": "Als Raumleiter &identifizieren",
"settrusteddomains-menu-label": "&Vertrauenswürdige Domains auswählen",
"addtrusteddomain-menu-label": "{} als vertrauenswürdige Domain hinzufügen", # Domain
"edit-menu-label": "&Bearbeiten",
"cut-menu-label": "Aus&schneiden",
"copy-menu-label": "&Kopieren",
"paste-menu-label": "&Einsetzen",
"selectall-menu-label": "&Alles auswälhen",
"playback-menu-label": "&Wiedergabe",
"help-menu-label": "&Hilfe",
"userguide-menu-label": "&Benutzerhandbuch öffnen",
"update-menu-label": "auf &Aktualisierung prüfen",
# startTLS messages
"startTLS-initiated": "Sichere Verbindung wird versucht",
"startTLS-secure-connection-ok": "Sichere Verbindung hergestellt ({})",
"startTLS-server-certificate-invalid": 'Sichere Verbindung fehlgeschlagen. Der Server benutzt ein ungültiges Sicherheitszertifikat. Der Kanal könnte von Dritten abgehört werden. Für weitere Details und Problemlösung siehe <a href="https://syncplay.pl/trouble">hier</a> [Englisch].',
"startTLS-server-certificate-invalid-DNS-ID": "Syncplay does not trust this server because it uses a certificate that is not valid for its hostname.", # TODO: Translate
"startTLS-not-supported-client": "Dieser Server unterstützt kein TLS",
"startTLS-not-supported-server": "Dieser Server unterstützt kein TLS",
# TLS certificate dialog
"tls-information-title": "Zertifikatdetails",
"tls-dialog-status-label": "<strong>Syncplay nutzt eine verschlüsselte Verbindung zu {}.</strong>",
"tls-dialog-desc-label": "Verschlüsselung mit einem digitalen Zertifikat hält Informationen geheim, die vom Server {} gesendet oder empfangen werden.",
"tls-dialog-connection-label": "Daten werden verschlüsselt mit Transport Layer Security (TLS) Version {} und <br/>folgender Chiffre: {}.",
"tls-dialog-certificate-label": "Zertifikat ausgestellt durch {} gültig bis {}.",
# About dialog
"about-menu-label": "&Über Syncplay",
"about-dialog-title": "Über Syncplay",
"about-dialog-release": "Version {} Release {}",
"about-dialog-license-text": "Lizensiert unter der Apache-Lizenz Version 2.0",
"about-dialog-license-button": "Lizenz",
"about-dialog-dependencies": "Abhängigkeiten",
"setoffset-msgbox-label": "Offset einstellen",
"offsetinfo-msgbox-label": "Offset (siehe https://syncplay.pl/guide/ für eine Anleitung [Englisch]):",
"promptforstreamurl-msgbox-label": "Stream-URL öffnen",
"promptforstreamurlinfo-msgbox-label": "Stream-URL",
"addfolder-label": "Verzeichnis hinzufügen",
"adduris-msgbox-label": "URLs zur Playlist hinzufügen (ein Eintrag pro Zeile)",
"editplaylist-msgbox-label": "Playlist auswählen (ein Eintrag pro Zeile)",
"trusteddomains-msgbox-label": "Domains, zu denen automatisch gewechselt werden darf (ein Eintrag pro Zeile)",
"createcontrolledroom-msgbox-label": "Zentral gesteuerten Raum erstellen",
"controlledroominfo-msgbox-label": "Namen des zentral gesteuerten Raums eingeben\r\n(siehe https://syncplay.pl/guide/ für eine Anleitung [Englisch]):",
"identifyascontroller-msgbox-label": "Als Raumleiter identifizieren",
"identifyinfo-msgbox-label": "Passwort des zentral gesteuerten Raums eingeben\r\n(siehe https://syncplay.pl/guide/ für eine Anleitung [Englisch]):",
"public-server-msgbox-label": "Einen öffentlichen Server für diese Sitzung auswählen",
"megabyte-suffix": " MB",
# Tooltips
"host-tooltip": "Hostname oder IP zu der verbunden werden soll. Optional mit Port (z.B.. syncplay.pl:8999). Synchronisation findet nur mit Personen auf dem selben Server und Port statt.",
"name-tooltip": "Dein Benutzername. Keine Registrierung, kann einfach geändert werden. Bei fehlender Angabe wird ein zufälliger Name generiert.",
"password-tooltip": "Passwörter sind nur bei Verbindung zu privaten Servern nötig.",
"room-tooltip": "Der Raum, der betreten werden soll, kann ein x-beliebiger sein. Allerdings werden nur Clients im selben Raum synchronisiert.",
"edit-rooms-tooltip": "Edit room list.", # TO DO: Translate
"executable-path-tooltip": "Pfad zum ausgewählten, unterstützten Mediaplayer (mpv, mpv.net, VLC, MPC-HC/BE or mplayer2).",
"media-path-tooltip": "Pfad zum wiederzugebenden Video oder Stream. Notwendig für mplayer2.",
"player-arguments-tooltip": "Zusätzliche Kommandozeilenparameter/-schalter für diesen Mediaplayer.",
"mediasearcdirectories-arguments-tooltip": "Verzeichnisse, in denen Syncplay nach Mediendateien suchen soll, z.B. wenn du die Click-to-switch-Funktion verwendest. Syncplay wird Unterverzeichnisse rekursiv durchsuchen.", # TODO: Translate Click-to-switch? (or use as name for feature)
"more-tooltip": "Weitere Einstellungen anzeigen.",
"filename-privacy-tooltip": "Privatheitsmodus beim Senden des Namens der aktuellen Datei zum Server.",
"filesize-privacy-tooltip": "Privatheitsmodus beim Senden der Größe der aktuellen Datei zum Server.",
"privacy-sendraw-tooltip": "Die Information im Klartext übertragen. Dies ist die Standard-Einstellung mit der besten Funktionalität.",
"privacy-sendhashed-tooltip": "Die Informationen gehasht übertragen, um sie für andere Clients schwerer lesbar zu machen.",
"privacy-dontsend-tooltip": "Diese Information nicht übertragen. Dies garantiert den größtmöglichen Datanschutz.",
"checkforupdatesautomatically-tooltip": "Regelmäßig auf der Syncplay-Website nach Updates suchen.",
"autosavejoinstolist-tooltip": "When you join a room in a server, automatically remember the room name in the list of rooms to join.", # TO DO: Translate
"autosavejoinstolist-label": "Add rooms you join to the room list e", # TO DO: Translate
"slowondesync-tooltip": "Reduziert die Abspielgeschwindigkeit zeitweise, um die Synchronität zu den anderen Clients wiederherzustellen.",
"rewindondesync-label": "Zurückspulen bei großer Zeitdifferenz (empfohlen)",
"fastforwardondesync-label": "Vorspulen wenn das Video laggt (empfohlen)",
"dontslowdownwithme-tooltip": "Lässt andere nicht langsamer werden oder zurückspringen, wenn deine Wiedergabe hängt.",
"pauseonleave-tooltip": "Wiedergabe anhalten, wenn deine Verbindung verloren geht oder jemand den Raum verlässt.",
"readyatstart-tooltip": "Zu Beginn auf „Bereit“ setzen (sonst bist du als „Nicht Bereit“ gesetzt, bis du den Status änderst)",
"forceguiprompt-tooltip": "Der Konfigurationsdialog wird nicht angezeigt, wenn eine Datei mit Syncplay geöffnet wird.",
"nostore-tooltip": "Syncplay mit den angegebenen Einstellungen starten, diese aber nicht dauerhaft speichern.",
"rewindondesync-tooltip": "Zum Wiederherstellen der Synchronität in der Zeit zurückspringen (empfohlen)",
"fastforwardondesync-tooltip": "Nach vorne springen, wenn asynchron zum Raumleiter (oder deine vorgetäuschte Position, falls „Niemals verlangsamen oder andere zurückspulen“ aktiviert ist).",
"showosd-tooltip": "Syncplay-Nachrichten auf dem OSD (= OnScreenDisplay, ein eingeblendetes Textfeld) des Players anzeigen.",
"showosdwarnings-tooltip": "Warnungen bei Unterschiedlichen Dateien oder Alleinsein im Raum anzeigen.",
"showsameroomosd-tooltip": "OSD-Meldungen über Ereignisse im selben Raum anzeigen.",
"shownoncontrollerosd-tooltip": "OSD-Meldungen bei Ereignissen verursacht durch nicht-Raumleiter in zentral gesteuerten Räumen anzeigen.",
"showdifferentroomosd-tooltip": "OSD-Meldungen zu anderen Räumen als dem aktuell betretenen anzeigen.",
"showslowdownosd-tooltip": "Meldungen bei Geschwindigkeitsänderung anzeigen.",
"showdurationnotification-tooltip": "Nützlich, wenn z.B. ein Teil eines mehrteiligen Videos fehlt, kann jedoch auch fehlerhaft anschlagen.",
"language-tooltip": "Die verwendete Sprache von Syncplay",
"unpause-always-tooltip": "Wiedergabe startet immer (anstatt nur den Bereitschaftsstatus zu ändern)",
"unpause-ifalreadyready-tooltip": "Wenn du nicht bereit bist und Play drückst wirst du als bereit gesetzt - zum Starten der Wiedergabe nochmal drücken.",
"unpause-ifothersready-tooltip": "Wenn du Play drückst und nicht bereit bist, wird nur gestartet, wenn alle anderen bereit sind.",
"unpause-ifminusersready-tooltip": "Wenn du Play drückst und nicht bereit bist, wird nur gestartet, wenn die minimale Anzahl anderer Benutzer bereit ist.",
"trusteddomains-arguments-tooltip": "Domains, zu denen Syncplay automatisch wechself darf, wenn geteilte Playlists aktiviert sind.",
"chatinputenabled-tooltip": "Chateingabe in mpv aktivieren (Drücke Enter zum Chatten, Enter zum Senden, Esc um abzubrechen)",
"chatdirectinput-tooltip": "Überspringe, Enter drücken zu müssen, um in mpv in den Chatmodus zu gelangen. Drücke TAB, um diese Funktion vorübergehend zu deaktivieren.",
"font-label-tooltip": "Schriftart für die Darstellung der Chateingabe in mpv. Nur clientseitig, beeinflusst also nicht, was andere sehen.",
"set-input-font-tooltip": "Schriftfamilie für die Darstellung der Chateingabe in mpv. Nur clientseitig, beeinflusst also nicht, was andere sehen.",
"set-input-colour-tooltip": "Schriftfarbe für die Darstellung der Chateingabe in mpv. Nur clientseitig, beeinflusst also nicht, was andere sehen.",
"chatinputposition-tooltip": "Position in mpv, an der Text der Chateingabe erscheint, wenn du Enter drückst und tippst.",
"chatinputposition-top-tooltip": "Chateingabe oben im mpv-Fenster platzieren.",
"chatinputposition-middle-tooltip": "Chateingabe mittig im mpv-Fenster platzieren.",
"chatinputposition-bottom-tooltip": "Chateingabe unten im mpv-Fenster platzieren.",
"chatoutputenabled-tooltip": "Chatnachrichten im OSD anzeigen (sofern vom Medienplayer unterstützt).",
"font-output-label-tooltip": "Chatausgabe-Schriftart.",
"set-output-font-tooltip": "Schriftart für die Darstellung von Chatnachrichten.",
"chatoutputmode-tooltip": "Wie Chatnachrichten dargestellt werden.",
"chatoutputmode-chatroom-tooltip": "Neue Chatzeilen unmittelbar unterder vorangehenden Zeile anzeigen.",
"chatoutputmode-scrolling-tooltip": "Chat-Text von rechts nach links scrollen lassen",
"help-tooltip": "Öffnet Hilfe auf syncplay.pl [Englisch]",
"reset-tooltip": "Alle Einstellungen auf Standardwerte zurücksetzen.",
"update-server-list-tooltip": "Mit syncplay.pl verbinden um die Liste öffentlicher Server zu aktualisieren.",
"sslconnection-tooltip": "Sicher mit Server verbunden. Klicken, um Zertifikatdetails anzuzeigen.",
"joinroom-tooltip": "Den aktuellen Raum verlassen und stattdessen den angegebenen betreten.",
"seektime-msgbox-label": "Springe zur angegebenen Zeit (in Sekunden oder min:sek). Verwende +/- zum relativen Springen.",
"ready-tooltip": "Zeigt an, ob du bereit zum anschauen bist",
"autoplay-tooltip": "Automatisch abspielen, wenn alle Nutzer bereit sind oder die minimale Nutzerzahl erreicht ist.",
"switch-to-file-tooltip": "Doppelklicken um zu {} zu wechseln", # Filename
"sendmessage-tooltip": "Nachricht an Raum senden",
# In-userlist notes (GUI)
"differentsize-note": "Verschiedene Größe!",
"differentsizeandduration-note": "Verschiedene Größe und Dauer!",
"differentduration-note": "Verschiedene Dauer!",
"nofile-note": "(keine Datei wird abgespielt)",
# Server messages to client
"new-syncplay-available-motd-message": "Du nutzt Syncplay Version {}, aber es gibt eine neuere Version auf https://syncplay.pl", # ClientVersion
# Server notifications
"welcome-server-notification": "Willkommen zum Syncplay-Server, v. {0}", # version
"client-connected-room-server-notification": "{0}({2}) hat den Raum „{1}“ betreten", # username, host, room
"client-left-server-notification": "{0} hat den Server verlassen", # name
"no-salt-notification": "WICHTIGER HINWEIS: Damit von dem Server generierte Passwörter für geführte Räume auch nach einem Serverneustart funktionieren, starte den Server mit dem folgenden Parameter: --salt {}", # Salt
# Server arguments
"server-argument-description": 'Anwendung, um mehrere MPlayer, MPC-HC/BE und VLC-Instanzen über das Internet zu synchronisieren. Server',
"server-argument-epilog": 'Wenn keine Optionen angegeben sind, werden die _config-Werte verwendet',
"server-port-argument": 'Server TCP-Port',
"server-password-argument": 'Server-Passwort',
"server-isolate-room-argument": 'Sollen die Räume isoliert sein?',
"server-salt-argument": "zufällige Zeichenkette, die zur Erstellung von Passwörtern verwendet wird",
"server-disable-ready-argument": "Bereitschaftsfeature deaktivieren",
"server-motd-argument": "Pfad zur Datei, von der die Nachricht des Tages geladen wird",
"server-chat-argument": "Soll Chat deaktiviert werden?",
"server-chat-maxchars-argument": "Maximale Zeichenzahl in einer Chatnachricht (Standard ist {})",
"server-maxusernamelength-argument": "Maximale Zeichenzahl in einem Benutzernamen (Standard ist {})",
"server-stats-db-file-argument": "Aktiviere Server-Statistiken mithilfe der bereitgestellten SQLite-db-Datei",
"server-startTLS-argument": "Erlaube TLS-Verbindungen mit den Zertifikatdateien im Angegebenen Pfad",
"server-messed-up-motd-unescaped-placeholders": "Die Nachricht des Tages hat unmaskierte Platzhalter. Alle $-Zeichen sollten verdoppelt werden ($$).",
"server-messed-up-motd-too-long": "Die Nachricht des Tages ist zu lang - Maximal {} Zeichen, aktuell {}.",
# Server errors
"unknown-command-server-error": "Unbekannter Befehl {}", # message
"not-json-server-error": "Kein JSON-String {}", # message
"line-decode-server-error": "Keine utf-8-Zeichenkette",
"not-known-server-error": "Der Server muss dich kennen, bevor du diesen Befehl nutzen kannst",
"client-drop-server-error": "Client verloren: {} -- {}", # host, error
"password-required-server-error": "Passwort nötig",
"wrong-password-server-error": "Ungültiges Passwort",
"hello-server-error": "Zu wenige Hello-Argumente",
# Playlists
"playlist-selection-changed-notification": "{} hat die Playlist-Auswahl geändert", # Username
"playlist-contents-changed-notification": "{} hat die Playlist aktualisiert", # Username
"cannot-find-file-for-playlist-switch-error": "Die Datei {} konnte zum Dateiwechsel nicht in den Medienverzeichnissen gefunden werden!", # Filename
"cannot-add-duplicate-error": "Konnte zweiten Eintrag für „{}“ nicht zur Playlist hinzufügen, weil Dubletten nicht erlaubt sind.", # Filename
"cannot-add-unsafe-path-error": "{} konnte nicht automatisch geladen werden, weil es sich nicht um eine vertrauenswürdige Domain handelt. Du kannst manuell zu der URL wechseln, indem du sie in der Playlist doppelklickst oder vertrauenswürdige Domains unter Datei->Erweitert->Vertrauenswürdige Domains auswählen hinzufügst. Wenn du einen Rechtsklick auf eine URL ausführst, kannst du ihre Domain im Kontextmenü als vertrauenswürdig hinzufügen.", # Filename
"sharedplaylistenabled-label": "Geteilte Playlists aktivieren",
"removefromplaylist-menu-label": "Von Playlist entfernen",
"shuffleremainingplaylist-menu-label": "Verbleibende Playlist shuffeln",
"shuffleentireplaylist-menu-label": "Gesamte Playlist shuffeln",
"undoplaylist-menu-label": "Letze Playlist-Änderung rückgängig machen",
"addfilestoplaylist-menu-label": "Datei(en) zum Ende der Playlist hinzufügen",
"addurlstoplaylist-menu-label": "URL(s) zum Ende der Playlist hinzufügen",
"editplaylist-menu-label": "Playlist bearbeiten",
"open-containing-folder": "Übergeordnetes Verzeichnis der Datei öffnen",
"addyourfiletoplaylist-menu-label": "Deine Datei zur Playlist hinzufügen",
"addotherusersfiletoplaylist-menu-label": "{}s Datei zur Playlist hinzufügen", # [Username]
"addyourstreamstoplaylist-menu-label": "Deinen Stream zur Playlist hinzufügen",
"addotherusersstreamstoplaylist-menu-label": "{}s Stream zur Playlist hinzufügen", # [Username]
"openusersstream-menu-label": "{}s Stream öffnen", # [username]'s
"openusersfile-menu-label": "{}s Datei öffnen", # [username]'s
"playlist-instruction-item-message": "Zieh eine Datei hierher, um sie zur geteilten Playlist hinzuzufügen.",
"sharedplaylistenabled-tooltip": "Raumleiter können Dateien zu einer geteilten Playlist hinzufügen und es so erleichtern, gemeinsam das Gleiche zu gucken. Konfiguriere Medienverzeichnisse unter „Diverse“",
"playlist-empty-error": "Playlist is currently empty.", # TO DO: Translate
"playlist-invalid-index-error": "Invalid playlist index", # TO DO: Translate
}
| 75.909962
| 637
| 0.737615
|
0278dfff32933c7fae2d78b99f1ce026decbe046
| 28,525
|
py
|
Python
|
keystone/tests/unit/backend/domain_config/core.py
|
whitepages/keystone
|
7a0874f6f69852584061fa384f75dfb0d5f1c229
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/backend/domain_config/core.py
|
whitepages/keystone
|
7a0874f6f69852584061fa384f75dfb0d5f1c229
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/backend/domain_config/core.py
|
whitepages/keystone
|
7a0874f6f69852584061fa384f75dfb0d5f1c229
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:38:30.000Z
|
2021-03-21T11:38:30.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mock
from testtools import matchers
from keystone import exception
from keystone.tests import unit
class DomainConfigTests(object):
def setUp(self):
self.domain = unit.new_domain_ref()
self.resource_api.create_domain(self.domain['id'], self.domain)
self.addCleanup(self.clean_up_domain)
def clean_up_domain(self):
# NOTE(henry-nash): Deleting the domain will also delete any domain
# configs for this domain.
self.domain['enabled'] = False
self.resource_api.update_domain(self.domain['id'], self.domain)
self.resource_api.delete_domain(self.domain['id'])
del self.domain
def _domain_config_crud(self, sensitive):
group = uuid.uuid4().hex
option = uuid.uuid4().hex
value = uuid.uuid4().hex
self.domain_config_api.create_config_option(
self.domain['id'], group, option, value, sensitive)
res = self.domain_config_api.get_config_option(
self.domain['id'], group, option, sensitive)
config = {'group': group, 'option': option, 'value': value}
self.assertEqual(config, res)
value = uuid.uuid4().hex
self.domain_config_api.update_config_option(
self.domain['id'], group, option, value, sensitive)
res = self.domain_config_api.get_config_option(
self.domain['id'], group, option, sensitive)
config = {'group': group, 'option': option, 'value': value}
self.assertEqual(config, res)
self.domain_config_api.delete_config_options(
self.domain['id'], group, option, sensitive)
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.get_config_option,
self.domain['id'], group, option, sensitive)
        # ...and deleting it again should silently succeed
self.domain_config_api.delete_config_options(
self.domain['id'], group, option, sensitive)
def test_whitelisted_domain_config_crud(self):
self._domain_config_crud(sensitive=False)
def test_sensitive_domain_config_crud(self):
self._domain_config_crud(sensitive=True)
def _list_domain_config(self, sensitive):
"""Test listing by combination of domain, group & option."""
config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
# Put config2 in the same group as config1
config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
config3 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': 100}
for config in [config1, config2, config3]:
self.domain_config_api.create_config_option(
self.domain['id'], config['group'], config['option'],
config['value'], sensitive)
# Try listing all items from a domain
res = self.domain_config_api.list_config_options(
self.domain['id'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(3))
for res_entry in res:
self.assertIn(res_entry, [config1, config2, config3])
# Try listing by domain and group
res = self.domain_config_api.list_config_options(
self.domain['id'], group=config1['group'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(2))
for res_entry in res:
self.assertIn(res_entry, [config1, config2])
# Try listing by domain, group and option
res = self.domain_config_api.list_config_options(
self.domain['id'], group=config2['group'],
option=config2['option'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(1))
self.assertEqual(config2, res[0])
def test_list_whitelisted_domain_config_crud(self):
self._list_domain_config(False)
def test_list_sensitive_domain_config_crud(self):
self._list_domain_config(True)
def _delete_domain_configs(self, sensitive):
"""Test deleting by combination of domain, group & option."""
config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
# Put config2 and config3 in the same group as config1
config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
config3 = {'group': config1['group'], 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
config4 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
for config in [config1, config2, config3, config4]:
self.domain_config_api.create_config_option(
self.domain['id'], config['group'], config['option'],
config['value'], sensitive)
# Try deleting by domain, group and option
res = self.domain_config_api.delete_config_options(
self.domain['id'], group=config2['group'],
option=config2['option'], sensitive=sensitive)
res = self.domain_config_api.list_config_options(
self.domain['id'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(3))
for res_entry in res:
self.assertIn(res_entry, [config1, config3, config4])
# Try deleting by domain and group
res = self.domain_config_api.delete_config_options(
self.domain['id'], group=config4['group'], sensitive=sensitive)
res = self.domain_config_api.list_config_options(
self.domain['id'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(2))
for res_entry in res:
self.assertIn(res_entry, [config1, config3])
# Try deleting all items from a domain
res = self.domain_config_api.delete_config_options(
self.domain['id'], sensitive=sensitive)
res = self.domain_config_api.list_config_options(
self.domain['id'], sensitive=sensitive)
self.assertThat(res, matchers.HasLength(0))
def test_delete_whitelisted_domain_configs(self):
self._delete_domain_configs(False)
def test_delete_sensitive_domain_configs(self):
self._delete_domain_configs(True)
def _create_domain_config_twice(self, sensitive):
"""Test conflict error thrown if create the same option twice."""
config = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
self.domain_config_api.create_config_option(
self.domain['id'], config['group'], config['option'],
config['value'], sensitive=sensitive)
self.assertRaises(exception.Conflict,
self.domain_config_api.create_config_option,
self.domain['id'], config['group'], config['option'],
config['value'], sensitive=sensitive)
def test_create_whitelisted_domain_config_twice(self):
self._create_domain_config_twice(False)
def test_create_sensitive_domain_config_twice(self):
self._create_domain_config_twice(True)
def test_delete_domain_deletes_configs(self):
"""Test domain deletion clears the domain configs."""
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
# Put config2 in the same group as config1
config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
'value': uuid.uuid4().hex}
self.domain_config_api.create_config_option(
domain['id'], config1['group'], config1['option'],
config1['value'])
self.domain_config_api.create_config_option(
domain['id'], config2['group'], config2['option'],
config2['value'], sensitive=True)
res = self.domain_config_api.list_config_options(
domain['id'])
self.assertThat(res, matchers.HasLength(1))
res = self.domain_config_api.list_config_options(
domain['id'], sensitive=True)
self.assertThat(res, matchers.HasLength(1))
# Now delete the domain
domain['enabled'] = False
self.resource_api.update_domain(domain['id'], domain)
self.resource_api.delete_domain(domain['id'])
# Check domain configs have also been deleted
res = self.domain_config_api.list_config_options(
domain['id'])
self.assertThat(res, matchers.HasLength(0))
res = self.domain_config_api.list_config_options(
domain['id'], sensitive=True)
self.assertThat(res, matchers.HasLength(0))
def test_create_domain_config_including_sensitive_option(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
# password is sensitive, so check that the whitelisted portion and
# the sensitive piece have been stored in the appropriate locations.
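        # For example (values are random hex in this test): get_config() should
        # return only {'ldap': {'url': ..., 'user_tree_dn': ...}}, while the
        # password is only retrievable via
        # get_config_option(..., 'ldap', 'password', sensitive=True) or
        # get_config_with_sensitive_info().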
res = self.domain_config_api.get_config(self.domain['id'])
config_whitelisted = copy.deepcopy(config)
config_whitelisted['ldap'].pop('password')
self.assertEqual(config_whitelisted, res)
res = self.domain_config_api.get_config_option(
self.domain['id'], 'ldap', 'password', sensitive=True)
self.assertEqual(config['ldap']['password'], res['value'])
# Finally, use the non-public API to get back the whole config
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual(config, res)
def test_get_partial_domain_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
res = self.domain_config_api.get_config(self.domain['id'],
group='identity')
config_partial = copy.deepcopy(config)
config_partial.pop('ldap')
self.assertEqual(config_partial, res)
res = self.domain_config_api.get_config(
self.domain['id'], group='ldap', option='user_tree_dn')
self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res)
# ...but we should fail to get a sensitive option
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.get_config, self.domain['id'],
group='ldap', option='password')
def test_delete_partial_domain_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
self.domain_config_api.delete_config(
self.domain['id'], group='identity')
config_partial = copy.deepcopy(config)
config_partial.pop('identity')
config_partial['ldap'].pop('password')
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(config_partial, res)
self.domain_config_api.delete_config(
self.domain['id'], group='ldap', option='url')
config_partial = copy.deepcopy(config_partial)
config_partial['ldap'].pop('url')
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(config_partial, res)
def test_get_options_not_in_domain_config(self):
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.get_config, self.domain['id'])
config = {'ldap': {'url': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.get_config, self.domain['id'],
group='identity')
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.get_config, self.domain['id'],
group='ldap', option='user_tree_dn')
def test_get_sensitive_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual({}, res)
self.domain_config_api.create_config(self.domain['id'], config)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual(config, res)
def test_update_partial_domain_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
# Try updating a group
new_config = {'ldap': {'url': uuid.uuid4().hex,
'user_filter': uuid.uuid4().hex}}
res = self.domain_config_api.update_config(
self.domain['id'], new_config, group='ldap')
expected_config = copy.deepcopy(config)
expected_config['ldap']['url'] = new_config['ldap']['url']
expected_config['ldap']['user_filter'] = (
new_config['ldap']['user_filter'])
expected_full_config = copy.deepcopy(expected_config)
expected_config['ldap'].pop('password')
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(expected_config, res)
# The sensitive option should still exist
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual(expected_full_config, res)
# Try updating a single whitelisted option
self.domain_config_api.delete_config(self.domain['id'])
self.domain_config_api.create_config(self.domain['id'], config)
new_config = {'url': uuid.uuid4().hex}
res = self.domain_config_api.update_config(
self.domain['id'], new_config, group='ldap', option='url')
# Make sure whitelisted and full config is updated
expected_whitelisted_config = copy.deepcopy(config)
expected_whitelisted_config['ldap']['url'] = new_config['url']
expected_full_config = copy.deepcopy(expected_whitelisted_config)
expected_whitelisted_config['ldap'].pop('password')
self.assertEqual(expected_whitelisted_config, res)
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(expected_whitelisted_config, res)
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual(expected_full_config, res)
# Try updating a single sensitive option
self.domain_config_api.delete_config(self.domain['id'])
self.domain_config_api.create_config(self.domain['id'], config)
new_config = {'password': uuid.uuid4().hex}
res = self.domain_config_api.update_config(
self.domain['id'], new_config, group='ldap', option='password')
# The whitelisted config should not have changed...
expected_whitelisted_config = copy.deepcopy(config)
expected_full_config = copy.deepcopy(config)
expected_whitelisted_config['ldap'].pop('password')
self.assertEqual(expected_whitelisted_config, res)
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(expected_whitelisted_config, res)
expected_full_config['ldap']['password'] = new_config['password']
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
# ...but the sensitive piece should have.
self.assertEqual(expected_full_config, res)
def test_update_invalid_partial_domain_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
# An extra group, when specifying one group should fail
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config, group='ldap')
# An extra option, when specifying one option should fail
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config['ldap'],
group='ldap', option='url')
# Now try the right number of groups/options, but just not
# ones that are in the config provided
config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config, group='identity')
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config['ldap'], group='ldap',
option='url')
# Now some valid groups/options, but just not ones that are in the
# existing config
config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
config_wrong_group = {'identity': {'driver': uuid.uuid4().hex}}
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.update_config,
self.domain['id'], config_wrong_group,
group='identity')
config_wrong_option = {'url': uuid.uuid4().hex}
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.update_config,
self.domain['id'], config_wrong_option,
group='ldap', option='url')
# And finally just some bad groups/options
bad_group = uuid.uuid4().hex
config = {bad_group: {'user': uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config, group=bad_group,
option='user')
bad_option = uuid.uuid4().hex
config = {'ldap': {bad_option: uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.update_config,
self.domain['id'], config, group='ldap',
option=bad_option)
def test_create_invalid_domain_config(self):
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.create_config,
self.domain['id'], {})
config = {uuid.uuid4().hex: uuid.uuid4().hex}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.create_config,
self.domain['id'], config)
config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.create_config,
self.domain['id'], config)
config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.create_config,
self.domain['id'], config)
# Try an option that IS in the standard conf, but is neither whitelisted
# nor marked as sensitive
config = {'identity': {'user_tree_dn': uuid.uuid4().hex}}
self.assertRaises(exception.InvalidDomainConfig,
self.domain_config_api.create_config,
self.domain['id'], config)
def test_delete_invalid_partial_domain_config(self):
config = {'ldap': {'url': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
# Try deleting a group not in the config
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.delete_config,
self.domain['id'], group='identity')
# Try deleting an option not in the config
self.assertRaises(exception.DomainConfigNotFound,
self.domain_config_api.delete_config,
self.domain['id'],
group='ldap', option='user_tree_dn')
def test_sensitive_substitution_in_domain_config(self):
# Create a config that contains a whitelisted option that requires
# substitution of a sensitive option.
config = {'ldap': {'url': 'my_url/%(password)s',
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
# Read back the config with the internal method and ensure that the
# substitution has taken place.
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
expected_url = (
config['ldap']['url'] % {'password': config['ldap']['password']})
self.assertEqual(expected_url, res['ldap']['url'])
def test_invalid_sensitive_substitution_in_domain_config(self):
"""Check that invalid substitutions raise warnings."""
mock_log = mock.Mock()
invalid_option_config = {
'ldap': {'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
for invalid_option in ['my_url/%(passssword)s',
'my_url/%(password',
'my_url/%(password)',
'my_url/%(password)d']:
invalid_option_config['ldap']['url'] = invalid_option
self.domain_config_api.create_config(
self.domain['id'], invalid_option_config)
with mock.patch('keystone.resource.core.LOG', mock_log):
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
mock_log.warning.assert_any_call(mock.ANY)
self.assertEqual(
invalid_option_config['ldap']['url'], res['ldap']['url'])
def test_escaped_sequence_in_domain_config(self):
"""Check that escaped '%(' doesn't get interpreted."""
mock_log = mock.Mock()
escaped_option_config = {
'ldap': {'url': 'my_url/%%(password)s',
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(
self.domain['id'], escaped_option_config)
with mock.patch('keystone.resource.core.LOG', mock_log):
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertFalse(mock_log.warn.called)
# The escaping '%' should have been removed
self.assertEqual('my_url/%(password)s', res['ldap']['url'])
@unit.skip_if_cache_disabled('domain_config')
def test_cache_layer_get_sensitive_config(self):
config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex,
'password': uuid.uuid4().hex},
'identity': {'driver': uuid.uuid4().hex}}
self.domain_config_api.create_config(self.domain['id'], config)
# cache the result
res = self.domain_config_api.get_config_with_sensitive_info(
self.domain['id'])
self.assertEqual(config, res)
# delete, bypassing domain config manager api
self.domain_config_api.delete_config_options(self.domain['id'])
self.domain_config_api.delete_config_options(self.domain['id'],
sensitive=True)
self.assertDictEqual(
res, self.domain_config_api.get_config_with_sensitive_info(
self.domain['id']))
self.domain_config_api.get_config_with_sensitive_info.invalidate(
self.domain_config_api, self.domain['id'])
self.assertDictEqual(
{},
self.domain_config_api.get_config_with_sensitive_info(
self.domain['id']))
def test_config_registration(self):
type = uuid.uuid4().hex
self.domain_config_api.obtain_registration(
self.domain['id'], type)
self.domain_config_api.release_registration(
self.domain['id'], type=type)
# Make sure that once someone has it, nobody else can get it.
# This includes the domain who already has it.
self.domain_config_api.obtain_registration(
self.domain['id'], type)
self.assertFalse(
self.domain_config_api.obtain_registration(
self.domain['id'], type))
# Make sure we can read who does have it
self.assertEqual(
self.domain['id'],
self.domain_config_api.read_registration(type))
# Make sure releasing it is silent if the domain specified doesn't
# have the registration
domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain2['id'], domain2)
self.domain_config_api.release_registration(
domain2['id'], type=type)
# If nobody has the type registered, then trying to read it should
# raise ConfigRegistrationNotFound
self.domain_config_api.release_registration(
self.domain['id'], type=type)
self.assertRaises(exception.ConfigRegistrationNotFound,
self.domain_config_api.read_registration,
type)
# Finally check multiple registrations are cleared if you free the
# registration without specifying the type
type2 = uuid.uuid4().hex
self.domain_config_api.obtain_registration(
self.domain['id'], type)
self.domain_config_api.obtain_registration(
self.domain['id'], type2)
self.domain_config_api.release_registration(self.domain['id'])
self.assertRaises(exception.ConfigRegistrationNotFound,
self.domain_config_api.read_registration,
type)
self.assertRaises(exception.ConfigRegistrationNotFound,
self.domain_config_api.read_registration,
type2)
| 47.860738
| 79
| 0.61234
|
377110a72bd812519724bbec0d55b3984c5d9c86
| 728
|
py
|
Python
|
decorators.py
|
pavel-yas1405/teamwork
|
52a984576046a4d9ad2a6e48e52ec1a17688cd99
|
[
"MIT"
] | null | null | null |
decorators.py
|
pavel-yas1405/teamwork
|
52a984576046a4d9ad2a6e48e52ec1a17688cd99
|
[
"MIT"
] | null | null | null |
decorators.py
|
pavel-yas1405/teamwork
|
52a984576046a4d9ad2a6e48e52ec1a17688cd99
|
[
"MIT"
] | null | null | null |
from functools import wraps
from flask import current_app, flash, request, redirect, url_for
from flask_login import config, current_user
def admin_required(func):
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in config.EXEMPT_METHODS:
return func(*args, **kwargs)
elif current_app.config.get('LOGIN_DISABLED'):
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
elif not current_user.is_admin:
flash('This page is only available to admins')
return redirect(url_for('index'))
return func(*args, **kwargs)
return decorated_view
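# Usage sketch (added for illustration; the route and template names below are
# hypothetical and not part of this project):
#
#   @app.route('/admin')
#   @admin_required
#   def admin_panel():
#       return render_template('admin.html')
#
# The decorator defers authentication to Flask-Login and only layers the
# `current_user.is_admin` check (plus the redirect to 'index') on top of it.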
| 36.4
| 64
| 0.679945
|
497b16aeeccd0896664a6533fc5cccdf4ed770be
| 1,558
|
py
|
Python
|
tests/test_mod.py
|
smnorris/terraincache
|
02de5ee7fd697cb0dd47d79bbc750c7f49a9199c
|
[
"MIT"
] | 3
|
2020-10-05T22:19:26.000Z
|
2021-12-23T02:50:27.000Z
|
tests/test_mod.py
|
smnorris/terraincache
|
02de5ee7fd697cb0dd47d79bbc750c7f49a9199c
|
[
"MIT"
] | 4
|
2019-10-12T00:08:45.000Z
|
2019-11-12T21:40:44.000Z
|
tests/test_mod.py
|
smnorris/terraincache
|
02de5ee7fd697cb0dd47d79bbc750c7f49a9199c
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import mercantile
from mercantile import Tile
from terraincache import TerrainTiles
TEST_TILE = Tile(x=622, y=1364, z=12)
TEST_BOUNDS = (-125.271412, 51.370639, -125.254793, 51.376881)
TEST_ZOOM_1 = 12
TEST_ZOOM_2 = 14
def tile_path(tile):
return "/".join([str(tile.z), str(tile.x), str(tile.y) + ".tif"])
def test_download_single_tile(tmpdir):
tt = TerrainTiles(TEST_BOUNDS, TEST_ZOOM_1, cache_dir=str(tmpdir))
tt.download_tile(TEST_TILE)
assert Path(str(tmpdir)).joinpath(tile_path(TEST_TILE)).exists()
def test_download_bounds(tmpdir):
tt = TerrainTiles(TEST_BOUNDS, TEST_ZOOM_2, cache_dir=str(tmpdir))
for tile in mercantile.tiles(*TEST_BOUNDS, TEST_ZOOM_2):
assert Path(str(tmpdir)).joinpath(tile_path(tile)).exists()
def test_merge(tmpdir):
tt = TerrainTiles(TEST_BOUNDS, TEST_ZOOM_2, cache_dir=str(tmpdir))
tt.merge(out_file=os.path.join(str(tmpdir), "merged.tif"))
assert (Path(str(tmpdir)) / "merged.tif").exists()
def test_warp(tmpdir):
tt = TerrainTiles(
TEST_BOUNDS,
TEST_ZOOM_2,
cache_dir=str(tmpdir),
dst_crs="EPSG:3005",
resolution=50,
)
tt.merge()
tt.warp(out_file=os.path.join(str(tmpdir), "warped.tif"))
assert (Path(str(tmpdir)) / "warped.tif").exists()
def test_load(tmpdir):
tt = TerrainTiles(
TEST_BOUNDS,
TEST_ZOOM_2,
cache_dir=str(tmpdir),
dst_crs="EPSG:3005",
resolution=50,
)
array = tt.load()
assert array.shape == (14, 23)
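# Note (added): these tests are written for pytest (e.g. `pytest tests/test_mod.py`)
# and download real tiles, so network access is presumably required when running them.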
| 25.540984
| 70
| 0.675225
|
bd4281a17a4fa547ca70434925bf77f99fe013bb
| 25,779
|
py
|
Python
|
LSIMasks/losses/latent_mask_loss.py
|
charlesity/LSIMasks
|
f3ff1984c6cd83baf3aee9a336d5b515898eec29
|
[
"MIT"
] | null | null | null |
LSIMasks/losses/latent_mask_loss.py
|
charlesity/LSIMasks
|
f3ff1984c6cd83baf3aee9a336d5b515898eec29
|
[
"MIT"
] | null | null | null |
LSIMasks/losses/latent_mask_loss.py
|
charlesity/LSIMasks
|
f3ff1984c6cd83baf3aee9a336d5b515898eec29
|
[
"MIT"
] | null | null | null |
import warnings
import gc
import numpy as np
from copy import deepcopy
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn.parallel.data_parallel import data_parallel
from inferno.extensions.criteria.set_similarity_measures import SorensenDiceLoss
from inferno.extensions.containers.graph import Identity
from speedrun.log_anywhere import log_image, log_embedding, log_scalar
from segmfriends.utils.various import parse_data_slice
from segmfriends.transform.volume import DownSampleAndCropTensorsInBatch
from ..utils.various import auto_crop_tensor_to_shape
from .sparse_affinitiees_loss import MultiLevelSparseAffinityLoss
class LatentMaskLoss(nn.Module):
def __init__(self, model, apply_checkerboard=False, loss_type="Dice",
ignore_label=0,
train_glia_mask=False,
boundary_label=None,
glia_label=None,
train_patches_on_glia=False,
fix_bug_multiscale_patches=False,
defected_label=None,
IoU_loss_kwargs=None,
sparse_affs_loss_kwargs=None,
indx_trained_patchNets=None,
model_kwargs=None, devices=(0, 1)):
super(LatentMaskLoss, self).__init__()
if loss_type == "Dice":
self.loss = SorensenDiceLoss()
elif loss_type == "MSE":
self.loss = nn.MSELoss()
elif loss_type == "BCE":
self.loss = nn.BCELoss()
else:
raise ValueError
self.apply_checkerboard = apply_checkerboard
self.fix_bug_multiscale_patches = fix_bug_multiscale_patches
self.ignore_label = ignore_label
self.boundary_label = boundary_label
self.glia_label = glia_label
self.defected_label = defected_label
self.train_glia_mask = train_glia_mask
self.train_patches_on_glia = train_patches_on_glia
self.indx_trained_patchNets = indx_trained_patchNets
self.add_IoU_loss = False
if IoU_loss_kwargs is not None:
raise NotImplementedError()
# self.add_IoU_loss = True
# from .compute_IoU import IoULoss
# self.IoU_loss = IoULoss(model, model_kwargs=model_kwargs, devices=devices, **IoU_loss_kwargs)
self.devices = devices
# TODO: get rid of kwargs
self.model_kwargs = model_kwargs
self.MSE_loss = nn.MSELoss()
self.smoothL1_loss = nn.SmoothL1Loss()
# TODO: use nn.BCEWithLogitsLoss()
self.BCE = nn.BCELoss()
self.soresen_loss = SorensenDiceLoss()
self.model = model
self.train_sparse_loss = False
self.sparse_multilevelDiceLoss = None
if sparse_affs_loss_kwargs is not None:
self.train_sparse_loss = True
self.sparse_multilevelDiceLoss = MultiLevelSparseAffinityLoss(model, model_kwargs=model_kwargs,
devices=devices,
**sparse_affs_loss_kwargs)
def forward(self, all_predictions, target):
mdl = self.model
nb_inputs = mdl.number_multiscale_inputs
# Plot some patches with the raw:
if self.model.return_input:
raw_inputs = all_predictions[-nb_inputs:]
all_predictions = all_predictions[:-nb_inputs]
loss = 0
if self.train_sparse_loss:
raise NotImplementedError
loss = loss + self.sparse_multilevelDiceLoss(all_predictions, target)
# Delete affinities from targets:
target = [tar[:, :2].int() for tar in target]
# ----------------------------
# Loss on patches:
# ----------------------------
for mask_dec_indx in range(len(all_predictions)):
# ----------------------------
# Initializations:
# ----------------------------
mask_dec = self.model.mask_decoders[mask_dec_indx]
pred = all_predictions[mask_dec_indx]
gt_segm = target[mask_dec.target_index]
# Collect options from config:
mask_shape = mask_dec.mask_shape
mask_dws_fact = mask_dec.mask_dws_fact
sample_strides = mask_dec.sample_strides
pred_dws_fact = mask_dec.pred_dws_fact
crop_slice_prediction = mask_dec.crop_slice_prediction
limit_nb_decoded_masks_to = mask_dec.limit_nb_decoded_masks_to
if crop_slice_prediction is not None:
precrop_pred_slice = (slice(None), slice(None)) + parse_data_slice(crop_slice_prediction)
pred = pred[precrop_pred_slice]
max_random_crop = mask_dec.max_random_crop
real_shape_mask = tuple(pt * fc for pt, fc in zip(mask_shape, mask_dws_fact))
full_target_shape = gt_segm.shape[-3:]
assert all([i <= j for i, j in zip(real_shape_mask, full_target_shape)]), "Real-sized patch is too large!"
# ----------------------------
# Deduce crop size of the prediction and select target patches accordingly:
# ----------------------------
# TODO: explain better what is going on here
crop_slice_targets, crop_slice_prediction = get_slicing_crops(pred.shape[2:], full_target_shape,
pred_dws_fact, real_shape_mask)
gt_segm = gt_segm[crop_slice_targets]
pred = pred[crop_slice_prediction]
full_target_shape = gt_segm.shape[-3:]
# # # ----------------------------
# # # Plot some random patches with associated raw patch:
# # # ----------------------------
# if self.model.return_input and mask_dec_indx<5:
# # raw = raw_inputs[kwargs["nb_target"]][crop_slice_targets]
# # FIXME: raw is not correct for deeper ones
# raw = raw_inputs[0][crop_slice_targets]
# raw_to_plot, gt_labels_to_plot, gt_masks_to_plot, pred_emb_to_plot = [], [], [], []
# for n in range(40):
# # Select a random pixel and define sliding-window crop slices:
# selected_coord = [np.random.randint(shp) for shp in pred.shape[2:]]
# # selected_coord[0] = 4 # For plots, get always 4
# full_patch_slice = (slice(None), slice(0,1)) + tuple(
# slice(selected_coord[i], selected_coord[i] + real_shape_mask[i]) for i in range(len(selected_coord)))
# emb_slice = (slice(None), slice(0,1)) + tuple(slice(selected_coord[i] + int(real_shape_mask[i] / 2),
# selected_coord[i] + int(
# real_shape_mask[i] / 2) + 1) for i in
# range(len(selected_coord)))
# pred_center_coord = [int(selected_coord[i] / pred_dws_fact[i]) for i in range(len(selected_coord))]
# emb_slice_pred = (slice(None), slice(None)) + tuple(
# slice(pred_center_coord[i], pred_center_coord[i] + 1)
# for i in range(len(selected_coord)))
#
# # Collect data for current sliding window:
# center_label = gt_segm[emb_slice]
# center_label_repeated = center_label.repeat(1, 1, *real_shape_mask)
# gt_patch_labels = gt_segm[full_patch_slice]
# gt_masks_to_plot.append(gt_patch_labels != center_label_repeated)
# gt_labels_to_plot.append(gt_patch_labels)
# # ignore_mask_patch = (gt_patch_labels == 0)
# pred_emb_to_plot.append(pred[emb_slice_pred])
#
# raw_to_plot.append(raw[full_patch_slice])
#
# # Highlight center pixel:
# raw_to_plot = torch.cat(raw_to_plot, dim=0)
# center_pixel_coord = (slice(None), 0) + tuple(int(shp / 2) for shp in real_shape_mask)
# raw_to_plot[center_pixel_coord] = raw_to_plot.min() - 1.
#
# gt_labels_to_plot = torch.cat(gt_labels_to_plot, dim=0)
# gt_masks_to_plot = torch.cat(gt_masks_to_plot, dim=0)
# pred_emb_to_plot = torch.cat(pred_emb_to_plot, dim=0)
#
# # Decode embeddings:
# ptch_num = kwargs["patchNet_number"]
# pred_patch_to_plot = data_parallel(self.model.patch_models[ptch_num], pred_emb_to_plot[:, :, 0, 0, 0], self.devices)
#
# # Downscale and rescale targets:
# down_sc_slice = (slice(None), slice(None)) + tuple(
# slice(int(dws_fact / 2), None, dws_fact) for dws_fact in mask_dws_fact)
# gt_masks_to_plot = torch.nn.functional.interpolate(gt_masks_to_plot[down_sc_slice].float(), scale_factor=tuple(mask_dws_fact))
# pred_patch_to_plot = torch.nn.functional.interpolate(pred_patch_to_plot,
# scale_factor=tuple(mask_dws_fact))
#
# gt_masks_to_plot = 1. - gt_masks_to_plot
# if mask_dws_fact[1] <= 6:
# pred_patch_to_plot = 1. - pred_patch_to_plot
#
# log_image("raw_patch_l{}".format(mask_dec_indx), raw_to_plot)
# log_image("gt_label_patch_l{}".format(mask_dec_indx), gt_labels_to_plot)
# log_image("gt_mask_patch_l{}".format(mask_dec_indx), gt_masks_to_plot)
# log_image("pred_patch_l{}".format(mask_dec_indx), pred_patch_to_plot)
# # ----------------------------
# # Patch-Loss:
# # ----------------------------
# If multiple strides were given, process all of them:
sample_strides = sample_strides if isinstance(sample_strides[0], list) else [sample_strides]
if limit_nb_decoded_masks_to is not None:
limit_nb_decoded_masks_to = limit_nb_decoded_masks_to if isinstance(limit_nb_decoded_masks_to[0],
list) else [
limit_nb_decoded_masks_to]
else:
limit_nb_decoded_masks_to = [None for _ in sample_strides]
for nb_stride, smpl_stride, max_nb_masks in zip(range(len(sample_strides)), sample_strides,
limit_nb_decoded_masks_to):
# ----------------------------
# Get some random prediction embeddings:
# ----------------------------
prediction_strides = get_prediction_strides(pred_dws_fact, smpl_stride)
selected_embeddings, crop_slice_pred, nb_selected_masks = extract_patches_torch(pred, (1, 1, 1),
stride=prediction_strides,
max_random_crop=max_random_crop)
# ----------------------------
# Collect gt_segm patches and corresponding center labels:
# ----------------------------
crop_slice_targets = tuple(slice(sl.start, None) for sl in crop_slice_pred)
gt_patches, _, _ = extract_patches_torch(gt_segm, real_shape_mask, stride=smpl_stride,
apply_specific_crop_slice=crop_slice_targets,
limit_patches_nb_to=nb_selected_masks)
gt_patches = gt_patches[:, [0]]
# Make sure to crop some additional border and get the centers correctly:
# TODO: this can be now easily done by cropping the gt_patches...
crop_slice_center_labels = (slice(None), slice(None)) + tuple(
slice(slc.start + int(sh / 2), slc.stop) for slc, sh in
zip(crop_slice_targets[2:], real_shape_mask))
target_at_patch_center, _, _ = extract_patches_torch(gt_segm, (1, 1, 1), stride=smpl_stride,
apply_specific_crop_slice=crop_slice_center_labels,
limit_patches_nb_to=nb_selected_masks)
# Get GT and other masks separately:
label_at_patch_center = target_at_patch_center[:, [0]]
mask_at_patch_center = target_at_patch_center[:, [1]]
# ----------------------------
# Ignore patches on the boundary or involving ignore-label:
# ----------------------------
# Ignore pixels involving ignore-labels:
ignore_masks = (gt_patches == self.ignore_label)
valid_patches = (label_at_patch_center != self.ignore_label)
patch_is_on_boundary = None
if self.boundary_label is not None:
patch_is_on_boundary = (mask_at_patch_center == self.boundary_label).repeat(1, 1, *real_shape_mask)
# Delete non-valid patches from batch:
valid_batch_indices = np.argwhere(valid_patches[:, 0, 0, 0, 0].cpu().detach().numpy())[:, 0]
if max_nb_masks is not None:
limit = max_nb_masks[0]
if max_nb_masks[1] == 'number':
if valid_batch_indices.shape[0] > limit:
valid_batch_indices = np.random.choice(valid_batch_indices, limit, replace=False)
elif max_nb_masks[1] == 'factor':
assert limit <= 1. and limit >= 0.
valid_batch_indices = np.random.choice(valid_batch_indices,
int(limit * valid_batch_indices.shape[0]), replace=False)
if valid_batch_indices.shape[0] == 0:
# Avoid problems if all patches are invalid and
# torch complaining that autograd cannot be performed:
loss += selected_embeddings.sum() * 0.
print("ZERO valid patches at level {}".format(mask_dec_indx))
continue
# ----------------------------
# Compute the actual (inverted) MeMasks targets: (0 is me, 1 are the others)
# best targets for Dice loss (usually more me than others)
# ----------------------------
center_labels_repeated = label_at_patch_center.repeat(1, 1, *real_shape_mask)
target_me_masks = gt_patches != center_labels_repeated
if patch_is_on_boundary is not None:
# If on boundary, we make (inverted) me_masks completely 1 (split from everything)
target_me_masks = target_me_masks | patch_is_on_boundary
# Downscaling patches:
down_sc_slice = (slice(None), slice(None)) + tuple(
slice(int(dws_fact / 2), None, dws_fact) for dws_fact in mask_dws_fact)
# Final targets:
target_me_masks = target_me_masks[valid_batch_indices].float()[down_sc_slice]
ignore_masks = ignore_masks[valid_batch_indices][down_sc_slice].byte()
# Invert MeMasks:
# best targets for Dice loss are: meMask == 0; others == 1
# TODO: generalize
if mask_dws_fact[1] > 6:
target_me_masks = 1. - target_me_masks
assert valid_batch_indices.max() < selected_embeddings.shape[
0], "Something went wrong, more target patches were collected than those predicted: {} targets vs {} pred...".format(
valid_batch_indices.max(), selected_embeddings.shape[0])
selected_embeddings = selected_embeddings[valid_batch_indices]
selected_embeddings = selected_embeddings[:, :, 0, 0, 0]
# ----------------------------
# Decode the actual predicted using the decoder models:
# ----------------------------
decoded_masks = data_parallel(mask_dec, selected_embeddings, self.devices)
# print(expanded_patches.shape)
assert decoded_masks.shape[1] == 1, "MaskDecoder should output only single-channel masks!"
# Some logs:
if nb_stride == 0:
log_image("ptc_trg_l{}".format(mask_dec_indx), target_me_masks)
log_image("ptc_pred_l{}".format(mask_dec_indx), decoded_masks)
# log_image("ptc_ign_l{}".format(nb_patch_net), patch_ignore_masks)
log_scalar("avg_targets_l{}".format(mask_dec_indx), target_me_masks.float().mean())
# ----------------------------
# Apply ignore mask and compute loss:
# ----------------------------
valid_masks = 1. - ignore_masks.float()
decoded_masks = decoded_masks * valid_masks
target_me_masks = target_me_masks * valid_masks
with warnings.catch_warnings(record=True) as w:
reconstruction_loss = data_parallel(self.loss, (decoded_masks, target_me_masks.float()),
self.devices).mean()
loss = loss + reconstruction_loss
if nb_stride == 0:
log_scalar("loss_l{}".format(mask_dec_indx), reconstruction_loss)
log_scalar("nb_patches_l{}".format(mask_dec_indx), decoded_masks.shape[0])
gc.collect()
return loss
def get_slicing_crops(pred_shape, target_shape, pred_ds_factor, real_patch_shape):
"""
In a few words, this function tries to deduce how the target and predicted tensors should be cropped, so that
if we extract patches from both of them, these patches are consistent.
Let's see some examples:
1) If the target and prediction tensors have the same shape:
then the prediction tensor should be partially cropped, because the embedding in the top left corner
will need some extra context from the target tensor in order to be trained properly.
2) However, in some cases the prediction tensor will be much smaller than the target (for example because some crops
were performed inside the UNet model), so we will further crop the target so that they match.
:return: Two tuples containing the crop slices to be applied to the target and prediction tensors, respectively.
"""
# Compute new left crops:
# (we do not care about the right crops, because anyway the extra patches are
# ignored with the option `limit_patches_to`)
upscaled_pred_shape = [sh * fctr for sh, fctr in zip(pred_shape, pred_ds_factor)]
shape_diff = [orig - trg for orig, trg in zip(target_shape, upscaled_pred_shape)]
assert all([diff >= 0 for diff in shape_diff]), "Prediction should be smaller or equal to the targets!"
assert all([diff % 2 == 0 for diff in shape_diff])
padding = [int(diff / 2) for diff in shape_diff]
crop_slice_targets = [slice(None), slice(None)]
crop_slice_prediction = [slice(None), slice(None)]
import math
for dim, pad in enumerate(padding):
# Consider the patch-padding:
real_pad = pad - int(real_patch_shape[dim] / 2)
if real_pad > 0:
# We should crop targets
crop_slice_targets.append(slice(real_pad, -real_pad))
crop_slice_prediction.append(slice(None))
elif real_pad < 0:
# We should crop prediction:
# (use floor to round up, since pad is negative)
crop_slice_prediction.append(
slice(-math.floor(real_pad / pred_ds_factor[dim]), math.floor(real_pad / pred_ds_factor[dim])))
crop_slice_targets.append(slice(None))
else:
# No need to crop:
crop_slice_targets.append(slice(None))
crop_slice_prediction.append(slice(None))
return tuple(crop_slice_targets), tuple(crop_slice_prediction)
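# Worked example for get_slicing_crops (comment added for orientation; the shapes
# below are made up):
#   pred spatial shape (12, 12, 12), pred_ds_factor (4, 4, 4) -> upscaled (48, 48, 48)
#   real_patch_shape (8, 8, 8)                                -> half-patch size of 4
#
#   target (48, 48, 48): shape_diff = 0, so real_pad = 0 - 4 = -4 per dim
#     -> the prediction is cropped with slice(1, -1) on each spatial dim and the
#        target is left untouched (case 1 of the docstring: border embeddings lack
#        target context).
#   target (64, 64, 64): shape_diff = 16, padding = 8, so real_pad = 8 - 4 = 4 per dim
#     -> the target is cropped with slice(4, -4) on each spatial dim and the
#        prediction is left untouched (case 2 of the docstring: prediction already
#        smaller than the target).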
def get_prediction_strides(pred_ds_factor, strides):
# Compute updated strides:
assert all(strd % pred_fctr == 0 for strd, pred_fctr in
zip(strides, pred_ds_factor)), "Stride {} should be divisible by downscaling factor {}".format(strides,
pred_ds_factor)
pred_strides = tuple(int(strd / pred_fctr) for strd, pred_fctr in zip(strides, pred_ds_factor))
return pred_strides
def extract_patches_torch(tensor, shape, stride,
precrop_tensor=None,
max_random_crop=None,
apply_specific_crop_slice=None,
limit_patches_nb_to=None,
reshape_to_batch_dim=True):
"""
:param tensor: PyTorch tensor from which to extract patches
:param shape: Shape of the extracted patches
:param stride: Stride of the extracted patches
:param precrop_tensor: How much to precrop the tensor (list of length tensor.dim()-2)
Example: [(0,0), (2,4), (1,4)]
:param max_random_crop: How much to randomly crop the tensor (to create some variability in the strided sampling).
Same format as `precrop_tensor`
:param apply_specific_crop_slice:
This is the second argument that is returned by the function: it represents the
actual crop that was performed (including a possible random crop that was applied).
If you apply this function to multiple tensors and you want to get consistent results,
then the second time call the function passing the output_crop from the first call using
this argument.
If this is passed, then `precrop_tensor` and `max_random_crop` should be None.
:param limit_patches_nb_to:
This is the third argument that is returned by the function and it represents how many
patches were extracted along each dimension of the original tensor.
Use this argument to make sure that if you apply the function to multiple tensors,
you get the same number of patches for all of them.
:param reshape_to_batch_dim:
:return: See description of `apply_specific_crop_slice` and `limit_patches_nb_to`
"""
assert tensor.dim() == 4 or tensor.dim() == 5
dim = tensor.dim() - 2
assert len(shape) == dim and len(stride) == dim
if apply_specific_crop_slice is not None:
assert max_random_crop is None and precrop_tensor is None
if precrop_tensor is not None:
assert len(precrop_tensor) == dim
assert all([isinstance(sl, (tuple, list)) for sl in precrop_tensor]) and all(
[len(sl) == 2 for sl in precrop_tensor])
else:
precrop_tensor = [(0, 0) for _ in range(dim)]
max_random_crop = [0 for _ in range(dim)] if max_random_crop is None else deepcopy(max_random_crop)
assert len(max_random_crop) == dim
if isinstance(max_random_crop, tuple):
max_random_crop = list(max_random_crop)
for d in range(dim):
max = tensor.shape[2 + d] - precrop_tensor[d][0] - precrop_tensor[d][1] - shape[d]
if max_random_crop[d] > max:
max_random_crop[d] = max
if limit_patches_nb_to is not None:
assert len(limit_patches_nb_to) == dim
# Pick a random crop:
if apply_specific_crop_slice is None:
rnd_crop = [np.random.randint(max_offs + 1) for max_offs in max_random_crop]
apply_specific_crop_slice = (slice(None), slice(None)) + tuple(
slice(precrop[0] + off, full_shp - precrop[1]) for off, precrop, full_shp in
zip(rnd_crop, precrop_tensor, tensor.shape[2:]))
# Unfold it:
tensor = tensor[apply_specific_crop_slice]
N, C = tensor.shape[:2]
for d in range(dim):
tensor = tensor.unfold(d + 2, size=shape[d], step=stride[d])
# Reshape:
nb_patches = tensor.shape[2:2 + len(shape)]
# Along each dimension, we make sure to keep only a specific number of patches (not more):
# This assures compatibility with other patches already extracted from other tensors.
if limit_patches_nb_to is not None:
actual_limits = tuple(lim if lim < nb else nb for nb, lim in zip(nb_patches, limit_patches_nb_to))
valid_patch_slice = (slice(None), slice(None)) + tuple(slice(None, lim) for lim in actual_limits)
tensor = tensor[valid_patch_slice]
nb_patches = actual_limits
# Reshape
if reshape_to_batch_dim:
tensor = tensor.contiguous().view(N, C, -1, *shape)
tensor = tensor.permute(0, 2, 1, *range(3, 3 + dim)).contiguous().view(-1, C, *shape)
return tensor, apply_specific_crop_slice, nb_patches
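# Minimal usage sketch for extract_patches_torch (commented out; the shapes are
# hypothetical and only meant to show what the three return values contain):
#
#   import torch
#   x = torch.randn(2, 3, 32, 32, 32)
#   patches, crop_slc, nb = extract_patches_torch(x, shape=(8, 8, 8), stride=(8, 8, 8))
#   # patches: (2 * 4 * 4 * 4, 3, 8, 8, 8) = (128, 3, 8, 8, 8)
#   # crop_slc: the crop actually applied (here the full tensor, since no random
#   #           crop or pre-crop was requested)
#   # nb: (4, 4, 4) patches along each spatial axis
#   # Passing crop_slc / nb back in for a second tensor of the same spatial shape
#   # keeps both sets of patches aligned, which is how the loss above pairs
#   # embeddings with ground-truth patches.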
| 52.503055
| 144
| 0.574848
|
a045e1858abc454a2c43aa739b0ff2bba3c45b31
| 4,351
|
py
|
Python
|
sel_dedicated_codegen/models/price_model.py
|
xRocketPowerx/python-sel-dedicated
|
3b9b41fbd7abc05d427e8abf688b007b6dcf9e96
|
[
"MIT"
] | 2
|
2019-10-12T08:56:31.000Z
|
2019-10-12T17:34:51.000Z
|
sel_dedicated_codegen/models/price_model.py
|
xRocketPowerx/python-sel-dedicated
|
3b9b41fbd7abc05d427e8abf688b007b6dcf9e96
|
[
"MIT"
] | null | null | null |
sel_dedicated_codegen/models/price_model.py
|
xRocketPowerx/python-sel-dedicated
|
3b9b41fbd7abc05d427e8abf688b007b6dcf9e96
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Seido User REST API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.4.8
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PriceModel(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rub': 'PricePeriodModel',
'eur': 'PricePeriodModel',
'usd': 'PricePeriodModel'
}
attribute_map = {
'rub': 'RUB',
'eur': 'EUR',
'usd': 'USD'
}
def __init__(self, rub=None, eur=None, usd=None): # noqa: E501
"""PriceModel - a model defined in OpenAPI""" # noqa: E501
self._rub = None
self._eur = None
self._usd = None
self.discriminator = None
self.rub = rub
self.eur = eur
self.usd = usd
@property
def rub(self):
"""Gets the rub of this PriceModel. # noqa: E501
:return: The rub of this PriceModel. # noqa: E501
:rtype: PricePeriodModel
"""
return self._rub
@rub.setter
def rub(self, rub):
"""Sets the rub of this PriceModel.
:param rub: The rub of this PriceModel. # noqa: E501
:type: PricePeriodModel
"""
if rub is None:
raise ValueError("Invalid value for `rub`, must not be `None`") # noqa: E501
self._rub = rub
@property
def eur(self):
"""Gets the eur of this PriceModel. # noqa: E501
:return: The eur of this PriceModel. # noqa: E501
:rtype: PricePeriodModel
"""
return self._eur
@eur.setter
def eur(self, eur):
"""Sets the eur of this PriceModel.
:param eur: The eur of this PriceModel. # noqa: E501
:type: PricePeriodModel
"""
if eur is None:
raise ValueError("Invalid value for `eur`, must not be `None`") # noqa: E501
self._eur = eur
@property
def usd(self):
"""Gets the usd of this PriceModel. # noqa: E501
:return: The usd of this PriceModel. # noqa: E501
:rtype: PricePeriodModel
"""
return self._usd
@usd.setter
def usd(self, usd):
"""Sets the usd of this PriceModel.
:param usd: The usd of this PriceModel. # noqa: E501
:type: PricePeriodModel
"""
if usd is None:
raise ValueError("Invalid value for `usd`, must not be `None`") # noqa: E501
self._usd = usd
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PriceModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.89881
| 124
| 0.546541
|
69914c5ccbb400d14eb5c094cce1360dc676e927
| 2,600
|
py
|
Python
|
com/LimePencil/Q12844/XOR.py
|
LimePencil/baekjoonProblems
|
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
|
[
"MIT"
] | null | null | null |
com/LimePencil/Q12844/XOR.py
|
LimePencil/baekjoonProblems
|
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
|
[
"MIT"
] | null | null | null |
com/LimePencil/Q12844/XOR.py
|
LimePencil/baekjoonProblems
|
61eeeeb875585d165d9e39ecdb3d905b4ba6aa87
|
[
"MIT"
] | null | null | null |
# import sys
# import math
# def init(node, start, end):
# if start == end:
# tree[node] = arr[start]
# else:
# init(node*2, start, (start+end)//2)
# init(node*2+1, (start+end)//2+1, end)
# tree[node] = tree[node*2] ^ tree[node*2+1]
# def lazy_update(node,start,end):
# if lazy[node] !=0:
# tree[node] ^= (end-start+1)%2*lazy[node]
# if start!=end:
# lazy[node*2] ^= lazy[node]
# lazy[node*2+1] ^= lazy[node]
# lazy[node] = 0
# def lazy_range(node, start,end,left,right,value):
# lazy_update(node,start,end)
# if left > end or right < start:
# return
# if left <= start and end <= right:
# tree[node] ^= (end-start+1)%2*value
# if start != end:
# lazy[node*2] ^= value
# lazy[node*2+1] ^= value
# return
# lazy_range(node*2,start,(start+end)//2,left,right,value)
# lazy_range(node*2+1,(start+end)//2+1,end,left,right,value)
# tree[node] = tree[node*2] ^ tree[node*2+1]
# def find_xor(node,start,end,left,right):
# lazy_update(node,start,end)
# if left > end or right < start:
# return 0
# if left <= start and end <= right:
# return tree[node]
# return find_xor(node*2,start,(start+end)//2,left,right) ^ find_xor(node*2+1,(start+end)//2+1,end,left,right)
# input = sys.stdin.readline
# n = int(input())
# arr = list(map(int,input().split(" ")))
# tree_depth = 2**(math.ceil(math.log2(n))+1)
# tree = [0]*tree_depth
# lazy = [0]*tree_depth
# init(1,0,n-1)
# m = int(input())
# for _ in range(m):
# l = list(map(int,input().split(" ")))
# if l[0] == 1:
# lazy_range(1,0,n-1,l[1],l[2],l[3])
# else:
# print(find_xor(1,0,n-1,l[1],l[2]))
# fenwick
import sys
def update(idx, val1,val2):
while idx <= n:
tree_one[idx]^= val1
tree_two[idx]^= val2
idx+= (idx&-idx)
def range_update(left,right,val):
update(left,val,((-left+1)%2)*val)
update(right+1,val,((right)%2)*val)
def query(idx):
val1=0
val2=0
f=idx
while idx>0:
val1^= tree_one[idx]
val2^= tree_two[idx]
idx-=(idx&-idx)
return (val1*(f%2))^val2
input = sys.stdin.readline
n=int(input())
tree_one = [0]*(n+1)
tree_two = [0]*(n+1)
weight=list(map(int,input().split()))
for i in range(n):
range_update(i+1,i+1,weight[i])
m=int(input())
for _ in range(m):
l = list(map(int,input().split()))
if l[0] ==1:
range_update(l[1]+1,l[2]+1,l[3])
else:
a=query(l[1])
b=query(l[2]+1)
print(a^b)
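# Note on the Fenwick solution above (comment added for orientation): the two BITs
# implement range-XOR-update with prefix-XOR query. A value v XORed over [l, r]
# shows up in the prefix XOR up to i only if an odd number of the updated positions
# fall inside the prefix, and that parity depends on the parities of i, l and r.
# tree_one stores the raw update values keyed by the endpoints, tree_two stores a
# parity-weighted correction, and query(i) recombines them as (val1 * (i % 2)) ^ val2.
# The XOR over the original 0-based range [i, j] is then query(j + 1) ^ query(i),
# which is exactly what the main loop prints.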
| 26.804124
| 114
| 0.543462
|
321f39d2a75d0c5398ab530e26d009e5951cb69a
| 2,314
|
py
|
Python
|
plugins/radio.py
|
MizoTelegram/tgvc-RadioBot
|
c4d18c0aadc85c1a56ee6841620e97408244180a
|
[
"MIT"
] | null | null | null |
plugins/radio.py
|
MizoTelegram/tgvc-RadioBot
|
c4d18c0aadc85c1a56ee6841620e97408244180a
|
[
"MIT"
] | null | null | null |
plugins/radio.py
|
MizoTelegram/tgvc-RadioBot
|
c4d18c0aadc85c1a56ee6841620e97408244180a
|
[
"MIT"
] | null | null | null |
#MIT License
#Copyright (c) 2021 Zaute Km
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram import Client, filters
from pyrogram.types import Message
from utils import mp, RADIO, USERNAME
from config import Config
from config import STREAM
CHAT=Config.CHAT
ADMINS=Config.ADMINS
@Client.on_message(filters.command(["radio", f"radio@{USERNAME}"]) & filters.user(ADMINS) & (filters.chat(CHAT) | filters.private))
async def radio(client, message: Message):
if 1 in RADIO:
k=await message.reply_text("If you want to stop the Radio Stream, send /stopradio!")
await mp.delete(k)
await mp.delete(message)
return
await mp.start_radio()
k=await message.reply_text(f"Started Radio: <code>{STREAM}</code>")
await mp.delete(k)
await mp.delete(message)
@Client.on_message(filters.command(['stopradio', f"stopradio@{USERNAME}"]) & filters.user(ADMINS) & (filters.chat(CHAT) | filters.private))
async def stop(_, message: Message):
if 0 in RADIO:
k=await message.reply_text("If you want to stream the Radio again, send /radio!")
await mp.delete(k)
await mp.delete(message)
return
await mp.stop_radio()
k=await message.reply_text("The Radio Stream has been stopped.")
await mp.delete(k)
await mp.delete(message)
| 43.660377
| 139
| 0.743302
|
b71f655a33582d30740edf4a44926d35ff4064b8
| 1,480
|
py
|
Python
|
dags/dag_factory/components/utils.py
|
newspipe/Newspipe
|
752becd1b0c6e0358f087d6e5ced01175841a09f
|
[
"Apache-2.0"
] | 3
|
2021-11-24T19:30:51.000Z
|
2022-01-21T05:38:59.000Z
|
dags/dag_factory/components/utils.py
|
newspipe/Newspipe
|
752becd1b0c6e0358f087d6e5ced01175841a09f
|
[
"Apache-2.0"
] | 1
|
2020-09-19T21:00:20.000Z
|
2020-09-23T18:16:48.000Z
|
dags/dag_factory/components/utils.py
|
newspipe/Newspipe
|
752becd1b0c6e0358f087d6e5ced01175841a09f
|
[
"Apache-2.0"
] | 1
|
2021-12-26T07:16:33.000Z
|
2021-12-26T07:16:33.000Z
|
import os
import yaml
import time
from datetime import datetime
def get_all_csv_paths(path):
csv_paths = []
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".csv"):
csv_paths.append(os.path.join(root, file))
return csv_paths
def date_str_to_unixtime(date_str):
if str(date_str).isnumeric():
return int(date_str)
else:
d = None
try:
d = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S %Z')
except:
pass
try:
d = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S %z')
except:
pass
try:
d = datetime.strptime(date_str.split(
'+')[0].replace("T", " "), '%Y-%m-%d %H:%M:%S')
except:
pass
try:
d = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
except:
pass
return int(time.mktime(d.timetuple())) if d else None
def tag_dict_to_dict(tag_dict):
if not tag_dict:
return None
tags = []
tag_list = yaml.safe_load(tag_dict)
if isinstance(tag_list, list):
for tag in tag_list:
tags.append(tag["term"])
return ','.join(tags)
return None
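# Illustrative calls (comments only; the inputs are made up):
#   date_str_to_unixtime('1632000000')                    -> 1632000000 (already epoch)
#   date_str_to_unixtime('Mon, 06 Sep 2021 10:00:00 GMT') -> epoch seconds via one of
#                                                            the strptime formats above
#   tag_dict_to_dict("[{term: python}, {term: airflow}]") -> 'python,airflow'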
| 24.666667
| 71
| 0.506757
|
7f3ddea355743fa25b1859937a10fa00dd6544ef
| 236
|
py
|
Python
|
Mundo1/des16.1.py
|
julimoraislima/Python-CursoEmVideo
|
d21b0485d2f5767039d819cf743255dfd0f27b18
|
[
"MIT"
] | 2
|
2021-01-05T12:31:00.000Z
|
2021-03-20T00:31:18.000Z
|
Mundo1/des16.1.py
|
julimoraislima/Python-CursoEmVideo
|
d21b0485d2f5767039d819cf743255dfd0f27b18
|
[
"MIT"
] | null | null | null |
Mundo1/des16.1.py
|
julimoraislima/Python-CursoEmVideo
|
d21b0485d2f5767039d819cf743255dfd0f27b18
|
[
"MIT"
] | 1
|
2020-12-28T22:56:10.000Z
|
2020-12-28T22:56:10.000Z
|
#challenge 16: using trunc; trunc breaks the real number apart, keeping only the integer part.
from math import trunc
num = float(input('Enter a real number: '))
print('='*40)
print(f'The number {num} has the integer part {trunc(num)}')
print('='*40)
| 29.5
| 79
| 0.70339
|
7f6c43922571e7929d354c75aad5be84af390dd5
| 704
|
py
|
Python
|
movingpandas/__init__.py
|
menegon/movingpandas
|
d1dbf633ee44a9caeabf1a16bf25bc855e0b00ce
|
[
"BSD-3-Clause"
] | null | null | null |
movingpandas/__init__.py
|
menegon/movingpandas
|
d1dbf633ee44a9caeabf1a16bf25bc855e0b00ce
|
[
"BSD-3-Clause"
] | null | null | null |
movingpandas/__init__.py
|
menegon/movingpandas
|
d1dbf633ee44a9caeabf1a16bf25bc855e0b00ce
|
[
"BSD-3-Clause"
] | null | null | null |
"""
`movingpandas`: Implementation of Trajectory classes and functions built on top of GeoPandas
"""
from .trajectory import Trajectory
from .trajectory_generalizer import TrajectoryGeneralizer, MaxDistanceGeneralizer, MinDistanceGeneralizer, \
MinTimeDeltaGeneralizer, DouglasPeuckerGeneralizer
from .trajectory_collection import TrajectoryCollection
from .trajectory_aggregator import TrajectoryCollectionAggregator
from .trajectory_splitter import TrajectorySplitter, TemporalSplitter, ObservationGapSplitter, SpeedSplitter, \
StopSplitter
from .trajectory_stop_detector import TrajectoryStopDetector
from .point_clusterer import PointClusterer
name = 'movingpandas'
__version__ = '0.6.rc1'
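# Typical entry point (illustrative only; see the movingpandas documentation for the
# full API): a Trajectory is built from a GeoDataFrame with point geometry and a
# datetime index, e.g.
#   traj = Trajectory(geo_df, traj_id=1)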
| 41.411765
| 111
| 0.855114
|
84ee92c53bdccb84d2fad511c493ad5a42dca7ab
| 8,643
|
py
|
Python
|
main/help_window.py
|
Gloryness/YoutubeDL-GUI
|
c7052969af1e31ff5d6ff1f1ff027c0d29b0e8ba
|
[
"MIT"
] | 3
|
2020-06-18T10:57:16.000Z
|
2020-06-23T09:10:43.000Z
|
main/help_window.py
|
Gloryness/Download-Videos
|
c7052969af1e31ff5d6ff1f1ff027c0d29b0e8ba
|
[
"MIT"
] | null | null | null |
main/help_window.py
|
Gloryness/Download-Videos
|
c7052969af1e31ff5d6ff1f1ff027c0d29b0e8ba
|
[
"MIT"
] | null | null | null |
import threading
import webbrowser
from tkinter import *
from tkinter import ttk
from tkinter.font import *
count = 1
def reset(win):
global count
count = 1
win.destroy()
class Help:
def __init__(self, version):
self.version = version
global count
if count == 1:
self.help_win = Toplevel()
self.help_win.title('Help | YoutubeDL GUI | v{}'.format(self.version))
self.help_win.iconbitmap('images/#app.ico')
self.help_win.resizable(False, False)
self.help_win.configure(bg='#cbdbfc', bd=5)
self.help_win.geometry("500x300")
self.help_win.protocol('WM_DELETE_WINDOW', lambda: reset(self.help_win))
self.f = Font(family='TkDefaultFont', size=13, weight=BOLD)
exit_btn = ttk.Button(self.help_win, text="Exit", style="some.TButton", command=lambda: reset(self.help_win))
exit_btn.place(x=410, y=2)
count = 2
# threading
def ffmpeg_thread(self):
thread = threading.Thread(target=self.ffmpeg_help)
thread.start()
def detect_urls_thread(self):
thread = threading.Thread(target=self.detect_urls_help)
thread.start()
def downloading_videos_thread(self):
thread = threading.Thread(target=self.downloading_videos_help)
thread.start()
def other_issues_thread(self):
thread = threading.Thread(target=self.other_issues)
thread.start()
def about_gui_thread(self):
thread = threading.Thread(target=self.about_gui)
thread.start()
def add_label(self, win, text, bg="#ffffff", fg="black", x=1, y=1, font=None, bind=(False, None, None), bind1=(False, None), bind2=(False, None)):
label_adder = Label(win, text=text, fg=fg, bg=bg, font=font if font is not None else "TkDefaultFont")
label_adder.place(x=x, y=y)
if bind[0]:
label_adder.bind(bind[1], bind[2])
if bind1[0]:
label_adder.bind(bind1[1], lambda event: label_adder.config(bg="#859fd4"))
if bind2[0]:
label_adder.bind(bind2[1], lambda event: label_adder.config(bg="#cbdbfc"))
def ffmpeg_help(self):
self.add_label(self.help_win, "FFmpeg - Help", '#cbdbfc', x=190, y=3, font=self.f)
self.add_label(self.help_win, "If you have not already read the How-To on github, then it's all explained here:", '#cbdbfc', x=5, y=40)
self.add_label(self.help_win, "If you've tried downloading a video, and got an error based on files that couldn't merge"
"\nor too high quality then that means you have not installed FFmpeg.", '#cbdbfc', x=5, y=80)
self.add_label(self.help_win, "To install FFmpeg, simply go to the 'Tools' tab of this GUI and click 'Install FFmpeg'.", '#cbdbfc', x=5, y=127)
self.add_label(self.help_win, "After that, I recommend choosing the latest version and your architecture"
"\n and then choose 'Static' for linking which should be automatic.", '#cbdbfc', x=5, y=160)
self.add_label(self.help_win, "Once installed, find the folder that contains 'ffmpeg.exe', 'ffplay.exe' or 'ffprobe.exe'"
"\nand then set that folder as your System Environmental Variable.", '#cbdbfc', x=5, y=210)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "(Control Panel>System>Advanced System Settings>Environmental Variables>Path>Edit)", '#cbdbfc', x=-3, y=245, font=f)
self.add_label(self.help_win, "Then you should be done! If you have any issues, Google may have a better answer.", '#cbdbfc', x=5, y=270)
def detect_urls_help(self):
self.add_label(self.help_win, "Selenium - Help", '#cbdbfc', x=190, y=3, font=self.f)
self.add_label(self.help_win, "The option that is next to the download button looks very intimidating, doesn't it?", '#cbdbfc', x=5, y=27)
self.add_label(self.help_win, "It uses the well-known python module 'Selenium' for webbrowser automation.", '#cbdbfc', x=5, y=47)
self.add_label(self.help_win, "You can use it as a normal browser, and clicking 'Execute' to catch all downloadable URLs.", '#cbdbfc', x=5, y=67)
self.add_label(self.help_win, "But how do you use it?", '#cbdbfc', x=5, y=90)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "- Install a WebDriver for the browser you want to use.", '#cbdbfc', x=5, y=110, font=f)
self.add_label(self.help_win, "- You can do this by going to the 'Tools' tab and choosing 'Install WebDriver'.", '#cbdbfc', x=5, y=140, font=f)
self.add_label(self.help_win, "- Next, find that .exe file that you installed and place it anywhere on your machine.", '#cbdbfc', x=5, y=170, font=f)
self.add_label(self.help_win, "- Then go to 'Settings' in the 'File' tab and in Selenium Settings set the PATH to the .exe!", '#cbdbfc', x=5, y=200, font=f)
self.add_label(self.help_win, "- If your using the Firefox browser you can choose to link your Firefox profile (optional).", '#cbdbfc', x=5, y=230, font=f)
self.add_label(self.help_win, "- If you're using the Firefox browser you can choose to link your Firefox profile (optional).", '#cbdbfc', x=5, y=230, font=f)
def downloading_videos_help(self):
self.add_label(self.help_win, "Downloading Videos - Help", '#cbdbfc', x=140, y=3, font=self.f)
self.add_label(self.help_win, "Looks like you need help with Downloading Videos. I got you covered!", '#cbdbfc', x=5, y=30)
self.add_label(self.help_win, "Here are some common issues:", '#cbdbfc', x=5, y=50)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "- Blocked Websites", '#cbdbfc', x=5, y=70, font=f)
self.add_label(self.help_win, "- Bad Network", '#cbdbfc', x=5, y=90, font=f)
self.add_label(self.help_win, "- Checked downloading.log or not?", '#cbdbfc', x=5, y=110, font=f)
self.add_label(self.help_win, "- Has FFmpeg or something similar been installed?", '#cbdbfc', x=5, y=130, font=f)
self.add_label(self.help_win, "- Is FFmpeg a System Environmental Variable?", '#cbdbfc', x=5, y=150, font=f)
self.add_label(self.help_win, "- Is the video available in your country?", '#cbdbfc', x=5, y=170, font=f)
self.add_label(self.help_win, "If none of those options help, consider searching up the problem.", '#cbdbfc', x=5, y=200)
self.add_label(self.help_win, "If still nothing, go to Other Options and check 'Print various debugging info' and then", '#cbdbfc', x=5, y=240)
self.add_label(self.help_win, "download the video again and screenshot it and make an issue on ", '#cbdbfc', x=5, y=260)
self.add_label(self.help_win, "GitHub.", '#cbdbfc', "blue", x=356, y=260,
bind=(True, "<Button-1>", lambda event: webbrowser.open('https://github.com/Gloryness/YoutubeDL-GUI/issues')), bind1=(True, "<Enter>"), bind2=(True, "<Leave>"))
def other_issues(self):
f = Font(family="TkDefaultFont", size=10, weight=BOLD)
self.add_label(self.help_win, "Other Issues - Help", '#cbdbfc', x=180, y=3, font=self.f)
self.add_label(self.help_win, "If you need any other help, then feel free to create an Issue on github.", '#cbdbfc', x=25, y=120, font=f)
self.add_label(self.help_win, "A response usually comes within the day.", '#cbdbfc', x=100, y=140, font=f)
self.add_label(self.help_win, ">> Github Link <<", '#cbdbfc', "blue", x=170, y=200, font=f,
bind=(True, "<Button-1>", lambda event: webbrowser.open('https://github.com/Gloryness/YoutubeDL-GUI/issues')), bind1=(True, "<Enter>"), bind2=(True, "<Leave>"))
def about_gui(self):
self.add_label(self.help_win, "About this GUI", "#cbdbfc", x=185, y=5, font=self.f)
self.add_label(self.help_win, "This GUI was made to make Downloading Videos easier, but in a bit of a stylish way too :)", "#cbdbfc", x=5, y=40)
self.add_label(self.help_win, "It took 2 months to make, on & off and it was all made by one person! (see credits)", "#cbdbfc", x=5, y=60)
self.add_label(self.help_win, "If you would like to look at the source code, it's all on Github.", "#cbdbfc", x=5, y=100)
self.add_label(self.help_win, "If you would like to request a feature, make an Issue on Github.", "#cbdbfc", x=5, y=120)
self.add_label(self.help_win, "If you encounter any bugs, be sure to report them on Github.", "#cbdbfc", x=5, y=140)
| 66.484615
| 183
| 0.648386
|
2f745828a07e10b2da5357c0e5622fdbf3b430b1
| 1,703
|
py
|
Python
|
dockermap/map/runner/signal_stop.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 85
|
2015-01-02T01:05:14.000Z
|
2022-03-23T22:23:12.000Z
|
dockermap/map/runner/signal_stop.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 21
|
2015-02-10T18:25:03.000Z
|
2020-10-28T08:38:39.000Z
|
dockermap/map/runner/signal_stop.py
|
merll/docker-map
|
54e325595fc0b6b9d154dacc790a222f957895da
|
[
"MIT"
] | 15
|
2015-02-27T12:19:35.000Z
|
2021-09-29T06:20:14.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import signal
from requests.exceptions import Timeout
from ..action import ContainerUtilAction
from ..input import ItemType
log = logging.getLogger(__name__)
class SignalMixin(object):
action_method_names = [
(ItemType.CONTAINER, ContainerUtilAction.SIGNAL_STOP, 'signal_stop'),
]
def signal_stop(self, action, c_name, **kwargs):
"""
Stops a container, either using the default client stop method, or sending a custom signal and waiting
for the container to stop.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
"""
client = action.client
sig = action.config.stop_signal
stop_kwargs = self.get_container_stop_kwargs(action, c_name, kwargs=kwargs)
if not sig or sig == 'SIGTERM' or sig == signal.SIGTERM:
try:
client.stop(**stop_kwargs)
except Timeout:
log.warning("Container %s did not stop in time - sent SIGKILL.", c_name)
try:
client.wait(c_name, timeout=stop_kwargs.get('timeout', 10))
except Timeout:
pass
else:
log.debug("Sending signal %s to the container %s and waiting for stop.", sig, c_name)
client.kill(c_name, signal=sig)
client.wait(c_name, timeout=stop_kwargs.get('timeout', 10))
| 35.479167
| 110
| 0.637111
|
3178e77a83eb9d8c57cc70e9fd09e67e6cfc9116
| 2,948
|
py
|
Python
|
Python-Standard-Library/DataCompression/zlib_server.py
|
gaufung/CodeBase
|
0292b06cfe002b3ad0299e43bb51192816a02c74
|
[
"MIT"
] | 1
|
2018-10-06T23:50:53.000Z
|
2018-10-06T23:50:53.000Z
|
Python-Standard-Library/DataCompression/zlib_server.py
|
wsgan001/CodeBase
|
0292b06cfe002b3ad0299e43bb51192816a02c74
|
[
"MIT"
] | null | null | null |
Python-Standard-Library/DataCompression/zlib_server.py
|
wsgan001/CodeBase
|
0292b06cfe002b3ad0299e43bb51192816a02c74
|
[
"MIT"
] | 1
|
2018-10-06T23:50:50.000Z
|
2018-10-06T23:50:50.000Z
|
import zlib
import logging
import socketserver
import binascii
BLOCK_SIZE = 64
class ZlibRequestHandler(socketserver.BaseRequestHandler):
logger = logging.getLogger('Server')
def handle(self):
compressor = zlib.compressobj(1)
filename = self.request.recv(1024).decode('utf-8')
self.logger.debug('Client asked for: %s', filename)
with open(filename, 'rb') as input:
while True:
block = input.read(BLOCK_SIZE)
if not block:
break
self.logger.debug('RAW %r', block)
compressed = compressor.compress(block)
if compressed:
self.logger.debug('Sending %r', binascii.hexlify(block))
self.request.send(compressed)
else:
self.logger.debug('buffered')
remaining = compressor.flush()
while remaining:
to_send = remaining[:BLOCK_SIZE]
remaining = remaining[BLOCK_SIZE:]
self.logger.debug('FLUSHING %r', binascii.hexlify(to_send))
self.request.send(to_send)
return
if __name__ == '__main__':
import socket
import threading
from io import BytesIO
logging.basicConfig(
level=logging.DEBUG,
format='%(name)s: %(message)s'
)
logger = logging.getLogger('Client')
address = ('localhost', 0)
server = socketserver.TCPServer(address, ZlibRequestHandler)
ip, port = server.server_address
t = threading.Thread(target=server.serve_forever)
t.daemon = True
t.start()
logger.info('Contacting server on %s:%s',ip, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
requested_file ='zlib_server.py'
logger.debug('sending filename: %r', requested_file)
len_sent = s.send(requested_file.encode('utf-8'))
buffer = BytesIO()
decompressor = zlib.decompressobj()
while True:
response = s.recv(BLOCK_SIZE)
if not response:
break
logger.debug('READ %r', binascii.hexlify(response))
to_decompress = decompressor.unconsumed_tail + response
while to_decompress:
decompressed = decompressor.decompress(to_decompress)
if decompressed:
logger.debug('DECOMPRESSED %r', decompressed)
buffer.write(decompressed)
to_decompress = decompressor.unconsumed_tail
else:
logger.debug('BUFFERING')
to_decompress = None
remainder = decompressor.flush()
if remainder:
logger.debug('FLUSHED %r', remainder)
buffer.write(remainder)
full_response = buffer.getvalue()
lorem = open('zlib_server.py', 'rb').read()
logger.debug('response matches file content: %s', full_response == lorem)
s.close()
server.socket.close()
| 33.885057
| 76
| 0.604138
|
6171632c035ea3af6bf1253c01d1d4ee6f96c3d7
| 955
|
py
|
Python
|
p7e11.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
p7e11.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
p7e11.py
|
yannickbf-prog/python
|
da4bd2c8668966359b829a8ac2a896afeca2b150
|
[
"MIT"
] | null | null | null |
# Yannick p7e11 Write a program that asks the user for a phrase and passes the phrase as a parameter to a function. The function must return whether or not it is a palindrome, and the main program will print the result on screen:
def comprobarPalindromo(fraseONumero):
fraseONumero = fraseONumero.replace(" ", "")
contarFraseONumero = int(len(fraseONumero) / 2)
#print(fraseONumero)
y = 1
esPalindromo = True
for i in range(0, (contarFraseONumero)):
#print(fraseONumero[i])
#print(fraseONumero[-y])
if (fraseONumero[i]) != (fraseONumero[-y]):
esPalindromo = False
#guardarPalabraInversa = guardarPalabraInversa +(fraseONumero[i])
y = y+1
return esPalindromo
fraseONumero1 = input("Write a phrase or a number")
if (comprobarPalindromo(fraseONumero1)):
print(f"{fraseONumero1} is a palindrome")
else:
print(f"{fraseONumero1} is not a palindrome")
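# A shorter, equivalent check (sketch only, not part of the exercise above): strip the
# spaces and compare the string with its reverse.
def es_palindromo(frase):
    limpia = frase.replace(" ", "")
    return limpia == limpia[::-1]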
| 31.833333
| 208
| 0.66178
|
37b2f198905fcd4f56760b4d3e65f326d4b8aeb0
| 1,028
|
py
|
Python
|
examples/ssl_echo_client.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
examples/ssl_echo_client.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
examples/ssl_echo_client.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import logging
from proxy.core.connection import TcpServerConnection
from proxy.common.constants import DEFAULT_BUFFER_SIZE
logger = logging.getLogger(__name__)
if __name__ == '__main__':
client = TcpServerConnection('::', 12345)
client.connect()
client.wrap('example.com', ca_file='ca-cert.pem')
# wrap() will by default set connection to nonblocking
# flip it back to blocking
client.connection.setblocking(True)
try:
while True:
client.send(b'hello')
data = client.recv(DEFAULT_BUFFER_SIZE)
if data is None:
break
logger.info(data.tobytes())
finally:
client.close()
| 29.371429
| 86
| 0.659533
|
911298bf3109f6aa8d3ab54cb169fe3041c1faf6
| 459
|
py
|
Python
|
0001-0100/0041-First Missing Positive/0041-First Missing Positive.py
|
jiadaizhao/LeetCode
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
[
"MIT"
] | 49
|
2018-05-05T02:53:10.000Z
|
2022-03-30T12:08:09.000Z
|
0001-0100/0041-First Missing Positive/0041-First Missing Positive.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 11
|
2017-12-15T22:31:44.000Z
|
2020-10-02T12:42:49.000Z
|
0001-0100/0041-First Missing Positive/0041-First Missing Positive.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 28
|
2017-12-05T10:56:51.000Z
|
2022-01-26T18:18:27.000Z
|
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
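# Cyclic sort / index-as-hash: swap every value v in [1, n] into slot v - 1, then the
# first slot i whose value is not i + 1 gives the answer. O(n) time, O(1) extra space.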
for i in range(len(nums)):
while nums[i] != i + 1 and 1 <= nums[i] <= len(nums) and nums[i] != nums[nums[i] - 1]:
temp = nums[i]
nums[i] = nums[nums[i] - 1]
nums[temp - 1] = temp
for i, num in enumerate(nums):
if num != i + 1:
return i + 1
return len(nums) + 1
| 35.307692
| 98
| 0.448802
|
d2cefc6573275ee90ff12079f82e95080844dc61
| 563
|
py
|
Python
|
django_contactme/admin.py
|
petekalo/django-contactme
|
8a7d1e0213f0e02e9fa8043fdb74edbf8be921c0
|
[
"MIT"
] | null | null | null |
django_contactme/admin.py
|
petekalo/django-contactme
|
8a7d1e0213f0e02e9fa8043fdb74edbf8be921c0
|
[
"MIT"
] | null | null | null |
django_contactme/admin.py
|
petekalo/django-contactme
|
8a7d1e0213f0e02e9fa8043fdb74edbf8be921c0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django_contactme.models import ContactMsg
class ContactMsgAdmin(admin.ModelAdmin):
list_display = ('name', 'ip_address', 'submit_date')
fieldsets = (
(None, {'fields': ('site',)}),
(_('Content'), {'fields': ('name', 'email', 'message',)}),
(_('Metadata'), {'fields': ('submit_date', 'ip_address')}),
)
date_hierarchy = 'submit_date'
ordering = ('-submit_date',)
admin.site.register(ContactMsg, ContactMsgAdmin)
| 31.277778
| 67
| 0.648313
|
8c8cf11af61fe072f5f42646b91c1ee83b90afca
| 1,229
|
py
|
Python
|
app/auth/forms.py
|
crandquist/PrinterApp
|
fda5ba2b7494c83ebec9eef6bb03eb8cc176b5d4
|
[
"MIT"
] | null | null | null |
app/auth/forms.py
|
crandquist/PrinterApp
|
fda5ba2b7494c83ebec9eef6bb03eb8cc176b5d4
|
[
"MIT"
] | 12
|
2020-07-23T03:04:46.000Z
|
2020-08-10T19:37:40.000Z
|
app/auth/forms.py
|
crandquist/PrinterApp
|
fda5ba2b7494c83ebec9eef6bb03eb8cc176b5d4
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
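# WTForms automatically runs methods named validate_<fieldname> as extra inline
# validators for the matching field, so the two checks below need no explicit wiring.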
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
| 39.645161
| 79
| 0.703824
|
da1af07ea719efb6bd0de260ea8765075f164d15
| 7,704
|
py
|
Python
|
dgi_repo/fcrepo3/datastream_resource.py
|
discoverygarden/dgi_repo
|
1bb30d51a4cefc2ae510f5aeac4b36ed7a78f6a6
|
[
"Apache-2.0"
] | null | null | null |
dgi_repo/fcrepo3/datastream_resource.py
|
discoverygarden/dgi_repo
|
1bb30d51a4cefc2ae510f5aeac4b36ed7a78f6a6
|
[
"Apache-2.0"
] | null | null | null |
dgi_repo/fcrepo3/datastream_resource.py
|
discoverygarden/dgi_repo
|
1bb30d51a4cefc2ae510f5aeac4b36ed7a78f6a6
|
[
"Apache-2.0"
] | null | null | null |
"""
Class file for the implementation of the datastream resource.
"""
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
import dgi_repo.database.read.repo_objects as object_reader
import dgi_repo.database.read.datastreams as ds_reader
import dgi_repo.database.write.datastreams as ds_writer
import dgi_repo.database.delete.datastreams as ds_purger
import dgi_repo.utilities as utils
import dgi_repo.fcrepo3.utilities as fedora_utils
from dgi_repo.exceptions import (ObjectDoesNotExistError,
DatastreamDoesNotExistError,
DatastreamExistsError,
DatastreamConflictsError,
ExternalDatastreamsNotSupported)
from dgi_repo.fcrepo3 import api, foxml
from dgi_repo.database.utilities import get_connection
class DatastreamResource(api.DatastreamResource):
"""
Provide the datastream CRUD endpoints.
"""
def _create_datastream(self, req, pid, dsid):
"""
Persist the new datastream.
Raises:
DatastreamExistsError: The datastream already exists.
"""
conn = get_connection(ISOLATION_LEVEL_READ_COMMITTED)
with conn, conn.cursor() as cursor:
ds_info = ds_reader.datastream_from_raw(pid, dsid,
cursor=cursor).fetchone()
if ds_info:
raise DatastreamExistsError(pid, dsid)
self._upsert_ds(req, pid, dsid, cursor)
def _update_datastream(self, req, pid, dsid):
"""
Commit the modification to the datastream.
"""
conn = get_connection(ISOLATION_LEVEL_READ_COMMITTED)
with conn, conn.cursor() as cursor:
ds_reader.datastream_from_raw(pid, dsid, cursor=cursor)
ds_info = cursor.fetchone()
if ds_info is None:
raise DatastreamDoesNotExistError(pid, dsid)
ds = dict(ds_info)
ds['committed'] = ds['modified']
ds['datastream'] = ds['id']
del ds['id']
# Check modified date param, exiting if needed.
modified_date = req.get_param('lastModifiedDate')
if modified_date is not None:
modified_date = utils.iso8601_to_datetime(modified_date)
if ds['committed'] > modified_date:
raise DatastreamConflictsError(pid, dsid, ds['committed'],
modified_date)
if ds_info['versioned']:
ds_writer.upsert_old_datastream(ds, cursor=cursor)
if ds['resource'] is not None:
ds['mimetype'] = ds_reader.mime_from_resource(
ds['resource'],
cursor=cursor
).fetchone()['mime']
self._upsert_ds(req, pid, dsid, cursor, ds=ds)
return
def _delete_datastream(self, req, pid, dsid):
"""
Purge the datastream (or range of versions).
@TODO: handle logMessage when audit is dealt with.
"""
start = utils.iso8601_to_datetime(req.get_param('startDT'))
end = utils.iso8601_to_datetime(req.get_param('endDT'))
with get_connection() as conn, conn.cursor() as cursor:
ds_purger.delete_datastream_versions(
pid,
dsid,
start=start,
end=end,
cursor=cursor
)
if not cursor.rowcount:
object_info = object_reader.object_id_from_raw(
pid,
cursor=cursor
).fetchone()
if object_info is None:
# Only raise if the object is missing, to match Fedora's behaviour.
raise ObjectDoesNotExistError(pid)
foxml.internalize_rels(pid, dsid,
req.env['wsgi.identity'].source_id,
cursor=cursor)
return (start, end)
def _upsert_ds(self, req, pid, dsid, cursor, ds=None):
"""
Upsert a datastream.
Raises:
ObjectDoesNotExistError: The object doesn't exist.
"""
if ds is not None:
ds = dict(ds)
del ds['modified']
else:
ds = {}
object_info = object_reader.object_id_from_raw(
pid, cursor=cursor).fetchone()
if object_info is None:
raise ObjectDoesNotExistError(pid)
control_group = req.get_param('controlGroup', default='M')
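# Fedora 3 datastream control groups: M = managed content, X = inline XML,
# R = redirect (URL reference), E = externally referenced (rejected below).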
if control_group == 'E':
raise ExternalDatastreamsNotSupported
ds_location = req.get_param('dsLocation')
data_ref = None
data = None
if ds_location is not None:
if control_group == 'R':
data_ref = {
'TYPE': 'URL',
'REF': ds_location,
}
else:
data_ref = {
'TYPE': 'INTERNAL_ID',
'REF': ds_location,
}
else:
try:
data = req.get_param('file').file
except AttributeError:
# Data can come as the request body.
if req.content_length:
data = req.stream
checksums = []
checksum = req.get_param('checksum')
checksum_type = req.get_param('checksumType')
if checksum is not None or checksum_type is not None:
checksums.append({
'checksum': checksum,
'type': checksum_type,
})
ds.update({
'dsid': dsid,
'object': object_info['id'],
'log': fedora_utils.resolve_log(req, cursor),
'checksums': checksums,
'data_ref': data_ref,
'data': data,
})
label_in = req.get_param('dsLabel')
if label_in is not None:
ds['label'] = label_in
ds.setdefault('label', '')
ds.setdefault('control_group', control_group)
version_in = req.get_param('versionable')
if version_in:
ds['versioned'] = version_in != 'false'
ds.setdefault('versioned', True)
mime_in = req.get_param('mimeType')
if mime_in:
ds['mimetype'] = mime_in
ds.setdefault('mimetype', 'application/octet-stream')
state_in = req.get_param('dsState')
if state_in:
ds['state'] = state_in
ds.setdefault('state', 'A')
fedora_utils.write_ds(ds, cursor=cursor)
foxml.internalize_rels(pid, dsid,
req.env['wsgi.identity'].source_id,
cursor=cursor)
def _get_datastream_info(self, pid, dsid, asOfDateTime=None, **kwargs):
"""
Get the ds* values in a dict, to build the datastream profile.
"""
with get_connection() as conn, conn.cursor() as cursor:
ds_reader.datastream_from_raw(pid, dsid, cursor=cursor)
ds_info = cursor.fetchone()
if ds_info is None:
raise DatastreamDoesNotExistError(pid, dsid)
if asOfDateTime is not None:
time = utils.iso8601_to_datetime(asOfDateTime)
ds_info = ds_reader.datastream_as_of_time(
ds_info['id'],
time,
cursor=cursor
)
if ds_info is None:
raise DatastreamDoesNotExistError(pid, dsid, time)
return fedora_utils.datastream_to_profile(ds_info, cursor)
| 36.339623
| 78
| 0.548157
|
18e56b70f69726222243ae0f89028e97b4405958
| 195
|
py
|
Python
|
SPOJ/LASTDIG2.py
|
cquark7/competitive_programming
|
c90754d91628b273703c74e373356394732f0cd3
|
[
"MIT"
] | 1
|
2019-06-16T08:21:56.000Z
|
2019-06-16T08:21:56.000Z
|
SPOJ/LASTDIG2.py
|
cquark7/competitive_programming
|
c90754d91628b273703c74e373356394732f0cd3
|
[
"MIT"
] | null | null | null |
SPOJ/LASTDIG2.py
|
cquark7/competitive_programming
|
c90754d91628b273703c74e373356394732f0cd3
|
[
"MIT"
] | null | null | null |
d2 = {n: {p % 4: str(n ** p)[-1] for p in range(4, 8)} for n in range(10)}
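# Last digits of n**p repeat with a period dividing 4, so exponents 4..7 cover every
# residue of p mod 4 while skipping the small-exponent edge cases; e.g. 2**5 = 32 and
# 2**9 = 512 both end in 2. b == 0 is handled separately below since n**0 = 1.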
for tc in range(int(input())):
a, b = input().split()
b = int(b)
print(d2[int(a[-1])][b % 4] if b else '1')
| 32.5
| 74
| 0.492308
|
d68302e0aa5fba61998c63dd126fce2d702acbf7
| 2,433
|
py
|
Python
|
open_spiel/python/examples/playthrough.py
|
ReedyHarbour/MultiplayerPoker
|
70f3872a046d954d6ec2fca3d829ec36ac70e259
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/examples/playthrough.py
|
ReedyHarbour/MultiplayerPoker
|
70f3872a046d954d6ec2fca3d829ec36ac70e259
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/examples/playthrough.py
|
ReedyHarbour/MultiplayerPoker
|
70f3872a046d954d6ec2fca3d829ec36ac70e259
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Play a game, selecting random moves, and save what we see.
This can be used to check by hand the behaviour of a game, and also
as the basis for test cases.
Example usage:
```
playthrough --game kuhn_poker --params players=3
```
"""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python.algorithms import generate_playthrough
FLAGS = flags.FLAGS
flags.DEFINE_string(
"game", "kuhn_poker(players=3)", "Name of the game, with optional parameters, e.g. "
"'kuhn_poker' or 'go(komi=4.5,board_size=19)'.")
flags.DEFINE_string("output_file", "output.txt", "Where to write the data to.")
flags.DEFINE_list("actions", None,
"A (possibly partial) list of action choices to make.")
flags.DEFINE_string("update_path", None,
"If set, regenerates all playthroughs in the path.")
flags.DEFINE_bool(
"alsologtostdout", False,
"If True, the trace will be written to std-out while it "
"is being constructed (in addition to the usual behavior).")
flags.DEFINE_integer("shard", 0, "The shard to update.")
flags.DEFINE_integer("num_shards", 1, "How many shards to use for updates.")
def main(unused_argv):
if FLAGS.update_path:
generate_playthrough.update_path(FLAGS.update_path, FLAGS.shard,
FLAGS.num_shards)
else:
if not FLAGS.game:
raise ValueError("Must specify game")
actions = FLAGS.actions
if actions is not None:
actions = [int(x) for x in actions]
text = generate_playthrough.playthrough(
FLAGS.game, actions, alsologtostdout=FLAGS.alsologtostdout)
if FLAGS.output_file:
with open(FLAGS.output_file, "w") as f:
f.write(text)
else:
logging.info(text)
if __name__ == "__main__":
app.run(main)
| 33.328767
| 88
| 0.706124
|
982703282a52fe3b6af5a568dcde080ce74e987c
| 26,752
|
py
|
Python
|
tensorboard/plugins/graph/keras_util_test.py
|
tjgq/tensorboard
|
751c961b90183115e4ab0ae3975d50146c0705b9
|
[
"Apache-2.0"
] | 7
|
2020-04-04T16:25:42.000Z
|
2021-10-02T18:26:56.000Z
|
tensorboard/plugins/graph/keras_util_test.py
|
tjgq/tensorboard
|
751c961b90183115e4ab0ae3975d50146c0705b9
|
[
"Apache-2.0"
] | 1
|
2021-09-02T14:57:13.000Z
|
2021-09-02T14:57:13.000Z
|
tensorboard/plugins/graph/keras_util_test.py
|
tjgq/tensorboard
|
751c961b90183115e4ab0ae3975d50146c0705b9
|
[
"Apache-2.0"
] | 4
|
2020-08-08T18:08:44.000Z
|
2021-05-13T05:22:40.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from tensorflow.python.platform import test
from tensorboard.plugins.graph import keras_util
from tensorboard.util import test_util
class KerasUtilTest(tf.test.TestCase):
def assertGraphDefToModel(self, expected_proto, model):
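# Serialise the model to its Keras JSON config and check that keras_util converts it
# into the expected GraphDef text proto.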
model_config = json.loads(model.to_json())
self.assertProtoEquals(
expected_proto, keras_util.keras_model_to_graph_def(model_config)
)
def DISABLED_test_keras_model_to_graph_def_sequential_model(self):
expected_proto = """
node {
name: "sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/dense"
input: "sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/my_relu"
input: "sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential/dense_1"
input: "sequential/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/activation"
input: "sequential/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
tf.keras.layers.Dense(10),
tf.keras.layers.Activation("softmax"),
]
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model(self):
expected_proto = """
node {
name: "model/functional_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/dense"
input: "model/functional_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_1"
input: "model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_2"
input: "model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="functional_input")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(inputs=inputs, outputs=d2(d1(d0(inputs))))
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model_with_cycle(self):
expected_proto = """
node {
name: "model/cycle_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/dense"
input: "model/cycle_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_1"
input: "model/dense"
input: "model/dense_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_2"
input: "model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="cycle_input")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(
inputs=inputs, outputs=d1(d2(d1(d0(inputs))))
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_lstm_model(self):
expected_proto = """
node {
name: "model/lstm_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/simple_rnn"
input: "model/lstm_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "SimpleRNN"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(None, 5), name="lstm_input")
encoder = tf.keras.layers.SimpleRNN(256)
model = tf.keras.models.Model(inputs=inputs, outputs=encoder(inputs))
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_nested_sequential_model(self):
expected_proto = """
node {
name: "sequential_2/sequential_1_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential_input"
input: "sequential_2/sequential_1_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/dense_input"
input: "sequential_2/sequential_1/sequential_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/dense"
input: "sequential_2/sequential_1/sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/activation"
input: "sequential_2/sequential_1/sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential_2/sequential_1/my_relu"
input: "sequential_2/sequential_1/sequential/activation"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential_2/dense_1"
input: "sequential_2/sequential_1/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential_2/activation_1"
input: "sequential_2/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
sub_sub_model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu"),
]
)
sub_model = tf.keras.models.Sequential(
[sub_sub_model, tf.keras.layers.Activation("relu", name="my_relu")]
)
model = tf.keras.models.Sequential(
[
sub_model,
tf.keras.layers.Dense(10),
tf.keras.layers.Activation("softmax"),
]
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_multi_inputs(self):
expected_proto = """
node {
name: "model/main_input"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/embedding"
input: "model/main_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Embedding"
}
}
}
node {
name: "model/simple_rnn"
input: "model/embedding"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "SimpleRNN"
}
}
}
node {
name: "model/aux_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/concatenate"
input: "model/simple_rnn"
input: "model/aux_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Concatenate"
}
}
}
node {
name: "model/dense"
input: "model/concatenate"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/main_output"
input: "model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/aux_output"
input: "model/simple_rnn"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
main_input = tf.keras.layers.Input(
shape=(100,), dtype="int32", name="main_input"
)
x = tf.keras.layers.Embedding(
output_dim=512, input_dim=10000, input_length=100
)(main_input)
rnn_out = tf.keras.layers.SimpleRNN(32)(x)
auxiliary_output = tf.keras.layers.Dense(
1, activation="sigmoid", name="aux_output"
)(rnn_out)
auxiliary_input = tf.keras.layers.Input(shape=(5,), name="aux_input")
x = tf.keras.layers.concatenate([rnn_out, auxiliary_input])
x = tf.keras.layers.Dense(64, activation="relu")(x)
main_output = tf.keras.layers.Dense(
1, activation="sigmoid", name="main_output"
)(x)
model = tf.keras.models.Model(
inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output],
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model_as_layer(self):
expected_proto = """
node {
name: "model_1/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/dense"
input: "model_1/model/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model_1/model/dense_1"
input: "model_1/model/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model_1/concatenate"
input: "model_1/model/dense"
input: "model_1/model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Concatenate"
}
}
}
node {
name: "model_1/dense_2"
input: "model_1/concatenate"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs1 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_1")
inputs2 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_2")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
sub_model = tf.keras.models.Model(
inputs=[inputs2, inputs1], outputs=[d0(inputs1), d1(inputs2)]
)
main_outputs = d2(
tf.keras.layers.concatenate(sub_model([inputs2, inputs1]))
)
model = tf.keras.models.Model(
inputs=[inputs2, inputs1], outputs=main_outputs
)
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_functional_sequential_model(
self,
):
expected_proto = """
node {
name: "model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/sequential/dense_input"
input: "model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/sequential/dense"
input: "model/sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/sequential/my_relu"
input: "model/sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "model/dense_1"
input: "model/sequential/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input")
sub_model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
]
)
dense = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(
inputs=inputs, outputs=dense(sub_model(inputs))
)
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_sequential_functional_model(
self,
):
expected_proto = """
node {
name: "sequential/model_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/model/dense"
input: "sequential/model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/dense_1"
input: "sequential/model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/my_relu"
input: "sequential/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input")
dense = tf.keras.layers.Dense(64, activation="relu")
sub_model = tf.keras.models.Model(inputs=inputs, outputs=dense(inputs))
model = tf.keras.models.Sequential(
[
sub_model,
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
]
)
self.assertGraphDefToModel(expected_proto, model)
if __name__ == "__main__":
tf.test.main()
| 27.437949
| 80
| 0.366216
|
82499e1efb50b0439d17ad9a5c921641ee95c56a
| 1,308
|
py
|
Python
|
tests/settings.py
|
systemallica/django-snitch
|
557ae2a0e01184ffc552536507782fff39785457
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
systemallica/django-snitch
|
557ae2a0e01184ffc552536507782fff39785457
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
systemallica/django-snitch
|
557ae2a0e01184ffc552536507782fff39785457
|
[
"MIT"
] | null | null | null |
DEBUG = True
USE_TZ = True
SECRET_KEY = "dummy"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"snitch",
"tests.app",
]
# Schedules
INSTALLED_APPS += ["django_celery_beat", "snitch.schedules"]
SITE_ID = 1
LANGUAGE_CODE = "en"
LANGUAGES = [("en", "English")]
MIDDLEWARE = ()
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"debug": DEBUG,
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
}
]
# DJANGO PUSH NOTIFICATIONS
# ------------------------------------------------------------------------------
# See: https://github.com/jazzband/django-push-notifications
INSTALLED_APPS += ("push_notifications",)
PUSH_NOTIFICATIONS_SETTINGS = {
"CONFIG": "push_notifications.conf.AppConfig",
"APPLICATIONS": {"snitch": {"PLATFORM": "FCM", "API_KEY": ""}},
}
# SNITCH SETTINGS
# ------------------------------------------------------------------------------
SNITCH_NOTIFICATION_MODEL = "app.Notification"
SNITCH_ENABLED_SEND_EMAILS = False
| 26.693878
| 85
| 0.561162
|
7092d44a81a731393a94d95dc59a7a424526ad3e
| 2,994
|
py
|
Python
|
config.py
|
simpen/flaskz
|
49efe908cd8a325b51453d960155f5ad20fa1e16
|
[
"MIT"
] | null | null | null |
config.py
|
simpen/flaskz
|
49efe908cd8a325b51453d960155f5ad20fa1e16
|
[
"MIT"
] | null | null | null |
config.py
|
simpen/flaskz
|
49efe908cd8a325b51453d960155f5ad20fa1e16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 25
MAIL_USE_TLS = True
MAIL_USE_SSL = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <471120122@qq.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 25
FLASKY_COMMENTS_PER_PAGE = 25
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
}
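# Typical usage (sketch; assumes the Flasky-style application factory, which is not
# defined in this file):
# app = create_app(os.getenv('FLASK_CONFIG') or 'default')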
| 30.242424
| 74
| 0.669339
|
2ebbaf7ffbcc42f2f7fe49ef985b7a9549a40b18
| 24,681
|
py
|
Python
|
test/functional/feature_csv_activation.py
|
cho2559/readercoin
|
aad2d84976b4fc5c8577f5460a303ec7492a3f25
|
[
"MIT"
] | null | null | null |
test/functional/feature_csv_activation.py
|
cho2559/readercoin
|
aad2d84976b4fc5c8577f5460a303ec7492a3f25
|
[
"MIT"
] | null | null | null |
test/functional/feature_csv_activation.py
|
cho2559/readercoin
|
aad2d84976b4fc5c8577f5460a303ec7492a3f25
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from decimal import Decimal
from itertools import product
from io import BytesIO
import time
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import ToHex, CTransaction
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
)
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import (
assert_equal,
get_bip9_status,
hex_str_to_bytes,
)
BASE_RELATIVE_LOCKTIME = 10
SEQ_DISABLE_FLAG = 1 << 31
SEQ_RANDOM_HIGH_BIT = 1 << 25
SEQ_TYPE_FLAG = 1 << 22
SEQ_RANDOM_LOW_BIT = 1 << 18
def relative_locktime(sdf, srhb, stf, srlb):
"""Returns a locktime with certain bits set."""
locktime = BASE_RELATIVE_LOCKTIME
if sdf:
locktime |= SEQ_DISABLE_FLAG
if srhb:
locktime |= SEQ_RANDOM_HIGH_BIT
if stf:
locktime |= SEQ_TYPE_FLAG
if srlb:
locktime |= SEQ_RANDOM_LOW_BIT
return locktime
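# Example: relative_locktime(False, False, True, False) == BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG,
# i.e. a relative lock of 10 interpreted in time units because only the type flag is set.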
def all_rlt_txs(txs):
return [tx['tx'] for tx in txs]
def sign_transaction(node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransactionwithwallet(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def create_bip112special(node, input, txversion, address):
tx = create_transaction(node, input, address, amount=Decimal("49.98"))
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def send_generic_input_tx(node, coinbases, address):
return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction(node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49.99")))))
def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0):
"""Returns a list of bip68 transactions with different bits set."""
txs = []
assert(len(bip68inputs) >= 16)
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip68inputs[i], address, amount=Decimal("49.98"))
tx.nVersion = txversion
tx.vin[0].nSequence = locktime + locktime_delta
tx = sign_transaction(node, tx)
tx.rehash()
txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
return txs
def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0):
"""Returns a list of bip112 transactions with different bits set."""
txs = []
assert(len(bip112inputs) >= 16)
for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
locktime = relative_locktime(sdf, srhb, stf, srlb)
tx = create_transaction(node, bip112inputs[i], address, amount=Decimal("49.98"))
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = locktime + locktime_delta
tx.nVersion = txversion
signtx = sign_transaction(node, tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
tx.rehash()
txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf})
return txs
class BIP68_112_113Test(ReadercoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4', '-addresstype=legacy']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def generate_blocks(self, number, version, test_blocks=None):
if test_blocks is None:
test_blocks = []
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append(block)
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version=536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def sync_blocks(self, blocks, success=True):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success)
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.log.info("Generate blocks in the past for coinbase outputs.")
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that the csv softfork is DEFINED")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
self.sync_blocks(test_blocks)
self.log.info("Advance from DEFINED to STARTED, height = 143")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
self.log.info("Fail to achieve LOCKED_IN")
# 100 out of 144 signal bit 0. Use a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
self.sync_blocks(test_blocks)
self.log.info("Failed to advance past STARTED, height = 287")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
self.log.info("Generate blocks to achieve LOCK-IN")
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
self.sync_blocks(test_blocks)
self.log.info("Advanced from STARTED to LOCKED_IN, height = 431")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Generate 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
self.sync_blocks(test_blocks)
# Inputs at height = 572
#
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
# 1 normal input
bip113input = send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int(inputblockhash, 16)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
self.sync_blocks(test_blocks)
self.log.info("Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)")
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = create_transaction(self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = create_bip68txs(self.nodes[0], bip68inputs, 1, self.nodeaddress)
bip68txs_v2 = create_bip68txs(self.nodes[0], bip68inputs, 2, self.nodeaddress)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress)
bip112txs_vary_nSequence_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1)
bip112txs_vary_nSequence_9_v2 = create_bip112txs(self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress)
bip112txs_vary_OP_CSV_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1)
bip112txs_vary_OP_CSV_9_v2 = create_bip112txs(self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = create_bip112special(self.nodes[0], bip112specialinput, 1, self.nodeaddress)
bip112tx_special_v2 = create_bip112special(self.nodes[0], bip112specialinput, 2, self.nodeaddress)
self.log.info("TESTING")
self.log.info("Pre-Soft Fork Tests. All txs should pass.")
self.log.info("Test version 1 txs")
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
self.sync_blocks(test_blocks)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
self.log.info("Post-Soft Fork Tests.")
self.log.info("BIP 113 tests")
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.sync_blocks([self.create_test_block([bip113tx])], success=False)
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
self.sync_blocks([self.create_test_block([bip113tx])])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
self.sync_blocks(test_blocks)
self.log.info("BIP 68 tests")
self.log.info("Test version 1 txs - all should still pass")
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("Test version 2 txs")
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
for tx in bip68timetxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
for tx in bip68heighttxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
self.sync_blocks(test_blocks)
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
self.sync_blocks(test_blocks)
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
self.sync_blocks([self.create_test_block(bip68success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
self.log.info("BIP 112 tests")
self.log.info("Test version 1 txs")
# -1 OP_CSV tx should fail
self.sync_blocks([self.create_test_block([bip112tx_special_v1])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
self.log.info("Test version 2 txs")
# -1 OP_CSV tx should fail
self.sync_blocks([self.create_test_block([bip112tx_special_v2])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# If sequencelock types mismatch, tx should fail
fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
for tx in fail_txs:
self.sync_blocks([self.create_test_block([tx])], success=False)
# Remaining txs should pass, just test masking works properly
success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
self.sync_blocks([self.create_test_block(success_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Additional test: check that comparison of two time types works properly
time_txs = []
for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
signtx = sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
self.sync_blocks([self.create_test_block(time_txs)])
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# TODO: Test empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
| 50.783951
| 166
| 0.695636
|
1d886c4765d7d85e7da97aa2fc71a047f7ee9135
| 4,816
|
py
|
Python
|
group/migrations/0001_initial.py
|
mingyuchoo/myartworks
|
9404dad4b9ee0047049a1a0196cb9ac32ce520d7
|
[
"MIT"
] | 1
|
2016-08-16T06:34:36.000Z
|
2016-08-16T06:34:36.000Z
|
group/migrations/0001_initial.py
|
mingyuchoo/myartworks
|
9404dad4b9ee0047049a1a0196cb9ac32ce520d7
|
[
"MIT"
] | null | null | null |
group/migrations/0001_initial.py
|
mingyuchoo/myartworks
|
9404dad4b9ee0047049a1a0196cb9ac32ce520d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-15 01:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('classification', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Apply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'permissions': [['view_apply', 'Can view apply']],
},
),
migrations.CreateModel(
name='Bookmark',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'permissions': [['view_bookmark', 'Can view bookmark']],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=200)),
('created_time', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'permissions': [['view_comment', 'Can view comment']],
},
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('R', 'Request'), ('D', 'Denied'), ('A', 'Approved')], default='R', max_length=1)),
('updated_time', models.DateTimeField(default=django.utils.timezone.now)),
('requested_date', models.DateTimeField(default=django.utils.timezone.now)),
('joined_date', models.DateTimeField(null=True)),
],
options={
'permissions': [['view_membership', 'Can view membership']],
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=5120)),
('status', models.CharField(choices=[('O', 'Open'), ('C', 'Closed')], default='O', max_length=1)),
('created_time', models.DateTimeField(default=django.utils.timezone.now)),
('updated_time', models.DateTimeField(default=django.utils.timezone.now)),
('comment_count', models.IntegerField(default=0)),
('bookmark_count', models.IntegerField(default=0)),
('apply_count', models.IntegerField(default=0)),
('share_count', models.IntegerField(default=0)),
('category', models.ManyToManyField(to='classification.Category')),
('manager', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_project_leaders', to=settings.AUTH_USER_MODEL)),
('member', models.ManyToManyField(through='group.Membership', to=settings.AUTH_USER_MODEL)),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'permissions': [['view_project', 'Can view project']],
},
),
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(default=django.utils.timezone.now)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='group.Project')),
('writer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_shares', to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': [['view_share', 'Can view share']],
},
),
]
| 47.215686
| 170
| 0.580357
|
75e50b92d760f4ceae1d6a425cc18253285d6755
| 5,289
|
py
|
Python
|
saleor/graphql/order/tests/benchmark/test_order.py
|
siyoola/saleor
|
4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/order/tests/benchmark/test_order.py
|
siyoola/saleor
|
4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0
|
[
"CC-BY-4.0"
] | 86
|
2021-11-01T04:51:55.000Z
|
2022-03-30T16:30:16.000Z
|
saleor/graphql/order/tests/benchmark/test_order.py
|
siyoola/saleor
|
4e52b8655a5570a8ce0a3b1484b4d8b46fbd0ad0
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
import pytest
from ....checkout.tests.benchmark.test_checkout_mutations import (
FRAGMENT_ADDRESS,
FRAGMENT_PRODUCT_VARIANT,
)
from ....tests.utils import get_graphql_content
FRAGMENT_DISCOUNTS = """
fragment OrderDiscounts on OrderDiscount {
id
type
valueType
value
name
translatedName
}
"""
FRAGMENT_SHIPPING_METHODS = """
fragment AvailableShippingMethods on ShippingMethod {
id
price {
amount
}
minimumOrderPrice {
amount
currency
}
}
"""
FRAGMENT_ORDER_DETAILS = (
FRAGMENT_ADDRESS
+ FRAGMENT_PRODUCT_VARIANT
+ FRAGMENT_DISCOUNTS
+ FRAGMENT_SHIPPING_METHODS
+ """
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
canFinalize
isShippingRequired
id
number
shippingAddress {
...Address
}
billingAddress {
...Address
}
discounts {
...OrderDiscounts
}
actions
fulfillments {
id
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...Price
}
thumbnail {
url
}
}
availableShippingMethods {
...AvailableShippingMethods
}
subtotal {
...Price
}
total {
...Price
}
totalCaptured {
amount
}
totalAuthorized {
amount
}
shippingPrice {
...Price
}
}
"""
)
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(
user_api_client, order_with_lines_and_events, count_queries
):
query = (
FRAGMENT_ORDER_DETAILS
+ """
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
)
variables = {
"token": order_with_lines_and_events.id,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
FRAGMENT_STAFF_ORDER_DETAILS = (
FRAGMENT_ORDER_DETAILS
+ """
fragment OrderStaffDetail on Order {
...OrderDetail
events {
id
date
type
user {
email
}
message
email
emailType
amount
paymentId
paymentGateway
quantity
composedId
orderNumber
invoiceNumber
oversoldItems
lines {
itemName
}
fulfilledItems {
orderLine {
id
}
}
warehouse {
id
}
transactionReference
shippingCostsIncluded
relatedOrder {
id
}
}
}
"""
)
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_staff_order_details(
staff_api_client,
permission_manage_orders,
order_with_lines_and_events,
count_queries,
):
query = (
FRAGMENT_STAFF_ORDER_DETAILS
+ """
query Order($id: ID!) {
order(id: $id) {
...OrderStaffDetail
}
}
"""
)
variables = {
"id": graphene.Node.to_global_id("Order", order_with_lines_and_events.id),
}
staff_api_client.user.user_permissions.add(permission_manage_orders)
get_graphql_content(staff_api_client.post_graphql(query, variables))
MULTIPLE_ORDER_DETAILS_QUERY = (
FRAGMENT_STAFF_ORDER_DETAILS
+ """
query orders {
orders(first: 10) {
edges {
node {
...OrderStaffDetail
}
}
}
}
"""
)
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_staff_multiple_orders(
staff_api_client,
permission_manage_orders,
permission_manage_users,
orders_for_benchmarks,
count_queries,
):
staff_api_client.user.user_permissions.set(
[permission_manage_orders, permission_manage_users]
)
content = get_graphql_content(
staff_api_client.post_graphql(MULTIPLE_ORDER_DETAILS_QUERY)
)
assert content["data"]["orders"] is not None
MULTIPLE_DRAFT_ORDER_DETAILS_QUERY = (
FRAGMENT_STAFF_ORDER_DETAILS
+ """
query draftOrders {
draftOrders(first: 10) {
edges {
node {
...OrderStaffDetail
}
}
}
}
"""
)
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_staff_multiple_draft_orders(
staff_api_client,
permission_manage_orders,
permission_manage_users,
draft_orders_for_benchmarks,
count_queries,
):
staff_api_client.user.user_permissions.set(
[permission_manage_orders, permission_manage_users]
)
content = get_graphql_content(
staff_api_client.post_graphql(MULTIPLE_DRAFT_ORDER_DETAILS_QUERY)
)
assert content["data"]["draftOrders"] is not None
| 20.579767
| 82
| 0.563244
|
64254b620c8c573d1f4f5a5ec866a1ba992810d2
| 4,811
|
py
|
Python
|
src/gluonnlp/data/candidate_sampler.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | 7
|
2019-12-05T02:49:07.000Z
|
2020-08-17T01:11:59.000Z
|
src/gluonnlp/data/candidate_sampler.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | null | null | null |
src/gluonnlp/data/candidate_sampler.py
|
davisliang/gluon-nlp
|
18a736dbb55c80c2de82d73b923c3cd3d9d53591
|
[
"Apache-2.0"
] | 3
|
2021-03-12T04:41:00.000Z
|
2021-03-12T04:41:24.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Candidate samplers"""
__all__ = ['UnigramCandidateSampler']
import functools
import operator
import mxnet as mx
import numpy as np
class UnigramCandidateSampler(mx.gluon.HybridBlock):
"""Unigram Candidate Sampler
Draw random samples from a unigram distribution with specified weights
using the alias method.
Parameters
----------
weights : mx.nd.NDArray
Unnormalized class probabilities. Samples are drawn and returned on the
same context as weights.context.
shape : int or tuple of int
Shape of data to be sampled.
TODO: Specifying the shape is only a workaround until random_like
operators are available in mxnet
dtype : str or np.dtype, default 'float32'
Data type of the candidates. Make sure that the dtype precision is
large enough to represent the size of your weights array precisely. For
example, float32 can not distinguish 2**24 from 2**24 + 1.
"""
def __init__(self, weights, shape, dtype='float32'):
super(UnigramCandidateSampler, self).__init__()
self._shape = shape
self._dtype = dtype
self.N = weights.size
if (np.dtype(dtype) == np.float32 and weights.size > 2**24) or \
(np.dtype(dtype) == np.float16 and weights.size > 2**11):
s = 'dtype={dtype} can not represent all weights'
raise ValueError(s.format(dtype=dtype))
total_weights = weights.sum()
prob = (weights * self.N / total_weights).asnumpy().tolist()
alias = [0] * self.N
        # Sort the outcomes into those whose probability is lower than 1/N (low)
        # and those whose probability is at least 1/N (high).
low = []
high = []
for i in range(self.N):
if prob[i] < 1.0:
low.append(i)
else:
high.append(i)
# pair low with high
while len(low) > 0 and len(high) > 0:
l = low.pop()
h = high.pop()
alias[l] = h
prob[h] = prob[h] - (1.0 - prob[l])
if prob[h] < 1.0:
low.append(h)
else:
high.append(h)
for i in low + high:
prob[i] = 1
alias[i] = i
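        # Worked example (numbers are illustrative, not from the source): for
        # weights [1, 2, 3, 2], N = 4 and prob = weights * N / total = [0.5, 1.0, 1.5, 1.0].
        # Pairing low with high yields prob = [0.5, 1.0, 1.0, 0.5] and alias = [3, 1, 2, 2],
        # so e.g. P(0) = 1/4 * 0.5 = 1/8 and P(3) = 1/4 * 0.5 + 1/4 * 0.5 = 2/8,
        # matching the normalized weights.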
# store
prob = mx.nd.array(prob, dtype='float64')
alias = mx.nd.array(alias, dtype='float64')
self.prob = self.params.get_constant('prob', prob)
self.alias = self.params.get_constant('alias', alias)
def __repr__(self):
s = '{block_name}({len_weights}, {dtype})'
return s.format(block_name=self.__class__.__name__, len_weights=self.N,
dtype=self._dtype)
# pylint: disable=arguments-differ, unused-argument
def hybrid_forward(self, F, candidates_like, prob, alias):
"""Draw samples from uniform distribution and return sampled candidates.
Parameters
----------
candidates_like: mxnet.nd.NDArray or mxnet.sym.Symbol
            This input specifies the shape of the candidates to be sampled.
            TODO: shape selection is not yet supported; the shape must be specified
            in the constructor.
Returns
-------
samples: mxnet.nd.NDArray or mxnet.sym.Symbol
The sampled candidates of shape candidates_like.shape. Candidates
are sampled based on the weights specified on creation of the
UnigramCandidateSampler.
"""
flat_shape = functools.reduce(operator.mul, self._shape)
idx = F.random.uniform(low=0, high=self.N, shape=flat_shape,
dtype='float64').floor()
prob = F.gather_nd(prob, idx.reshape((1, -1)))
alias = F.gather_nd(alias, idx.reshape((1, -1)))
where = F.random.uniform(shape=flat_shape,
dtype='float64') < prob
hit = idx * where
alt = alias * (1 - where)
candidates = (hit + alt).reshape(self._shape)
return candidates.astype(self._dtype)
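# Hedged usage sketch (weights and shape below are illustrative assumptions,
# not part of the original file):
#
#     sampler = UnigramCandidateSampler(mx.nd.array([1, 2, 3, 2]), shape=(2, 3))
#     sampler.initialize()
#     samples = sampler(mx.nd.zeros((2, 3)))  # (2, 3) array of sampled class indices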
| 36.172932
| 80
| 0.617335
|
08cd75650a2b7ff3310a54d75c49613baada1365
| 26,191
|
py
|
Python
|
dosagelib/plugins/webtoons.py
|
toonn/dosage
|
890f116179b34dfa5349495b9792a643ee4c2d74
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/webtoons.py
|
toonn/dosage
|
890f116179b34dfa5349495b9792a643ee4c2d74
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/webtoons.py
|
toonn/dosage
|
890f116179b34dfa5349495b9792a643ee4c2d74
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2021 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
class WebToons(_ParserScraper):
imageSearch = '//img[contains(@class, "_images")]/@data-url'
prevSearch = '//a[contains(@class, "_prevEpisode")]'
multipleImagesPerStrip = True
def __init__(self, name, url, titlenum):
super(WebToons, self).__init__('WebToons/' + name)
baseUrl = 'https://www.webtoons.com/en/'
self.url = baseUrl + url + '/episode/viewer?title_no=' + str(titlenum)
self.listUrl = baseUrl + url + '/list?title_no=' + str(titlenum)
self.stripUrl = self.url + '&episode_no=%s'
self.firstStripUrl = self.stripUrl % '1'
def starter(self):
# Avoid age/GDPR gate
for cookie in ('needGDPR', 'needCCPA', 'needCOPPA'):
self.session.cookies.set(cookie, 'false', domain='webtoons.com')
# Find current episode number
listPage = self.getPage(self.listUrl)
currentEpisode = listPage.xpath('//div[@class="detail_lst"]/ul/li')[0].attrib['data-episode-no']
# Check for completed tag
self.endOfLife = (listPage.xpath('//div[@id="_asideDetail"]//span[@class="txt_ico_completed2"]') != [])
return self.stripUrl % currentEpisode
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super(WebToons, self).fetchUrls(url, data, urlSearch)
# Update firstStripUrl with the correct episode title
if url.rsplit('=', 1)[-1] == '1':
self.firstStripUrl = url
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('=', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
        imageExt = imageUrl.rsplit('.', 1)[-1].split('?', 1)[0]
return "%s-%03d.%s" % (episodeNum, imageNum, imageExt)
@classmethod
def getmodules(cls): # noqa: Allowed to be long
return (
# START AUTOUPDATE
cls('1000', 'action/one-thousand', 1217),
cls('10thDimensionBoys', 'comedy/10th-dimension-boys', 71),
cls('1111Animals', 'comedy/1111-animals', 437),
cls('2015SpaceSeries', 'sf/2015-space-series', 391),
cls('3SecondStrip', 'comedy/3-second-strip', 380),
cls('ABittersweetLife', 'slice-of-life/a-bittersweet-life', 294),
cls('AboutDeath', 'drama/about-death', 82),
cls('ABudgiesLife', 'slice-of-life/its-a-budgies-life', 985),
cls('Acception', 'drama/acception', 1513),
cls('Acursian', 'supernatural/acursian', 1452),
cls('Adamsville', 'horror/adamsville', 502),
cls('AdventuresOfGod', 'comedy/adventures-of-god', 853),
cls('AerialMagic', 'fantasy/aerial-magic', 1358),
cls('AgeMatters', 'romance/age-matters', 1364),
cls('AGoodDayToBeADog', 'romance/a-good-day-tobe-a-dog', 1390),
cls('Aisopos', 'drama/aisopos', 76),
cls('AliceElise', 'fantasy/alice-elise', 1481),
cls('AllThatWeHopeToBe', 'slice-of-life/all-that-we-hope-to-be', 470),
cls('AllThatYouAre', 'drama/all-that-you-are', 403),
cls('AlwaysHuman', 'romance/always-human', 557),
cls('Annarasumanara', 'drama/annarasumanara', 77),
cls('Anthronauts', 'challenge/anthronauts', 358917),
cls('AphroditeIX', 'sf/aphroditeix', 1451),
cls('ApocalypticHorseplay', 'supernatural/apocalyptic-horseplay', 635),
cls('AprilFlowers', 'fantasy/april-flowers', 1363),
cls('Arma', 'super-hero/arma', 1640),
cls('AsPerUsual', 'slice-of-life/as-per-usual', 599),
cls('AssassinRoommate', 'romance/assassin-roommate', 1050),
cls('AthenaComplex', 'fantasy/athena-complex', 867),
cls('AuraFromAnotherPlanet', 'comedy/aura-from-another-planet', 369),
cls('AverageAdventuresOfAnAverageGirl', 'slice-of-life/average-adventures-of-an-average-girl', 401),
cls('AXED', 'comedy/axed', 1558),
cls('Backchannel', 'super-hero/backchannel', 1456),
cls('BadSigns', 'comedy/bad-signs', 1623),
cls('Bastard', 'thriller/bastard', 485),
cls('BeforeWeKnewIt', 'romance/before-we-knew-it', 1972),
cls('BehindTheGIFs', 'comedy/behind-the-gifs', 658),
cls('BigJo', 'romance/big-jo', 854),
cls('BiteMe', 'thriller/bite-me', 1019),
cls('Blessed', 'drama/blessed', 1193),
cls('BloodInk', 'action/blood-ink', 1490),
cls('BloodlessWars', 'sf/bloodless-wars', 1622),
cls('BloopBloopRelationshipComic', 'challenge/bloop-bloop-relationship-comic', 239970),
cls('Bluechair', 'slice-of-life/bluechair', 199),
cls('BOOItsSex', 'slice-of-life/boo-its-sex', 1413),
cls('BoyfriendOfTheDead', 'comedy/boyfriend-of-the-dead', 1102),
cls('BrassAndSass', 'romance/brass-and-sass', 1652),
cls('BrimstoneAndRoses', 'romance/brimstone-and-roses', 1758),
cls('BrothersBond', 'action/brothersbond', 1458),
cls('BrutallyHonest', 'comedy/brutally-honest', 799),
cls('BuzzFeedComics', 'comedy/buzzfeed-comics', 585),
cls('CapeOfSpirits', 'action/cape-of-spirits', 1559),
cls('CARL', 'slice-of-life/carl', 1216),
cls('Caster', 'action/caster', 1461),
cls('CastleSwimmer', 'fantasy/castle-swimmer', 1499),
cls('Catharsis', 'fantasy/catharsis', 396),
cls('CatLoafAdventures', 'slice-of-life/cat-loaf-adventures', 1381),
cls('CheeseInTheTrap', 'drama/cheese-in-the-trap', 99),
cls('CherryBlossoms', 'romance/cherry-blossoms', 1005),
cls('Chiller', 'thriller/chiller', 536),
cls('ChocoLatte', 'romance/choco-latte', 1691),
cls('CityOfBlank', 'sf/city-of-blank', 1895),
cls('CityOfWalls', 'drama/city-of-wall', 505),
cls('ClusterFudge', 'slice-of-life/cluster-fudge', 355),
cls('CodeAdam', 'action/code-adam', 1657),
cls('CookingComically', 'tiptoon/cooking-comically', 622),
cls('CrapIDrewOnMyLunchBreak', 'challenge/crap-i-drew-on-my-lunch-break', 124756),
cls('Crumbs', 'romance/crumbs', 1648),
cls('CupidsArrows', 'romance/cupids-arrows', 1538),
cls('CursedPrincessClub', 'comedy/cursed-princess-club', 1537),
cls('Cyberbunk', 'sf/cyberbunk', 466),
cls('Cyberforce', 'super-hero/cyberforce', 531),
cls('CykoKO', 'super-hero/cyko-ko', 560),
cls('Darbi', 'action/darbi', 1098),
cls('Davinchibi', 'fantasy/davinchibi', 1190),
cls('DaYomanvilleGang', 'drama/da-yomanville-gang', 1578),
cls('DaysOfHana', 'drama/days-of-hana', 1246),
cls('DEADDAYS', 'horror/dead-days', 293),
cls('Debunkers', 'challenge/debunkers', 148475),
cls('DEEP', 'thriller/deep', 364),
cls('Defects', 'challenge/defects', 221106),
cls('Denma', 'sf/denma', 921),
cls('Dents', 'sf/dents', 671),
cls('Deor', 'fantasy/deor', 1663),
cls('DevilNumber4', 'supernatural/devil-no-4', 1695),
cls('DICE', 'fantasy/dice', 64),
cls('DistantSky', 'horror/distant-sky', 75),
cls('DONTHATE', 'comedy/dont-hate', 1574),
cls('DoodleForFood', 'slice-of-life/doodle-for-food', 487),
cls('DownToEarth', 'romance/down-to-earth', 1817),
cls('Dragnarok', 'fantasy/dragnarok', 1018),
cls('DragnarokDescendants', 'fantasy/dragnarok-descendants', 1433),
cls('DrFrost', 'drama/dr-frost', 371),
cls('Dustinteractive', 'comedy/dustinteractive', 907),
cls('DutyAfterSchool', 'sf/duty-after-school', 370),
cls('EatFighter', 'sports/eat-fighter', 1460),
cls('EcstasyHearts', 'sports/ecstasy-hearts', 604),
cls('Edith', 'romance/edith', 1536),
cls('Eggnoid', 'sf/eggnoid', 1229),
cls('Eleceed', 'action/eleceed', 1571),
cls('Elena', 'horror/elena', 484),
cls('ElfAndWarrior', 'fantasy/elf-and-warrior', 908),
cls('EMPYREA', 'fantasy/empyrea', 1407),
cls('EpicV', 'comedy/epic-v', 353),
cls('EscapeRoom', 'thriller/escape-room', 1815),
cls('EverywhereAndNowhere', 'comedy/everywhere-and-nowhere', 1598),
cls('FAMILYMAN', 'drama/family-man', 85),
cls('FantasySketchTheGame', 'sf/fantasy-sketch', 1020),
cls('Faust', 'supernatural/faust', 522),
cls('FINALITY', 'mystery/finality', 1457),
cls('Firebrand', 'supernatural/firebrand', 877),
cls('FisheyePlacebo', 'challenge/fisheye-placebo', 101841),
cls('Flow', 'fantasy/flow', 101),
cls('FluffyBoyfriend', 'supernatural/fluffy-boyfriend', 1164),
cls('ForTheSakeOfSita', 'romance/for-the-sake-of-sita', 349),
cls('FourLeaf', 'fantasy/four-leaf', 1454),
cls('FreakingRomance', 'romance/freaking-romance', 1467),
cls('FridayForbiddenTales', 'thriller/friday', 388),
cls('GenshinImpact', 'challenge/genshin-impact', 242646),
cls('Gepetto', 'sf/gepetto', 81),
cls('GhostsAmongTheWildFlowers', 'fantasy/ghosts-over-wild-flowers', 718),
cls('GhostTeller', 'horror/ghost-teller', 1307),
cls('GhostTheater', 'drama/ghost-theater', 1911),
cls('GhostWife', 'romance/ghost-wife', 1471),
cls('GirlsHaveABlog', 'slice-of-life/girls-have-a-blog', 1052),
cls('GirlsOfTheWilds', 'action/girls-of-the-wilds', 93),
cls('GodOfBath', 'comedy/god-of-bath', 91),
cls('GOSU', 'action/gosu', 1099),
cls('GourmetHound', 'drama/gourmet-hound', 1245),
cls('GremoryLand', 'horror/gremoryland', 1893),
cls('GuardiansOfTheVideoGame', 'sf/guardians-of-the-video-game', 368),
cls('HAPIBUNI', 'comedy/hapi-buni', 362),
cls('HardcoreLevelingWarrior', 'action/hardcore-leveling-warrior', 1221),
cls('HaveYouAnyFear', 'horror/have-you-any-fear', 1197),
cls('Haxor', 'sf/haxor', 1325),
cls('Heartwired', 'sf/heartwired', 1539),
cls('HeirsGame', 'drama/heirs-game', 1445),
cls('HeliosFemina', 'fantasy/helios-femina', 638),
cls('HelloWorld', 'slice-of-life/hello-world', 827),
cls('Hellper', 'fantasy/hellper', 185),
cls('HeroineChic', 'super-hero/heroine-chic', 561),
cls('HIVE', 'thriller/hive', 65),
cls('Hooky', 'fantasy/hooky', 425),
cls('HoovesOfDeath', 'fantasy/hooves-of-death', 1535),
cls('HouseOfStars', 'fantasy/house-of-stars', 1620),
cls('HowToBecomeADragon', 'fantasy/how-to-become-a-dragon', 1973),
cls('HowToLove', 'slice-of-life/how-to-love', 472),
cls('IDontWantThisKindOfHero', 'super-hero/i-dont-want-this-kind-of-hero', 98),
cls('IF', 'action/if', 1925),
cls('IllusionsOfAdulting', 'slice-of-life/illusions-of-adulting', 922),
cls('IllustratedInternet', 'comedy/illustrated-internet', 750),
cls('ILoveYoo', 'drama/i-love-yoo', 986),
cls('ImmortalNerd', 'slice-of-life/immortal-nerd', 579),
cls('ImTheGrimReaper', 'supernatural/im-the-grim-reaper', 1697),
cls('Inarime', 'super-hero/inarime', 675),
cls('InternetExplorer', 'challenge/internet-explorer', 219164),
cls('InTheBleakMidwinter', 'sf/in-the-bleak-midwinter', 1946),
cls('ItsMine', 'drama/its-mine', 2010),
cls('JackieRose', 'supernatural/jackie-rose', 613),
cls('JingleJungle', 'slice-of-life/jingle-jungle', 282),
cls('JustAskYuli', 'slice-of-life/just-ask-yuli', 402),
cls('JustForKicks', 'slice-of-life/just-for-kicks', 1152),
cls('JustPancakes', 'comedy/just-pancakes', 1651),
cls('KidsAreAllRight', 'drama/kids-are-all-right', 283),
cls('Killstagram', 'thriller/killstagram', 1971),
cls('KindOfConfidential', 'romance/kind-of-confidential', 663),
cls('KindOfLove', 'slice-of-life/kind-of-love', 1850),
cls('KnightRun', 'sf/knight-run', 67),
cls('Kubera', 'fantasy/kubera', 83),
cls('LalinsCurse', 'supernatural/lalins-curse', 1601),
cls('Lars', 'slice-of-life/lars', 358),
cls('LateBloomer', 'romance/late-bloomer', 988),
cls('LavenderJack', 'super-hero/lavender-jack', 1410),
cls('LESSA', 'action/lessa', 89),
cls('LESSA2TheCrimsonKnight', 'action/lessa-2', 507),
cls('LetsPlay', 'romance/letsplay', 1218),
cls('LibraryGhost', 'comedy/library-ghost', 220),
cls('LifeOutsideTheCircle', 'drama/life-outside-the-circle', 1260),
cls('LittleMatchaGirl', 'fantasy/little-matcha-girl', 1665),
cls('LiveForever', 'thriller/live-forever', 1312),
cls('LiveWithYourself', 'comedy/live-with-yourself', 919),
cls('Lone', 'fantasy/lone', 1929),
cls('Lookism', 'drama/lookism', 1049),
cls('LoreOlympus', 'romance/lore-olympus', 1320),
cls('Lorna', 'slice-of-life/lorna', 1284),
cls('LostInTranslation', 'drama/lost-in-translation', 1882),
cls('LoveAdviceFromTheGreatDukeOfHell', 'comedy/love-advice', 1498),
cls('Lozolz', 'tiptoon/lozolz', 1268),
cls('LUFF', 'romance/luff', 1489),
cls('Luggage', 'fantasy/luggage', 1642),
cls('LUMINE', 'fantasy/lumine', 1022),
cls('Lunarbaboon', 'slice-of-life/lunarbaboon', 523),
cls('MageAndDemonQueen', 'comedy/mage-and-demon-queen', 1438),
cls('Magical12thGraders', 'super-hero/magical-12th-graders', 90),
cls('Magician', 'fantasy/magician', 70),
cls('MagicSodaPop', 'fantasy/magic-soda-pop', 1947),
cls('MarryMe', 'romance/marry-me', 1951),
cls('MatchmakerHero', 'sf/matchmaker-hero', 1569),
cls('MelvinasTherapy', 'horror/melvinas-therapy', 1021),
cls('MeowMan', 'comedy/meow-man', 1677),
cls('MercWorks', 'slice-of-life/mercworks', 426),
cls('Messenger', 'fantasy/messenger', 1382),
cls('MetaphoricalHER', 'drama/metaphorical-her', 1475),
cls('MidnightPoppyLand', 'romance/midnight-poppy-land', 1798),
cls('MidnightRain', 'drama/midnight-rain', 1797),
cls('MidnightRhapsody', 'slice-of-life/midnight-rhapsody', 116),
cls('MidnightRhapsodySeason2', 'slice-of-life/midnight-rhapsody-season2', 365),
cls('MissAbbottAndTheDoctor', 'romance/miss-abbott-and-the-doctor', 707),
cls('MOONBEARD', 'comedy/moon-beard', 471),
cls('MoonYou', 'supernatural/moonyou', 1340),
cls('Murrz', 'slice-of-life/murrz', 1281),
cls('Muted', 'supernatural/muted', 1566),
cls('MyBoo', 'supernatural/my-boo', 1185),
cls('MyDearColdBloodedKing', 'romance/my-dear-cold-blooded-king', 961),
cls('MyDeepestSecret', 'thriller/my-deepest-secret', 1580),
cls('MyDictatorBoyfriend', 'comedy/my-dictator-boyfriend', 1391),
cls('MyGiantNerdBoyfriend', 'slice-of-life/my-giant-nerd-boyfriend', 958),
cls('MyKittyAndOldDog', 'slice-of-life/my-kitty-and-old-dog', 184),
cls('MyNameIsBenny', 'slice-of-life/my-name-is-benny', 1279),
cls('MyWallflowerKiss', 'challenge/my-wallflower-kiss', 151869),
cls('NanoList', 'sf/nano-list', 700),
cls('NationalDogDay2016', 'slice-of-life/national-dog-day', 747),
cls('NewLifeProject', 'comedy/new-life-project', 279),
cls('Newman', 'fantasy/newman', 405),
cls('NewNormalClass8', 'drama/new-normal-class-8', 100),
cls('Nicholalala', 'slice-of-life/nicholalala', 418),
cls('NightmareFactory', 'thriller/nightmare-factory', 616),
cls('Noblesse', 'action/noblesse', 87),
cls('NoblesseRaisAdventure', 'action/noblesse-spin-off', 608),
cls('NoScope', 'sports/no-scope', 1572),
cls('NotEvenBones', 'thriller/not-even-bones', 1756),
cls('NothingSpecial', 'fantasy/nothing-special', 1188),
cls('OddGirlOut', 'drama/odd-girl-out', 1420),
cls('OhHoly', 'romance/oh-holy', 809),
cls('ORANGEMARMALADE', 'romance/orange-marmalade', 97),
cls('Outrage', 'super-hero/outrage', 1450),
cls('OVERPOWERED', 'challenge/overpowered', 85292),
cls('PacificRimAmara', 'sf/pacific-rim-amara', 1327),
cls('PenguinLovesMev', 'slice-of-life/penguin-loves-mev', 86),
cls('PhantomParadise', 'fantasy/phantom-paradise', 1250),
cls('Pigminted', 'slice-of-life/pigminted', 482),
cls('PinchPoint', 'challenge/pinch-point-reborn', 334640),
cls('Plum', 'sports/plum', 1605),
cls('Polidiocy', 'comedy/polidiocy', 676),
cls('Pound', 'action/pound', 1496),
cls('PowerBallad', 'super-hero/power-ballad', 987),
cls('PurpleHyacinth', 'mystery/purple-hyacinth', 1621),
cls('Punderworld', 'challenge/punderworld', 312584),
cls('RandomChat', 'drama/random-chat', 1669),
cls('RANDOMPHILIA', 'comedy/randomphilia', 386),
cls('Rebirth', 'sf/rebirth', 1412),
cls('RefundHighSchool', 'fantasy/refundhighschool', 1360),
cls('RiseFromAshes', 'supernatural/rise-from-ashes', 959),
cls('RoarStreetJournal', 'slice-of-life/roar-street-journal', 397),
cls('RoomOfSwords', 'sf/room-of-swords', 1261),
cls('RotAndRuin', 'horror/rot-and-ruin', 1878),
cls('SafelyEndangered', 'comedy/safely-endangered', 352),
cls('SaltyStudio', 'romance/salty-studio', 74),
cls('SaphieTheOneEyedCat', 'slice-of-life/saphie-one-eyed-cat', 670),
cls('SAVEME', 'drama/bts-save-me', 1514),
cls('ScoobandShag', 'challenge/scoob-and-shag', 210827),
cls('ScorchingRomance', 'romance/scorching-romance', 1662),
cls('Seed', 'sf/seed', 1480),
cls('SHADOW', 'super-hero/shadow', 281),
cls('ShadowPirates', 'action/shadow-pirates', 1455),
cls('Shard', 'supernatural/shard', 960),
cls('Shiloh', 'thriller/shiloh', 1649),
cls('ShootAround', 'drama/shoot-around', 399),
cls('Shriek', 'thriller/shriek', 772),
cls('SID', 'supernatural/sid', 497),
cls('SIDEKICKS', 'super-hero/sidekicks', 92),
cls('SimonSues', 'supernatural/simon-sues', 1619),
cls('SirensLament', 'romance/sirens-lament', 632),
cls('Sithrah', 'fantasy/sithrah', 524),
cls('SkateFire100', 'sports/skate-fire-100', 1674),
cls('SmallWorld', 'slice-of-life/small-world', 1159),
cls('SmileBrush', 'slice-of-life/smile-brush', 94),
cls('SmileBrushMyOldPictures', 'slice-of-life/smile-brush-my-old-pictures', 302),
cls('Snailogy', 'slice-of-life/snailogy', 387),
cls('SOLEIL', 'fantasy/soleil', 1823),
cls('SOULCARTEL', 'fantasy/soul-cartel', 72),
cls('SoulOnHold', 'supernatural/soul-on-hold', 1701),
cls('SpaceBoy', 'sf/space-boy', 400),
cls('SpaceVixen', 'challenge/space-vixen-deep-space-k9', 207049),
cls('SpiritFingers', 'drama/spirit-fingers', 1577),
cls('Spirits', 'fantasy/spirits-re', 1348),
cls('StalkerXStalker', 'challenge/stalker-x-stalker', 245662),
cls('STARCROSS', 'super-hero/star-cross', 1599),
cls('StayingHealthyTogether', 'tiptoon/staying-healthy-together', 1963),
cls('StrawberrySeafoam', 'fantasy/strawberry-seafoam', 1248),
cls('SubtleDisaster', 'drama/subtle-disaster', 350),
cls('SubZero', 'romance/subzero', 1468),
cls('SuperSecret', 'romance/super-secret', 666),
cls('SupersonicGirl', 'super-hero/supersonic-girl', 633),
cls('SweetHome', 'thriller/sweethome', 1285),
cls('SwimmingLessonsForAMermaid', 'romance/swimming-lessons-for-a-mermaid', 1912),
cls('SwordInterval', 'supernatural/sword-interval', 486),
cls('TalesOfTheUnusual', 'horror/tales-of-the-unusual', 68),
cls('TheBadguys', 'super-hero/the-bad-guys', 701),
cls('TheBrooklynite', 'super-hero/the-brooklynite', 813),
cls('TheCliff', 'thriller/the-cliff', 80),
cls('TheCroaking', 'fantasy/the-croaking', 1494),
cls('TheDaneMen', 'comedy/the-danemen', 395),
cls('TheDevilIsAHandsomeMan', 'drama/the-devil-is-a-handsome-man', 1311),
cls('TheDoctorsAreOut', 'romance/the-doctors-are-out', 1910),
cls('TheFeverKing', 'super-hero/the-fever-king', 1659),
cls('TheFourOfThem', 'drama/the-four-of-them', 1524),
cls('TheGamer', 'action/the-gamer', 88),
cls('TheGentlemansArmchair', 'comedy/the-gentlemans-armchair', 469),
cls('TheGirlDownstairs', 'romance/the-girl-downstairs', 1809),
cls('THEGIRLFROMCLASS', 'drama/the-girl-from-class', 73),
cls('TheGodOfHighSchool', 'action/the-god-of-high-school', 66),
cls('TheKissBet', 'romance/the-kiss-bet', 1617),
cls('TheLifeOfTheThreeBears', 'slice-of-life/the-life-of-the-three-bears', 390),
cls('ThePurpleHeart', 'super-hero/the-purple-heart', 723),
cls('TheRedBook', 'horror/the-red-book', 467),
cls('TheRedHook', 'super-hero/the-red-hook', 643),
cls('TheRedKing', 'supernatural/the-red-king', 1687),
cls('TheShadowProphet', 'drama/the-shadow-prophet', 1881),
cls('TheSoundOfYourHeart', 'comedy/the-sound-of-your-heart', 269),
cls('TheSteamDragonExpress', 'fantasy/steam-dragon-express', 1270),
cls('TheStoriesOfThoseAroundMe', 'romance/the-stories-of-those-around-me', 96),
cls('TheStrangeTalesOfOscarZahn', 'fantasy/the-strange-tales-of-oscar-zahn', 685),
cls('TheVaultOfHorrorACollectionOfNightmares', 'horror/the-vault-of-horror-a-collection-of-nightmares', 295),
cls('TheWeightOfOurSky', 'historical/the-weight-of-our-sky', 1739),
cls('TheWitchAndTheBull', 'fantasy/the-witch-and-the-bull', 1892),
cls('TheWolfmanOfWulvershire', 'mystery/the-wolfman-of-wulvershire', 1784),
cls('TheWorldWhereIBelong', 'supernatural/the-world-where-i-belong', 1318),
cls('TheWrathAndTheDawn', 'fantasy/the-wrath-and-the-dawn', 1772),
cls('ThirdShiftSociety', 'supernatural/third-shift-society', 1703),
cls('Thornstone', 'fantasy/thornstone', 1612),
cls('TickleTown', 'comedy/tickle-town', 428),
cls('ToasterDude', 'comedy/toaster-dude', 1983),
cls('TokyoThreatDocumentationProject', 'challenge/tokyo-threat-documentation-project', 417973),
cls('TowerOfGod', 'fantasy/tower-of-god', 95),
cls('TrailerParkWarlock', 'comedy/trailer-park-warlock', 1512),
cls('TrashBird', 'comedy/trash-bird', 473),
cls('TrueBeauty', 'romance/truebeauty', 1436),
cls('Trump', 'fantasy/trump', 84),
cls('UndeadEd', 'supernatural/undeaded', 468),
cls('UnderPrin', 'supernatural/underprin', 78),
cls('UnderTheAegis', 'fantasy/under-the-aegis', 436),
cls('UnknownCaller', 'thriller/ar-toon', 775),
cls('UnlovableReplacement', 'romance/unlovable-replacement', 1762),
cls('UnluckyIsAsLuckyDoes', 'comedy/unlucky-is-as-lucky-does', 1554),
cls('UnOrdinary', 'super-hero/unordinary', 679),
cls('UnTouchable', 'romance/untouchable', 79),
cls('UpAndOut', 'slice-of-life/up-and-out', 488),
cls('UrbanAnimal', 'super-hero/urban-animal', 1483),
cls('Uriah', 'horror/uriah', 1607),
cls('VarsityNoir', 'mystery/varsity-noir', 1613),
cls('VersionDayAndNight', 'drama/version-day-and-night', 1796),
cls('WafflesAndPancakes', 'slice-of-life/waffles-and-pancakes', 1310),
cls('WarCry', 'super-hero/war-cry', 1247),
cls('WarningLabel', 'romance/warning-label', 1051),
cls('Watermelon', 'fantasy/watermelon', 1435),
cls('WeakHero', 'action/weakhero', 1726),
cls('WEBTOONGREENLiGHT', 'action/webtoon-greenlight', 1988),
cls('WestwoodVibrato', 'drama/westwood-vibrato', 537),
cls('WhereTangentsMeet', 'romance/where-tangents-meet', 421),
cls('WindBreaker', 'sports/wind-breaker', 372),
cls('WinterMoon', 'fantasy/winter-moon', 1093),
cls('WinterWoods', 'drama/winter-woods', 344),
cls('WitchCreekRoad', 'horror/witch-creek-road', 1453),
cls('WitchHunt', 'supernatural/witch-hunt', 363),
cls('Wolfsbane', 'horror/wolfsbane', 1826),
cls('XINK3R', 'super-hero/xinker', 541),
cls('YourAdventure', 'comedy/your-adventure', 506),
cls('YourLetter', 'drama/your-letter', 1540),
cls('YumisCells', 'slice-of-life/yumi-cell', 478),
cls('YunaAndKawachan', 'drama/yuna-and-kawachan', 1840),
cls('ZeroGame', 'fantasy/zero-game', 1704),
cls('ZomCom', 'challenge/zomcom', 70195),
# END AUTOUPDATE
)
| 61.481221
| 121
| 0.590126
|
b8ce2077e41cac03f7d2fba692f127dc2da2b639
| 1,156
|
py
|
Python
|
cogs/dbl.py
|
IceeMC/dat-banana-bot
|
14a81dc254e38dd6a4d65a9f7c6fd41da1e2369e
|
[
"MIT"
] | null | null | null |
cogs/dbl.py
|
IceeMC/dat-banana-bot
|
14a81dc254e38dd6a4d65a9f7c6fd41da1e2369e
|
[
"MIT"
] | null | null | null |
cogs/dbl.py
|
IceeMC/dat-banana-bot
|
14a81dc254e38dd6a4d65a9f7c6fd41da1e2369e
|
[
"MIT"
] | null | null | null |
import discord
import json
import aiohttp
import asyncio
import os
from discord.ext import commands
uri = 'https://discordbots.org/api'
class dbl:
def __init__(self, bot):
self.bot = bot
self.session = self.bot.session
with open("data/apikeys.json") as f:
x = json.loads(f.read())
self.token = x['dblapi']
def __unload(self):
self.bot.loop.create_task(self.session.close())
async def send(self):
dump = json.dumps({
'server_count': len(self.bot.guilds)
})
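        # e.g. dump == '{"server_count": 42}' when the bot is in 42 guilds
        # (the count here is illustrative).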
head = {
'authorization': self.token,
'content-type' : 'application/json'
}
url = '{0}/bots/388476336777461770/stats'.format(uri)
async with self.session.post(url, data=dump, headers=head) as resp:
print('returned {0.status} for {1} on dbl'.format(resp, dump))
async def on_guild_join(self, server):
await self.send()
async def on_guild_remove(self, server):
await self.send()
async def on_ready(self):
await self.send()
def setup(bot):
bot.add_cog(dbl(bot))
| 23.591837
| 75
| 0.58391
|
3a3f17af0dcf98f0c42df3855a6872ee8b29018d
| 207
|
py
|
Python
|
cwe/categories.py
|
Julian-Nash/cwe
|
af43faeccc8b9489d1ceb6403fac53814a2c5f9c
|
[
"MIT"
] | 11
|
2020-04-01T14:56:47.000Z
|
2022-02-22T06:00:50.000Z
|
cwe/categories.py
|
Julian-Nash/cwe
|
af43faeccc8b9489d1ceb6403fac53814a2c5f9c
|
[
"MIT"
] | 3
|
2020-05-21T02:45:23.000Z
|
2021-11-18T11:09:44.000Z
|
cwe/categories.py
|
Julian-Nash/cwe
|
af43faeccc8b9489d1ceb6403fac53814a2c5f9c
|
[
"MIT"
] | 2
|
2020-05-21T05:47:28.000Z
|
2021-08-06T04:59:19.000Z
|
import enum
@enum.unique
class CWECategory(enum.Enum):
HARDWARE_DESIGN: str = "hardware_design"
RESEARCH_CONCEPTS: str = "research_concepts"
SOFTWARE_DEVELOPMENT: str = "software_development"
| 20.7
| 54
| 0.758454
|
875142c6e23e705c6fa866e566dcce397449bea1
| 1,977
|
py
|
Python
|
pca/interfaces/repository.py
|
daniel-butler/python-clean-architecture
|
a95da91fffb1120e1e748c9ee7717a622647288e
|
[
"MIT"
] | null | null | null |
pca/interfaces/repository.py
|
daniel-butler/python-clean-architecture
|
a95da91fffb1120e1e748c9ee7717a622647288e
|
[
"MIT"
] | null | null | null |
pca/interfaces/repository.py
|
daniel-butler/python-clean-architecture
|
a95da91fffb1120e1e748c9ee7717a622647288e
|
[
"MIT"
] | 1
|
2019-12-11T01:32:08.000Z
|
2019-12-11T01:32:08.000Z
|
import typing as t
from .dao import IDao
from .entity import Entity, Id
class IRepository(t.Generic[Id, Entity]):
"""
    Repository serves as a collection of entities (with methods such as get, add, update, remove)
    backed by an underlying persistence layer. It should know how to construct an instance,
    serialize it and get its id.
    Developers of repos for concrete entities are encouraged to subclass it and add meaningful
    query and command methods alongside the basic ones.
"""
dao: t.ClassVar[IDao]
"""
Data Access Object which gives repo a persistence API. Its value is created
by requiring the DAO instance related to its entity from DI container.
"""
entity: t.ClassVar[t.Type[Entity]]
"""Entity type collected by this repo."""
def create(self, **kwargs) -> Entity:
"""
        Creates an object compatible with this repo. Uses the repo's factory,
        or the klass if no factory is present.
        NB: Does not insert the object into the repo. Use the `create_and_add` method for that.
"""
raise NotImplementedError
def add(self, entity: Entity):
"""Adds the object to the repo to the underlying persistence layer via its DAO."""
raise NotImplementedError
def create_and_add(self, **kwargs) -> Entity:
"""Creates an object compatible with this repo and adds it to the collection."""
raise NotImplementedError
def find(self, id_: Id) -> t.Optional[Entity]:
"""Returns object of given id or None"""
raise NotImplementedError
def contains(self, id_: Id):
"""Checks whether an entity of given id is in the repo."""
raise NotImplementedError
def update(self, entity: Entity) -> None:
"""Updates the object in the repo."""
raise NotImplementedError
def remove(self, entity: Entity) -> None:
"""Removes the object from the underlying persistence layer via DAO."""
raise NotImplementedError
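# Hedged illustration (the entity, DAO and method below are assumptions, not
# part of the original file): a concrete repository binds `entity` and `dao`
# and adds meaningful query methods next to the basic ones, roughly:
#
#     class UserRepository(IRepository[int, User]):
#         entity = User
#         dao = user_dao  # an IDao supplied by the DI container
#
#         def find_by_email(self, email: str) -> t.Optional[User]:
#             ...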
| 35.303571
| 96
| 0.671725
|
409197ee9fee03511d1dcc396972f94baa718aad
| 6,677
|
py
|
Python
|
sdk/python/pulumi_azure_native/eventhub/latest/namespace_authorization_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventhub/latest/namespace_authorization_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventhub/latest/namespace_authorization_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['NamespaceAuthorizationRule']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:eventhub:NamespaceAuthorizationRule'.""", DeprecationWarning)
class NamespaceAuthorizationRule(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:eventhub:NamespaceAuthorizationRule'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rights: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'AccessRights']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Single item in a List or Get AuthorizationRule operation
Latest API Version: 2017-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_rule_name: The authorization rule name.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[Sequence[pulumi.Input[Union[str, 'AccessRights']]]] rights: The rights associated with the rule.
"""
pulumi.log.warn("""NamespaceAuthorizationRule is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:eventhub:NamespaceAuthorizationRule'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_rule_name'] = authorization_rule_name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if rights is None and not opts.urn:
raise TypeError("Missing required property 'rights'")
__props__['rights'] = rights
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/latest:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:eventhub:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:eventhub:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:eventhub/v20140901:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:eventhub/v20140901:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:eventhub/v20150801:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:eventhub/v20150801:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:eventhub/v20170401:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:eventhub/v20180101preview:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:eventhub/v20180101preview:NamespaceAuthorizationRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NamespaceAuthorizationRule, __self__).__init__(
'azure-native:eventhub/latest:NamespaceAuthorizationRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NamespaceAuthorizationRule':
"""
Get an existing NamespaceAuthorizationRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["name"] = None
__props__["rights"] = None
__props__["type"] = None
return NamespaceAuthorizationRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rights(self) -> pulumi.Output[Sequence[str]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.361538
| 952
| 0.685338
|
5d7712b001f5f308c13b2eeb3890c392db9b0fed
| 3,532
|
py
|
Python
|
tests/test_content.py
|
brianwisti/rgb-zola
|
60504a4ea91e72b14c120dc47b43264f2691d5cd
|
[
"CC-BY-4.0"
] | null | null | null |
tests/test_content.py
|
brianwisti/rgb-zola
|
60504a4ea91e72b14c120dc47b43264f2691d5cd
|
[
"CC-BY-4.0"
] | null | null | null |
tests/test_content.py
|
brianwisti/rgb-zola
|
60504a4ea91e72b14c120dc47b43264f2691d5cd
|
[
"CC-BY-4.0"
] | null | null | null |
"""Check that content sources are in good, quality shape."""
from pathlib import Path
from subprocess import run
import frontmatter
import pytest
import yamale
CONTENT_PATH = Path("content")
ALL_MARKDOWN = list(CONTENT_PATH.glob("**/*.md"))
BLOG_ARTICLES = [
article
for article in ALL_MARKDOWN
if article.parts[1] == "blog" and article.name == "index.md"
]
CONFIG_ARTICLES = [article for article in ALL_MARKDOWN if article.parts[1] == "config"]
PAGE_ARTICLES = [
article
for article in ALL_MARKDOWN
if article not in BLOG_ARTICLES
and article not in CONFIG_ARTICLES
and article.name == "index.md"
]
SECTION_PAGES = [
article
for article in ALL_MARKDOWN
if article.name == "_index.md" and article not in CONFIG_ARTICLES
]
ARTIFACT_PASS = [
"using-markdown-it-in-python",
]
ARTIFACT_PAGES = [
page for page in ALL_MARKDOWN if page.parent.name not in ARTIFACT_PASS
]
@pytest.fixture(scope="session")
def blog_schema():
return yamale.make_schema("./schemas/blog.yml", parser="ruamel")
@pytest.fixture(scope="session")
def config_schema():
return yamale.make_schema("./schemas/config.yml", parser="ruamel")
@pytest.fixture(scope="session")
def page_schema():
return yamale.make_schema("./schemas/page.yml", parser="ruamel")
@pytest.fixture(scope="session")
def section_schema():
return yamale.make_schema("./schemas/section.yml", parser="ruamel")
class TestFrontmatter:
@pytest.mark.parametrize("content_path", BLOG_ARTICLES)
def test_valid_frontmatter_for_blog(self, blog_schema, content_path: Path):
print(f"Validing frontmatter for '{content_path}'")
components = content_path.read_text(encoding="utf-8").split("---\n")
frontmatter = components[1]
data = yamale.make_data(content=frontmatter)
assert yamale.validate(blog_schema, data)
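    # The split above assumes "---"-fenced YAML frontmatter: a file beginning
    # with "---\ntitle: Post\n---\nBody" splits into ['', 'title: Post\n', 'Body'],
    # so components[1] is the YAML block handed to yamale (the sample content is
    # illustrative).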
@pytest.mark.parametrize("content_path", CONFIG_ARTICLES)
def test_valid_frontmatter_for_config(self, content_path, config_schema):
print(f"Validing frontmatter for '{content_path}'")
components = content_path.read_text(encoding="utf-8").split("---\n")
frontmatter = components[1]
data = yamale.make_data(content=frontmatter)
assert yamale.validate(config_schema, data)
@pytest.mark.parametrize("page", PAGE_ARTICLES)
def test_valid_frontmatter_for_pages(self, page, page_schema):
print(f"Validing frontmatter for '{page}'")
components = page.read_text(encoding="utf-8").split("---\n")
frontmatter = components[1]
data = yamale.make_data(content=frontmatter)
assert yamale.validate(page_schema, data)
@pytest.mark.parametrize("section", SECTION_PAGES)
def test_valid_frontmatter_for_sections(self, section, section_schema):
print(f"Validing frontmatter for '{section}'")
components = section.read_text(encoding="utf-8").split("---\n")
frontmatter = components[1]
data = yamale.make_data(content=frontmatter)
assert yamale.validate(section_schema, data)
class TestMarkdown:
def test_markdownlint(self):
res = run(["markdownlint", "-j", "content/**/index.md"], capture_output=True)
print(res.stderr.decode())
assert res.returncode == 0
@pytest.mark.parametrize("content_path", ARTIFACT_PAGES, ids=str)
def test_admonition_artifact(self, content_path: Path):
post = frontmatter.loads(content_path.read_text(encoding="utf-8"))
assert ":::" not in post.content
| 31.256637
| 87
| 0.703001
|
e3062059aaf3642ef120a7d85726a8e5d2d2df9b
| 1,079
|
py
|
Python
|
examples/animations/warping.py
|
goodhertz/coldtype
|
2460b66abb28e9532f9e2b55167ae565f95366e7
|
[
"Apache-2.0"
] | 142
|
2020-06-12T17:01:58.000Z
|
2022-03-16T23:21:37.000Z
|
examples/animations/warping.py
|
goodhertz/coldtype
|
2460b66abb28e9532f9e2b55167ae565f95366e7
|
[
"Apache-2.0"
] | 35
|
2020-04-15T15:34:54.000Z
|
2022-03-19T20:26:47.000Z
|
examples/animations/warping.py
|
goodhertz/coldtype
|
2460b66abb28e9532f9e2b55167ae565f95366e7
|
[
"Apache-2.0"
] | 14
|
2020-06-23T18:56:46.000Z
|
2022-03-31T15:54:56.000Z
|
from coldtype import *
from coldtype.warping import warp_fn
from coldtype.fx.skia import phototype
Style.RegisterShorthandPrefix("≈", "~/Type/fonts/fonts")
peshka = Font.Find("CoFoPeshkaV")
loop = Loop(150, 15, [ # some keyframes
dict(wdth=0, wght=0, rotate=-15, leading=200,
font_size=700, warp=0, blur=15),
dict(wdth=1, wght=1, rotate=0, leading=10,
font_size=50, warp=200, blur=5),
dict(wdth=0, wght=1, rotate=15, leading=100,
font_size=500, warp=50, blur=3),
dict(wdth=0.5, wght=0.5, rotate=0, leading=-470,
font_size=330, warp=0, blur=1)
])
@animation(timeline=loop, bg=0)
def warp(f):
state = f.a.t.current_state(f.i, e="eeio")
return ((ß:=StSt("WARP\nBLUR", peshka, ro=1, **state))
.align(f.a.r).pen() # a single, centered vector
.f(Gradient.V(ß.ambit(), hsl(0.7), hsl(0.9)))
#.flatten(5) # slower but preserves curves across warp
.nlt(warp_fn(f.i*30, f.i, mult=int(state["warp"])))
.f(1)
#.ch(phototype(f.a.r, state["blur"], cutw=50, fill=hsl(0.75)))
)
| 37.206897
| 70
| 0.612604
|
17c89b102d5fc0d5e6b4220c5cf78a24c1d13a66
| 4,722
|
py
|
Python
|
purity_fb/purity_fb_1dot12/models/array_connection_path_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 5
|
2017-09-08T20:47:22.000Z
|
2021-06-29T02:11:05.000Z
|
purity_fb/purity_fb_1dot12/models/array_connection_path_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 16
|
2017-11-27T20:57:48.000Z
|
2021-11-23T18:46:43.000Z
|
purity_fb/purity_fb_1dot12/models/array_connection_path_response.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 22
|
2017-10-13T15:33:05.000Z
|
2021-11-08T19:56:21.000Z
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.12 Python SDK
Pure Storage FlashBlade REST 1.12 Python SDK. Compatible with REST API versions 1.0 - 1.12. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.12
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ArrayConnectionPathResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[ArrayConnectionPath]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None): # noqa: E501
"""ArrayConnectionPathResponse - a model defined in Swagger""" # noqa: E501
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""Gets the pagination_info of this ArrayConnectionPathResponse. # noqa: E501
pagination information, only available in GET requests # noqa: E501
:return: The pagination_info of this ArrayConnectionPathResponse. # noqa: E501
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""Sets the pagination_info of this ArrayConnectionPathResponse.
pagination information, only available in GET requests # noqa: E501
:param pagination_info: The pagination_info of this ArrayConnectionPathResponse. # noqa: E501
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""Gets the items of this ArrayConnectionPathResponse. # noqa: E501
A list of array connection path objects. # noqa: E501
:return: The items of this ArrayConnectionPathResponse. # noqa: E501
:rtype: list[ArrayConnectionPath]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ArrayConnectionPathResponse.
A list of array connection path objects. # noqa: E501
:param items: The items of this ArrayConnectionPathResponse. # noqa: E501
:type: list[ArrayConnectionPath]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionPathResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionPathResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.271523
| 251
| 0.613088
|
489301969a2f7e437c398ebfb022471543a594d3
| 358
|
py
|
Python
|
src/metarl/torch/optimizers/__init__.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | 2
|
2021-02-07T12:14:52.000Z
|
2021-07-29T08:07:22.000Z
|
src/metarl/torch/optimizers/__init__.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
src/metarl/torch/optimizers/__init__.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
"""PyTorch optimizers."""
from metarl.torch.optimizers.conjugate_gradient_optimizer import (
ConjugateGradientOptimizer)
from metarl.torch.optimizers.differentiable_sgd import DifferentiableSGD
from metarl.torch.optimizers.optimizer_wrapper import OptimizerWrapper
__all__ = [
'OptimizerWrapper', 'ConjugateGradientOptimizer', 'DifferentiableSGD'
]
| 35.8
| 73
| 0.832402
|
bc275a9c665ad37e4b267c896a0a491345f2dc94
| 1,367
|
py
|
Python
|
sasx/magic.py
|
python-sasx/sasx
|
b8c812bf204701d5a175ad0f0c2286e6494f79c2
|
[
"MIT"
] | 3
|
2016-04-24T15:21:00.000Z
|
2018-07-08T04:28:35.000Z
|
sasx/magic.py
|
python-sasx/sasx
|
b8c812bf204701d5a175ad0f0c2286e6494f79c2
|
[
"MIT"
] | null | null | null |
sasx/magic.py
|
python-sasx/sasx
|
b8c812bf204701d5a175ad0f0c2286e6494f79c2
|
[
"MIT"
] | null | null | null |
from __future__ import division
import pandas
from IPython import get_ipython
from IPython.core.magic import (Magics, magics_class, cell_magic)
from sasx.parse import sasx_parse
from sasx.code import sasx_preloop
from sasx.code import sasx_loop
from sasx.code import sasx_postloop
@magics_class
class SasxMagics(Magics):
"""Define Magic to run code in Simple dAta SyntaX (SASX).
%%sasx - Transform SASX code into Python code and execute it.
Special keywords recognised by SASX :
- data
- set
- drop
- keep
- output
- where ?
- _n_ ?
- groupby ?
"""
def __init__(self, shell):
super(SasxMagics, self).__init__(shell)
@cell_magic
def sasx(self, line_param, cell):
cell_parsed = sasx_parse(cell, self)
if cell_parsed['status']==0:
print(cell_parsed['message'])
return
#Generate python code
str_code = ""
str_code = str_code + sasx_preloop(cell_parsed)
str_code = str_code + sasx_loop(cell_parsed)
str_code = str_code + sasx_postloop(cell_parsed)
#Execute the code
ns = {}
print("-----")
print(str_code)
print("-----")
        exec(str_code, self.shell.user_ns, ns)
# Register
ip = get_ipython()
ip.register_magics(SasxMagics)
| 23.982456
| 68
| 0.603511
|
21396df2d73ba0f2b3a0b50a224c0edc28e70bcb
| 6,408
|
py
|
Python
|
Examples/FanFicFare-master/fanficfare/adapters/adapter_noveltrovecom.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
Examples/FanFicFare-master/fanficfare/adapters/adapter_noveltrovecom.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
Examples/FanFicFare-master/fanficfare/adapters/adapter_noveltrovecom.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 Fanficdownloader team, 2017 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################################################################################################
## Adapted by GComyn on April 22, 2017
####################################################################################################
import logging
import json
import re
import sys # ## used for debug purposes
import time
import urllib2
import datetime
from base_adapter import BaseSiteAdapter, makeDate
from .. import exceptions as exceptions
from ..htmlcleanup import stripHTML
logger = logging.getLogger(__name__)
####################################################################################################
def getClass():
return NovelTroveComSiteAdapter
####################################################################################################
class NovelTroveComSiteAdapter(BaseSiteAdapter):
''' This is a site with 1 story per page, so no multiple chapter stories
The date is listed (on the newer stories) as a month and a year, so I'll be adding that
to the summary, instead of trying to transform it to a date. '''
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult = False
# get storyId from url
# https://noveltrove.com/story/983/put-that-big-cock-in-me
self.story.setMetadata('storyId', self.parsedUrl.path.split('/')[2] + '_' + self.parsedUrl.path.split('/')[3])
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','ntcom')
# This is a 1 story/page site, so we will initialize the variable to keep the soup
self.html = ''
self.endindex = []
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%d %b. '%y"
####################################################################################################
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'noveltrove.com'
####################################################################################################
@classmethod
def getSiteExampleURLs(cls):
return "https://"+cls.getSiteDomain()+"/story/12345/astoryname"
####################################################################################################
def getSiteURLPattern(self):
return r"https://"+re.escape(self.getSiteDomain())+r"/story/([0-9])+/*(?P<id>[^/]+)"
####################################################################################################
## Getting the chapter list and the meta data, plus 'is adult' checking.
def doExtractChapterUrlsAndMetadata(self, get_cover=True):
url = self.url
try:
data = self._fetchUrl(url)
        except urllib2.HTTPError as e:
if e.code == 404:
raise exceptions.StoryDoesNotExist('Error 404: {0}'.format(self.url))
else:
raise e
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# Now go hunting for all the meta data we can get
metablock = soup.find('div', {'class', 'title-infos'})
## Getting Title
title = stripHTML(metablock.find('h1'))
self.story.setMetadata('title', title)
## Getting author
author = metablock.find('a', {'class':'author'})
self.story.setMetadata('authorId',author['href'].split('/')[1])
self.story.setMetadata('authorUrl','https://'+self.host+author['href'])
self.story.setMetadata('author',author.string)
## Get the categories
for tag in metablock.find_all('a', {'class':'story-category'}):
self.story.addToList('category',stripHTML(tag))
## There is no summary for these stories, so I'm going to take the first
## 250 characters.
synopsis = ''
pcount = 0
for para in soup.find('div', {'class':'body'}).find_all('p'):
synopsis += para.get_text() + ' '
pcount += 1
if pcount > 10:
break
synopsis = synopsis.strip()[:250] + '...'
self.setDescription(url, synopsis)
## Since this is a 1 story/page site, the published and updated dates are the same.
dateposted = stripHTML(metablock.find('div', {'class':'date'}))
self.story.setMetadata('datePublished', makeDate(dateposted, self.dateformat))
self.story.setMetadata('dateUpdated', makeDate(dateposted, self.dateformat))
## This is a 1 story/page site, so we'll keep the soup for the getChapterText function
## the chapterUrl and numChapters need to be set as well
self.html = soup
self.chapterUrls.append((self.story.getMetadata('title'), url))
self.story.setMetadata('numChapters', len(self.chapterUrls))
self.story.setMetadata('status', 'Completed')
## Getting the non-standard title page entries
copyrt = soup.find('div', {'class':'copyright'}).get_text()
self.story.setMetadata('copyright', copyrt)
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Using data that we got from: %s' % url)
soup = self.html
story = soup.find('div', {'class':'body'})
        if story is None:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,story)
| 40.301887
| 118
| 0.564607
|
a6e1e665cdb4972593b64de7d6c9eb40a4c4f205
| 7,327
|
py
|
Python
|
sdk/python/pulumi_azure/containerservice/get_kubernetes_cluster.py
|
Frassle/pulumi-azure
|
593dd1020b09b83422928913d06bf91538926155
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/containerservice/get_kubernetes_cluster.py
|
Frassle/pulumi-azure
|
593dd1020b09b83422928913d06bf91538926155
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/containerservice/get_kubernetes_cluster.py
|
Frassle/pulumi-azure
|
593dd1020b09b83422928913d06bf91538926155
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetKubernetesClusterResult(object):
"""
A collection of values returned by getKubernetesCluster.
"""
def __init__(__self__, addon_profiles=None, agent_pool_profiles=None, dns_prefix=None, fqdn=None, kube_admin_configs=None, kube_admin_config_raw=None, kube_configs=None, kube_config_raw=None, kubernetes_version=None, linux_profiles=None, location=None, network_profiles=None, node_resource_group=None, role_based_access_controls=None, service_principals=None, tags=None, id=None):
if addon_profiles and not isinstance(addon_profiles, list):
raise TypeError('Expected argument addon_profiles to be a list')
__self__.addon_profiles = addon_profiles
"""
A `addon_profile` block as documented below.
"""
if agent_pool_profiles and not isinstance(agent_pool_profiles, list):
raise TypeError('Expected argument agent_pool_profiles to be a list')
__self__.agent_pool_profiles = agent_pool_profiles
"""
One or more `agent_profile_pool` blocks as documented below.
"""
if dns_prefix and not isinstance(dns_prefix, str):
raise TypeError('Expected argument dns_prefix to be a str')
__self__.dns_prefix = dns_prefix
"""
The DNS Prefix of the managed Kubernetes cluster.
"""
if fqdn and not isinstance(fqdn, str):
raise TypeError('Expected argument fqdn to be a str')
__self__.fqdn = fqdn
"""
The FQDN of the Azure Kubernetes Managed Cluster.
"""
if kube_admin_configs and not isinstance(kube_admin_configs, list):
raise TypeError('Expected argument kube_admin_configs to be a list')
__self__.kube_admin_configs = kube_admin_configs
"""
A `kube_admin_config` block as defined below. This is only available when Role Based Access Control with Azure Active Directory is enabled.
"""
if kube_admin_config_raw and not isinstance(kube_admin_config_raw, str):
raise TypeError('Expected argument kube_admin_config_raw to be a str')
__self__.kube_admin_config_raw = kube_admin_config_raw
"""
Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled.
"""
if kube_configs and not isinstance(kube_configs, list):
raise TypeError('Expected argument kube_configs to be a list')
__self__.kube_configs = kube_configs
"""
A `kube_config` block as defined below.
"""
if kube_config_raw and not isinstance(kube_config_raw, str):
raise TypeError('Expected argument kube_config_raw to be a str')
__self__.kube_config_raw = kube_config_raw
"""
Base64 encoded Kubernetes configuration.
"""
if kubernetes_version and not isinstance(kubernetes_version, str):
raise TypeError('Expected argument kubernetes_version to be a str')
__self__.kubernetes_version = kubernetes_version
"""
The version of Kubernetes used on the managed Kubernetes Cluster.
"""
if linux_profiles and not isinstance(linux_profiles, list):
raise TypeError('Expected argument linux_profiles to be a list')
__self__.linux_profiles = linux_profiles
"""
A `linux_profile` block as documented below.
"""
if location and not isinstance(location, str):
raise TypeError('Expected argument location to be a str')
__self__.location = location
"""
The Azure Region in which the managed Kubernetes Cluster exists.
"""
if network_profiles and not isinstance(network_profiles, list):
raise TypeError('Expected argument network_profiles to be a list')
__self__.network_profiles = network_profiles
"""
A `network_profile` block as documented below.
"""
if node_resource_group and not isinstance(node_resource_group, str):
raise TypeError('Expected argument node_resource_group to be a str')
__self__.node_resource_group = node_resource_group
"""
Auto-generated Resource Group containing AKS Cluster resources.
"""
if role_based_access_controls and not isinstance(role_based_access_controls, list):
raise TypeError('Expected argument role_based_access_controls to be a list')
__self__.role_based_access_controls = role_based_access_controls
"""
A `role_based_access_control` block as documented below.
"""
if service_principals and not isinstance(service_principals, list):
raise TypeError('Expected argument service_principals to be a list')
__self__.service_principals = service_principals
"""
A `service_principal` block as documented below.
"""
if tags and not isinstance(tags, dict):
raise TypeError('Expected argument tags to be a dict')
__self__.tags = tags
"""
A mapping of tags assigned to this resource.
"""
if id and not isinstance(id, str):
raise TypeError('Expected argument id to be a str')
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_kubernetes_cluster(name=None, resource_group_name=None):
"""
Use this data source to access information about an existing Managed Kubernetes Cluster (AKS).
~> **Note:** All arguments including the client secret will be stored in the raw state as plain-text.
[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html).
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__ret__ = await pulumi.runtime.invoke('azure:containerservice/getKubernetesCluster:getKubernetesCluster', __args__)
return GetKubernetesClusterResult(
addon_profiles=__ret__.get('addonProfiles'),
agent_pool_profiles=__ret__.get('agentPoolProfiles'),
dns_prefix=__ret__.get('dnsPrefix'),
fqdn=__ret__.get('fqdn'),
kube_admin_configs=__ret__.get('kubeAdminConfigs'),
kube_admin_config_raw=__ret__.get('kubeAdminConfigRaw'),
kube_configs=__ret__.get('kubeConfigs'),
kube_config_raw=__ret__.get('kubeConfigRaw'),
kubernetes_version=__ret__.get('kubernetesVersion'),
linux_profiles=__ret__.get('linuxProfiles'),
location=__ret__.get('location'),
network_profiles=__ret__.get('networkProfiles'),
node_resource_group=__ret__.get('nodeResourceGroup'),
role_based_access_controls=__ret__.get('roleBasedAccessControls'),
service_principals=__ret__.get('servicePrincipals'),
tags=__ret__.get('tags'),
id=__ret__.get('id'))
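# --- Editor's hedged usage sketch (illustrative only) ---
# Inside a Pulumi program this data source would be awaited roughly as below;
# the cluster and resource-group names are hypothetical placeholders.
#
#   cluster = await get_kubernetes_cluster(
#       name='example-aks', resource_group_name='example-rg')
#   pulumi.export('kube_config', cluster.kube_config_raw)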
| 49.506757
| 384
| 0.686229
|
d99ca9dd059c744d68ba1802d9a1ede730341d8c
| 893
|
py
|
Python
|
tracker/site/management/commands/reset_counters.py
|
ashwoods/potato
|
0335ae8ee4a28832bc1ca8337c03c029b44c0665
|
[
"Apache-2.0"
] | null | null | null |
tracker/site/management/commands/reset_counters.py
|
ashwoods/potato
|
0335ae8ee4a28832bc1ca8337c03c029b44c0665
|
[
"Apache-2.0"
] | 16
|
2016-02-16T20:12:14.000Z
|
2016-02-22T06:47:44.000Z
|
tracker/site/management/commands/reset_counters.py
|
ashwoods/potatoist
|
0335ae8ee4a28832bc1ca8337c03c029b44c0665
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from tracker.site.models import Project
class Command(BaseCommand):
    help = 'Reset project and ticket state counters for all projects'
def handle(self, *args, **options):
counter = 0
STATES = {'deleted': 0, 'new': 1, 'open': 2, 'closed': 3}
for project in Project.objects.all():
project_counter = 0
project.counter.reset()
            for key, value in STATES.items():
getattr(project, '%s_counter' % key).reset()
state_counter = project.tickets.filter(state=value).count()
getattr(project, '%s_counter' % key).increment(state_counter)
counter += state_counter
project_counter += state_counter
project.counter.increment(project_counter)
self.stdout.write('Reset %s tickets' % counter)
| 40.590909
| 77
| 0.617021
|
1757b98c42a0e25221799b457250a1dd6614bb53
| 1,699
|
py
|
Python
|
pwm/compute-ic.py
|
CostaLab/snp-selex
|
33421c4d1eff8ee60594af983b9f95a53502ef9e
|
[
"MIT"
] | 13
|
2021-01-28T06:30:31.000Z
|
2022-02-14T16:40:11.000Z
|
pwm/compute-ic.py
|
CostaLab/snp-selex
|
33421c4d1eff8ee60594af983b9f95a53502ef9e
|
[
"MIT"
] | 2
|
2021-05-27T07:54:08.000Z
|
2022-01-17T17:50:21.000Z
|
pwm/compute-ic.py
|
CostaLab/snp-selex
|
33421c4d1eff8ee60594af983b9f95a53502ef9e
|
[
"MIT"
] | 6
|
2021-02-01T02:14:08.000Z
|
2022-01-13T17:14:56.000Z
|
import click
import csv
import numpy as np
from math import log
from Bio import motifs
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio.Alphabet.IUPAC import unambiguous_dna
def homer_parse(fstream):
def build(name, freq):
m = motifs.Motif(counts=freq)
m.name = name
return m
nct = "ACGT"
name = ""
mtx = {a: [] for a in nct}
for line in fstream:
if line.startswith('>'):
if name != '':
yield build(name, mtx)
name = line.rstrip().split()[1]
mtx = {a: [] for a in nct}
else:
score = [float(x) for x in line.rstrip().split()]
for i, a in enumerate(nct):
mtx[a].append(score[i])
if name != '':
yield build(name, mtx)
@click.command()
@click.option('--motif', '-m', type=click.File('r'), required=True)
@click.option('--out', '-o', type=click.File('w'), default='-')
def main(motif, out):
writer = csv.DictWriter(out, delimiter='\t', fieldnames=['name', 'ic', 'mean_ic'])
writer.writeheader()
for m in homer_parse(motif):
ic = _ic(m)
writer.writerow(dict(name = _name(m), ic = ic, mean_ic = ic/m.length))
def _ic(motif):
acc = 0
pwm = motif.pwm
# background = motif.background
for i in range(pwm.length):
for letter in "ACGT":
p = pwm[letter, i]
# b = 0.25 # Naive background
# b = background[letter]
acc += p * log(p, 2)
acc += 2
return acc
def _name(motif):
name = motif.name.split('/')[0]
return name.upper().replace('-', '_')
if __name__ == '__main__':
main()
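# --- Editor's hedged sanity check (not part of the original script) ---
# _ic() above implements the per-column information content 2 + sum_p p*log2(p).
# A fully conserved column should give ~2 bits and a uniform column 0 bits; the
# eps clamp only guards this standalone check against log(0).
def _column_ic_check(probs, eps=1e-12):
    return 2 + sum(max(p, eps) * log(max(p, eps), 2) for p in probs)
# _column_ic_check([1, 0, 0, 0]) ~= 2.0 ; _column_ic_check([0.25] * 4) == 0.0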
| 24.623188
| 86
| 0.547381
|
b4c7c26a148e569f736d7dc7f9ea394cb617a7a0
| 1,370
|
py
|
Python
|
UniSim/tools/analyzer.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
UniSim/tools/analyzer.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
UniSim/tools/analyzer.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
class Analyzer(object):
def __init__(self, dbpath):
self.dbpath = dbpath
def connect(self):
if self.dbpath:
self.conn = sqlite3.connect(self.dbpath)
self.cursor = self.conn.cursor()
def disconnect(self):
if self.conn:
self.conn.close()
def in_rect(self, x, y, dx, dy):
ret = []
# Number of object in rect angle (x, y), (x + dx, y + dy)
sql = u"""
SELECT * FROM master;
"""
self.cursor.execute(sql)
for tick, status_id in self.cursor.fetchall():
#res_tick, res_status_id = self.cursor.fetchone()
sql = u"""
select count(*) from "status_%s"
where %s < lat and lat < %s
and %s < lng and lng < %s;
""" % (status_id, x, x + dx, y , y + dy)
self.cursor.execute(sql)
n = self.cursor.fetchone()[0]
ret.append((tick, n))
return ret
if __name__ == "__main__":
ana = Analyzer("./sample.db")
ana.connect()
data = ana.in_rect(100, 0, 4000, 2000)
x, y = zip(*data[:-1])
plt.plot(x,y)
plt.show()
ana.disconnect()
| 25.849057
| 82
| 0.537226
|
20a848b7967e2de70cc68a8a9b85b8f4c9992191
| 478
|
py
|
Python
|
tests/test_paths.py
|
seanholmes77/yfs
|
7f422848aec5f47e8a76c43647e034740abadbde
|
[
"MIT"
] | 3
|
2021-01-24T03:52:28.000Z
|
2022-01-10T19:21:09.000Z
|
tests/test_paths.py
|
seanholmes77/yfs
|
7f422848aec5f47e8a76c43647e034740abadbde
|
[
"MIT"
] | null | null | null |
tests/test_paths.py
|
seanholmes77/yfs
|
7f422848aec5f47e8a76c43647e034740abadbde
|
[
"MIT"
] | 1
|
2021-01-19T02:01:23.000Z
|
2021-01-19T02:01:23.000Z
|
from yfs import __version__
from yfs.paths import PROJECT_ROOT, SOURCE_ROOT
import toml
def test_sanity():
pyproject_path = PROJECT_ROOT / "pyproject.toml"
assert pyproject_path.exists()
with open(pyproject_path, mode="r") as file:
content = toml.loads(file.read())
assert content["tool"]["poetry"].get("version") is not None
assert content["tool"]["poetry"].get("version") == __version__
def test_sanity_two():
assert SOURCE_ROOT.exists()
| 25.157895
| 66
| 0.707113
|
a5b5e7b7c9310b809d126aed9e9a7629c11730ce
| 5,569
|
py
|
Python
|
src/patcher.py
|
luisvalesilva/multisurv
|
9f7c12b51260a3ea853df786014b5e68284c2b95
|
[
"MIT"
] | 11
|
2021-08-02T09:18:59.000Z
|
2022-02-04T15:51:25.000Z
|
src/patcher.py
|
luisvalesilva/multisurv
|
9f7c12b51260a3ea853df786014b5e68284c2b95
|
[
"MIT"
] | null | null | null |
src/patcher.py
|
luisvalesilva/multisurv
|
9f7c12b51260a3ea853df786014b5e68284c2b95
|
[
"MIT"
] | 8
|
2021-07-02T08:06:47.000Z
|
2022-03-08T16:07:34.000Z
|
"""Run offline patching from GDC slides.
Generate WSI patches and save to disk as PNG files.
"""
import os
import threading
import uuid
from PIL import Image
from wsipre import slide
class PatchGenerator(object):
"""Generator of GDC WSI patches."""
def __init__(self, slide_files, slide_level=0, random_tissue_patch=False,
patch_size=(299, 299), return_annotation=False):
"""
Parameters
----------
slide_files: list of 2-tuples
WSI and .XML annotation file path pairs.
slide_level: int
Slide level to get patch from.
random_tissue_patch: bool
Whether to get random patch from tissue regions, ignoring
annotations.
patch_size: 2-tuple
Patch size.
return_annotation: bool
Whether to output patch annotation.
"""
self.slide_files = slide_files
self.slide_level = slide_level
self.random_tissue_patch = random_tissue_patch
self.patch_size = patch_size
self.return_annotation = return_annotation
self.lock = threading.Lock()
self.reset()
self.n = len(slide_files)
def _get_random_patch(self, selected_slide):
wsi_file, xml_file = selected_slide
wsi = slide.Slide(wsi_file, xml_file, 'asap')
# Some slides have no detected tumor regions (label list is empty)
# Just skip them
if not wsi.labels:
return 'No tumor annotations found.'
patch, annotation = wsi.read_random_patch(
level=self.slide_level, size=self.patch_size, target_class=1,
min_class_area_ratio=0, polygon_type='area')
if self.return_annotation:
return patch, annotation, os.path.basename(wsi_file)
else:
return patch, os.path.basename(wsi_file)
def _get_random_tissue_patch(self, selected_slide):
if isinstance(selected_slide, (list, tuple)):
wsi_file, _ = selected_slide
else:
wsi_file = selected_slide
wsi = slide.Slide(wsi_file)
patch = wsi.read_random_tissue_patch(
level=self.slide_level, size=self.patch_size)
return patch, os.path.basename(wsi_file)
def reset(self):
"""Reset generator."""
self.i = 0
def __next__(self):
with self.lock:
if self.i >= self.n:
self.reset()
if self.random_tissue_patch:
result = self._get_random_tissue_patch(
self.slide_files[self.i])
else:
result = self._get_random_patch(self.slide_files[self.i])
self.i += 1
return result
class OfflinePatcher(object):
"""Run offline patching."""
def __init__(self, slide_files, target_dir, patch_size, slide_level=0,
get_random_tissue_patch=False):
self.slide_files = slide_files
self.target_dir = target_dir
self.patch_size = patch_size
self.slide_level = slide_level
self.file_format = 'png' # to preserve pixel values (unlike JPG...)
self.filename = None
self.patch_gen = PatchGenerator(
slide_files=self.slide_files, slide_level=self.slide_level,
random_tissue_patch=get_random_tissue_patch,
patch_size=self.patch_size)
# Make sure target directory exists
if not os.path.isdir(self.target_dir):
os.makedirs(self.target_dir)
def _compose_path(self):
# Make sure filename is unique
unique_id = str(uuid.uuid4().hex)[:5]
slide_file_name = os.path.splitext(self.filename)[0]
# Remove 2nd part of name
slide_file_name = os.path.splitext(slide_file_name)[0]
unique_name = slide_file_name + '_' + unique_id
unique_name += '.' + self.file_format.lower() # Add file extension
path = os.path.join(self.target_dir, unique_name)
return path
def _save(self, path):
"""Save WSI patch to disk.
Save image to PNG format, in order to preserve the numpy array pixel
values. There are many options to do this:
- matplotlib.image.imsave
- cv2.imwrite
- skimage.io.imsave
- PIL.Image.fromarray(patch).save
Decided to use PIL.
"""
self.patch.save(path)
def _make_patch(self):
self.patch, self.filename = next(self.patch_gen)
file_path = self._compose_path()
self._save(file_path)
def run(self, n):
"""Generate and save indicated number of image patches per slide.
Parameters
----------
n: int
Number of patches to generate (slides are selected in sequence).
"""
# Upon keyboard interrupt save last patch to make sure it is not
# corrupted
print('Generating WSI patches')
print('----------------------')
try:
for patch in range(n):
print('\r' + f'{str(patch + 1)}/{str(n)}', end='')
# Skip slides with no detected tumor regions
result = next(self.patch_gen)
if result == 'No tumor annotations found.':
continue
self.patch, self.filename = result
file_path = self._compose_path()
self._save(file_path)
except KeyboardInterrupt:
file_path = self._compose_path()
self._save(file_path)
print()
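# --- Editor's hedged usage sketch (illustrative only) ---
# OfflinePatcher would typically be driven like this; the slide/annotation
# paths and the patch count are hypothetical placeholders.
#
#   patcher = OfflinePatcher(
#       slide_files=[('slides/case_1.svs', 'slides/case_1.xml')],
#       target_dir='patches/', patch_size=(299, 299),
#       get_random_tissue_patch=True)
#   patcher.run(n=100)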
| 32.190751
| 77
| 0.598133
|
8c9d652bebd2fe21abfef15741e608c055827e86
| 6,321
|
py
|
Python
|
test/functional/feature_cltv.py
|
developertask/pivx-4
|
012420f23ae79c8f2e42223cc4bdb2b315e4445c
|
[
"MIT"
] | null | null | null |
test/functional/feature_cltv.py
|
developertask/pivx-4
|
012420f23ae79c8f2e42223cc4bdb2b315e4445c
|
[
"MIT"
] | null | null | null |
test/functional/feature_cltv.py
|
developertask/pivx-4
|
012420f23ae79c8f2e42223cc4bdb2b315e4445c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block
height 750 (CLTV_HEIGHT below).
"""
from test_framework.test_framework import OhonetworkTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum
from io import BytesIO
CLTV_HEIGHT = 750
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
def cltv_invalidate(tx):
'''Modify the signature in vin 0 of the tx to fail CLTV
Prepends -1 CLTV DROP in the scriptSig itself.
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def cltv_validate(node, tx, height):
'''Modify the signature in vin 0 of the tx to pass CLTV
Prepends <height> CLTV DROP in the scriptSig, and sets
the locktime to height'''
tx.vin[0].nSequence = 0
tx.nLockTime = height
# Need to re-sign, since nSequence and nLockTime changed
signed_result = node.signrawtransaction(ToHex(tx))
new_tx = CTransaction()
new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex'])))
new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
list(CScript(new_tx.vin[0].scriptSig)))
return new_tx
def create_transaction(node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex'])))
return tx
class BIP65Test(OhonetworkTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.nodes[0].generate(205)
self.log.info("Test that blocks must now be at least version 5")
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT + 205), block_time)
block.nVersion = 4
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = 5
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
# Verify that a block with this transaction is invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 5 block with a valid-according-to-CLTV transaction is accepted")
spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
spendtx.rehash()
block.vtx.pop(1)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP65Test().main()
| 40.780645
| 107
| 0.682012
|
6b96dc68283c30619b9e51c1cf05bac17194ed9c
| 514
|
py
|
Python
|
protex/__init__.py
|
wiederm/protex
|
7fb945dcee138705e28465f4657678185ed97cb8
|
[
"MIT"
] | 5
|
2021-06-01T13:36:29.000Z
|
2022-03-04T13:10:57.000Z
|
protex/__init__.py
|
wiederm/protex
|
7fb945dcee138705e28465f4657678185ed97cb8
|
[
"MIT"
] | 5
|
2021-05-12T07:34:24.000Z
|
2022-03-21T14:49:59.000Z
|
protex/__init__.py
|
wiederm/protex
|
7fb945dcee138705e28465f4657678185ed97cb8
|
[
"MIT"
] | 1
|
2021-12-09T16:18:13.000Z
|
2021-12-09T16:18:13.000Z
|
"""
protex
Proton exchange using SAMS and openMM for ionic liquids
"""
# Add imports here
from .protex import *
# Handle versioneer
from ._version import get_versions
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
import logging
# format logging message
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)1s()] %(message)s"
# set logging level
logging.basicConfig(format=FORMAT, datefmt="%d-%m-%Y:%H:%M", level=logging.INFO)
| 22.347826
| 80
| 0.743191
|
9a5a16a0441913718fc674d42f3bbebcf055eb82
| 6,270
|
py
|
Python
|
nas_bench_x11/models/svd_xgb.py
|
automl/nas-bench-x11
|
56aee15f125339c4d2af1cbfad9f66fd4643c9d7
|
[
"Apache-2.0"
] | 14
|
2021-12-08T17:56:01.000Z
|
2022-01-15T05:06:59.000Z
|
nas_bench_x11/models/svd_xgb.py
|
shenyann/nas-bench-x11
|
ebf64ce3c30cc2ad0909508b5e25652011179956
|
[
"Apache-2.0"
] | 4
|
2022-01-10T09:04:38.000Z
|
2022-01-23T03:35:09.000Z
|
nas_bench_x11/models/svd_xgb.py
|
shenyann/nas-bench-x11
|
ebf64ce3c30cc2ad0909508b5e25652011179956
|
[
"Apache-2.0"
] | 1
|
2021-12-08T17:56:06.000Z
|
2021-12-08T17:56:06.000Z
|
import logging
import os
import joblib
import numpy as np
import xgboost as xgb
from sklearn.multioutput import RegressorChain
from sklearn.preprocessing import StandardScaler
from nas_bench_x11.utils import utils
from nas_bench_x11.surrogate_model import SurrogateModel
class SVDXGBModel(SurrogateModel):
def __init__(self, data_root, log_dir, seed, model_config, data_config, search_space, nb101_api):
super().__init__(data_root, log_dir, seed, model_config, data_config, search_space, nb101_api)
self.model = None
self.model_config["param:objective"] = "reg:squarederror"
self.model_config["param:eval_metric"] = "rmse"
def parse_param_config(self):
identifier = "param:"
param_config = dict()
for key, val in self.model_config.items():
if key.startswith(identifier):
param_config[key.replace(identifier, "")] = val
return param_config
def train(self):
X_train, y_train, _ = self.load_dataset(dataset_type='train', use_full_lc=True)
X_val, y_val, _ = self.load_dataset(dataset_type='val', use_full_lc=True)
param_config = self.parse_param_config()
param_config["seed"] = self.seed
# new code
self.num_components = param_config["num_components"]
self.ss = StandardScaler()
u, s, vh = np.linalg.svd(y_train, full_matrices=False)
self.svd_s = s
self.svd_vh = vh
self.model = RegressorChain(xgb.XGBRegressor(
num_rounds=param_config["num_rounds"],
boosting_type=param_config["boosting_type"],
max_depth=param_config["max_depth"],
learning_rate=param_config["learning_rate"],
num_leaves=param_config["num_leaves"],
min_child_weight=param_config["min_child_weight"],
reg_alpha=param_config["lambda_l1"],
reg_lambda=param_config["lambda_l2"],
))
# the labels are the first n components of the SVD on the training data
labels = u[:, :self.num_components].copy()
fitted_labels = self.ss.fit_transform(labels)
self.model.fit(X_train, fitted_labels, verbose=True)
train_pred = self.ss.inverse_transform(self.model.predict(X_train))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
val_pred = self.ss.inverse_transform(self.model.predict(X_val))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
residuals = u[:, :self.num_components]@np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :] - np.stack(y_train)
# self.kernel = gaussian_kde(residuals.T+np.random.randn(*residuals.T.shape)*1e-8)
# metrics for final prediction
train_pred_final = np.array(train_pred)
val_pred_final = np.array(val_pred)
y_train_final = y_train
y_val_final = y_val
train_metrics = utils.evaluate_learning_curve_metrics(y_train_final, train_pred_final, prediction_is_first_arg=False)
valid_metrics = utils.evaluate_learning_curve_metrics(y_val_final, val_pred_final, prediction_is_first_arg=False)
logging.info('train metrics: %s', train_metrics)
logging.info('valid metrics: %s', valid_metrics)
return valid_metrics
def test(self):
X_test, y_test, _ = self.load_dataset(dataset_type='test', use_full_lc=True)
test_pred = self.ss.inverse_transform(self.model.predict(X_test))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
y_test_final = y_test
test_pred_final = np.array(test_pred)
test_metrics = utils.evaluate_learning_curve_metrics(y_test_final, test_pred_final, prediction_is_first_arg=False)
logging.info('test metrics %s', test_metrics)
return test_metrics
def validate(self):
X_val, y_val, _ = self.load_dataset(dataset_type='val', use_full_lc=True)
val_pred = self.ss.inverse_transform(self.model.predict(X_val))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
y_val_final = y_val
val_pred_final = np.array(val_pred)
valid_metrics = utils.evaluate_learning_curve_metrics(y_val_final, val_pred_final, prediction_is_first_arg=False)
logging.info('test metrics %s', valid_metrics)
return valid_metrics
def save(self):
save_list = [self.model, self.ss, self.svd_s, self.svd_vh, self.num_components]
# save_list = [self.model, self.kernel, self.ss, self.svd_s, self.svd_vh, self.num_components]
joblib.dump(save_list, os.path.join(self.log_dir, 'surrogate_model.model'))
def load(self, model_path):
model, ss, svd_s, svd_vh, num_components = joblib.load(model_path)
self.model = model
self.ss = ss
self.svd_s = svd_s
self.svd_vh = svd_vh
self.num_components = num_components
def evaluate(self, result_paths):
X_test, y_test, _ = self.load_dataset(dataset_type='test', use_full_lc=True)
test_pred = self.ss.inverse_transform(self.model.predict(X_test))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
y_test_final = y_test
test_pred_final = np.array(test_pred)
test_metrics = utils.evaluate_learning_curve_metrics(y_test_final, test_pred_final, prediction_is_first_arg=False)
return test_metrics, test_pred, y_test
def query(self, config_dict, search_space='darts', use_noise=False):
if search_space == 'darts':
config_space_instance = self.config_loader.query_config_dict(config_dict)
X = config_space_instance.get_array().reshape(1, -1)
idx = np.isnan(X)
X[idx] = -1
X = X.reshape(1, -1)
else:
X = np.array([config_dict])
ypred = self.ss.inverse_transform(self.model.predict(X))\
@ np.diag(self.svd_s[:self.num_components])@self.svd_vh[:self.num_components, :]
        if use_noise:
            # NOTE: self.kernel is only set when the commented-out gaussian_kde
            # fit in train() (and the matching save/load entries) is re-enabled;
            # as written, this branch raises AttributeError.
            noise = self.kernel.resample(ypred.shape[0])
            return ypred[0] + noise[0]
return ypred[0]
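# --- Editor's hedged illustration (not part of the surrogate model) ---
# SVDXGBModel compresses full learning curves with a truncated SVD, regresses
# the first num_components left-singular vectors, and reconstructs curves as
# u_k @ diag(s_k) @ vh_k. A numpy-only sketch of that reconstruction on dummy
# data (shapes only, no claim about accuracy):
#
#   Y = np.random.rand(50, 100)                  # 50 dummy curves, 100 epochs
#   u, s, vh = np.linalg.svd(Y, full_matrices=False)
#   k = 5
#   Y_k = u[:, :k] @ np.diag(s[:k]) @ vh[:k, :]  # rank-k approximation
#   assert Y_k.shape == Y.shape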
| 40.980392
| 145
| 0.676236
|
e6229fc27c8a06d4d92bd7c6f797a3663bbc1115
| 3,085
|
py
|
Python
|
preprocess/init_rw.py
|
WWW2022PAGG/PAGG
|
f3eddec9157d1b34c100883193221d64c26be7ee
|
[
"MIT"
] | null | null | null |
preprocess/init_rw.py
|
WWW2022PAGG/PAGG
|
f3eddec9157d1b34c100883193221d64c26be7ee
|
[
"MIT"
] | null | null | null |
preprocess/init_rw.py
|
WWW2022PAGG/PAGG
|
f3eddec9157d1b34c100883193221d64c26be7ee
|
[
"MIT"
] | null | null | null |
import numpy as np
import json
import time
from pathlib import Path
from dataset import PlanetoidData
from torch_geometric.utils import from_scipy_sparse_matrix
import torch
import compute_merw as rw
import scipy
import argparse
from scipy.sparse import csr_matrix
import warnings
warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser()
parser.add_argument('-data', '--data_name',
# action='append', nargs='*',
type=str, default='cora') # , 'citeseer']
args = parser.parse_args()
data_name = args.data_name
def load_data_ranked(name):
datasets = json.load(
open("/home/syf/workspace/jupyters/configs/dataset.json"))
dataset_run = datasets[name]["dataset"]
dataset_path = datasets[name]["dataset_path"][0]
# dataset_path = "/home/syf/workspace/jupyters" / Path(dataset_path)
val_size = datasets[name]["val_size"]
dataset = PlanetoidData(
dataset_str=dataset_run, dataset_path=dataset_path, val_size=val_size
)
# features = dataset._sparse_data["features"]
adj = dataset._sparse_data["sparse_adj"]
n = adj.shape[0]
labels = dataset._dense_data["y_all"]
# adj = adj + scipy.sparse.eye(n)
edge_index = from_scipy_sparse_matrix(adj)[0] # indices + edge_weight
# x = np.array( features.todense() )
edge_index = np.array(edge_index)
y = torch.tensor(np.argmax(labels, 1), dtype=torch.long)
return edge_index, adj, y
if __name__ == '__main__':
old_datasets = ["cora", "pubmed", "citeseer", "cornell"]
for data_name in [
"cornell",
"cora",
# 'Nba',
"citeseer",
"pubmed",
# 'Electronics',
# 'bgp',
]:
if data_name in old_datasets:
edge_index, adj, y = load_data_ranked(data_name)
else:
y = np.load(f"/data/syf/{data_name}/y.npy")
edge_index = np.load(f"/data/syf/{data_name}/edge_index.npy")
row = edge_index[0]
col = edge_index[1]
data = np.ones(edge_index.shape[-1])
adj = csr_matrix((data, (row, col)),
shape=(y.shape[0], y.shape[0]))
n = y.shape[0]
# adj = adj + scipy.sparse.eye(n) # with self-loop or not
start = time.time()
start_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(start))
print("calculating", start_time)
# print(type(adj))
P_merw, _, _, _ = rw.compute_merw(adj)
M = edge_index.shape[1]
cal_end = time.time()
print("saving", (cal_end-start)/60, (cal_end-start)/3600)
file = open(f"edge_input/{data_name}_nsl.in", "w")
print(y.shape[0], edge_index.shape[1]*2, file=file)
for i in range(M):
u, v = edge_index[0, i], edge_index[1, i]
print(u, v, P_merw[u, v], file=file)
print(v, u, P_merw[v, u], file=file)
end = time.time()
end_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(end))
print("over", (end-start)/60, (end-start)/3600, end_time)
| 34.277778
| 77
| 0.603566
|
fa1497e8baa8bcc5bb270a9c6e78876648c1cea3
| 10,893
|
py
|
Python
|
burst/lcloud.py
|
eye0inc/burst
|
c51290a8c52323714735f7f38a7822011d2db7cd
|
[
"Apache-2.0"
] | null | null | null |
burst/lcloud.py
|
eye0inc/burst
|
c51290a8c52323714735f7f38a7822011d2db7cd
|
[
"Apache-2.0"
] | null | null | null |
burst/lcloud.py
|
eye0inc/burst
|
c51290a8c52323714735f7f38a7822011d2db7cd
|
[
"Apache-2.0"
] | null | null | null |
import os, sys, time, argparse, json
import yaml
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeAuthSSHKey
from libcloud.common.google import ResourceNotFoundError
from easydict import EasyDict as dictobj
from burst.verbos import vprint
config = dictobj()
#
# for now providers are EC2 or GCE
#
def init(conf = None):
#init is a one-time thang
if 'driver' in config:
return
if conf == None:
conf = {}
yam = conf.get('configfile', os.environ['HOME'] + "/.burst/config.yml")
if os.path.exists(yam):
#FIXME: check for local overriding .burst
f = open(yam)
yconf = yaml.load(f, Loader=yaml.FullLoader)
f.close()
if 'compute_config' in conf:
compute_config = conf['compute_config']
else:
compute_config = yconf['compute']['settings']['default_compute']
#this got a bit strained. sorry
storage_config = None
if 'storage_config' in conf: #if storage_config passed in, use
storage_config = conf['storage_config']
else:
if 'storage' in yconf: #otherwise check in config.yml
storage_config = yconf['storage']['settings']['default_storage']
if storage_config: #if it exists,
storage = yconf['storage']['configurations'][storage_config] #use it
storage['config'] = storage_config #and store the config name too
yconf = yconf['compute']['configurations'][compute_config]
yconf.update(yconf['settings']) #easier to deal with all attributes at top level
yconf['compute_config']=compute_config
if storage_config: #if specified,
yconf['storage'] = storage #pull storage to top level for ease
else:
vprint ("config.yml not found")
yconf = {} #dummy yconf
if 'provider' in conf:
config.provider = conf['provider']
else:
if 'provider' in yconf:
config.provider = yconf['provider']
else:
raise Exception("Configuration file %s not available. Try running:\nburst --configure" % yam)
for param in ['access', 'secret', 'region', 'project', 'default_image', 'default_size', 'default_gpu_image',
'default_gpu_size', 'default_gpu', 'storage', 'compute_config']:
if param in conf:
config[param] = conf[param]
else:
config[param] = yconf.get(param, None)
cls = get_driver(Provider[config.provider])
if config.provider == 'EC2':
config.driver = cls(config.access, config.secret, region=config.region)
elif config.provider == 'GCE':
if hasattr(config.secret, 'lower'): #string points to key file
privkeypath = config.secret
config.raw_secret = config.secret
else: #if dict, create key file
config.raw_secret = "%s.json" % config.secret['private_key_id']
privkeypath = "%s/.burst/%s.json" % (os.path.expanduser("~"), config.secret['private_key_id'])
if not os.path.exists(privkeypath):
fp = open(privkeypath, 'w')
json.dump(config.secret, fp)
fp.close()
config.driver = cls(config.access, privkeypath, datacenter=config.region, project=config.project)
else:
vprint ("ERROR: unknown cloud provider", config.provider)
def get_config():
return config
def get_server(url=None, uuid=None, name=None, conf = None):
init(conf)
nodes = config.driver.list_nodes()
if url:
node = [x for x in nodes if url in x.public_ips and x.state != 'terminated']
elif uuid:
node = [x for x in nodes if x.uuid.find(uuid)==0 and x.state != 'terminated']
elif name:
node = [x for x in nodes if x.name==name and x.state != 'terminated']
else:
return "error: specify url, uuid, or name"
return node[0] if node else None
def get_server_state(srv):
nodes = config.driver.list_nodes() #need to refresh node to get state
node = [x for x in nodes if x.uuid.find(srv.uuid)==0]
if node:
return node[0].state
vprint ("Cannot find server to determine state; assuming terminated")
return 'terminated'
def get_server_size(srv):
if config.provider=='EC2':
return srv.extra['instance_type']
elif config.provider=='GCE':
typ = srv.extra['machineType']
i = typ.rfind('/')
return typ[i+1:]
# not working now that EC2 image == AMI full name
# def get_server_image(srv):
# if config.provider=='EC2':
# pprint(srv.extra)
# return srv.extra['name']
# elif config.provider=='GCE':
# return srv.extra['image']
def start_server(srv):
result = srv.start()
if not result:
return "error starting server"
state = None
while state != 'running':
state = get_server_state(srv)
time.sleep(2)
vprint ("server state:", state)
vprint ("Waiting for public IP address to be assigned")
config.driver.wait_until_running([srv])
vprint("Public IP's:", srv.public_ips)
while len(srv.public_ips)==0 or srv.public_ips.count(None) == len(srv.public_ips): #Really? Google? [None]????
# srv = config.driver.list_nodes(ex_node_ids=[srv.id])[0]
srv = get_server(uuid=srv.uuid) #seems necessary to refresh to update state
vprint("Public IP's:", srv.public_ips)
time.sleep(5)
return srv
#
# fill in default values for size & image
#
def fix_size_and_image(size, image):
if image=="DEFAULT_IMAGE":
image = config.default_image
if size=="DEFAULT_SIZE":
size = config.default_size
if image=="DEFAULT_GPU_IMAGE":
image = config.default_gpu_image
if size=="DEFAULT_GPU_SIZE":
size = config.default_gpu_size
return size, image
def launch_server(name, size=None, image=None, pubkey=None, conf = None, user=None, gpus=None):
init(conf)
size, image = fix_size_and_image(size, image)
image_full_path = image
if config.provider=='EC2':
images = config.driver.list_images(ex_filters={'name': image})
elif config.provider=='GCE':
#note: GCE libcloud driver list_images is hella borke, list is incomplete so...
images = []
for proj in ["deeplearning-platform-release", "ubuntu-os-cloud"]:
try:
im = config.driver.ex_get_image(image, ex_project_list=proj)
images = [im]
break
except ResourceNotFoundError:
pass
else:
ims = config.driver.list_images()
images = [x for x in ims if x.name == image]
if not images:
raise Exception("Image %s not found" % image)
image = images[0]
sizes = [x for x in config.driver.list_sizes() if x.name == size]
if not sizes:
raise Exception("Instance size %s not found" % size)
size = sizes[0]
vprint ("Launching instance image=%s, id=%s, session=%s, type=%s ram=%s disk=%s" % (image_full_path, image.id, name, size.id, size.ram, size.disk))
if pubkey:
if config.provider == 'EC2': #Everybody makes it up
auth = NodeAuthSSHKey(pubkey)
node = config.driver.create_node(name, size, image, auth=auth)
elif config.provider == 'GCE':
meta = {
'items': [
{
'key': 'sshKeys',
'value': '%s: %s' % (user, pubkey)
}
]
}
if gpus:
vprint ("Launching with GPU")
node = config.driver.create_node(name, size, image, ex_metadata=meta, ex_accelerator_type=config.default_gpu,
ex_accelerator_count=1, ex_on_host_maintenance="TERMINATE")
else:
vprint ("Launching without GPU")
node = config.driver.create_node(name, size, image, ex_metadata=meta)
else:
raise Exception("Unsupported clown provider: %s" % config.provider)
else:
node = config.driver.create_node(name, size, image)
vprint ("Waiting for public IP address to be active")
config.driver.wait_until_running([node])
while len(node.public_ips)==0:
# node = config.driver.list_nodes(ex_node_ids=[node.id])[0] #refresh node -- is this really necessary
node = get_server(uuid=node.uuid) #seems necessary to refresh to update state
vprint("Public IP's:", node.public_ips)
time.sleep(5)
return node
def stop_server(srv):
result = srv.stop_node()
if not result:
return "error stopping server"
state = None
while state != 'stopped':
state = get_server_state(srv)
time.sleep(2)
vprint ("server state:", state)
return "success"
def terminate_server(srv):
result = config.driver.destroy_node(srv)
if not result:
return "error terminating server"
state = None
while state != 'terminated':
state = get_server_state(srv)
time.sleep(2)
vprint ("server state:", state)
return "success"
def list_servers(name, conf = None, terminated=True):
init(conf)
ret = []
nodes = config.driver.list_nodes()
for x in nodes:
x = get_server(uuid=x.uuid) #seems necessary to refresh to update state
if not x:
continue
# print ("DBG", terminated, x.state)
if (not terminated) and (x.state=='terminated'): #don't include terminated
continue
if x.name==name:
ret.append([x])
img = x.extra['image_id'] if config.provider == 'EC2' else x.image
if img == config.default_image:
img += " (default_image, no gpu)"
elif img == config.default_gpu_image:
img += " (default_gpu_image)"
s = "IMAGE: %s STATE: %s IP's: %s ID: %s/%s" %(img, x.state, x.public_ips, config.provider, x.id)
ret[-1].append(s)
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--url")
parser.add_argument("--uuid")
parser.add_argument("--name")
args, unknown = parser.parse_known_args()
n = get_server(args.url, args.uuid, args.name)
pprint (n)
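# --- Editor's hedged illustration (not part of burst) ---
# fix_size_and_image() only swaps the DEFAULT_* sentinels for the configured
# defaults; with hypothetical config values default_size='t2.micro' and
# default_image='ubuntu-20.04' it behaves like:
#
#   fix_size_and_image('DEFAULT_SIZE', 'DEFAULT_IMAGE')
#   # -> ('t2.micro', 'ubuntu-20.04')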
| 38.62766
| 151
| 0.584228
|
565961e1b8c5391a5674c944eff3349f69447085
| 34,797
|
py
|
Python
|
homeassistant/components/cast/media_player.py
|
wstewart15/core
|
854d7d49367d560406d6099a5ba56a0be6c0b9c7
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/cast/media_player.py
|
wstewart15/core
|
854d7d49367d560406d6099a5ba56a0be6c0b9c7
|
[
"Apache-2.0"
] | 7
|
2020-08-05T07:22:37.000Z
|
2020-10-12T10:43:30.000Z
|
homeassistant/components/cast/media_player.py
|
wstewart15/core
|
854d7d49367d560406d6099a5ba56a0be6c0b9c7
|
[
"Apache-2.0"
] | 1
|
2022-01-03T06:44:44.000Z
|
2022-01-03T06:44:44.000Z
|
"""Provide functionality to interact with Cast devices on the network."""
from __future__ import annotations
import asyncio
from contextlib import suppress
from datetime import datetime
import json
import logging
import pychromecast
from pychromecast.controllers.homeassistant import HomeAssistantController
from pychromecast.controllers.multizone import MultizoneManager
from pychromecast.controllers.receiver import VOLUME_CONTROL_TYPE_FIXED
from pychromecast.quick_play import quick_play
from pychromecast.socket_client import (
CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED,
)
import voluptuous as vol
import yarl
from homeassistant.components import media_source, zeroconf
from homeassistant.components.media_player import (
BrowseError,
BrowseMedia,
MediaPlayerEntity,
async_process_play_media_url,
)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_EXTRA,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CAST_APP_ID_HOMEASSISTANT_LOVELACE,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.network import NoURLAvailableError, get_url, is_hass_url
import homeassistant.util.dt as dt_util
from homeassistant.util.logging import async_create_catching_coro
from .const import (
ADDED_CAST_DEVICES_KEY,
CAST_MULTIZONE_MANAGER_KEY,
CONF_IGNORE_CEC,
CONF_UUID,
DOMAIN as CAST_DOMAIN,
SIGNAL_CAST_DISCOVERED,
SIGNAL_CAST_REMOVED,
SIGNAL_HASS_CAST_SHOW_VIEW,
)
from .discovery import setup_internal_discovery
from .helpers import CastStatusListener, ChromecastInfo, ChromeCastZeroconf
_LOGGER = logging.getLogger(__name__)
APP_IDS_UNRELIABLE_MEDIA_INFO = ("Netflix",)
CAST_SPLASH = "https://www.home-assistant.io/images/cast/splash.png"
SUPPORT_CAST = SUPPORT_PLAY_MEDIA | SUPPORT_TURN_OFF
ENTITY_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_UUID): cv.string,
vol.Optional(CONF_IGNORE_CEC): vol.All(cv.ensure_list, [cv.string]),
}
),
)
@callback
def _async_create_cast_device(hass: HomeAssistant, info: ChromecastInfo):
"""Create a CastDevice Entity from the chromecast object.
Returns None if the cast device has already been added.
"""
_LOGGER.debug("_async_create_cast_device: %s", info)
if info.uuid is None:
_LOGGER.error("_async_create_cast_device uuid none: %s", info)
return None
# Found a cast with UUID
added_casts = hass.data[ADDED_CAST_DEVICES_KEY]
if info.uuid in added_casts:
# Already added this one, the entity will take care of moved hosts
# itself
return None
# -> New cast device
added_casts.add(info.uuid)
if info.is_dynamic_group:
# This is a dynamic group, do not add it but connect to the service.
group = DynamicCastGroup(hass, info)
group.async_setup()
return None
return CastDevice(info)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Cast from a config entry."""
hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set())
# Import CEC IGNORE attributes
pychromecast.IGNORE_CEC += config_entry.data.get(CONF_IGNORE_CEC) or []
wanted_uuids = config_entry.data.get(CONF_UUID) or None
@callback
def async_cast_discovered(discover: ChromecastInfo) -> None:
"""Handle discovery of a new chromecast."""
# If wanted_uuids is set, we're only accepting specific cast devices identified
# by UUID
if wanted_uuids is not None and str(discover.uuid) not in wanted_uuids:
# UUID not matching, ignore.
return
cast_device = _async_create_cast_device(hass, discover)
if cast_device is not None:
async_add_entities([cast_device])
async_dispatcher_connect(hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered)
ChromeCastZeroconf.set_zeroconf(await zeroconf.async_get_instance(hass))
hass.async_add_executor_job(setup_internal_discovery, hass, config_entry)
class CastDevice(MediaPlayerEntity):
"""Representation of a Cast device on the network.
This class is the holder of the pychromecast.Chromecast object and its
socket client. It therefore handles all reconnects and audio group changing
"elected leader" itself.
"""
_attr_should_poll = False
_attr_media_image_remotely_accessible = True
def __init__(self, cast_info: ChromecastInfo) -> None:
"""Initialize the cast device."""
self._cast_info = cast_info
self._chromecast: pychromecast.Chromecast | None = None
self.cast_status = None
self.media_status = None
self.media_status_received = None
self.mz_media_status: dict[str, pychromecast.controllers.media.MediaStatus] = {}
self.mz_media_status_received: dict[str, datetime] = {}
self.mz_mgr = None
self._attr_available = False
self._status_listener: CastStatusListener | None = None
self._hass_cast_controller: HomeAssistantController | None = None
self._add_remove_handler = None
self._cast_view_remove_handler = None
self._attr_unique_id = str(cast_info.uuid)
self._attr_name = cast_info.friendly_name
if cast_info.cast_info.model_name != "Google Cast Group":
self._attr_device_info = DeviceInfo(
identifiers={(CAST_DOMAIN, str(cast_info.uuid).replace("-", ""))},
manufacturer=str(cast_info.cast_info.manufacturer),
model=cast_info.cast_info.model_name,
name=str(cast_info.friendly_name),
)
async def async_added_to_hass(self):
"""Create chromecast object when added to hass."""
self._add_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_DISCOVERED, self._async_cast_discovered
)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_stop)
self.async_set_cast_info(self._cast_info)
# asyncio.create_task is used to avoid delaying startup wrapup if the device
# is discovered already during startup but then fails to respond
asyncio.create_task(
async_create_catching_coro(self.async_connect_to_chromecast())
)
self._cast_view_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect Chromecast object when removed."""
await self._async_disconnect()
if self._add_remove_handler:
self._add_remove_handler()
self._add_remove_handler = None
if self._cast_view_remove_handler:
self._cast_view_remove_handler()
self._cast_view_remove_handler = None
def async_set_cast_info(self, cast_info):
"""Set the cast information."""
self._cast_info = cast_info
async def async_connect_to_chromecast(self):
"""Set up the chromecast object."""
_LOGGER.debug(
"[%s %s] Connecting to cast device by service %s",
self.entity_id,
self._cast_info.friendly_name,
self._cast_info.cast_info.services,
)
chromecast = await self.hass.async_add_executor_job(
pychromecast.get_chromecast_from_cast_info,
self._cast_info.cast_info,
ChromeCastZeroconf.get_zeroconf(),
)
self._chromecast = chromecast
if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]
self._status_listener = CastStatusListener(self, chromecast, self.mz_mgr)
self._attr_available = False
self.cast_status = chromecast.status
self.media_status = chromecast.media_controller.status
self._chromecast.start()
self.async_write_ha_state()
async def _async_disconnect(self):
"""Disconnect Chromecast object if it is set."""
if self._chromecast is None:
# Can't disconnect if not connected.
return
_LOGGER.debug(
"[%s %s] Disconnecting from chromecast socket",
self.entity_id,
self._cast_info.friendly_name,
)
self._attr_available = False
self.async_write_ha_state()
await self.hass.async_add_executor_job(self._chromecast.disconnect)
self._invalidate()
self.async_write_ha_state()
def _invalidate(self):
"""Invalidate some attributes."""
self._chromecast = None
self.cast_status = None
self.media_status = None
self.media_status_received = None
self.mz_media_status = {}
self.mz_media_status_received = {}
self.mz_mgr = None
self._hass_cast_controller = None
if self._status_listener is not None:
self._status_listener.invalidate()
self._status_listener = None
# ========== Callbacks ==========
def new_cast_status(self, cast_status):
"""Handle updates of the cast status."""
self.cast_status = cast_status
self._attr_volume_level = cast_status.volume_level if cast_status else None
self._attr_is_volume_muted = (
cast_status.volume_muted if self.cast_status else None
)
self.schedule_update_ha_state()
def new_media_status(self, media_status):
"""Handle updates of the media status."""
if (
media_status
and media_status.player_is_idle
and media_status.idle_reason == "ERROR"
):
external_url = None
internal_url = None
tts_base_url = None
url_description = ""
if "tts" in self.hass.config.components:
# pylint: disable=[import-outside-toplevel]
from homeassistant.components import tts
with suppress(KeyError): # base_url not configured
tts_base_url = tts.get_base_url(self.hass)
with suppress(NoURLAvailableError): # external_url not configured
external_url = get_url(self.hass, allow_internal=False)
with suppress(NoURLAvailableError): # internal_url not configured
internal_url = get_url(self.hass, allow_external=False)
if media_status.content_id:
if tts_base_url and media_status.content_id.startswith(tts_base_url):
url_description = f" from tts.base_url ({tts_base_url})"
if external_url and media_status.content_id.startswith(external_url):
url_description = f" from external_url ({external_url})"
if internal_url and media_status.content_id.startswith(internal_url):
url_description = f" from internal_url ({internal_url})"
_LOGGER.error(
"Failed to cast media %s%s. Please make sure the URL is: "
"Reachable from the cast device and either a publicly resolvable "
"hostname or an IP address",
media_status.content_id,
url_description,
)
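            # Hedged note (not part of the original module): the usual culprit is a
            # content_id the cast device cannot reach, e.g. an internal hostname such
            # as "http://homeassistant.local:8123/media/clip.mp3" may fail to resolve
            # on the device, while a LAN IP like "http://192.168.1.10:8123/..." or a
            # publicly resolvable hostname generally works. Hostnames and addresses
            # here are illustrative placeholders only.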
self.media_status = media_status
self.media_status_received = dt_util.utcnow()
self.schedule_update_ha_state()
def new_connection_status(self, connection_status):
"""Handle updates of connection status."""
_LOGGER.debug(
"[%s %s] Received cast device connection status: %s",
self.entity_id,
self._cast_info.friendly_name,
connection_status.status,
)
if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
self._attr_available = False
self._invalidate()
self.schedule_update_ha_state()
return
new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
if new_available != self.available:
# Connection status callbacks happen often when disconnected.
# Only update state when availability changed to put less pressure
# on state machine.
_LOGGER.debug(
"[%s %s] Cast device availability changed: %s",
self.entity_id,
self._cast_info.friendly_name,
connection_status.status,
)
self._attr_available = new_available
self.schedule_update_ha_state()
def multizone_new_media_status(self, group_uuid, media_status):
"""Handle updates of audio group media status."""
_LOGGER.debug(
"[%s %s] Multizone %s media status: %s",
self.entity_id,
self._cast_info.friendly_name,
group_uuid,
media_status,
)
self.mz_media_status[group_uuid] = media_status
self.mz_media_status_received[group_uuid] = dt_util.utcnow()
self.schedule_update_ha_state()
# ========== Service Calls ==========
def _media_controller(self):
"""
Return media controller.
        First try our own cast's controller, then the groups our cast is a member of.
"""
media_status = self.media_status
media_controller = self._chromecast.media_controller
if media_status is None or media_status.player_state == "UNKNOWN":
groups = self.mz_media_status
for k, val in groups.items():
if val and val.player_state != "UNKNOWN":
media_controller = self.mz_mgr.get_multizone_mediacontroller(k)
break
return media_controller
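    # Hedged note (not part of the original module): mz_media_status is keyed by the
    # UUID of each audio group this device belongs to, so the fallback above amounts
    # to asking "is any group I am a member of currently reporting usable media?",
    # e.g. a populated map might roughly look like
    #
    #     {"<group-uuid>": <MediaStatus player_state="PLAYING" ...>}
    #
    # where "<group-uuid>" is a placeholder, not a real identifier.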
def turn_on(self):
"""Turn on the cast device."""
if not self._chromecast.is_idle:
# Already turned on
return
if self._chromecast.app_id is not None:
# Quit the previous app before starting splash screen or media player
self._chromecast.quit_app()
        # The only way we can turn the Chromecast on is by launching an app
if self._chromecast.cast_type == pychromecast.const.CAST_TYPE_CHROMECAST:
self._chromecast.play_media(CAST_SPLASH, pychromecast.STREAM_TYPE_BUFFERED)
else:
self._chromecast.start_app(pychromecast.config.APP_MEDIA_RECEIVER)
def turn_off(self):
"""Turn off the cast device."""
self._chromecast.quit_app()
def mute_volume(self, mute):
"""Mute the volume."""
self._chromecast.set_volume_muted(mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._chromecast.set_volume(volume)
def media_play(self):
"""Send play command."""
media_controller = self._media_controller()
media_controller.play()
def media_pause(self):
"""Send pause command."""
media_controller = self._media_controller()
media_controller.pause()
def media_stop(self):
"""Send stop command."""
media_controller = self._media_controller()
media_controller.stop()
def media_previous_track(self):
"""Send previous track command."""
media_controller = self._media_controller()
media_controller.queue_prev()
def media_next_track(self):
"""Send next track command."""
media_controller = self._media_controller()
media_controller.queue_next()
def media_seek(self, position):
"""Seek the media to a specific location."""
media_controller = self._media_controller()
media_controller.seek(position)
async def _async_root_payload(self, content_filter):
"""Generate root node."""
children = []
# Add media browsers
for platform in self.hass.data[CAST_DOMAIN].values():
children.extend(
await platform.async_get_media_browser_root_object(
self.hass, self._chromecast.cast_type
)
)
# Add media sources
try:
result = await media_source.async_browse_media(
self.hass, None, content_filter=content_filter
)
children.extend(result.children)
except BrowseError:
if not children:
raise
# If there's only one media source, resolve it
if len(children) == 1 and children[0].can_expand:
return await self.async_browse_media(
children[0].media_content_type,
children[0].media_content_id,
)
return BrowseMedia(
title="Cast",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="",
media_content_type="",
can_play=False,
can_expand=True,
children=sorted(children, key=lambda c: c.title),
)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
content_filter = None
if self._chromecast.cast_type in (
pychromecast.const.CAST_TYPE_AUDIO,
pychromecast.const.CAST_TYPE_GROUP,
):
def audio_content_filter(item):
"""Filter non audio content."""
return item.media_content_type.startswith("audio/")
content_filter = audio_content_filter
if media_content_id is None:
return await self._async_root_payload(content_filter)
for platform in self.hass.data[CAST_DOMAIN].values():
browse_media = await platform.async_browse_media(
self.hass,
media_content_type,
media_content_id,
self._chromecast.cast_type,
)
if browse_media:
return browse_media
return await media_source.async_browse_media(
self.hass, media_content_id, content_filter=content_filter
)
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
# Handle media_source
if media_source.is_media_source_id(media_id):
sourced_media = await media_source.async_resolve_media(self.hass, media_id)
media_type = sourced_media.mime_type
media_id = sourced_media.url
extra = kwargs.get(ATTR_MEDIA_EXTRA, {})
metadata = extra.get("metadata")
# Handle media supported by a known cast app
if media_type == CAST_DOMAIN:
try:
app_data = json.loads(media_id)
if metadata is not None:
app_data["metadata"] = extra.get("metadata")
except json.JSONDecodeError:
_LOGGER.error("Invalid JSON in media_content_id")
raise
# Special handling for passed `app_id` parameter. This will only launch
# an arbitrary cast app, generally for UX.
if "app_id" in app_data:
app_id = app_data.pop("app_id")
_LOGGER.info("Starting Cast app by ID %s", app_id)
await self.hass.async_add_executor_job(
self._chromecast.start_app, app_id
)
if app_data:
_LOGGER.warning(
"Extra keys %s were ignored. Please use app_name to cast media",
app_data.keys(),
)
return
app_name = app_data.pop("app_name")
try:
await self.hass.async_add_executor_job(
quick_play, self._chromecast, app_name, app_data
)
except NotImplementedError:
_LOGGER.error("App %s not supported", app_name)
return
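        # Hedged illustration (not part of the original module): when media_type is
        # the cast domain, media_id is expected to be a JSON document. The exact keys
        # depend on the target app, but payloads of roughly the following shapes
        # (placeholder values) would reach the branches above:
        #
        #     {"app_name": "youtube", "media_id": "<video id>"}   # quick_play path
        #     {"app_id": "<receiver app id>"}                     # launch-app-only path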
# Try the cast platforms
for platform in self.hass.data[CAST_DOMAIN].values():
result = await platform.async_play_media(
self.hass, self.entity_id, self._chromecast, media_type, media_id
)
if result:
return
# If media ID is a relative URL, we serve it from HA.
media_id = async_process_play_media_url(self.hass, media_id)
# Configure play command for when playing a HLS stream
if is_hass_url(self.hass, media_id):
parsed = yarl.URL(media_id)
if parsed.path.startswith("/api/hls/"):
extra = {
**extra,
"stream_type": "LIVE",
"media_info": {
"hlsVideoSegmentFormat": "fmp4",
},
}
# Default to play with the default media receiver
app_data = {"media_id": media_id, "media_type": media_type, **extra}
await self.hass.async_add_executor_job(
quick_play, self._chromecast, "default_media_receiver", app_data
)
def _media_status(self):
"""
Return media status.
        First try our own cast's status, then the groups our cast is a member of.
"""
media_status = self.media_status
media_status_received = self.media_status_received
if media_status is None or media_status.player_state == "UNKNOWN":
groups = self.mz_media_status
for k, val in groups.items():
if val and val.player_state != "UNKNOWN":
media_status = val
media_status_received = self.mz_media_status_received[k]
break
return (media_status, media_status_received)
@property
def state(self):
"""Return the state of the player."""
# The lovelace app loops media to prevent timing out, don't show that
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return STATE_PLAYING
if (media_status := self._media_status()[0]) is not None:
if media_status.player_is_playing:
return STATE_PLAYING
if media_status.player_is_paused:
return STATE_PAUSED
if media_status.player_is_idle:
return STATE_IDLE
if self.app_id is not None and self.app_id != pychromecast.IDLE_APP_ID:
if self.app_id in APP_IDS_UNRELIABLE_MEDIA_INFO:
# Some apps don't report media status, show the player as playing
return STATE_PLAYING
return STATE_IDLE
if self._chromecast is not None and self._chromecast.is_idle:
return STATE_OFF
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
# The lovelace app loops media to prevent timing out, don't show that
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return None
media_status = self._media_status()[0]
return media_status.content_id if media_status else None
@property
def media_content_type(self):
"""Content type of current playing media."""
# The lovelace app loops media to prevent timing out, don't show that
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return None
if (media_status := self._media_status()[0]) is None:
return None
if media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
if media_status.media_is_movie:
return MEDIA_TYPE_MOVIE
if media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
# The lovelace app loops media to prevent timing out, don't show that
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return None
media_status = self._media_status()[0]
return media_status.duration if media_status else None
@property
def media_image_url(self):
"""Image url of current playing media."""
if (media_status := self._media_status()[0]) is None:
return None
images = media_status.images
return images[0].url if images and images[0].url else None
@property
def media_title(self):
"""Title of current playing media."""
media_status = self._media_status()[0]
return media_status.title if media_status else None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
media_status = self._media_status()[0]
return media_status.artist if media_status else None
@property
def media_album_name(self):
"""Album of current playing media (Music track only)."""
media_status = self._media_status()[0]
return media_status.album_name if media_status else None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
media_status = self._media_status()[0]
return media_status.album_artist if media_status else None
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
media_status = self._media_status()[0]
return media_status.track if media_status else None
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
media_status = self._media_status()[0]
return media_status.series_title if media_status else None
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
media_status = self._media_status()[0]
return media_status.season if media_status else None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
media_status = self._media_status()[0]
return media_status.episode if media_status else None
@property
def app_id(self):
"""Return the ID of the current running app."""
return self._chromecast.app_id if self._chromecast else None
@property
def app_name(self):
"""Name of the current running app."""
return self._chromecast.app_display_name if self._chromecast else None
@property
def supported_features(self):
"""Flag media player features that are supported."""
support = SUPPORT_CAST
media_status = self._media_status()[0]
if self._chromecast and self._chromecast.cast_type in (
pychromecast.const.CAST_TYPE_CHROMECAST,
pychromecast.const.CAST_TYPE_AUDIO,
):
support |= SUPPORT_TURN_ON
if (
self.cast_status
and self.cast_status.volume_control_type != VOLUME_CONTROL_TYPE_FIXED
):
support |= SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET
if media_status and self.app_id != CAST_APP_ID_HOMEASSISTANT_LOVELACE:
support |= SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP
if media_status.supports_queue_next:
support |= SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK
if media_status.supports_seek:
support |= SUPPORT_SEEK
if "media_source" in self.hass.config.components:
support |= SUPPORT_BROWSE_MEDIA
return support
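    # Hedged note (not part of the original module): the returned value is a bit
    # mask, so callers can test an individual capability with a bitwise AND, e.g.
    #
    #     can_seek = bool(entity.supported_features & SUPPORT_SEEK)
    #
    # where `entity` stands for any CastDevice instance.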
@property
def media_position(self):
"""Position of current playing media in seconds."""
# The lovelace app loops media to prevent timing out, don't show that
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return None
media_status = self._media_status()[0]
if media_status is None or not (
media_status.player_is_playing
or media_status.player_is_paused
or media_status.player_is_idle
):
return None
return media_status.current_time
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:
return None
return self._media_status()[1]
async def _async_cast_discovered(self, discover: ChromecastInfo):
"""Handle discovery of new Chromecast."""
if self._cast_info.uuid != discover.uuid:
# Discovered is not our device.
return
_LOGGER.debug("Discovered chromecast with same UUID: %s", discover)
self.async_set_cast_info(discover)
async def _async_stop(self, event):
"""Disconnect socket on Home Assistant stop."""
await self._async_disconnect()
def _handle_signal_show_view(
self,
controller: HomeAssistantController,
entity_id: str,
view_path: str,
url_path: str | None,
):
"""Handle a show view signal."""
if entity_id != self.entity_id or self._chromecast is None:
return
if self._hass_cast_controller is None:
self._hass_cast_controller = controller
self._chromecast.register_handler(controller)
self._hass_cast_controller.show_lovelace_view(view_path, url_path)
class DynamicCastGroup:
"""Representation of a Cast device on the network - for dynamic cast groups."""
def __init__(self, hass, cast_info: ChromecastInfo):
"""Initialize the cast device."""
self.hass = hass
self._cast_info = cast_info
self._chromecast: pychromecast.Chromecast | None = None
self.mz_mgr = None
self._status_listener: CastStatusListener | None = None
self._add_remove_handler = None
self._del_remove_handler = None
def async_setup(self):
"""Create chromecast object."""
self._add_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_DISCOVERED, self._async_cast_discovered
)
self._del_remove_handler = async_dispatcher_connect(
self.hass, SIGNAL_CAST_REMOVED, self._async_cast_removed
)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_stop)
self.async_set_cast_info(self._cast_info)
self.hass.async_create_task(
async_create_catching_coro(self.async_connect_to_chromecast())
)
async def async_tear_down(self) -> None:
"""Disconnect Chromecast object."""
await self._async_disconnect()
if self._cast_info.uuid is not None:
# Remove the entity from the added casts so that it can dynamically
# be re-added again.
self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid)
if self._add_remove_handler:
self._add_remove_handler()
self._add_remove_handler = None
if self._del_remove_handler:
self._del_remove_handler()
self._del_remove_handler = None
def async_set_cast_info(self, cast_info):
"""Set the cast information and set up the chromecast object."""
self._cast_info = cast_info
async def async_connect_to_chromecast(self):
"""Set the cast information and set up the chromecast object."""
_LOGGER.debug(
"[%s %s] Connecting to cast device by service %s",
"Dynamic group",
self._cast_info.friendly_name,
self._cast_info.cast_info.services,
)
chromecast = await self.hass.async_add_executor_job(
pychromecast.get_chromecast_from_cast_info,
self._cast_info.cast_info,
ChromeCastZeroconf.get_zeroconf(),
)
self._chromecast = chromecast
if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:
self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()
self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]
self._status_listener = CastStatusListener(self, chromecast, self.mz_mgr, True)
self._chromecast.start()
async def _async_disconnect(self):
"""Disconnect Chromecast object if it is set."""
if self._chromecast is None:
# Can't disconnect if not connected.
return
_LOGGER.debug(
"[%s %s] Disconnecting from chromecast socket",
"Dynamic group",
self._cast_info.friendly_name,
)
await self.hass.async_add_executor_job(self._chromecast.disconnect)
self._invalidate()
def _invalidate(self):
"""Invalidate some attributes."""
self._chromecast = None
self.mz_mgr = None
if self._status_listener is not None:
self._status_listener.invalidate()
self._status_listener = None
async def _async_cast_discovered(self, discover: ChromecastInfo):
"""Handle discovery of new Chromecast."""
if self._cast_info.uuid != discover.uuid:
# Discovered is not our device.
return
_LOGGER.debug("Discovered dynamic group with same UUID: %s", discover)
self.async_set_cast_info(discover)
async def _async_cast_removed(self, discover: ChromecastInfo):
"""Handle removal of Chromecast."""
if self._cast_info.uuid != discover.uuid:
# Removed is not our device.
return
if not discover.cast_info.services:
# Clean up the dynamic group
_LOGGER.debug("Clean up dynamic group: %s", discover)
await self.async_tear_down()
async def _async_stop(self, event):
"""Disconnect socket on Home Assistant stop."""
await self._async_disconnect()
| 36.589905
| 88
| 0.645515
|
c40794397cf49fcc83153df678b749f00bf27755
| 1,037
|
py
|
Python
|
FreeInductionDecay/analysis/hilbert_transform.py
|
renereimann/FID_Simulation
|
40fe7f0892a5f4600d863658f748906bff050b67
|
[
"MIT"
] | null | null | null |
FreeInductionDecay/analysis/hilbert_transform.py
|
renereimann/FID_Simulation
|
40fe7f0892a5f4600d863658f748906bff050b67
|
[
"MIT"
] | null | null | null |
FreeInductionDecay/analysis/hilbert_transform.py
|
renereimann/FID_Simulation
|
40fe7f0892a5f4600d863658f748906bff050b67
|
[
"MIT"
] | 1
|
2020-04-11T04:18:31.000Z
|
2020-04-11T04:18:31.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt  # needed by plot_phase_function
from scipy.signal import hilbert
from ..units import uV, ms
class HilbertTransform(object):
def __init__(self, times, flux):
if len(times) != len(flux):
raise AttributeError("times and flux must have the same dimension")
self.N = len(flux)
self.time = times / ms
self.flux = flux / uV
self.h = hilbert(self.flux)
def real(self):
return np.real(self.h)
def imag(self):
return np.imag(self.h)
def PhaseFunction(self):
phi = np.arctan(self.imag()/self.real())
jump = np.pi*np.logical_and(phi[:-1] > 0, phi[1:] < 0)
phi += np.concatenate([[0], np.cumsum(jump)])
return self.time*ms, phi
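    # Hedged note (not part of the original module): PhaseFunction() builds a
    # monotonic phase by hand. np.arctan(imag/real) only covers (-pi/2, pi/2), so a
    # jump of pi is accumulated (via np.cumsum) every time the raw phase wraps from
    # positive back to negative. A close, but not numerically identical, alternative
    # would be the standard unwrapped angle of the analytic signal:
    #
    #     phi = np.unwrap(np.angle(self.h))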
def EnvelopeFunction(self):
return self.time*ms, np.sqrt(self.imag()**2 + self.real()**2)*uV
    def plot_phase_function(self, fig=None):
        if fig is None:
            fig, ax = plt.subplots()
        else:
            # assume the caller wants to draw on the provided figure's current axes
            ax = fig.gca()
        t, phi = self.PhaseFunction()
        ax.scatter(t, np.degrees(phi))
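# Hedged usage sketch (not part of the original module): assuming `times` and `flux`
# are equal-length arrays already carrying the package's ms/uV units, the envelope
# and phase could be extracted along these lines:
#
#     ht = HilbertTransform(times, flux)
#     t, envelope = ht.EnvelopeFunction()
#     t, phase = ht.PhaseFunction()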
| 28.805556
| 79
| 0.584378
|
69d9b8dcb1063804a274138acfd44b66ee48ab5f
| 1,616
|
py
|
Python
|
geotrek/appconfig.py
|
fossabot/Geotrek-admin
|
ea2c873511ad724c742c64d81cbf31f37dbe3093
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/appconfig.py
|
fossabot/Geotrek-admin
|
ea2c873511ad724c742c64d81cbf31f37dbe3093
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/appconfig.py
|
fossabot/Geotrek-admin
|
ea2c873511ad724c742c64d81cbf31f37dbe3093
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
from django.contrib.auth.apps import AuthConfig
from django.contrib.contenttypes.apps import ContentTypesConfig
from django.contrib.sessions.apps import SessionsConfig
from django.db.models.signals import post_migrate, pre_migrate
from django_celery_results.apps import CeleryResultConfig
from geotrek.common.utils.signals import pm_callback, check_srid_has_meter_unit
class GeotrekConfig(AppConfig):
"""
    Base class that moves tables to the right schemas and loads SQL files.
    !! WARNING !! Project apps need a subclass in geotrek.myapp.apps;
    subclasses for external apps are created here.
"""
def __init__(self, *args, **kwargs):
pre_migrate.connect(check_srid_has_meter_unit, sender=self, dispatch_uid='geotrek.core.checksrid')
super(GeotrekConfig, self).__init__(*args, **kwargs)
def ready(self):
post_migrate.connect(pm_callback, sender=self, dispatch_uid='geotrek.core.movetoschemas')
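# Hedged illustration (not part of the original module): a project app opts in by
# subclassing GeotrekConfig in its own apps.py. The module path below is an assumed
# example, not an existing Geotrek app:
#
#     class MyAppConfig(GeotrekConfig):
#         name = 'geotrek.myapp'
#         verbose_name = 'My app'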
class AuthGeotrekConfig(AuthConfig, GeotrekConfig):
"""
bind for django.contrib.auth
"""
pass
class ContenttypeGeotrekConfig(ContentTypesConfig, GeotrekConfig):
"""
bind for django.contrib.contenttype
"""
pass
class SessionsGeotrekConfig(SessionsConfig, GeotrekConfig):
pass
class AdminGeotrekConfig(AdminConfig, GeotrekConfig):
pass
class CeleryGeotrekConfig(GeotrekConfig, CeleryResultConfig):
pass
class EasyThumbnailsGeotrekConfig(GeotrekConfig):
name = 'easy_thumbnails'
verbose_name = 'Easy thumbnails'
| 29.381818
| 106
| 0.770421
|
11302a14b1d829799a07ae503a05f54d8c58f734
| 5,767
|
py
|
Python
|
python/tvm/tir/sparse.py
|
yzh119/tvm
|
19400c9967020ca822399f57de0253c3dc98845b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
python/tvm/tir/sparse.py
|
yzh119/tvm
|
19400c9967020ca822399f57de0253c3dc98845b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 9
|
2021-10-20T13:48:52.000Z
|
2021-12-09T07:14:24.000Z
|
python/tvm/tir/sparse.py
|
yzh119/tvm
|
19400c9967020ca822399f57de0253c3dc98845b
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""SparseTIR axes and SparseBuffer
"""
from typing import List, Dict, Optional
import tvm._ffi
from tvm.ir import PrimExpr
from tvm.runtime import Object, const
from tvm.tir import Var
from . import _ffi_api
from .buffer import Buffer
class Axis(Object):
"""Base class of all the sparse axes."""
@property
def name(self):
return _ffi_api.GetAxisName(self)
@property
def length(self):
return _ffi_api.GetAxisLength(self)
@property
def idtype(self):
return _ffi_api.GetAxisIndexType(self)
class DenseAxis(Axis):
pass
class SparseAxis(Axis):
pass
@tvm._ffi.register_object("tir.sparse.DenseFixedAxis")
class DenseFixedAxis(DenseAxis):
"""DenseFixedAxis node
Parameters
----------
name : str
The name of the axis
length : PrimExpr
The length of the axis
"""
name: str
length: PrimExpr
def __init__(self, name, length):
self.__init_handle_by_constructor__(
_ffi_api.DenseFixedAxis, name, length # type: ignore
)
@tvm._ffi.register_object("tir.sparse.DenseVariableAxis")
class DenseVariableAxis(DenseAxis):
"""DenseVariableAxis node
Parameters
----------
name : str
The name of the axis
length : PrimExpr
The length of the axis
indptr : Buffer
The indptr buffer of the axis
"""
name: str
length: PrimExpr
indptr: Buffer
def __init__(self, name, length, indptr):
self.__init_handle_by_constructor__(
_ffi_api.DenseVariableAxis, name, length, indptr # type: ignore
)
@tvm._ffi.register_object("tir.sparse.SparseFixedAxis")
class SparseFixedAxis(SparseAxis):
"""SparseFixedAxis node
Parameters
----------
name : str
The name of the axis
length : PrimExpr
The length of the axis
indices : Buffer
The indices buffer of the axis
num_cols : PrimExpr
        The fixed number of non-zero elements along the axis
"""
name: str
length: PrimExpr
indices: Buffer
num_cols: PrimExpr
def __init__(self, name, length, indices, num_cols):
self.__init_handle_by_constructor__(
_ffi_api.SparseFixedAxis, name, length, indices, num_cols # type: ignore
)
@tvm._ffi.register_object("tir.sparse.SparseVariableAxis")
class SparseVariableAxis(SparseAxis):
"""SparseVariableAxis node
Parameters
----------
name : str
The name of the axis
length : PrimExpr
The length of the axis
indptr : Buffer
The indptr buffer of the axis
indices : Buffer
The indices buffer of the axis
"""
name: str
length: PrimExpr
indptr: Buffer
indices: Buffer
def __init__(self, name, length, indptr, indices):
self.__init_handle_by_constructor__(
_ffi_api.SparseVariableAxis, name, length, indptr, indices # type: ignore
)
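# Hedged illustration (not part of the upstream module): for a CSR-like matrix the
# row dimension is typically a dense fixed axis and the column dimension a sparse
# variable axis backed by indptr/indices buffers. `m`, `n`, `indptr` and `indices`
# below are assumed placeholders (the buffers would be tir Buffer objects), not
# taken from this file:
#
#     i = DenseFixedAxis("i", m)
#     j = SparseVariableAxis("j", n, indptr, indices)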
@tvm._ffi.register_object("tir.sparse.AxisTree")
class AxisTree(Object):
"""AxisTree node
Parameters
----------
axis_parent_map: Dict
        A dictionary that maps an axis name to its parent axis name;
        the value is None if the axis has no parent.
"""
axis_parent_map: Dict[str, Optional[str]]
def __init__(self, axis_parent_map) -> None:
keys = list(axis_parent_map.keys())
values = list(axis_parent_map.values())
self.__init_handle_by_constructor__(
_ffi_api.AxisTree, keys, values # type:ignore
)
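# Hedged illustration (not part of the upstream module): root axes map to None and
# child axes to their parent's name, e.g. for the CSR-style axes sketched above:
#
#     tree = AxisTree({"i": None, "j": "i"})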
@tvm._ffi.register_object("tir.sparse.SparseBuffer")
class SparseBuffer(Object):
"""SparseBuffer node
Parameters
----------
axes : List[Axis]
The axes of the sparse buffer
data : Buffer
The data of the sparse buffer
name : str
The name of the sparse buffer
"""
axes: List[Axis]
data: Buffer
name: str
def __init__(self, axes, data, name):
self.__init_handle_by_constructor__(
_ffi_api.SparseBuffer, axes, data, name # type: ignore
)
@tvm._ffi.register_object("tir.sparse.SpIterVar")
class SpIterVar(Object):
"""IterVar in SparseTIR
Parameters
----------
var : Var
The var of the SpIterVar
max_extent : PrimExpr
The maximum extent of the SpIterVar
kind : int
The kind of the SpIterVar
is_reduction : bool
Whether the SpIterVar is a reduction iterator
axis : Optional[Axis]
The axis over which the SpIterVar iterates. Required to be defined
when `kind` is not `DenseFixed`
"""
var: Var
max_extent: PrimExpr
kind: int
is_reduction: bool
axis: Optional[Axis]
DenseFixed = 0
DenseVariable = 1
SparseFixed = 2
SparseVariable = 3
    def __init__(self, var, max_extent, kind, is_reduction, axis=None):
        self.__init_handle_by_constructor__(
            _ffi_api.SpIterVar, var, max_extent, kind, is_reduction, axis  # type: ignore
        )
| 23.73251
| 104
| 0.658575
|
82b9ca856b434b9abd044923c0088faa91e76968
| 1,169
|
py
|
Python
|
octavia/api/v2/controllers/__init__.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | 1
|
2019-01-11T06:20:25.000Z
|
2019-01-11T06:20:25.000Z
|
octavia/api/v2/controllers/__init__.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | null | null | null |
octavia/api/v2/controllers/__init__.py
|
BeaconFramework/Distributor
|
c9f8737063263ca69365679c8b76331766d63191
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from octavia.api.v1.controllers import load_balancer
from octavia.api.v2.controllers import base
class BaseV2Controller(base.BaseController):
loadbalancers = load_balancer.LoadBalancersController()
@pecan.expose()
def get(self):
return "v2.0"
class LBaaSController(BaseV2Controller):
"""Expose /lbaas/ endpoint for the v2.0 controller.
    Provides backwards compatibility with LBaaSv2.
    To be removed once LBaaSv2 has been removed.
"""
pass
class V2Controller(BaseV2Controller):
lbaas = LBaaSController()
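# Hedged note (not part of the original module): with pecan's object-dispatch
# routing, and assuming V2Controller is mounted at /v2.0 by the API root controller,
# GET /v2.0 returns "v2.0", /v2.0/loadbalancers is handled by
# LoadBalancersController, and /v2.0/lbaas/loadbalancers reaches the same controller
# through the backwards-compatible LBaaSController.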
| 27.833333
| 78
| 0.730539
|