| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
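The columns above describe fill-in-the-middle (FIM) training rows: each record below is one source file split into `prefix`, `middle`, and `suffix`. As a minimal sketch of how such rows are consumed (the `row` dict here is a hypothetical stand-in, not an actual record from this table), the original file text is just the concatenation of the three pieces:

```python
# Minimal sketch: rebuild the original file text from a FIM row.
# `row` is a hypothetical stand-in, not an actual record from this table.
def reconstruct_source(row: dict) -> str:
    return row["prefix"] + row["middle"] + row["suffix"]

row = {"prefix": "def add(a, b):\n    ", "middle": "return a + b", "suffix": "\n"}
assert reconstruct_source(row) == "def add(a, b):\n    return a + b\n"
```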
| openstates/openstates | openstates/ks/ksapi.py | Python | gpl-3.0 | 8,104 | 0.000123 |
ksleg = "http://www.kslegislature.org/li"
url = "%s/api/v11/rev-1/" % ksleg
# These actions are from the KLISS API documentation,
# and are in the same order as that table
# The PDF is linked from this webpage, and changes name
# based on the most recent API version:
# http://www.kslegislature.org/klois/Pages/RESTianAPI.html
action_codes = {
    # motion to accede; appointed
"ccac_om_370": None,
"efa_fabc_343": "passage",
"efa_fabc_342": "passage",
"cref_cref_500": "referral-committee",
"gov_avm_336": "veto-override-passage",
# change sequence
"mot_cgo_200": None,
"mot_cgo_201": None,
"mot_cgo_202": None,
"gov_mg_378": "executive-veto-line-item",
"fa_fabc_115": "failure",
"cr_rsc_292": "committee-passage-favorable",
"cr_rsc_276": "committee-passage",
"cr_rsc_274": "committee-passage-unfavorable",
"cr_rsc_275": "committee-passage-unfavorable",
"cr_rsc_273": "committee-passage-unfavorable",
"cr_rsc_270": "substitution",
# untabled/reconsiderations
"mot_tab_402": None,
"mot_tab_403": None,
"mot_tab_401": None,
"mot_tab_404": None,
"mot_rcon_303": None,
"mot_rcon_302": None,
"ee_enrb_149": "executive-receipt",
"cow_jcow_197": ["passage", "substitution"],
"mot_pspn_405": None, # postpone - failed
# other COW actions
"cow_jcow_211": None,
"cow_jcow_210": None,
"cow_jcow_214": None,
"cow_jcow_695": None,
"cow_jcow_694": None,
"cow_jcow_693": None,
"cow_jcow_692": None,
"cow_jcow_690": None,
    # withdrawn from consent cal.
"ccal_rcc_233": None,
"efa_fabc_933": "passage", # these 3 are 2/3 emergency clause votes...
"efa_fabc_936": "failure",
"efa_fabc_934": "passage",
"cref_cref_316": ["withdrawal", "referral-committee"],
"cref_cref_315": ["withdrawal", "referral-committee"],
"cur_con_374": None, # non-concur, conf. com. requested
"cr_rsc_801": "committee-passage-unfavorable", # these 3 are appointments..
"cr_rsc_800": "committee-passage-favorable",
"cr_rsc_802": "committee-passage",
"gov_mg_150": "executive-signature",
"gov_mg_151": None, # law w/o signature
"gov_mg_154": "executive-veto",
"cow_jcow_180": "passage", # COW
"ar_adj_605": None, # adjourn
"ee_enrb_888": None, # enrolled and presented to Sec. of State
"cow_jcow_239": "passage", # adopted
"cur_con_875": None, # nonconcurrences
"cur_con_876": None,
"cur_con_873": None,
"fa_fabc_341": "passage",
"fa_fabc_340": "passage",
"ccac_ccr_860": None,
"efa_fabc_115": "failure",
"intro_iopbc_158": "introduction",
"cr_rsc_291": "committee-passage",
"fa_fabc_116": "failure",
"cow_jcow_728": "amendment-withdrawal",
"cow_jcow_727": "amendment-failure",
"cow_jcow_726": "amendment-passage",
"cow_jcow_725": ["substitution", "passage"],
# motions to postpone
"mot_pspn_404": None,
"mot_pspn_403": None,
"mot_pspn_402": None,
"fa_fabc_910": "failure",
# suspend rules
"mot_susp_216": None,
"mot_susp_214": None,
"mot_susp_215": None,
"cr_rsc_289": "committee-passage",
# conference committee
"ccac_ccr_375": None,
"cur_con_337": None,
"cur_con_336": None,
"cur_con_335": None,
"ref_rbc_308": "referral-committee",
"ref_rbc_307": "referral-committee",
"ref_rbc_311": "referral-committee",
"efa_fabc_352": "passage",
"efa_fabc_351": "passage",
"intro_ibc_251": "passage",
# COW recommendations
"cow_jcow_705": ["substitution", "passage"],
"cow_jcow_704": ["substitution", "passage"],
"cow_jcow_707": "amendment-introduction",
"cow_jcow_709": "passage",
"cow_jcow_708": "passage",
# adjourn/recess
"ar_adj_625": None,
"ar_adj_626": None,
"intro_ires_251": "passage",
# engrossed/rengrossed
"ee_eng_225": None,
"ee_eng_227": None,
# referred to COW
"ref_rbc_235": None,
"cur_iopbc_141": "referral-committee",
"mot_wd_126": None, # 'committee:withdrawn',
"mot_wd_127": None, # withdraw from com- failed
"mot_wd_125": None, # withdraw from com- pending
# strike from calendar
"mot_strk_505": None,
"mot_strk_504": None,
"mot_strk_501": None,
# conf. com report adopted
"ccac_om_832": "passage",
"ccac_ccr_862": None, # motion to not adopt conf.com report failed
"ccac_ccr_863": "failure", # failed in conf.com, report not adopted
"ccac_ccr_865": None, # motion to not adopt conf.com report failed
"ccac_ccr_867": None, # agree to disagree on conf. com report
# passed over
"cow_jcow_201": None,
"cow_jcow_202": None,
"co
|
w_jcow_203": None,
"ccac_cc_377": None, # conf committee changed member
"ee_enrb_226": None, # Enrolled
# more COW actions
"cow_jcow_681": None,
"cow_jcow_682": None,
"cow_jcow_683": None,
"cow_jcow_688": None,
"cow_jcow_689": None,
# veto overrides
"gov_avm_88
|
5": "veto-override-failure",
"gov_avm_887": "veto-override-passage",
"ref_rsc_312": "referral-committee",
# more COW actions
"cow_jcow_903": None,
"cow_jcow_902": None,
"cow_jcow_901": None,
"cow_jcow_905": None,
# no motion to veto override (count as failure?)
"gov_avm_128": "veto-override-failure",
"gov_avm_129": "veto-override-failure",
"cow_jcow_191": "passage",
"cow_jcow_192": "passage",
"cow_jcow_195": None, # com. report adopted
"cow_jcow_196": ["passage", "substitution"],
"gov_avm_125": "veto-override-failure",
"mot_ref_102": "referral-committee",
"mot_ref_105": None, # not referred to committee
"cref_cref_551": "referral-committee",
"cref_cref_552": "referral-committee",
"mot_apt_301": None, # 20 days in committee, returned to senate
"ccac_om_878": None, # Motion to accede failed
"efa_fabc_925": ["passage", "substitution"],
"efa_fabc_926": ["passage", "substitution"],
"efa_fabc_923": ["passage", "substitution"],
"efa_fabc_922": ["passage", "substitution"],
"fa_fabc_105": ["failure", "substitution"],
"fa_fabc_104": "failure",
"intro_ibc_157": "introduction",
"intro_ibc_156": "filing",
"fa_fabc_905": "passage",
"intro_ires_681": "introduction",
"cref_cref_290": "referral-committee",
"fa_fabc_352": "passage",
"ccac_ccr_145": "failure",
"fa_fabc_351": "passage",
# motion to move to general orders
"mot_adv_303": None,
"mot_adv_302": None,
"mot_adv_301": None,
"efa_fabc_106": ["failure", "substitution"],
"efa_fabc_105": ["failure", "substitution"],
"efa_fabc_104": "failure",
"ccac_ccr_833": "failure",
"ref_rbc_310": "referral-committee",
"cr_rsc_283": "committee-passage-favorable",
"cr_rsc_282": "committee-passage-favorable",
"cr_rsc_281": "committee-passage-favorable",
"cr_rsc_287": "committee-passage-favorable",
"cr_rsc_286": "committee-passage-favorable",
"cr_rsc_285": "committee-passage-favorable",
"ref_rbc_500": "referral-committee",
"cr_rsc_288": "committee-passage",
# Conf. Com. reports
"ccac_ccr_883": None,
"ccac_ccr_880": None,
"ccac_ccr_881": None,
"cow_jcow_712": ["passage", "substitution"],
"cow_jcow_710": ["passage", "substitution"],
"cow_jcow_711": ["passage", "substitution"],
"cow_jcow_716": None,
"fa_fabc_925": "passage",
"fa_fabc_924": "passage",
"fa_fabc_926": "failure",
"fa_fabc_921": ["passage", "substitution"],
"fa_fabc_920": ["passage", "substitution"],
"fa_fabc_923": ["passage", "substitution"],
"fa_fabc_922": ["passage", "substitution"],
"cr_rsc_821": "committee-passage-unfavorable",
"cow_jcow_305": "referral-committee",
"cow_jcow_304": "referral-committee",
"gov_avm_349": "veto-override-failure",
"intro_ibc_681": "introduction",
"dss_627": None,
"mot_susp_203": None,
"mot_susp_202": None,
"mot_susp_206": None,
"cur_con_101": None, # concur. failed
"cur_om_141": "referral-committee",
"misc_he_200": None,
}
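Since the values of `action_codes` can be `None`, a single classification string, or a list of classifications, a consumer has to normalize lookups. A small illustrative helper, not part of the original scraper:

```python
# Illustrative helper (not in the original scraper): normalize a lookup
# in action_codes so callers always receive a list of classifications.
def classify(code):
    value = action_codes.get(code)
    if value is None:
        return []            # known code with no mapped classification
    if isinstance(value, str):
        return [value]       # single classification
    return list(value)       # already a list, e.g. "cow_jcow_197"

# classify("cow_jcow_197") -> ["passage", "substitution"]
```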
| bitmazk/django-event-rsvp | event_rsvp/tests/forms_tests.py | Python | mit | 3,796 | 0 |
"""Tests for the forms of the ``event_rsvp`` app."""
from django.test import TestCase
from django.utils import timezone
from django_libs.tests.factories import UserFactory
from event_rsvp.forms import EventForm, GuestForm
from event_rsvp.models import Event, Guest
from event_rsvp.tests.factories import EventFactory
class EventFormTestCase(TestCase):
"""Tests for the ``EventForm`` form class."""
longMessage = True
def test_validates_and_saves_input(self):
self.user = UserFactory()
data = {
'title': 'Foo',
'venue': 'Bar',
'start': timezone.now(),
'end': timezone.now() + timezone.timedelta(days=11),
}
form = EventForm(data=data, created_by=self.user)
self.assertTrue(form.is_valid())
instance = form.save()
self.assertEqual(Event.objects.all().count(), 1)
# Test update
data.update({'street': 'Foostreet'})
form = EventForm(data=data, instance=instance, created_by=self.user)
instance = form.save()
self.assertEqual(instance.street, 'Foostreet')
# Test creating an event from a template
form = EventForm(data=data, instance=instance, created_by=self.user,
                         create_from_template=True)
self.assertTrue(form.is_valid())
instance = form.save()
self.assertEqual(Event.objects.all().count(), 2)
# Test saving a template
data.update({'template_name': 'Foo'})
form = EventForm(data=data, created_by=self.user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(
Event.objects.exclude(template_name__exact='').count(), 1)
# Test updating a template
data.update({'street': 'Barstreet'})
instance = Event.objects.get(template_name='Foo')
form = EventForm(data=data, instance=instance, created_by=self.user)
self.assertTrue(form.is_valid())
instance = form.save()
self.assertEqual(instance.street, 'Barstreet')
class GuestFormTestCase(TestCase):
"""Tests for the ``GuestForm`` form class."""
longMessage = True
def test_validates_and_saves_input(self):
# Test exceeding available seats
self.event = EventFactory(available_seats=1)
form = GuestForm(data={'number_of_seats': 100}, event=self.event,
user=None)
self.assertFalse(form.is_valid())
# Test exceeding available seats (plural error msg)
self.event = EventFactory(available_seats=20, max_seats_per_guest=1)
form = GuestForm(data={'number_of_seats': 100}, event=self.event,
user=None)
self.assertFalse(form.is_valid())
# Test exceeding max amount of seats per booking
form = GuestForm(data={'number_of_seats': 2}, event=self.event,
user=None)
self.assertFalse(form.is_valid())
# Test exceeding max amount of seats per booking (plural error msg)
self.event = EventFactory(max_seats_per_guest=2)
form = GuestForm(data={'number_of_seats': 3}, event=self.event,
user=None)
self.assertFalse(form.is_valid())
# Test missing required fields
self.event = EventFactory(required_fields=['name', 'phone'])
form = GuestForm(data={'name': 'Foo', 'email': 'test@example.com'},
event=self.event, user=None)
self.assertFalse(form.is_valid())
# Test valid form
form = GuestForm(data={'name': 'Foo', 'phone': '+4911111111'},
event=self.event, user=None)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(Guest.objects.all().count(), 1)
| google/gazoo-device | gazoo_device/tests/unit_tests/nrf_matter_device_test.py | Python | apache-2.0 | 4,549 | 0.005056 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for base class NrfMatterDevice."""
from unittest import mock
from absl.testing import parameterized
from gazoo_device import errors
from gazoo_device.base_classes import nrf_matter_device
from gazoo_device.capabilities import device_power_default
from gazoo_device.capabilities import pwrpc_common_default
from gazoo_device.tests.unit_tests.capability_tests.mixins import pwrpc_common_test
from gazoo_device.tests.unit_tests.utils import fake_device_test_case
import immutabledict
_FAKE_DEVICE_ID = "nrfmatterdevicestub-detect"
_FAKE_DEVICE_ADDRESS = "fake-device-address"
_FAKE_VENDOR_ID = "fake-vendor-id"
_FAKE_PRODUCT_ID = "fake-product-id"
_NRF_CONNECT_PERSISTENT_PROPERTIES = immutabledict.immutabledict({
"os": "Zephyr RTOS",
"platform": "nRF Connect",
"serial_number": "FT2BSR6O",
"name": "nrfmatterdevicestub_detect",
"device_type": "nrfmatterdevicestub",
"vendor_id": _FAKE_VENDOR_ID,
"product_id": _FAKE_PRODUCT_ID,
})
class NrfMatterDeviceStub(nrf_matter_device.NrfMatterDevice):
"""Dummy implementation for instantiation of NrfMatterDevice."""
DEVICE_TYPE = "nrfmatterdevicestub"
ENDPOINT_ID_TO_CLASS = {}
class NrfMatterDeviceTest(fake_device_test_case.FakeDeviceTestCase,
pwrpc_common_test.PigweedRpcCommonTestMixin):
"""Unit tests for base class NrfMatterDevice."""
def setUp(self):
super().setUp()
self.setup_fake_device_requirements(_FAKE_DEVICE_ID)
self.device_config["persistent"]["console_port_name"] = _FAKE_DEVICE_ADDRESS
jlink_patcher = mock.patch("pylink.JLink")
jlink_patcher.start()
self.addCleanup(jlink_patcher.stop)
self.uut = NrfMatterDeviceStub(self.mock_manager,
self.device_config,
log_directory=self.artifacts_directory)
@mock.patch.object(nrf_matter_device.os.path, "exists", return_value=True)
def test_is_connected_true(self, mock_exists):
"""Verifies is_connected returns true when console port exists."""
self.assertTrue(
nrf_matter_device.NrfMatterDevice.is_connected(self.device_config))
mock_exists.assert_called_once()
@mock.patch.object(
nrf_matter_device.NrfMatterDevice, "pw_rpc_common")
def test_get_detection_info_on_success(self, mock_rpc_common):
"""Verifies persistent properties are set correctly."""
mock_rpc_common.vendor_id = _FAKE_VENDOR_ID
mock_rpc_common.product_id = _FAKE_PRODUCT_ID
self._test_get_detection_info(
console_port_name=_FAKE_DEVICE_ADDRESS,
device_class=NrfMatterDeviceStub,
        persistent_properties=_NRF_CONNECT_PERSISTENT_PROPERTIES)
def test_flash_build_capability(self):
"""Verifies the initialization of flash_build capability."""
self.assertTrue(self.uut.flash_build)
def test_matter_endpoints_capability(self):
"""Verifies the initialization of matter_endpoints capability."""
self.assertIsNotNone(self.uut.matter_endpoints)
  def test_device_power_capability(self):
"""Verifies the initialization of device_power capability."""
self.assertIsNotNone(self.uut.device_power)
@mock.patch.object(
device_power_default.DevicePowerDefault, "cycle", autospec=True)
def test_device_reboot_hard(self, reboot_fn):
self.uut.reboot(method="hard")
reboot_fn.assert_called_once()
@parameterized.parameters(dict(method="soft"), dict(method="pw_rpc"))
@mock.patch.object(
pwrpc_common_default.PwRPCCommonDefault, "reboot", autospec=True)
def test_device_reboot(self, reboot_fn, method):
self.uut.reboot(method)
reboot_fn.assert_called_once()
def test_device_reboot_raise_error(self):
"""Test reboot method with invalid method."""
with self.assertRaisesRegex(
errors.DeviceError,
r"ValueError: Method invalid_reboot_method not recognized"):
self.uut.reboot(method="invalid_reboot_method")
if __name__ == "__main__":
fake_device_test_case.main()
| networkdynamics/zenlib | src/zen/tests/gml_interpreter.py | Python | bsd-3-clause | 2,727 | 0.041437 |
from zen import *
import unittest
import os
import os.path as path
import tempfile
class GMLTokenizerCase(unittest.TestCase):
tok = gml_tokenizer.GMLTokenizer()
codec = gml_codec.BasicGMLCodec()
interp = gml_interpreter.GMLInterpreter(codec, tok)
def test_basic_correct(self):
tokens = [
('keyOne', 0, 1), ('"one"', 1, 1),
('keyTwo', 0, 1), ('2', 1, 1)
]
expected_interpretation = {'keyOne': 'one', 'keyTwo': 2}
got_interpretation = self.interp.interpret(tokens)
self.assertEqual(got_interpretation, expected_interpretation)
def test_nested_list_correct(self):
tokens = [
('keyOne', 0, 1), ('[', 2, 1),
('subKeyOne', 0, 2), ('"one"', 1, 2),
('subKeyTwo', 0, 3), ('[', 2, 3),
('subSubKey', 0, 4), ('"one"', 1, 4),
('subSubKey', 0, 5), ('2', 1, 5),
(']', 3, 6),
(']', 3, 7),
('keyTwo', 0, 8), ('"two"', 1, 8)
]
expected_interpretation = {
'keyOne': {
'subKeyOne': 'one',
'subKeyTwo': {
'subSubKey': ['one', 2]
}
},
'keyTwo': 'two'
}
got_interpretation = self.interp.interpret(tokens)
self.assertEqual(got_interpretation, expected_interpretation)
def test_correct_empty_list(self):
tokens = [
('keyOne', 0, 1), ('[', 2, 1),
(']', 3, 2), ('keyTwo', 0, 3), ('"two"', 1, 3)
]
expected_interpretation = {'keyOne': {}, 'keyTwo': 'two'}
got_interpretation = self.interp.interpret(tokens)
self.assertEqual(got_interpretation, expected_interpretation)
def test_incorrect_val_when_key_expected(self):
# VAL_TOK when KEY_TOK expected
tokens = [
('"keyOne"', 1, 1), ('"one"', 1, 1),
('keyTwo', 0, 1), ('2', 1, 1)
]
self.assertRaises(ZenException, self.interp.interpret, tokens)
def test_incorrect_key_when_val_expected(self):
# KEY_TOK when VAL_TOK expected
tokens = [
('keyOne', 1, 1), ('one', 0, 1),
('keyTwo', 0, 1), ('2', 1, 1)
]
self.assertRaises(ZenException, self.interp.interpret, tokens)
def test_incorrect_unexpected_token_type(self):
# unexpected token type
tokens = [
('keyOne', 1, 1), ('"one"', 4, 1),
('keyTwo', 0, 1), ('2', 1, 1)
]
self.assertRaises(ZenException, self.interp.interpret, tokens)
    def test_incorrect_eof_when_expecting_value(self):
tokens = [
('keyOne', 0, 1), ('"one"', 1, 1),
('keyTwo', 0, 1)
]
self.assertRaises(ZenException, self.interp.interpret, tokens)
def test_incorrect_eolist_when_expecting_value(self):
tokens = [
('keyOne', 0, 1), ('[', 2, 1),
('subKeyOne', 0, 2), ('"one"', 1, 2),
            ('subKeyTwo', 0, 3),
(']', 3, 6),
('keyTwo', 0, 8), ('"two"', 1, 8)
]
self.assertRaises(ZenException, self.interp.interpret, tokens)
if __name__ == '__main__':
unittest.main()
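Each token in the fixtures above is a `(text, type_code, line_number)` tuple; from the cases, the type codes appear to be 0 for keys, 1 for values, 2 for `[` and 3 for `]`. A hypothetical fixture using that inferred encoding:

```python
# Hypothetical fixture using the token encoding inferred from the tests:
# (text, type_code, line) with 0 = key, 1 = value, 2 = '[', 3 = ']'.
tokens = [
    ('graph', 0, 1), ('[', 2, 1),
    ('directed', 0, 2), ('1', 1, 2),
    (']', 3, 3),
]
# interp.interpret(tokens) would presumably yield {'graph': {'directed': 1}}
```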
| x2Ident/x2Ident_test | mitmproxy/mitmproxy/addons.py | Python | gpl-3.0 | 2,173 | 0 |
from __future__ import absolute_import, print_function, division
from mitmproxy import exceptions
import pprint
def _get_name(itm):
return getattr(itm, "name", itm.__class__.__name__)
class Addons(object):
def __init__(self, master):
self.chain = []
self.master = master
master.options.changed.connect(self.options_update)
def options_update(self, options, updated):
for i in self.chain:
with self.master.handlecontext():
i.configure(options, updated)
def add(self, options, *addons):
if not addons:
            raise ValueError("No addons specified.")
self.chain.extend(addons)
for i in addons:
self.invoke_with_context(i, "start")
self.invoke_with_context(
i,
"configure",
self.master.options,
self.master.options.keys()
)
def remove(self, addon):
self.chain = [i for i in self.chain if i is not addon]
self.invoke_with_context(addon, "done")
def done(self):
for i in self.chain:
self.invoke_with_context(i, "done")
def has_addon(self, name):
"""
Is an addon with this name registered?
"""
for i in self.chain:
if _get_name(i) == name:
return True
def __len__(self):
return len(self.chain)
def __str__(self):
return pprint.pformat([str(i) for i in self.chain])
def invoke_with_context(self, addon, name, *args, **kwargs):
with self.master.handlecontext():
self.invoke(addon, name, *args, **kwargs)
def invoke(self, addon, name, *args, **kwargs):
func = getattr(addon, name, None)
if func:
if not callable(func):
raise exceptions.AddonError(
"Addon handler %s not callable" % name
)
func(*args, **kwargs)
def __call__(self, name, *args, **kwargs):
for i in self.chain:
self.invoke(i, name, *args, **kwargs)
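Because `invoke` simply looks up a method named after the event on each addon, any object with matching method names can join the chain. A hypothetical minimal addon under that contract (the `response` event and `flow` argument are assumptions based on mitmproxy conventions, not shown in this file):

```python
# Hypothetical minimal addon: Addons.__call__("response", flow) would call
# MyAddon.response(flow) on every registered addon that defines it.
class MyAddon(object):
    def start(self):
        pass  # called once when the addon is added

    def configure(self, options, updated):
        pass  # called on every options change

    def response(self, flow):
        flow.response.headers["x-seen"] = "1"  # tag each response
```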
| openstack/zaqar | zaqar/storage/configuration.py | Python | apache-2.0 | 1,625 | 0 |
# Copyright (c) 2016 HuaWei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
class Configuration(object):
def __init__(self, conf):
"""Initialize configuration."""
self.local_conf = conf
    def register_opts(self, volume_opts, group=None):
self.local_conf.register_opts(volume_opts, group=group)
def set_override(self, name, override, group=None):
self.local_conf.set_override(name, override, group=group)
def safe_get(self, value):
try:
return self.__getattr__(value)
except cfg.NoSuchOptError:
return None
def __contains__(self, key):
"""Return True if key is in local_conf."""
return key in self.local_conf
def __getattr__(self, value):
# Don't use self.local_conf to avoid reentrant call to __getattr__()
local_conf = object.__getattribute__(self, 'local_conf')
return getattr(local_conf, value)
def __getitem__(self, key):
"""Look up an option value and perform string substitution."""
return self.local_conf.__getitem__(key)
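Note the division of labor: `__getattr__` delegates plain attribute access to the wrapped oslo.config object (raising `NoSuchOptError` for unknown options), while `safe_get` converts that error into `None`. A hypothetical usage sketch, not from the original module:

```python
# Hypothetical usage sketch (not in the original module): safe_get()
# turns a missing option into None instead of raising NoSuchOptError.
conf = Configuration(cfg.CONF)
value = conf.safe_get('some_unregistered_option')  # -> None, no exception
```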
| peterbe/configman | configman/tests/test_option.py | Python | bsd-3-clause | 12,084 | 0.000248 |
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# K Lars Lohn, lars@mozilla.com
# Peter Bengtsson, peterbe@mozilla.com
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import unittest
import re
import datetime
import configman.converters as conv
import configman.datetime_util as dtu
from configman.option import Option
from configman.config_exceptions import CannotConvertError
class TestCase(unittest.TestCase):
def test_option_constructor_basics(self):
o = Option('name')
self.assertEqual(o.name, 'name')
self.assertEqual(o.default, None)
self.assertEqual(o.doc, None)
self.assertEqual(o.from_string_converter, None)
self.assertEqual(o.value, None)
o = Option('lucy')
self.assertEqual(o.name, 'lucy')
self.assertEqual(o.default, None)
self.assertEqual(o.doc, None)
self.assertEqual(o.from_string_converter, None)
self.assertEqual(o.value, None)
o = Option(u'spa\xa0e')
self.assertEqual(o.name, u'spa\xa0e')
self.assertEqual(o.default, None)
self.assertEqual(o.doc, None)
self.assertEqual(o.from_string_converter, None)
self.assertEqual(o.value, None)
data = {
'name': 'lucy',
'default': 1,
'doc': "lucy's integer"
}
o = Option(**data)
self.assertEqual(o.name, 'lucy')
self.assertEqual(o.default, 1)
self.assertEqual(o.doc, "lucy's integer")
self.assertEqual(o.from_string_converter, int)
self.assertEqual(o.value, 1)
data = {
'name': 'lucy',
'default': 1,
'doc': "lucy's integer",
'value': '1'
}
o = Option(**data)
self.assertEqual(o.name, 'lucy')
self.assertEqual(o.default, 1)
self.assertEqual(o.doc, "lucy's integer")
self.assertEqual(o.from_string_converter, int)
self.assertEqual(o.value, 1)
data = {
'name': 'lucy',
'default': '1',
'doc': "lucy's integer",
'from_string_converter': int
}
o = Option(**data)
self.assertEqual(o.name, 'lucy')
self.assertEqual(o.default, 1) # converted using `int`
self.assertEqual(o.doc, "lucy's integer")
self.assertEqual(o.from_string_converter, int)
self.assertEqual(o.value, 1)
data = {
'name': 'lucy',
'default': '1',
'doc': "lucy's integer",
'from_string_converter': int,
}
o = Option(**data)
self.assertEqual(o.name, 'lucy')
self.assertEqual(o.default, 1)
self.assertEqual(o.doc, "lucy's integer")
self.assertEqual(o.from_string_converter, int)
self.assertEqual(o.value, 1)
data = {
'default': '1',
'doc': "lucy's integer",
'from_string_converter': int,
}
o = Option('now', **data)
self.assertEqual(o.name, 'now')
self.assertEqual(o.default, 1)
self.assertEqual(o.doc, "lucy's integer")
self.assertEqual(o.from_string_converter, int)
self.assertEqual(o.value, 1)
d = datetime.datetime.now()
o = Option('now', default=d)
self.assertEqual(o.name, 'now')
self.assertEqual(o.default, d)
self.assertEqual(o.doc, None)
self.assertEqual(o.from_string_converter,
dtu.datetime_from_ISO_string)
self.assertEqual(o.value, d)
data = {
'default': '1.0',
'doc': "lucy's height",
'from_string_converter': float,
}
o = Option('now', **data)
self.assertEqual(o.name, 'now')
self.assertEqual(o.default, 1.0)
self.assertEqual(o.doc, "lucy's height")
self.assertEqual(o.from_string_converter, float)
self.assertEqual(o.value, 1.0)
def test_option_constructor_more_complex_default_converters(self):
data = {
'default': '2011-12-31',
'doc': "lucy's bday",
'from_string_converter': dtu.date_from_ISO_string,
}
o = Option('now', **data)
self.assertEqual(o.name, 'now')
self.assertEqual(o.default, datetime.date(2011, 12, 31))
self.assertEqual(o.doc, "lucy's bday")
self.assertEqual(o.from_string_converter, dtu.date_from_ISO_string)
self.assertEqual(o.value, datetime.date(2011, 12, 31))
data = {
'default': '2011-12-31',
'doc': "lucy's bday",
'from_string_converter': \
'configman.datetime_util.date_from_ISO_string',
}
o = Option('now', **data)
self.assertEqual(o.name, 'now')
self.assertEqual(o.default, datetime.date(2011, 12, 31))
self.assertEqual(o.doc, "lucy's bday")
self.assertEqual(o.from_string_converter, dtu.date_from_ISO_string)
self.assertEqual(o.value, datetime.date(2011, 12, 31))
def test_setting_known_from_string_converter_onOption(self):
opt = Option('name', default=u'Peter')
self.assertEqual(opt.default, u'Peter')
self.assertEqual(opt.from_string_converter, unicode)
opt = Option('name', default=100)
self.assertEqual(opt.default, 100)
self.assertEqual(opt.from_string_converter, int)
opt = Option('name', default=100L)
self.assertEqual(opt.default, 100L)
self.assertEqual(opt.from_string_converter, long)
opt = Option('name', default=100.0)
self.assertEqual(opt.default, 100.0)
self.assertEqual(opt.from_string_converter, float)
from decimal import Decimal
opt = Option('name', default=Decimal('100.0'))
self.assertEqual(opt.default, Decimal('100.0'))
self.assertEqual(opt.from_string_converter, Decimal)
opt = Option('name', default=False)
self.assertEqual(opt.default, False)
self.assertEqual(opt.from_string_converter,
conv.boolean_converter)
dt = datetime.datetime(2011, 8, 10, 0, 0, 0)
opt = Option('name', default=dt)
self.assertEqual(opt.default, dt)
self.assertEqual(opt.from_string_converter,
dtu.datetime_from_ISO_string)
dt = datetime.date(2011, 8, 10)
opt = Option('name', default=dt)
self.assertEqual(opt.default, dt)
self.assertEqual(opt.from_string_converter,
dtu.date_from_ISO_string)
def test_boolean_converter_inOption(self):
opt = Option('name', default=False)
self.assertEqual(opt.default, False)
| jepio/JKalFilter | test/test_track.py | Python | gpl-2.0 | 673 | 0.001486 |
""" Test of tracking and detector response. """
# pylint: disable=C0103
from ..detector import LayeredDetector
from ..track import gen_straight_tracks
from matplotlib import pyplot as plt
def main():
"""
Test if construction of detector works and propagate tracks through
detector.
"""
    A = LayeredDetector(1, 0, 0.5, 8, 10, 25)
tracks = gen_straight_tracks(20)
x_coords = [0.1 * i for i in xrange(100)]
A.propagate_tracks(tracks)
for track in tracks:
y = [track.get_yintercept(x) for x in x_coords]
plt.plot(x_coords, y)
plt.xlim(0, 10)
plt.ylim(-0.5, 0.5)
A.draw()
if __name__ == "__main__":
main()
| danbob123/gplearn | gplearn/skutils/tests/test_testing.py | Python | bsd-3-clause | 3,785 | 0.000264 |
import warnings
import unittest
import sys
from nose.tools import assert_raises
from gplearn.skutils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
        # Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| Danielweber7624/pybuilder | src/main/python/pybuilder/plugins/python/pep8_plugin.py | Python | apache-2.0 | 1,506 | 0.000664 |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.core import use_plugin, task, after, init
from pybuilder.utils import assert_can_execute, read_file
from pybuilder.plugins.python.python_plugin_helper import execute_tool_on_source_files
use_plugin("python.core")
@init
def init_pep8_properties(project):
project.build_depends_on("pep8")
@after("prepare")
def check_pep8_available(logger):
logger.debug("Checking availability of pep8")
assert_can_execute(("pep8", ), "pep8", "plugin python.pep8")
@task
def analyze(project, logger):
logger.info("Executing pep8 on project sources")
_, report_file = execute_tool_on_source_files(project, "pep8", ["pep8"])
reports = read_file(report_file)
if len(reports) > 0:
        logger.warn("Found %d warning%s produced by pep8",
len(reports), "" if len(reports) == 1 else "s")
| wallnerryan/quantum_migrate | quantumclient/quantum/v2_0/nvp_qos_queue.py | Python | apache-2.0 | 2,899 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from quantumclient.quantum import v2_0 as quantumv20
class ListQoSQueue(quantumv20.ListCommand):
"""List queues that belong to a given tenant."""
resource = 'qos_queue'
log = logging.getLogger(__name__ + '.ListQoSQueue')
_formatters = {}
list_columns = ['id', 'name', 'min', 'max',
'qos_marking', 'dscp', 'default']
class ShowQoSQueue(quantumv20.ShowCommand):
"""Show information of a given queue."""
resource = 'qos_queue'
log = logging.getLogger(__name__ + '.ShowQoSQueue')
allow_names = True
class CreateQoSQueue(quantumv20.CreateCommand):
"""Create a queue."""
resource = 'qos_queue'
log = logging.getLogger(__name__ + '.CreateQoSQueue')
def add_known_arguments(self, parser):
parser.add_argument(
'name', metavar='NAME',
help='Name of queue')
parser.add_argument(
'--min',
help='min-rate'),
parser.add_argument(
'--max',
help='max-rate'),
parser.add_argument(
'--qos-marking',
help='qos marking untrusted/trusted'),
parser.add_argument(
'--default',
default=False,
            help=('If true all ports created will be the size of this queue'
' if queue is not specified')),
parser.add_argument(
'--dscp',
help='Differentiated Services Code Point'),
def args2body(self, parsed_args):
params = {'name': parsed_args.name,
'default': parsed_args.default}
if parsed_args.min:
params['min'] = parsed_args.min
if parsed_args.max:
params['max'] = parsed_args.max
        if parsed_args.qos_marking:
params['qos_marking'] = parsed_args.qos_marking
if parsed_args.dscp:
params['dscp'] = parsed_args.dscp
if parsed_args.tenant_id:
            params['tenant_id'] = parsed_args.tenant_id
return {'qos_queue': params}
class DeleteQoSQueue(quantumv20.DeleteCommand):
"""Delete a given queue."""
log = logging.getLogger(__name__ + '.DeleteQoSQueue')
resource = 'qos_queue'
allow_names = True
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_express_route_circuit_peerings_operations.py | Python | mit | 21,838 | 0.00522 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
    ) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified peering for the express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: Th
| Danko90/cifpy3 | lib/cif/types/observables/ipv6.py | Python | gpl-3.0 | 836 | 0.001196 |
__author__ = 'James DeVincentis <james.d@hexhost.net>'
from .ipaddress import Ipaddress
class Ipv6(Ipaddress):
    def __init__(self, *args, **kwargs):
self._mask = None
super(Ipv6, self).__init__(self, args, **kwargs)
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, value):
if self._validation and value is not None:
if not isinstance(value, int):
try:
value = int(value)
except Exception as e:
                    raise TypeError("Mask must be an integer") from e
if value > 128:
raise TypeError("Mask cannot be greater than 128 bits for IPv6")
if value < 0:
raise TypeError("Mask cannot be less than 0 bits")
self._mask = value
| jkloo/BatchY | batchy/cli.py | Python | mit | 2,138 | 0.003274 |
#!/usr/bin/python
import argparse
from . import CLI
from .utils import _list_files
from .find import batchy_find
from .update import batchy_update
from .view import batchy_view
def _batchy_find(args):
return batchy_find(args.pattern, args.keys, args.replace, args.files)
def _batchy_update(args):
return batchy_update(args.key, args.value, args.files, add=args.add, append=args.append)
def _batchy_view(args):
return batchy_view(args.files, args.keys)
def _generate_args(args=None):
parser = argparse.ArgumentParser(prog=CLI)
subparser = parser.add_subparsers(title='subcommands')
find = subparser.add_parser('find')
find.set_defaults(func=_batchy_find)
find.add_argument('pattern', help='Pattern to search files for.')
find.add_argument('--keys', nargs='+', help='')
find.add_argument('--replace', nargs='+', help='')
find.add_argument('--files', nargs='+', help='List of files to limit the scope to.', default=[])
find.add_argument('--dirs', nargs='+', help='List of directories to limit the scope to.', default=[])
update = subparser.add_parser('update')
update.set_defaults(func=_batchy_update)
update.add_argument('key', help='')
update.add_argument('value', help='')
group = update.add_mutually_exclusive_group()
group.add_argument('--add', action='store_true')
group.add_argument('--append', action='store_true')
update.add_argument('--files', nargs='+', help='List of files to limit the scope to.', default=[])
update.add_argument('--dirs', nargs='+', help='List of directories to limit the scope to.', default=[])
view = subparser.add_parser('view')
view.set_defaults(func=_batchy_view)
view.add_argument('--keys', nargs='+')
view.add_argument('--files', nargs='+', help='List of files to limit the scope to.', default=[])
view.add_argument('--dirs', nargs='+', help='List of directories to limit the scope to.', default=[])
args = parser.parse_args(args)
    args.files = _list_files(args.files, args.dirs)
return args
def main(args=None):
args = _generate_args(args)
return args.func(args)
| santiago-salas-v/walas | basic_constants_from_the_properties_of_gases_and_liquids.py | Python | mit | 10,361 | 0.002124 |
import os
import re
import locale
locale.setlocale(locale.LC_ALL, '') # decimals according to locale
out_file_name = './logs/output_basic_constants.csv'
sep_char_for_csv = '|'
out_file = open(out_file_name, mode='w')
out_file_full_path = os.path.abspath(out_file_name)
def str_list_to_np_array_str(param):
return 'np.array([' + param + '])'
#sel_component_numbers = [130, 460, 461, 463, 95]
sel_component_numbers = [461, 455, 460, 463, 465]
# sel_component_numbers = [66, 60, 440, 460]
#sel_component_numbers = [130, 460, 31, 440, 455]
#sel_component_numbers = [66, 438, ]
out_file.write('sep=' + sep_char_for_csv + '\n')
out_file.write("""
# Source of data:
# Poling, Bruce E., John M. Prausnitz,
# and John P. O'connell.
# The properties of gases and liquids.
# Vol. 5. New York: Mcgraw-hill, 2001.
# Basic Constants I: 468 Components in table
""")
out_file.write('\n')
test_exp_descriptors = '(^[0-9]{1,3})\|(.*)\|(.*)\|' + \
'([0-9]{1,7}-[0-9]{1,7}-[0-9]{1,7})'
test_exp_basic_constants_i = test_exp_descriptors + \
'[\||\ ](\-?\ ?[0-9]+\.?[0-9]+)?' * 8 # 8 numeric columns
test_exp_basic_constants_ii = test_exp_descriptors + \
'[\||\ ](\-?\ ?[0-9]+\.?[0-9]+)?' * 7 # 7 numeric columns
test_exp_ig_heat_capacities = test_exp_descriptors + \
'\|(—|[0-9]{1,4}-[0-9]{1,4})?' + \
'[\||\ ](\-?\ ?[0-9]+\.?[0-9]+)?' * 7 # 7 numeric columns
file_name_basic_constants_i = os.path.abspath(
'./data/basic_constants_i_properties_of_gases_and_liquids.csv'
)
file_name_basic_constants_ii = os.path.abspath(
'./data/basic_constants_ii_properties_of_gases_and_liquids.csv'
)
file_name_ig_heat_capacities = os.path.abspath(
'./data/ig_l_heat_capacities_properties_of_gases_and_liquids.csv'
)
file = open(file_name_basic_constants_i, mode='r', encoding='utf-8-sig')
matches = []
k = 0
out_file.write('# ======================')
out_file.write('\n')
for line in file:
k += 1
# print header
if k < 4:
out_file.write(line)
out_file.write('\n')
match = re.search(test_exp_basic_constants_i, line)
if match is not None:
matches.append(match.groups())
file.close()
out_file.write('# Basic Constants I: ' + str(len(matches)) +
' Components in table')
out_file.write('\n')
no = [int(item[0]) for item in matches]
formula = [item[1] for item in matches]
name = [item[2] for item in matches]
cas_no = [item[3] for item in matches]
mol_wt = [float(item[4].replace(' ', ''))
if item[4] is not None
else 0.0
for item in matches] # g/mol
tfp = [float(item[5].replace(' ', ''))
if item[5] is not None
else 0.0
for item in matches] # K
tb = [float(item[6].replace(' ', ''))
if item[6] is not None
else 0.0
for item in matches] # K
tc = [float(item[7].replace(' ', ''))
if item[7] is not None
else 0.0
for item in matches] # K
pc = [float(item[8].replace(' ', ''))
if item[8] is not None
else 0.0
for item in matches] # bar
vc = [float(item[9].replace(' ', ''))
if item[9] is not None
else 0.0
for item in matches] # cm^3/mol
zc = [float(item[10].replace(' ', ''))
if item[10] is not None
else 0.0
for item in matches] # []
omega = [float(item[11].replace(' ', ''))
if item[11] is not None
else 0.0
for item in matches] # []
table_indexes_of_comp_nos = [
no.index(comp_no) for comp_no in sel_component_numbers
]
out_file.write('# ======================')
out_file.write('\n')
props = ['no', 'formula', 'name', 'cas_no',
'mol_wt', 'tfp', 'tb', 'tc',
'pc', 'zc', 'omega']
out_file.write('# Values in Table:')
out_file.write('\n')
for prop in props:
is_numeric_prop = not isinstance((globals()[prop])[0], str)
out_file.write(prop)
if is_numeric_prop:
for comp_no in table_indexes_of_comp_nos:
out_file.write(' | ' +
locale.str((globals()[prop])[comp_no])
)
else:
for comp_no in table_indexes_of_comp_nos:
out_file.write(' | ' +
(globals()[prop])[comp_no]
)
out_file.write('\n')
file = open(file_name_basic_constants_ii, mode='r', encoding='utf-8-sig')
matches = []
k = 0
out_file.write('# ======================')
out_file.write('\n')
for line in file:
k += 1
# print header
if k < 9:
out_file.write(line)
out_file.write('\n')
match = re.search(test_exp_basic_constants_ii, line)
if match is not None:
matches.append(match.groups())
file.close()
out_file.write('# Basic Constants II: ' + str(len(matches)) +
' Components in table')
out_file.write('\n')
no = [int(item[0]) for item in matches]
formula = [item[1] for item in matches]
name = [item[2] for item in matches]
cas_no = [item[3] for item in matches]
delHf0 = [float(item[4].replace(' ', ''))
if item[4] is not None
else 0.0
for item in matches] # kJ/mol
delGf0 = [float(item[5].replace(' ', ''))
if item[5] is not None
else 0.0
for item in matches] # kJ/mol
delHb = [float(item[6].replace(' ', ''))
if item[6] is not None
else 0.0
for item in matches] # kJ/mol
delHm = [float(item[7].replace(' ', ''))
if item[7] is not None
else 0.0
for item in matches] # kJ/mol
v_liq = [float(item[8].replace(' ', ''))
if item[8] is not None
else 0.0
for item in matches] # cm^3/mol
t_liq = [float(item[9].replace(' ', ''))
if item[9] is not None
else 0.0
for item in matches] # K
dipole = [float(item[10].replace(' ', ''))
          if item[10] is not None
else 0.0
for item in matches] # Debye
table_indexes_of_comp_nos = [
no.index(comp_no) for comp_no in sel_component_numbers
]
out_file.write('# ======================')
out_file.write('\n')
props = ['no', 'formula', 'name', 'cas_no',
'delHf0', 'delGf0', 'delHb', 'delHm',
'v_liq', 't_liq', 'dipole']
out_file.write('# Values in Table:')
out_file.write('\n')
for prop in props:
is_numeric_prop = not isinstance((globals()[prop])[0], str)
out_file.write(prop)
if is_numeric_prop:
for comp_no in table_indexes_of_comp_nos:
out_file.write(' | ' +
locale.str((globals()[prop])[comp_no])
)
else:
for comp_no in table_indexes_of_comp_nos:
out_file.write(' | ' +
(globals()[prop])[comp_no]
)
out_file.write('\n')
file = open(file_name_ig_heat_capacities, mode='r', encoding='utf-8-sig')
matches = []
k = 0
out_file.write('# ======================')
out_file.write('\n')
for line in file:
k += 1
# print header
if k < 5:
out_file.write(line)
out_file.write('\n')
match = re.search(test_exp_ig_heat_capacities, line)
if match is not None:
matches.append(match.groups())
file.close()
out_file.write('# Ideal Gas and Liquid Heat Capacities: ' + str(len(matches)) +
' Components in table')
out_file.write('\n')
no = [int(item[0]) for item in matches]
formula = [item[1] for item in matches]
name = [item[2] for item in matches]
cas_no = [item[3] for item in matches]
trange = [item[4] for item in matches]
a0 = [float(item[5].replace(' ', ''))
if item[5] is not None
else 0.0
for item in matches]
a1 = [1e-3 * float(item[6].replace(' ', ''))
if item[6] is not None
else 0.0
for item in matches]
a2 = [1e-5 * float(item[7].replace(' ', ''))
if item[7] is not None
else 0.0
for item in matches]
a3 = [1e-8 * float(item[8].replace(' ', ''))
if item[8] is not None
else 0.0
for item in matches]
a4 = [1e-11 * float(item[9].replace(' ', ''))
if item[9] is not None
else 0.0
for item in matches]
cpig = [float(item[10].replace(' ', ''))
if item[10] is not None
else 0.0
for item in matches]
cpliq = [float(item[11].rep
| 153/wbs | admin.py | Python | cc0-1.0 | 4,183 | 0.005259 |
#!/usr/bin/env python3
import webtools as wt
import os, crypt, cgitb
cgitb.enable()
modes = {"0": "no mode",
"1": "lock",
"2": "sticky",
"3": "stickylock",
"4": "permasage"
}
settings = "./settings.txt"
b_conf = []
cd = {}
with open(settings, "r") as settings:
settings = settings.read().splitlines()
for s in settings:
if len(s) == 0 or s[0] == "#" or ": " not in s:
continue
elif "#" in s:
s = s.split("#")[0]
s = s.split(": ")
if len(s) > 2:
s[1] = ": ".join(s[1:])
try:
s[1] = int(s[1])
except:
pass
b_conf.append(s[1])
cd[s[0]] = s[1]
with open("./admin/op.html", 'r') as op:
op = op.read()
def mode_icons(mo=""):
micons = ["", "lock.png", "sticky.png",
["lock.png", "sticky.png"], "ghost.png"]
ic = micons[int(mo)]
if len(ic) == 2:
ic = ["./img/" + i for i in ic if len(ic) == 2]
elif len(ic):
ic = ["./img/" + ic]
return ic
def login_admin():
# if wt.get_cookie():
cookies = wt.get_cookie()
if 'pw' in cookies.keys():
if tripcode(cookies['pw']) == b_conf[3]:
return 1
elif wt.get_form('pw') and \
tripcode(wt.get_form('pw')) == b_conf[3]:
print(wt.put_cookie('pw', wt.get_form('pw')))
return 1
else:
if wt.get_form('pw'):
print("Password incorrect.<br>")
print("<h1>Login</h1>")
print("<p>", wt.new_form('admin.py', 'post'))
print("#" + wt.put_form('password', 'pw'))
print(wt.put_form('submit', '', 'Submit'))
print("</form><p>")
return 0
def admin_splash():
print("""<pre>
- change settings
- moderate threads
- modify wordfilter
</pre>""")
if not wt.get_form('mode') or not wt.get_form('thread'):
print("<h2>Settings</h2>")
print("\n".join(["<br> - "+str(i) for i in b_conf]))
for s in cd.keys():
print("<p>",s + ":<br> ", cd[s])
print("<h2>Threads</h2>")
if wt.get_form('more'):
print("<a href='.'>Back</a><br>")
print(wt.get_form('thread'), "<hr>")
# print(load_thread(wt.get_form('thread')))
show_thread(load_thread(wt.get_form('thread')))
else:
mod_threads()
def mod_threads():
print("<pre>")
with open(b_conf[6]) as t_list:
print(t_list.read())
print("</pre>")
ti = thread_index()
for t in ti["ti"]:
# t = filename
# t[0] last reply time, t[1] thread title
# t[2] reply count, t[3] thread mode
mic = mode_icons(ti[t][3])
tm = [f"<img src='{m}'>" for m in mic]
if ti[t][3] in modes:
ti[t][3] = modes[ti[t][3]]
mk = list(modes.keys())
mv = [modes[i] for i in mk]
dropdown = wt.dropdown("mode", mk, mv)
ti[t][3] = dropdown.replace(f">{ti[t][3]}", \
f" selected>{ti[t][3]}")
print(op.format(t, ti[t], " ".join(tm)))
def thread_index():
with open(b_conf[6]) as t_list:
t_list = t_list.read().splitlines()
    t = {}
t["ti"] = []
for th in t_list:
th = th.split(" >< ")
t["ti"].append(th[0])
t[th[0]] = th[1:]
return t
def load_thread(thr='0'):
# print(b_conf[5] + thr)
with open(b_conf[5] + thr, 'r') as thr:
thr = thr.read().splitlines()
for n, th in enumerate(thr):
thr[n] = th.split(' >< ')
return thr
def show_thread(thr=[]):
if not thr:
        return None
table = ["<table>"]
table.append("<tr><td><td>Name<td>Date<td>Comment")
print("<tr><th colspan='4'>", thr.pop(0)[0])
for n, t in enumerate(thr):
t.pop(2)
t = f"<tr><td>{n+1}.<td>" + "<td>".join(t)
table.append(t)
print("\n".join(table), "</table>")
def tripcode(pw):
pw = pw[:8]
salt = (pw + "H..")[1:3]
trip = crypt.crypt(pw, salt)
return (trip[-10:])
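`tripcode` follows the classic imageboard scheme: the salt is taken from characters 2-3 of the password padded with `"H.."`, and the trip is the last 10 characters of the DES `crypt` hash, so the same password always yields the same trip. An illustrative call (the password is made up; the exact output depends on the platform's `crypt`):

```python
# Illustrative call, not in the original file: a stable 10-character trip
# that login_admin() can compare against the stored value in b_conf[3].
trip = tripcode("hunter2")
assert len(trip) == 10
```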
def main():
print(wt.head(b_conf[0]))
print("<h2>", b_conf[0], "admin</h2>")
# print(wt.get_cookie())
if login_admin() == 1:
admin_splash()
main()
| bijanebrahimi/pystatus | pystatus/libs/__init__.py | Python | gpl-3.0 | 331 | 0.003021 |
# from cryptography import *
from salmon import MagicSig
from crypt import strip_whitespaces, b64_to_num, b64_to_str, b64encode, b64decode, generate_rsa_key, export_rsa_key
from activitystreams import salmon, salmon1, salmon2, salmon3
from webfinger import WebfingerClient
from convert import str_to_datetime, datetime_to_rfc3339
makerdao/keeper | pymaker/numeric.py | Python | agpl-3.0 | 14,935 | 0.003348
# This file is part of Maker Keeper Framework.
#
# Copyright (C) 2017-2018 reverendus
# Copyright (C) 2018 bargst
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
from functools import total_ordering, reduce
from decimal import *
_context = Context(prec=1000, rounding=ROUND_DOWN)
@total_ordering
class Wad:
"""Represents a number with 18 decimal places.
`Wad` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Wad`. Multiplication works with instances
of `Wad` and `Ray` and also with `int` numbers. The result of multiplication is always a `Wad`.
`Wad`, along with `Ray`, are the two basic numeric types used by Maker contracts.
Notes:
The internal representation of `Wad` is an unbounded integer, the last 18 digits of it being treated
as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
"""
def __init__(self, value):
"""Creates a new Wad number.
Args:
value: an instance of `Wad`, `Ray` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Wad`
with a value of `0.000000000000000001'.
"""
if isinstance(value, Wad):
self.value = value.value
elif isinstance(value, Ray):
self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(9))).quantize(1, context=_context))
elif isinstance(value, Rad):
self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(27))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 18
dec = Decimal(str(number)) * pwr
return Wad(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Wad(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(19)
return (tmp[0:len(tmp)-18] + "." + tmp[len(tmp)-18:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Wad):
return Wad(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Wad):
return Wad(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Wad):
return Wad(self.value % other.value)
else:
raise ArithmeticError
# z = cast((uint256(x) * y + WAD / 2) / WAD);
def __mul__(self, other):
if isinstance(other, Wad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, Ray):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, Rad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, int):
return Wad(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __truediv__(self, other):
if isinstance(other, Wad):
return Wad(int((Decimal(self.value) * (Decimal(10) ** Decimal(18)) / Decimal(other.value)).quantize(1, context=_context)))
else:
            raise ArithmeticError
def __abs__(self):
return Wad(abs(self.value))
def __eq__(self, other):
if isinstance(other, Wad):
            return self.value == other.value
else:
raise ArithmeticError
def __hash__(self):
return hash(self.value)
def __lt__(self, other):
if isinstance(other, Wad):
return self.value < other.value
else:
raise ArithmeticError
def __int__(self):
return int(self.value / 10**18)
def __float__(self):
return self.value / 10**18
def __round__(self, ndigits: int = 0):
return Wad(round(self.value, -18 + ndigits))
def __sqrt__(self):
return Wad.from_number(math.sqrt(self.__float__()))
@staticmethod
def min(*args):
"""Returns the lower of the Wad values"""
return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
@staticmethod
def max(*args):
"""Returns the higher of the Wad values"""
return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
@total_ordering
class Ray:
"""Represents a number with 27 decimal places.
`Ray` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Ray`. Multiplication works with instances
of `Ray` and `Wad` and also with `int` numbers. The result of multiplication is always a `Ray`.
`Ray`, along with `Wad`, are the two basic numeric types used by Maker contracts.
Notes:
The internal representation of `Ray` is an unbounded integer, the last 27 digits of it being treated
as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
"""
def __init__(self, value):
"""Creates a new Ray number.
Args:
value: an instance of `Ray`, `Wad` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Ray`
with a value of `0.000000000000000000000000001'.
"""
if isinstance(value, Ray):
self.value = value.value
elif isinstance(value, Wad):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(9))).quantize(1, context=_context))
elif isinstance(value, Rad):
self.value = int((Decimal(value.value) / (Decimal(10)**Decimal(18))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 27
dec = Decimal(str(number)) * pwr
return Ray(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Ray(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(28)
return (tmp[0:len(tmp)-27] + "." + tmp[len(tmp)-27:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Ray):
return Ray(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Ray):
return Ray(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Ray):
return Ray(self.value % other.value)
else:
raise ArithmeticError
def _
joelthe1/web-scraping | scrape-website-example-1.py | Python | mit | 3,431 | 0.01195
#!/usr/bin/python
'''Scrape a website using urllib2 (A library for pinging URLs) and BeautifulSoup (A library for parsing HTML)'''
from bs4 import BeautifulSoup
import urllib2
import time
import sys
import socket
start_time = time.time()
#Open files
rfile = open("input.csv","r").read().splitlines()
wfile = open("translations.csv","w")
sepComma = ","
newline = "\n"
counter = 0
tcounter = 0
#Start processing
for pEword in rfile:
retry = 0
# print pEword
while True:
try:
counter += 1
tcounter += 1
url = "http://www.example.com/"
print url
req = urllib2.Request(url)
req.add_header("Connection", "keep-alive")
req.add_header("User-Agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0")
# req.add_header("Accept-Encoding", "gzip")
req.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
# req.add_header("Cache-Control", "no-cache")
req.add_header("Accept-Language", "en-US,en;q=0.8"
|
)
req.add_header("Host", "filesdownloader.com")
# req.add_header("If-Modified-Since", "Thu, 30 Jan 2014 17:24:29 GMT")
# req.add_header("Cache-Control", "max-age=0")
# req.add_header("If-Modified-Since", "Fri, 31 Jan 2014 21:52:35 GMT")
req.add_header("Cookie", "s=6a11e5h6sald
|
4faibrkcp5bm85; __unam=7639673-143e40de47e-4218148d-4; __utma=127728666.207454719.1391100551.1391208734.1391434591.6; __utmb=127728666.2.10.1391434591; __utmc=127728666; __utmz=127728666.1391197253.2.2.utmcsr=prx.centrump2p.com|utmccn=(referral)|utmcmd=referral|utmcct=/english/")
page = urllib2.urlopen(req,timeout=4)
except urllib2.HTTPError, e:
if retry > 2:
raise e
print e.code
retry += 1
time.sleep(10)
except urllib2.URLError, e:
if retry > 2:
raise e
print e.args
retry += 1
time.sleep(10)
except socket.timeout as e:
if retry > 2:
raise e
print "Request timed out!"
retry += 1
time.sleep(10)
except:
etype, value, tb = sys.exc_info()
response = "%s" % value.message
print etype,response,tb
raise
else:
soup = BeautifulSoup(page.read())
orderedlists = soup.find_all("ol", class_="eirol")
wfile.write(pEword);wfile.write(sepComma)
#Looping <li> tags
for thelist in orderedlists:
for listitems in thelist:
pHword = listitems.next_element.next_sibling.string.encode('utf-8')
print pHword
wfile.write(pHword);wfile.write(sepComma);
# print pHword
wfile.write(newline)
# if counter > 2:
# time.sleep(3)
# counter = 0
            if tcounter % 1000 == 0 and tcounter < 15000:
print "{words} words completed".format(words = tcounter)
# if tcounter%300 == 0:
# print "Waiting for 10 mins"
# time.sleep(600)
break
wfile.close()
print time.time() - start_time, "seconds"
print "Successfully created dictionary."
w1ll1am23/home-assistant | homeassistant/components/modbus/cover.py | Python | apache-2.0 | 7,501 | 0.000533
"""Support for Modbus covers."""
from __future__ import annotations
from datetime import timedelta
from typing import Any
from pymodbus.exceptions import ConnectionException, ModbusException
from pymodbus.pdu import ExceptionResponse
from homeassistant.components.cover import SUPPORT_CLOSE, SUPPORT_OPEN, CoverEntity
from homeassistant.const import (
CONF_COVERS,
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
)
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import (
ConfigType,
DiscoveryInfoType,
HomeAssistantType,
)
from .const import (
CALL_TYPE_COIL,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_REGISTER,
CONF_STATE_CLOSED,
CONF_STATE_CLOSING,
CONF_STATE_OPEN,
CONF_STATE_OPENING,
CONF_STATUS_REGISTER,
CONF_STATUS_REGISTER_TYPE,
MODBUS_DOMAIN,
)
from .modbus import ModbusHub
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_entities,
discovery_info: DiscoveryInfoType | None = None,
):
"""Read configuration and create Modbus cover."""
if discovery_info is None:
return
covers = []
for cover in discovery_info[CONF_COVERS]:
hub: ModbusHub = hass.data[MODBUS_DOMAIN][discovery_info[CONF_NAME]]
covers.append(ModbusCover(hub, cover))
async_add_entities(covers)
class ModbusCover(CoverEntity, RestoreEntity):
"""Representation of a Modbus cover."""
def __init__(
self,
hub: ModbusHub,
config: dict[str, Any],
):
"""Initialize the modbus cover."""
self._hub: ModbusHub = hub
self._coil = config.get(CALL_TYPE_COIL)
self._device_class = config.get(CONF_DEVICE_CLASS)
self._name = config[CONF_NAME]
self._register = config.get(CONF_REGISTER)
self._slave = config.get(CONF_SLAVE)
self._state_closed = config[CONF_STATE_CLOSED]
self._state_closing = config[CONF_STATE_CLOSING]
self._state_open = config[CONF_STATE_OPEN]
self._state_opening = config[CONF_STATE_OPENING]
self._status_register = config.get(CONF_STATUS_REGISTER)
self._status_register_type = config[CONF_STATUS_REGISTER_TYPE]
self._scan_interval = timedelta(seconds=config[CONF_SCAN_INTERVAL])
self._value = None
self._available = True
# If we read cover status from coil, and not from optional status register,
# we interpret boolean value False as closed cover, and value True as open cover.
# Intermediate states are not supported in such a setup.
if self._coil is not None and self._status_register is None:
self._state_closed = False
self._state_open = True
self._state_closing = None
self._state_opening = None
# If we read cover status from the main register (i.e., an optional
# status register is not specified), we need to make sure the register_type
# is set to "holding".
if self._register is not None and self._status_register is None:
self._status_register = self._register
self._status_register_type = CALL_TYPE_REGISTER_HOLDING
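    # Added sketch (not part of the original file): a hypothetical YAML entry
    # for a coil-based cover, with key names inferred from the CONF_* constants
    # read in __init__ above (the integration documentation is authoritative):
    #
    #   covers:
    #     - name: Garage Door
    #       slave: 1
    #       coil: 4            # coil-based: open/closed only, see comment above
    #       scan_interval: 10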
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._value = state.state
async_track_time_interval(
self.hass, lambda arg: self._update(), self._scan_interval
)
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
return self._device_class
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._value == self._state_opening
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._value == self._state_closing
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self._value == self._state_closed
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
# Handle polling directly in this entity
return False
def open_cover(self, **kwargs: Any) -> None:
"""Open cover."""
if self._coil is not None:
self._write_coil(True)
else:
self._write_register(self._state_open)
self._update()
def close_cover(self, **kwargs: Any) -> None:
"""Close cover."""
if self._coil is not None:
self._write_coil(False)
else:
self._write_register(self._state_closed)
self._update()
def _update(self):
"""Update the state of the cover."""
if self._coil is not None and self._status_register is None:
self._value = self._read_coil()
else:
self._value = self._read_status_register()
self.schedule_update_ha_state()
def _read_status_register(self) -> int | None:
"""Read status register using the Modbus hub slave."""
try:
if self._status_register_type == CALL_TYPE_REGISTER_INPUT:
result = self._hub.read_input_registers(
self._slave, self._status_register, 1
)
else:
result = self._hub.read_holding_registers(
self._slave, self._status_register, 1
)
except ConnectionException:
self._available = False
return
if isinstance(result, (ModbusException, ExceptionResponse)):
self._available = False
return
value = int(result.registers[0])
self._available = True
return value
def _write_register(self, value):
"""Write holding register using the Modbus hub slave."""
try:
self._hub.write_register(self._slave, self._register, value)
except ConnectionException:
self._available = False
return
self._available = True
def _read_coil(self) -> bool | None:
"""Read coil using the Modbus hub slave."""
try:
result = self._hub.read_coils(self._slave, self._coil, 1)
except ConnectionException:
self._available = False
return
if isinstance(result, (ModbusException, ExceptionResponse)):
self._available = False
return
value = bool(result.bits[0] & 1)
self._available = True
return value
    def _write_coil(self, value):
"""Write coil using the Modbus hub slave."""
try:
            self._hub.write_coil(self._slave, self._coil, value)
except ConnectionException:
self._available = False
return
self._available = True
google-research/google-research | basisnet/personalization/centralized_emnist/data_processing.py | Python | apache-2.0 | 4,786 | 0.006686
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data related functions for packing emnist dataset.
parse_data: extract client data from emnist and add ids to tuples of numpy
arrays.
pack_dataset: pack the numpy arrays into tf.data.Dataset.
"""
import numpy as np
import tensorflow as tf
NUM_EMNIST_CLASSES = 62
MAX_DATA_SIZE = 700000
SPLIT_SIZE = 0.1
SHUFFLE_SIZE = 10000
PARSE_DATA_BATCH_SIZE = 256
def pack_dataset(data_tuple, mode, batch_size=256, with_dist=False):
"""Packs the arrays into tf.data.Dataset.
Args:
data_tuple: tuples of numpy array return from parse_data() as inputs.
It follows the orders:
For with_dist is True:
Input images, client ids, label distributions, labels
For with_dist is False:
Input images, client ids, labels
mode: training mode of test mode.
batch_size: batch size for the dataset.
with_dist: using label distributions as inputs.
Returns:
A tf.data.Dataset
"""
if with_dist:
x, idx, dist, y = data_tuple
dataset = tf.data.Dataset.from_tensor_slices(({
'input_x': x,
'input_id': idx,
'input_dist': dist,
}, y))
else:
x, idx, y = data_tuple
dataset = tf.data.Dataset.from_tensor_slices(({
'input_x': x,
'input_id': idx
}, y))
if mode == 'train':
dataset = dataset.shuffle(SHUFFLE_SIZE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
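# Added usage sketch (not part of the original file): packing toy arrays in
# the with_dist=False layout documented above (images, client ids, labels).
# x = np.zeros((512, 28, 28, 1)); idx = np.zeros(512); y = np.zeros(512)
# ds = pack_dataset((x, idx, y), mode='train', batch_size=256)
# for features, labels in ds:
#     features['input_x'], features['input_id']  # model inputs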
def count_dataset(dataset):
cnt = 0
for _ in iter(dataset):
cnt = cnt + 1
return int(cnt)
def get_local_y_dist(client_dataset):
  dist = np.zeros((1, NUM_EMNIST_CLASSES))
for x in client_dataset:
y = x['label'].numpy()
dist[y] += 1
return np.array(dist).reshape((1, -1)) / np.sum(dist)
def parse_data(emnist_train,
emnist_test,
client_ids,
cliend_encodings,
with_dist=False):
"""Packs the client dataset into tuples of arrays with client ids.
Args:
emnist_train: the tff clientdata object of the training sets.
emnist_test: the tff clientdata object of the test sets.
client_ids: client ids to extract.
cliend_encodings: a dictionary encoding client string id to number.
with_dist: using label distributions as inputs or not.
Returns:
Three tuples of numpy arrays:
The training set for fine-tuning, the smaller split of the training set,
the test set, each is a tuple of the following np.array:
Input images, input ids, label distributions, labels if with_dist is True
Input images, input ids, labels if with_dist is False
"""
def merge_clients(emnist, split_size=1):
# Cache in the memory for faster training iterations
train_x, train_id, train_y = np.zeros((MAX_DATA_SIZE, 28, 28, 1)), np.zeros(
(MAX_DATA_SIZE)), np.zeros((MAX_DATA_SIZE))
cnt = 0
if with_dist:
train_dist = np.zeros((MAX_DATA_SIZE, NUM_EMNIST_CLASSES))
client_num_list = []
for client_id in client_ids:
ds = emnist.create_tf_dataset_for_client(client_id)
client_id = cliend_encodings[client_id]
ds_np = ds.batch(PARSE_DATA_BATCH_SIZE)
if with_dist:
y_dist = get_local_y_dist(ds)
client_num = 0
for x in ds_np:
y = x['label']
x = tf.expand_dims(x['pixels'], axis=-1)
if split_size < 1:
split_num = int(len(y)*split_size)
ids = np.random.choice(np.arange(len(y)), split_num)
y = tf.gather(y, ids)
x = tf.gather(x, ids)
num = len(y)
idx = np.array([client_id]*num)
train_x[cnt:cnt+num] = x
train_y[cnt:cnt+num] = y
train_id[cnt:cnt+num] = idx
if with_dist:
train_dist[cnt:cnt+num] = np.tile(y_dist, [num, 1])
cnt += num
client_num += num
client_num_list.append(client_num)
train_x = train_x[:cnt]
train_y = train_y[:cnt]
train_id = train_id[:cnt]
if with_dist:
train_dist = train_dist[:cnt]
return train_x, train_id, train_dist, train_y
else:
return train_x, train_id, train_y
return merge_clients(emnist_train), merge_clients(
emnist_train, split_size=SPLIT_SIZE), merge_clients(emnist_test)
dapengchen123/code_v1 | reid/loss/oim.py | Python | mit | 1,727 | 0.000579
from __future__ import absolute_import
import torch
import torch.nn.functional as F
from torch import nn, autograd
class OIM(autograd.Function):
def __init__(self, lut, momentum=0.5):
super(OIM, self).__init__()
self.lut = lut
self.momentum = momentum
def forward(self, inputs, targets):
self.save_for_backward(inputs, targets)
outputs = inputs.mm(self.lut.t())
return outputs
def backward(self, grad_outputs):
        inputs, targets = self.saved_tensors
grad_inputs = None
if self.needs_input_grad[0]:
grad_inputs = grad_outputs.mm(self.lut)
for x, y in zip(inputs, targets):
self.lut[y] = self.momentum * self.lut[y] + (1. - self.momentum) * x
self.lut[y] /= self.lut[y].norm()
return grad_inputs, None
def oim(inputs, targets, lut, momentum=0.5):
return OIM(lut, momentum=momentum)(inputs, targets)
class OIMLoss(nn.Module):
def __init__(self, num_features, num_classes, scalar=1.0, momentum=0.5,
weight=None, size_average=True):
super(OIMLoss, self).__init__()
self.num_features = num_features
self.num_classes = num_classes
self.momentum = momentum
self.scalar = scalar
self.weight = weight
self.size_average = size_average
self.register_buffer('lut', torch.zeros(num_classes, num_features))
def forward(self, inputs, targets):
inputs = oim(inputs, targets, self.lut, momentum=self.momentum)
inputs *= self.scalar
loss = F.cross_entropy(inputs, targets, weight=self.weight,
size_average=self.size_average)
return loss, inputs
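# Added usage sketch (not part of the original file); the feature size and
# class count below are illustrative only.
# criterion = OIMLoss(num_features=128, num_classes=751)
# loss, logits = criterion(features, targets)
# loss.backward()  # OIM's backward pass also refreshes the look-up table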
LungNoodle/lungsim | tests/pFUnit-3.2.9/bin/pFUnitParser.py | Python | apache-2.0 | 34,426 | 0.011735
#!/usr/bin/env python
# For python 2.6-2.7
from __future__ import print_function
from os.path import *
import re
# from parseBrackets import parseBrackets
from parseDirectiveArgs import parseDirectiveArguments
class MyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
assertVariants = 'Fail|Equal|True|False|LessThan|LessThanOrEqual|GreaterThan|GreaterThanOrEqual'
assertVariants += '|IsMemberOf|Contains|Any|All|NotAll|None|IsPermutationOf'
assertVariants += '|ExceptionRaised|SameShape|IsNaN|IsFinite'
def cppSetLineAndFile(line, file):
return "#line " + str(line) + ' "' + file + '"\n'
def getSubroutineName(line):
try:
m = re.match('\s*subroutine\s+(\w*)\s*(\\([\w\s,]*\\))?\s*(!.*)*$', line, re.IGNORECASE)
return m.groups()[0]
except:
raise MyError('Improper format in declaration of test procedure.')
def parseArgsFirstRest(directiveName,line):
"""If the @-directive has more than one argument, parse into first and rest strings.
Added for assertAssociated.
"""
if directiveName != '':
m = re.match('\s*'+directiveName+'\s*\\((.*\w.*)\\)\s*$',line,re.IGNORECASE)
if m:
argStr = m.groups()[0]
else:
return None
else:
argStr = line
args = parseDirectiveArguments(argStr)
if args == []:
returnArgs = None
elif len(args) == 1:
returnArgs = [args[0]]
else:
returnArgs = [args[0],','.join(args[1:])]
return returnArgs
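# Added sketch (not part of the original file), assuming
# parseDirectiveArguments splits its input on top-level commas:
# parseArgsFirstRest('@assertAssociated', '@assertAssociated(ptr, targ)')
# -> ['ptr', 'targ']; with three or more arguments the tail is re-joined,
# e.g. '@assertAssociated(a, b, c)' -> ['a', 'b,c'].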
def parseArgsFirstSecondRest(directiveName,line):
"""If the @-directive must have at least two arguments, parse into first, second,
and rest strings. Added for assertAssociated.
"""
args1 = parseArgsFirstRest(directiveName,line)
returnArgs = None
if args1 != None:
if len(args1) == 1:
returnArgs = args1
elif len(args1) == 2:
args2 = parseArgsFirstRest('',args1[1])
returnArgs = [args1[0]] + args2
elif len(args1) == 3:
print(-999,'parseArgsFirstSecondRest::error!')
returnArgs = None
return returnArgs
def getSelfObjectName(line):
m = re.match('\s*subroutine\s+\w*\s*\\(\s*(\w+)\s*(,\s*\w+\s*)*\\)\s*$', line, re.IGNORECASE)
if m:
return m.groups()[0]
else:
return m
def getTypeName(line):
m = re.match('\s*type(.*::\s*|\s+)(\w*)\s*$', line, re.IGNORECASE)
return m.groups()[1]
class Action():
def apply(self, line):
m = self.match(line)
if m: self.action(m, line)
return m
class AtTest(Action):
def __init__(self, parser):
self.parser = parser
self.keyword = '@test'
def match(self, line):
m = re.match('\s*'+self.keyword+'(\s*(\\(.*\\))?\s*$)', line, re.IGNORECASE)
        return m
def action(self, m, line):
options = re.match('\s*'+self.keyword+'\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
method = {}
if options:
npesOption = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0], re.IGNORECASE)
if npesOption:
npesString = npesOption.groups()[0]
                npes = map(int, npesString.split(','))
method['npRequests'] = npes
#ifdef is optional
matchIfdef = re.match('.*ifdef\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchIfdef:
ifdef = matchIfdef.groups()[0]
method['ifdef'] = ifdef
matchIfndef = re.match('.*ifndef\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchIfndef:
ifndef = matchIfndef.groups()[0]
method['ifndef'] = ifndef
matchType = re.match('.*type\s*=\s*(\w+)', options.groups()[0], re.IGNORECASE)
if matchType:
print ('Type', matchType.groups()[0])
method['type'] = matchType.groups()[0]
paramOption = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0], re.IGNORECASE)
if paramOption:
paramExpr = paramOption.groups()[0]
method['testParameters'] = paramExpr
casesOption = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0], re.IGNORECASE)
if casesOption:
method['cases'] = casesOption.groups()[0]
nextLine = self.parser.nextLine()
method['name'] = getSubroutineName(nextLine)
# save "self" name for use with @mpiAssert
self.parser.currentSelfObjectName = getSelfObjectName(nextLine)
# save "self" name for use with @mpiAssert
dummyArgument = getSelfObjectName(nextLine)
if dummyArgument:
method['selfObjectName'] = dummyArgument
self.parser.userTestMethods.append(method)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
# deprecated - should now just use @test
class AtMpiTest(AtTest):
def __init__(self, parser):
self.parser = parser
self.keyword = '@mpitest'
class AtTestCase(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@testcase\s*(|\\(.*\\))\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
options = re.match('\s*@testcase\s*\\((.*)\\)\s*$', line, re.IGNORECASE)
if options:
value = re.search('constructor\s*=\s*(\w*)', options.groups()[0], re.IGNORECASE)
if value:
self.parser.userTestCase['constructor'] = value.groups()[0]
value = re.search('npes\s*=\s*\\[([0-9,\s]+)\\]', options.groups()[0], re.IGNORECASE)
if value:
npesString = value.groups()[0]
npes = map(int,npesString.split(','))
self.parser.userTestCase['npRequests'] = npes
value = re.search('cases\s*=\s*(\\[[0-9,\s]+\\])', options.groups()[0], re.IGNORECASE)
if value:
cases = value.groups()[0]
self.parser.userTestCase['cases'] = cases
value = re.search('testParameters\s*=\s*[{](.*)[}]', options.groups()[0], re.IGNORECASE)
if value:
paramExpr = value.groups()[0]
self.parser.userTestCase['testParameters'] = paramExpr
nextLine = self.parser.nextLine()
self.parser.userTestCase['type']=getTypeName(nextLine)
self.parser.commentLine(line)
self.parser.outputFile.write(nextLine)
class AtSuite(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
nameRe = "'\w+'|" + """\w+"""
m = re.match("\s*@suite\s*\\(\s*name\s*=\s*("+nameRe+")\s*\\)\s*$", line, re.IGNORECASE)
return m
def action(self, m, line):
self.parser.suiteName=m.groups()[0][1:-1]
self.parser.wrapModuleName = 'Wrap' + self.parser.suiteName
class AtBegin(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*module\s+(\w*)\s*$', line, re.IGNORECASE)
return m
def action(self, m, line):
self.parser.userModuleName = m.groups()[0]
self.parser.wrapModuleName = 'Wrap' + self.parser.userModuleName
if not self.parser.suiteName:
self.parser.suiteName = self.parser.userModuleName + "_suite"
self.parser.outputFile.write(line)
class AtAssert(Action):
def __init__(self, parser):
self.parser = parser
def match(self, line):
m = re.match('\s*@assert('+assertVariants+')\s*\\((.*\w.*)\\)\s*$', line, re.IGNORECASE)
return m
def appendSourceLocation(self, fileHandle, fileName, lineNumber):
fileHandle.write(" & location=SourceLocation( &\n")
fileHandle.write(" & '" + str(basename(fileName)) + "', &\n")
fileHandle.write(" & " + str(lineNumber) + ")")
def action(self, m, line):
p = self.parser
p.outputFile.write(cppSetLineAndFile(p.currentLineNumber, p.fileName))
stwunsch/gnuradio | grc/core/Platform.py | Python | gpl-3.0 | 11,876 | 0.001347
"""
Copyright 2008-2016 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import sys
from . import ParseXML, Messages, Constants
from .Config import Config
from .Element import Element
from .generator import Generator
from .FlowGraph import FlowGraph
from .Connection import Connection
from .Block import Block
from .Port import Port
from .Param import Param
from .utils import odict, extract_docs
class Platform(Element):
Config = Config
Generator = Generator
FlowGraph = FlowGraph
Connection = Connection
Block = Block
Port = Port
Param = Param
is_platform = True
def __init__(self, *args, **kwargs):
""" Make a platform for GNU Radio """
Element.__init__(self)
self.config = self.Config(*args, **kwargs)
self.block_docstrings = {}
self.block_docstrings_loaded_callback = lambda: None # dummy to be replaced by BlockTreeWindow
self._docstring_extractor = extract_docs.SubprocessLoader(
callback_query_result=self._save_docstring_extraction_result,
callback_finished=lambda: self.block_docstrings_loaded_callback()
)
# Create a dummy flow graph for the blocks
self._flow_graph = Element(self)
self._flow_graph.connections = []
self.blocks = odict()
self._blocks_n = odict()
self._block_categories = {}
self.domains = {}
self.connection_templates = {}
self._auto_hier_block_generate_chain = set()
self.build_block_library()
def __str__(self):
return 'Platform - {}({})'.format(self.config.key, self.config.name)
@staticmethod
def find_file_in_paths(filename, paths, cwd):
"""Checks the provided paths relative to cwd for a certain filename"""
if not os.path.isdir(cwd):
cwd = os.path.dirname(cwd)
if isinstance(paths, str):
paths = (p for p in paths.split(':') if p)
for path in paths:
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.normpath(os.path.join(cwd, path))
file_path = os.path.join(path, filename)
if os.path.exists(os.path.normpath(file_path)):
return file_path
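    # Added usage sketch (not part of the original file):
    # Platform.find_file_in_paths('my_block.xml', '~/.grc_gnuradio:.', '/tmp/flow.grc')
    # reduces cwd to '/tmp', expands and absolutizes each colon-separated path,
    # and returns the first existing candidate (or None implicitly).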
def load_and_generate_flow_graph(self, file_path):
"""Loads a flow graph from file and generates it"""
Messages.set_indent(len(self._auto_hier_block_generate_chain))
Messages.send('>>> Loading: %r\n' % file_path)
if file_path in self._auto_hier_block_generate_chain:
Messages.send(' >>> Warning: cyclic hier_block dependency\n')
return False
self._auto_hier_block_generate_chain.add(file_path)
try:
flow_graph = self.get_new_flow_graph()
flow_graph.grc_file_path = file_path
            # Other, nested hier_blocks might be auto-loaded here
flow_graph.import_data(self.parse_flow_graph(file_path))
flow_graph.rewrite()
flow_graph.validate()
if not flow_graph.is_valid():
raise Exception('Flowgraph invalid')
if not flow_graph.get_option('generate_options').startswith('hb'):
raise Exception('Not a hier block')
except Exception as e:
Messages.send('>>> Load Error: {}: {}\n'.format(file_path, str(e)))
return False
finally:
self._auto_hier_block_generate_chain.discard(file_path)
Messages.set_indent(len(self._auto_hier_block_generate_chain))
try:
Messages.send('>>> Generating: {}\n'.format(file_path))
generator = self.Generator(flow_graph, file_path)
generator.write()
except Exception as e:
Messages.send('>>> Generate Error: {}: {}\n'.format(file_path, str(e)))
return False
self.load_block_xml(generator.get_file_path_xml())
return True
def build_block_library(self):
"""load the blocks and block tree from the search paths"""
self._docstring_extractor.start()
# Reset
self.blocks.clear()
self._blocks_n.clear()
self._block_categories.clear()
self.domains.clear()
        self.connection_templates.clear()
ParseXML.xml_failures.clear()
# Try to parse and load blocks
        for xml_file in self.iter_xml_files():
try:
if xml_file.endswith("block_tree.xml"):
self.load_category_tree_xml(xml_file)
elif xml_file.endswith('domain.xml'):
self.load_domain_xml(xml_file)
else:
self.load_block_xml(xml_file)
except ParseXML.XMLSyntaxError as e:
# print >> sys.stderr, 'Warning: Block validation failed:\n\t%s\n\tIgnoring: %s' % (e, xml_file)
pass
except Exception as e:
print >> sys.stderr, 'Warning: XML parsing failed:\n\t%r\n\tIgnoring: %s' % (e, xml_file)
# Add blocks to block tree
for key, block in self.blocks.iteritems():
category = self._block_categories.get(key, block.category)
# Blocks with empty categories are hidden
if not category:
continue
root = category[0]
if root.startswith('[') and root.endswith(']'):
category[0] = root[1:-1]
else:
category.insert(0, Constants.DEFAULT_BLOCK_MODULE_NAME)
block.category = category
self._docstring_extractor.finish()
# self._docstring_extractor.wait()
def iter_xml_files(self):
"""Iterator for block descriptions and category trees"""
for block_path in self.config.block_paths:
if os.path.isfile(block_path):
yield block_path
elif os.path.isdir(block_path):
for dirpath, dirnames, filenames in os.walk(block_path):
for filename in sorted(filter(lambda f: f.endswith('.xml'), filenames)):
yield os.path.join(dirpath, filename)
def load_block_xml(self, xml_file):
"""Load block description from xml file"""
# Validate and import
ParseXML.validate_dtd(xml_file, Constants.BLOCK_DTD)
n = ParseXML.from_file(xml_file).find('block')
n['block_wrapper_path'] = xml_file # inject block wrapper path
# Get block instance and add it to the list of blocks
block = self.Block(self._flow_graph, n)
key = block.get_key()
if key in self.blocks:
print >> sys.stderr, 'Warning: Block with key "{}" already exists.\n\tIgnoring: {}'.format(key, xml_file)
else: # Store the block
self.blocks[key] = block
self._blocks_n[key] = n
self._docstring_extractor.query(
block.get_key(),
block.get_imports(raw=True),
block.get_make(raw=True)
)
def load_category_tree_xml(self, xml_file):
"""Validate and parse category tree file and add it to list"""
ParseXML.validate_dtd(xml_file, Constants.BLOCK_TREE_DTD)
xml = ParseXML.from_file(xml_file)
path = []
def load_category(cat_n):
path.append(cat_n.find('name').strip())
for block_key in cat_n.findall('block'):
if block_key not in sel
chipsecintel/chipsec | source/tool/chipsec/cfg/__init__.py | Python | gpl-2.0 | 894 | 0.020134
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
## \defgroup config Platform Configuration
# chipsec/cfg/\<platform\>.py - configuration for a specific \<platform\>
ArcherSys/ArcherSys | Lib/distutils/tests/test_archive_util.py | Python | mit | 35,333 | 0.002723
<<<<<<< HEAD
<<<<<<< HEAD
# -*- coding: utf-8 -*-
"""Tests for distutils.archive_util."""
import unittest
import os
import sys
import tarfile
from os.path import splitdrive
import warnings
from distutils import archive_util
from distutils.archive_util import (check_archive_formats, make_tarball,
make_zipfile, make_archive,
ARCHIVE_FORMATS)
from distutils.spawn import find_executable, spawn
from distutils.tests import support
from test.support import check_warnings, run_unittest, patch
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
try:
import zlib
ZLIB_SUPPORT = True
except ImportError:
ZLIB_SUPPORT = False
def can_fs_encode(filename):
"""
Return True if the filename can be saved in the file system.
"""
if os.path.supports_unicode_filenames:
return True
try:
filename.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
return False
return True
class ArchiveUtilTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_make_tarball(self):
self._make_tarball('archive')
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
@unittest.skipUnless(can_fs_encode('årchiv'),
'File system cannot handle this filename')
def test_make_tarball_latin1(self):
"""
Mirror test_make_tarball, except filename contains latin characters.
"""
self._make_tarball('årchiv') # note this isn't a real word
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
@unittest.skipUnless(can_fs_encode('のアーカイブ'),
'File system cannot handle this filename')
def test_make_tarball_extended(self):
"""
Mirror test_make_tarball, except filename contains extended
characters outside the latin charset.
"""
self._make_tarball('のアーカイブ') # japanese for archive
def _make_tarball(self, target_name):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
"source and target should be on same drive")
base_name = os.path.join(tmpdir2, target_name)
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
        # trying an uncompressed one
        base_name = os.path.join(tmpdir2, target_name)
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(find_executable('tar') and find_executable('gzip')
and ZLIB_SUPPORT,
'Need the tar, gzip and zlib command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(find_executable('compress'),
'The compress program is required')
def test_compress_deprecated(self):
tmpdir, tmpdir2, base_name = self._create_files()
# using compress and testing the PendingDeprecationWarning
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress')
finally:
os.chdir(old_dir)
tarball = base_name + '.tar.Z'
self.assertTrue(os.path.exists(tarball))
self.assertEqual(len(w.warnings), 1)
# same test with dry_run
os.remove(tarball)
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with check_warnings() as w:
warnings.simplefilter("always")
make_tarball(base_name, 'dist', compress='compress',
dry_run=True)
finally:
os.chdir(old_dir)
self.assertFalse(os.path.exists(tarball))
self.assertEqual(len(w.warnings), 1)
@unittest.skipUnless(ZIP_SUPPORT and ZLIB_SUPPORT,
'Need zip and zlib support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile_no_zlib(self):
patch(self, archive_util.zipfile, 'zlib', None) # force zlib ImportError
called = []
zipfile_class = zipfile.ZipFile
def fake_zipfile(*a, **kw):
if
unibet/unbound-ec2 | tests/unit/test_server.py | Python | isc | 6,080 | 0.002138
from tests import unittest
from tests import mock
from unbound_ec2 import server
from tests import attrs
class TestServer(server.Server):
HANDLE_FORWARD_RESULT = 'dummy_handle_forward'
HANDLE_PASS_RESULT = True
DNSMSG = mock.MagicMock()
def handle_request(self, _id, event, qstate, qdata, request_type):
return self.HANDLE_FORWARD_RESULT
def new_dns_msg(self, qname):
return self.DNSMSG
class TestAbstractServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 'bogus_ttl'
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, self.forwarded_zones)
def tearDown(self):
self.srv = None
    def test_operate_event_new(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
        qdata = mock.MagicMock()
qstate.qinfo.qname_str = "fqdn.not-bogus.tld"
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
def test_operate_event_pass(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_PASS']
qstate = mock.MagicMock()
qdata = mock.MagicMock()
qstate.qinfo.qname_str = "fqdn.not-bogus.tld"
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
def test_operate_event_moddone(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_MODDONE']
qstate = mock.MagicMock()
qdata = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED'])
def test_operate_forward(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT)
qstate.qinfo.qtype = attrs['RR_TYPE_ANY']
self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_FORWARD_RESULT)
def test_forwarded_zones(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
forwarded_zones = '.subdomain%s' % self.zone
self.srv2 = TestServer(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order, forwarded_zones)
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s' % self.forwarded_zones
qdata = mock.MagicMock()
self.assertEqual(self.srv.operate(id, event, qstate, qdata), TestServer.HANDLE_PASS_RESULT)
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_WAIT_MODULE'])
class TestAuthoritativeServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 'bogus_ttl'
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = server.Authoritative(self.zone, self.reverse_zone, self.ttl, lookup_mock, self.ip_order,
self.forwarded_zones)
def tearDown(self):
self.srv = None
def test_handle_forward(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
server.DNSMessage = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
def test_handle_empty(self):
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_TXT']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
server.DNSMessage = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
class TestCachingServer(unittest.TestCase):
def setUp(self):
server.log_info = mock.Mock()
self.lookup_mock = mock.MagicMock()
self.zone = '.bogus.tld'
self.reverse_zone = '127.in-addr.arpa'
self.ttl = 88888881
self.ip_order = 'bogus_ip_order'
self.forwarded_zones = ''
self.srv = server.Caching(self.zone, self.reverse_zone, self.ttl, self.lookup_mock, self.ip_order,
self.forwarded_zones)
def tearDown(self):
self.srv = None
def test_handle_forward(self):
server.storeQueryInCache = mock.Mock()
server.DNSMessage = mock.MagicMock()
instances_mock = mock.MagicMock()
instances_mock.tags = {'Address': 'bogus_ip_address'}
self.lookup_mock.lookup.return_value = [instances_mock]
id = 'bogus_id'
event = attrs['MODULE_EVENT_NEW']
qstate = mock.MagicMock()
qstate.qinfo.qtype = attrs['RR_TYPE_A']
qstate.qinfo.qname_str = 'bogus-name%s.' % self.zone
qdata = mock.MagicMock()
self.assertTrue(self.srv.operate(id, event, qstate, qdata))
qstate.ext_state.__setitem__.assert_called_with(id, attrs['MODULE_FINISHED'])
self.assertEqual(qstate.return_msg.rep.security, 2)
server.DNSMessage.return_value.answer.append.assert_called_with(
'%s %d IN A %s' % (qstate.qinfo.qname_str, self.ttl, 'bogus_ip_address'))
waddedMeat/asteroids-ish | Asteroids/MovingObject.py | Python | mit | 242 | 0
__author__ = 'jmoran'
from Asteroids import Object
class MovingObject(Object):
def __init__(self, window, game, init_point, slope):
Object.__init__(self, window, game)
        self.point = init_point
self.slope = slope
last-one/tools | caffe/result/celeba_multilabel_acc.py | Python | bsd-2-clause | 857 | 0.002334
import os
import numpy as np
import sys
label_file = open('/home/hypan/data/celebA/test.txt', 'r')
lines = label_file.readlines()
label_file.close()
acc = np.zeros(40)
cou = 0
for line in lines:
    info = line.strip('\r\n').split()
name = info[0].split('.')[0]
gt_labels = info[1: ]
feat_path = '/home/hypan/data/celebA/result/' + sys.argv[1] + '/test_feature/' + name + '.npy'
    if not os.path.exists(feat_path):
        print '{} has no predicted feature.'.format(name)
        continue  # otherwise np.load below would fail on the missing file
    pd_labels = np.load(feat_path)
cnt = len(pd_labels)
for i in range(cnt):
gt_label = int(gt_labels[i])
pd_label = pd_labels[i]
if pd_label >= 0:
            pd_label = 1
else:
pd_label = -1
if gt_label == pd_label:
acc[i] += 1
cou += 1
for i in range(40):
print i, acc[i] * 1.0 / cou
beeftornado/sentry | src/sentry/models/projectkey.py | Python | bsd-3-clause | 8,014 | 0.001497
from __future__ import absolute_import, print_function
import petname
import six
import re
from bitfield import BitField
from uuid import uuid4
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from six.moves.urllib.parse import urlparse
from sentry import options, features
from sentry.db.models import (
Model,
BaseManager,
BoundedPositiveIntegerField,
FlexibleForeignKey,
JSONField,
sane_repr,
)
from sentry.tasks.relay import schedule_update_config_cache
_uuid4_re = re.compile(r"^[a-f0-9]{32}$")
# TODO(dcramer): pull in enum library
class ProjectKeyStatus(object):
ACTIVE = 0
INACTIVE = 1
class ProjectKeyManager(BaseManager):
def post_save(self, instance, **kwargs):
schedule_update_config_cache(
project_id=instance.project_id, generate=True, update_reason="projectkey.post_save"
)
def post_delete(self, instance, **kwargs):
schedule_update_config_cache(
project_id=instance.project_id, generate=True, update_reason="projectkey.post_delete"
)
class ProjectKey(Model):
__core__ = True
project = FlexibleForeignKey("sentry.Project", related_name="key_set")
label = models.CharField(max_length=64, blank=True, null=True)
public_key = models.CharField(max_length=32, unique=True, null=True)
secret_key = models.CharField(max_length=32, unique=True, null=True)
roles = BitField(
flags=(
# access to post events to the store endpoint
(u"store", u"Event API access"),
# read/write access to rest API
(u"api", u"Web API access"),
),
default=["store"],
)
status = BoundedPositiveIntegerField(
default=0,
choices=(
(ProjectKeyStatus.ACTIVE, _("Active")),
(ProjectKeyStatus.INACTIVE, _("Inactive")),
),
db_index=True,
)
date_added = models.DateTimeField(default=timezone.now, null=True)
rate_limit_count = BoundedPositiveIntegerField(null=True)
rate_limit_window = BoundedPositiveIntegerField(null=True)
objects = ProjectKeyManager(
cache_fields=("public_key", "secret_key"),
# store projectkeys in memcached for longer than other models,
# specifically to make the relay_projectconfig endpoint faster.
cache_ttl=60 * 30,
)
data = JSONField()
# support legacy project keys in API
scopes = (
"project:read",
"project:write",
"project:admin",
"project:releases",
"event:read",
"event:write",
"event:admin",
)
class Meta:
app_label = "sentry"
db_table = "sentry_projectkey"
__repr__ = sane_repr("project_id", "public_key")
def __unicode__(self):
return six.text_type(self.public_key)
@classmethod
def generate_api_key(cls):
return uuid4().hex
@classmethod
def looks_like_api_key(cls, key):
return bool(_uuid4_re.match(key))
@classmethod
def from_dsn(cls, dsn):
urlparts = urlparse(dsn)
public_key = urlparts.username
project_id = urlparts.path.rsplit("/", 1)[-1]
try:
return ProjectKey.objects.get(public_key=public_key, project=project_id)
except ValueError:
# ValueError would come from a non-integer project_id,
# which is obviously a DoesNotExist. We catch and rethrow this
# so anything downstream expecting DoesNotExist works fine
raise ProjectKey.DoesNotExist("ProjectKey matching query does not exist.")
@classmethod
def get_default(cls, project):
return cls.objects.filter(
project=project,
roles=models.F("roles").bitor(cls.roles.store),
status=ProjectKeyStatus.ACTIVE,
).first()
@property
def is_active(self):
return self.status == ProjectKeyStatus.ACTIVE
@property
def rate_limit(self):
if self.rate_limit_count and self.rate_limit_window:
return (self.rate_limit_count, self.rate_limit_window)
return (0, 0)
def save(self, *args, **kwargs):
if not self.public_key:
self.public_key = ProjectKey.generate_api_key()
if not self.secret_key:
self.secret_key = ProjectKey.generate_api_key()
if not self.label:
self.label = petname.Generate(2, " ", letters=10).title()
super(ProjectKey, self).save(*args, **kwargs)
def get_dsn(self, domain=None, secure=True, public=False):
urlparts = urlparse(self.get_endpoint(public=public))
if not public:
key = "%s:%s" % (self.public_key, self.secret_key)
else:
key = self.public_key
# If we do not have a scheme or domain/hostname, dsn is never valid
if not urlparts.netloc or not urlparts.scheme:
return ""
return "%s://%s@%s/%s" % (
urlparts.scheme,
key,
urlparts.netloc + urlparts.path,
self.project_id,
)
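    # Added sketch (not part of the original file): shape of the DSN built
    # above, with placeholder values.
    # get_dsn(public=False) -> 'https://<public_key>:<secret_key>@sentry.example.com/<project_id>'
    # get_dsn(public=True)  -> 'https://<public_key>@sentry.example.com/<project_id>'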
@property
def organization_id(self):
return self.project.organization_id
@property
def organization(self):
        return self.project.organization
@property
def dsn_private(self):
return self.get_dsn(public=False)
@property
def dsn_public(self):
return self.get_dsn(public=True)
@property
    def csp_endpoint(self):
endpoint = self.get_endpoint()
return "%s/api/%s/csp-report/?sentry_key=%s" % (endpoint, self.project_id, self.public_key)
@property
def security_endpoint(self):
endpoint = self.get_endpoint()
return "%s/api/%s/security/?sentry_key=%s" % (endpoint, self.project_id, self.public_key)
@property
def minidump_endpoint(self):
endpoint = self.get_endpoint()
return "%s/api/%s/minidump/?sentry_key=%s" % (endpoint, self.project_id, self.public_key)
@property
def unreal_endpoint(self):
return "%s/api/%s/unreal/%s/" % (self.get_endpoint(), self.project_id, self.public_key)
@property
def js_sdk_loader_cdn_url(self):
if settings.JS_SDK_LOADER_CDN_URL:
return "%s%s.min.js" % (settings.JS_SDK_LOADER_CDN_URL, self.public_key)
else:
endpoint = self.get_endpoint()
return "%s%s" % (
endpoint,
reverse("sentry-js-sdk-loader", args=[self.public_key, ".min"]),
)
def get_endpoint(self, public=True):
if public:
endpoint = settings.SENTRY_PUBLIC_ENDPOINT or settings.SENTRY_ENDPOINT
else:
endpoint = settings.SENTRY_ENDPOINT
if not endpoint:
endpoint = options.get("system.url-prefix")
if features.has("organizations:org-subdomains", self.project.organization):
urlparts = urlparse(endpoint)
if urlparts.scheme and urlparts.netloc:
endpoint = "%s://%s.%s%s" % (
urlparts.scheme,
settings.SENTRY_ORG_SUBDOMAIN_TEMPLATE.format(
organization_id=self.project.organization_id
),
urlparts.netloc,
urlparts.path,
)
return endpoint
def get_allowed_origins(self):
from sentry.utils.http import get_origins
return get_origins(self.project)
def get_audit_log_data(self):
return {
"label": self.label,
"public_key": self.public_key,
"secret_key": self.secret_key,
"roles": int(self.roles),
"status": self.status,
"rate_limit_count": self.rate_limit_count,
"rate_limit_window": self.rate_limit_window,
}
def get_scopes(self):
return self.scopes
eveliotc/gradleplease-workflow | common.py | Python | apache-2.0 | 1,475 | 0.013559
# -*- coding: utf-8 -*-
__author__ = 'eveliotc'
__license__ = 'See LICENSE'
import alfred
from alfred import Item
import sys
from subprocess import Popen, PIPE
def json_to_obj(x):
if isinstance(x, dict):
return type('X', (), {k: json_to_obj(v) for k, v in x.iteritems()})
else:
return x
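# Added sketch (not part of the original file): json_to_obj turns a dict into
# an attribute holder, recursing into nested dicts (Python 2, hence iteritems):
# json_to_obj({'a': 'picasso', 'g': 'com.squareup.picasso'}).a -> 'picasso'
# Non-dict values are returned unchanged.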
def join_query(dic):
return ' '.join(dic)
def le_result(r, exit = True):
alfred.write(r)
if exit:
sys.exit()
def xml_result(r, exit = True):
if len(r) < 1:
empty_result(exit)
else:
le_result(alfred.xml(r), exit)
def empty_result(exit = True):
empty = Item(
attributes={'uid': alfred.uid('empty'), 'arg': ''},
title='Gradle Please',
subtitle=u':( Nothing found.',
icon=u'icon.png')
xml_result([empty], exit)
def apple_script(scpt, args=[]):
p = Popen(['osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(scpt)
return stdout
def tell_alfred(what):
apple_script('tell application "Alfred 2" to search "%s"' % what)
# TODO refactor gp.py to use this instead of dynamic obj
class Pom(object):
a = ''
g = ''
p = ''
latestVersion = ''
source = ''
@property
def id(self):
return self.g + ':' + self.a
def __repr__(self):
#notjson #justdebugginthings
return '{id:%s a:%s g:%s p:%s v:%s}' % (self.id, self.a, self.g, self.p, self.latestVersion)
Qwaz/solved-hacking-problem | SharifCTF/2016/RSA-Keygen/generate-key.py | Python | gpl-2.0 | 2,346 | 0.020887
from random import randrange
import fractions
def get_primes(n):
numbers = set(range(n, 1, -1))
primes = []
while numbers:
p = numbers.pop()
primes.append(p)
numbers.difference_update(set(range(p*2, n+1, p)))
return primes
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
def modinv(a, m):
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
def miller_rabin(n, k):
r, s = 0, n - 1
while s % 2 == 0:
r += 1
s //= 2
for _ in range(k):
a = randrange(2, n - 1)
x = pow(a, s, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True
### main #################
primes = get_primes(443)
primes.sort()
del primes[0]
#print primes
pi = 1;
for x in primes:
pi *= x
print "pi=%X" % pi
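# Added note on the construction this script implements: since pi divides
# kp*pi*2**400, each candidate p = kp*pi*2**400 + tp satisfies p % pi == tp % pi,
# so requiring gcd(tp, pi) == 1 keeps p coprime to every odd prime up to 443.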
while True:
    kp = randrange(1, 2**12) + 2**12 + 2**13 + 2**14 + \
         2**15 + 2**16 + 2**17 + 2**18 + 2**19
print "kp=%X" % kp
tp = 0
while fractions.gcd(tp, pi) != 1:
print "trying..."
tp = randrange(1, 2**399);
print "tp=%X" % tp
p = kp * pi * 2**400 + tp
print "p=%X" % p
print "bitlength(p)=", len(bin(p))-2
if miller_rabin(p, 40) == True:
break
while True:
kq = randrange(1, 2**12) + 2**12 + 2**13 + 2**14 + \
2**15 + 2**16 + 2**17 + 2**18 + 2**19
print "kq=%X" % kq
tq = 0
while fractions.gcd(tq, pi) != 1:
print "trying..."
tq = randrange(1, 2**399);
print "tq=%X" % tq
q = kq * pi * 2**400 + tq
print "q=%X" % q
print "bitlength(q)=", len(bin(q))-2
if miller_rabin(q, 40) == True:
break
print "p=%X" % p
print "q=%X" % q
n = p * q
print "n=%X" % n
print "bitlength(n)=", len(bin(n))-2
e = 2**16 + 1
print "e=%X" % e
#print "bitlength(e)=", len(bin(e))-2
d = modinv(e, (p-1)*(q-1))
print "d=%X" % d
#print "bitlength(d)=", len(bin(d))-2
m = 12354178254918274687189741234123412398461982374619827346981756309845712384198076
print "m=%X" % m
print "bitlength(m)=", len(bin(m))-2
c = pow(m, e, n)
print "c=%X" % c
print "bitlength(c)=", len(bin(c))-2
m2 = pow(c, d, n)
print "m2=%X" % m2
print "bitlength(m2)=", len(bin(m2))-2
Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/controllers/panel.py | Python | gpl-3.0 | 271 | 0
from django.utils.translation import ugettext_lazy as _
from crystal_dashboard.dashboards.crystal import dashboard
import horizon
class Controllers(horizon.Panel):
name = _("Controllers")
slug = 'controllers'
dashboard.CrystalController.register(Controllers)
XKNX/xknx | xknx/io/transport/ip_transport.py | Python | mit | 3,449 | 0.00087
"""
Abstract base for specific IP transports (TCP or UDP).
* It starts and stops a socket
* It handles callbacks for incoming frame service types
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import asyncio
import logging
from typing import Callable, cast
from xknx.exceptions import CommunicationError
from xknx.knxip import HPAI, KNXIPFrame, KNXIPServiceType
TransportCallbackType = Callable[[KNXIPFrame, HPAI, "KNXIPTransport"], None]
knx_logger = logging.getLogger("xknx.knx")
class KNXIPTransport(ABC):
"""Abstract base class for KNX/IP transports."""
callbacks: list[KNXIPTransport.Callback]
local_hpai: HPAI
remote_addr: tuple[str, int]
transport: asyncio.BaseTransport | None
class Callback:
"""Callback class for handling callbacks for different 'KNX service types' of received packets."""
def __init__(
self,
callback: TransportCallbackType,
service_types: list[KNXIPServiceType] | None = None,
):
"""Initialize Callback class."""
self.callback = callback
self.service_types = service_types or []
        def has_service(self, service_type: KNXIPServiceType) -> bool:
"""Test if callback is listening for given service type."""
return not self.service_types or service_type in self.service_types
def register_callback(
self,
callback: TransportCallbackType,
service_types: list[KNXIPServiceType] | None = None,
) -> KNXIPTransport.Callback:
"""Register callback."""
if service_types is None:
service_types = []
callb = KNXIPTransport.Callback(callback, service_types)
self.callbacks.append(callb)
return callb
def unregister_callback(self, callb: KNXIPTransport.Callback) -> None:
"""Unregister callback."""
self.callbacks.remove(callb)
def handle_knxipframe(self, knxipframe: KNXIPFrame, source: HPAI) -> None:
"""Handle KNXIP Frame and call all callbacks matching the service type ident."""
handled = False
for callback in self.callbacks:
if callback.has_service(knxipframe.header.service_type_ident):
callback.callback(knxipframe, source, self)
handled = True
if not handled:
knx_logger.debug(
"Unhandled: %s from: %s",
knxipframe.header.service_type_ident,
source,
)
@abstractmethod
async def connect(self) -> None:
"""Connect transport."""
@abstractmethod
def send(self, knxipframe: KNXIPFrame, addr: tuple[str, int] | None = None) -> None:
"""Send KNXIPFrame via transport."""
def getsockname(self) -> tuple[str, int]:
"""Return socket IP and port."""
if self.transport is None:
raise CommunicationError(
"No transport defined. Socket information not resolveable"
)
return cast(tuple[str, int], self.transport.get_extra_info("sockname"))
def getremote(self) -> str | None:
"""Return peername."""
return (
self.transport.get_extra_info("peername")
if self.transport is not None
else None
)
def stop(self) -> None:
"""Stop socket."""
if self.transport is not None:
self.transport.close()
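# A minimal usage sketch (added; the transport instance and handler are hypothetical,
# KNXIPTransport itself is abstract):
#
#     def on_frame(frame: KNXIPFrame, source: HPAI, transport: KNXIPTransport) -> None:
#         print(frame)
#
#     cb = transport.register_callback(on_frame, [KNXIPServiceType.TUNNELLING_REQUEST])
#     ...  # matching frames now reach on_frame via handle_knxipframe
#     transport.unregister_callback(cb)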
meltwater/proxymatic | src/proxymatic/services.py | Python | mit | 5,559 | 0.002159
import re
from copy import copy
from random import randint
class Server(object):
def __init__(self, ip, port, hostname):
self.ip = ip
self.port = port
self.hostname = hostname
self.weight = 500
self.maxconn = None
def __cmp__(self, other):
if not isinstance(other, Server):
return -1
return cmp((self.ip, self.port, self.weight, self.maxconn), (other.ip, other.port, other.weight, other.maxconn))
def __hash__(self):
return hash((self.ip, self.port, self.weight, self.maxconn))
def __str__(self):
extra = []
if self.weight != 500:
extra.append("weight=%d" % self.weight)
if self.maxconn:
extra.append("maxconn=%d" % self.maxconn)
result = '%s:%s' % (self.ip, self.port)
if extra:
result += '(%s)' % ','.join(extra)
return result
def __repr__(self):
return 'Server(%s, %s, %s, %s)' % (repr(self.ip), repr(self.port), repr(self.weight), repr(self.maxconn))
def clone(self):
return copy(self)
def setWeight(self, weight):
clone = self.clone()
clone.weight = weight
return clone
def setMaxconn(self, maxconn):
clone = self.clone()
clone.maxconn = maxconn
return clone
class Service(object):
def __init__(self, name, source, port, protocol, application='binary', healthcheck=False, healthcheckurl='/', timeoutclient=None, timeoutserver=None):
self.name = name
self.source = source
self.port = port
self.protocol = protocol
self.application = application
self.healthcheck = healthcheck
self.healthcheckurl = healthcheckurl
self.timeoutclient = timeoutclient
self.timeoutserver = timeoutserver
self.servers = set()
self.slots = []
# Check if there's a port override
match = re.search('.@(\d+)$', self.name)
if match:
self.name = self.name[0:-(len(match.group(1))+1)]
self.port = int(match.group(1))
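        # Added example (hypothetical service name): 'myservice@8080' yields
        # name='myservice' and port=8080; names without '@<digits>' are untouched.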
def clone(self):
clone = Service(self.name, self.source, self.port, self.protocol, self.application, self.healthcheck, self.healthcheckurl, self.timeoutclient,
self.timeoutserver)
clone.servers = set(self.servers)
clone.slots = list(self.slots)
return clone
def __str__(self):
# Represent misc. service attributes as k=v pairs, but only if their value is not None
service_attributes = ['timeoutclient', 'timeoutserver']
service_options = ['%s=%s' % (attr, getattr(self, attr)) for attr in service_attributes if getattr(self, attr) is not None]
        # Only use healthcheckurl if healthcheck has a meaningful value
        if self.healthcheck:
            service_options.append('healthcheck=%s' % self.healthcheck)
service_options.append('healthcheckurl=%s' % self.healthcheckurl)
return '%s:%s/%s%s -> [%s]' % (
self.name, self.port, self.application if self.application != 'binary' else self.protocol,
'(%s)' % ','.join(service_options) if service_options else '',
', '.join([str(s) for s in sorted(self.servers)]))
def __repr__(self):
return 'Service(%s, %s, %s, %s, %s)' % (repr(self.name), repr(self.port), repr(self.protocol), repr(self.application), repr(sorted(self.servers)))
def __cmp__(self, other):
if not isinstance(other, Service):
return -1
return cmp((self.name, self.port, self.protocol, self.servers), (other.name, other.port, other.protocol, other.servers))
def __hash__(self):
return hash((self.name, self.port, self.protocol, self.servers))
@property
def portname(self):
return re.sub('[^a-zA-Z0-9]', '_', str(self.port))
@property
def marathonpath(self):
ret = ''
for s in self.name.split('.'):
            if ret != '':
ret = s + '.' + ret
else:
ret = s
return ret
def update(self, other):
"""
        Returns a new updated Service object
"""
clone = self.clone()
clone.name = other.name
clone.source = other.source
clone.port = other.port
clone.protocol = other.protocol
clone.timeoutclient = other.timeoutclient
clone.timeoutserver = other.timeoutserver
for server in clone.servers - other.servers:
clone._remove(server)
for server in other.servers - clone.servers:
clone._add(server)
return clone
def addServer(self, server):
clone = self.clone()
clone._add(server)
return clone
def setApplication(self, application):
clone = self.clone()
clone.application = application
return clone
def _add(self, server):
self.servers.add(server)
        # Keep servers in the same index when they're added
for i in range(len(self.slots)):
if not self.slots[i]:
self.slots[i] = server
return
# Not present in list, just insert randomly
self.slots.insert(randint(0, len(self.slots)), server)
def _remove(self, server):
self.servers.remove(server)
        # Remove the server from its slot
for i in range(len(self.slots)):
if self.slots[i] == server:
del self.slots[i]
return
raise KeyError(str(server))
domchoi/fluentPython | dataModel/vector.py | Python | gpl-3.0 | 743 | 0.014805
'''
Created on Mar 28, 2017
@author: J001684
'''
from math import hypot
class Vector:
'''
classdocs
'''
def __init__(self, x=0, y=0):
'''
Constructor
'''
self.x = x
self.y = y
    def __repr__(self):
        return 'Vector({x}, {y})'.format(x=self.x, y=self.y)
    def __abs__(self):
        return hypot(self.x, self.y)
    def __bool__(self):
        return bool(abs(self))
    def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Vector(x, y)
    def __mul__(self, scalar):
        return Vector(self.x * scalar, self.y * scalar)
v1 = Vector(2, 4)
print(v1)
serbyy/MozDef | alerts/unauth_ssh_pyes.py | Python | mpl-2.0 | 2,867 | 0.002093
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
#
# Contributors:
# Aaron Meihm <ameihm@mozilla.com>
from lib.alerttask import AlertTask
import pyes
import json
import re
from configlib import getConfig, OptionParser
# Note: this plugin requires a configuration file (unauth_ssh_pyes.conf)
# to exist in the same directory as the plugin.
#
# It should contain content such as:
# [options]
# hostfilter <ES compatible regexp>
# user username
# skiphosts 1.2.3.4 2.3.4.5
class AlertUnauthSSH(AlertTask):
def main(self):
date_timedelta = dict(minutes=30)
self.config_file = './unauth_ssh_pyes.conf'
self.config = None
self.initConfiguration()
must = [
pyes.TermFilter('_type', 'event'),
pyes.TermFilter('category', 'syslog'),
            pyes.TermFilter('details.program', 'sshd'),
pyes.QueryFilter(pyes.QueryStringQuery('details.hostname: /{}/'.format(self.config.hostfilter))),
pyes.QueryFilter(pyes.MatchQuery('summary', 'Accepted publickey {}'.format(self.config.user), operator='and'))
]
must_not = []
for x in self.config.skiphosts:
must_not.append(pyes.QueryFilter(pyes.MatchQuery('summary', x)))
self.filtersManual(date_timedelta, must=must, must_not=must_not)
self.searchEventsSimple()
self.walkEvents()
def initConfiguration(self):
myparser = OptionParser()
(self.config, args) = myparser.parse_args([])
self.config.hostfilter = getConfig('hostfilter', '', self.config_file)
self.config.user = getConfig('user', '', self.config_file)
self.config.skiphosts = getConfig('skiphosts', '', self.config_file).split()
# Set alert properties
def onEvent(self, event):
category = 'unauthaccess'
tags = ['ssh']
severity = 'WARNING'
targethost = 'unknown'
sourceipaddress = 'unknown'
x = event['_source']
if 'details' in x:
if 'hostname' in x['details']:
targethost = x['details']['hostname']
if 'sourceipaddress' in x['details']:
sourceipaddress = x['details']['sourceipaddress']
targetuser = 'unknown'
expr = re.compile('Accepted publickey for ([A-Za-z0-9]+) from')
m = expr.match(event['_source']['summary'])
groups = m.groups()
if len(groups) > 0:
targetuser = groups[0]
summary = 'Unauthorized SSH account usage by {0} on {1} user {2}'.format(sourceipaddress, targethost, targetuser)
return self.createAlertDict(summary, category, tags, [event], severity)
tcpcloud/contrail-controller | src/nodemgr/analytics_nodemgr/analytics_event_manager.py | Python | apache-2.0 | 2,984 | 0.008378
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from gevent import monkey
monkey.patch_all()
import os
import sys
import socket
import subprocess
import json
import time
import datetime
import platform
import gevent
import ConfigParser
from nodemgr.common.event_manager import EventManager
from pysandesh.sandesh_base import *
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames,\
Module2NodeType
from subprocess import Popen, PIPE
from nodemgr.common.sandesh.nodeinfo.ttypes import *
from nodemgr.common.sandesh.nodeinfo.cpuinfo.ttypes import *
from nodemgr.common.sandesh.nodeinfo.process_info.ttypes import *
from nodemgr.common.sandesh.nodeinfo.process_info.constants import *
from pysandesh.connection_info import ConnectionState
class AnalyticsEventManager(EventManager):
def __init__(self, rule_file, discovery_server,
discovery_port, collector_addr):
EventManager.__init__(
self, rule_file, discovery_server,
discovery_port, collector_addr, sandesh_global)
self.node_type = 'contrail-analytics'
self.table = "ObjectCollectorInfo"
self.module = Module.ANALYTICS_NODE_MGR
self.module_id = ModuleNames[self.module]
self.supervisor_serverurl = "unix:///var/run/supervisord_analytics.sock"
self.add_current_process()
node_type = Module2NodeType[self.module]
node_type_name = NodeTypeNames[node_type]
_disc = self.get_discovery_client()
sandesh_global.init_generator(
self.module_id, socket.gethostname(),
node_type_name, self.instance_id, self.collector_addr,
self.module_id, 8104, ['nodemgr.common.sandesh'], _disc)
sandesh_global.set_logging_params(enable_local_log=True)
ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
self.instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus, self.table)
self.send_system_cpu_info()
self.third_party_process_dict = {}
# end __init__
def process(self):
        if self.rule_file == '':
self.rule_file = "/etc/contrail/" + \
"supervisord_analytics_files/contrail-an
|
alytics.rules"
json_file = open(self.rule_file)
self.rules_data = json.load(json_file)
def send_process_state_db(self, group_names):
self.send_process_state_db_base(
group_names, ProcessInfo)
def send_nodemgr_process_status(self):
        self.send_nodemgr_process_status_base(
ProcessStateNames, ProcessState, ProcessStatus)
def get_node_third_party_process_dict(self):
return self.third_party_process_dict
def get_process_state(self, fail_status_bits):
return self.get_process_state_base(
fail_status_bits, ProcessStateNames, ProcessState)
ktan2020/legacy-automation | win/Lib/distutils/command/build_clib.py | Python | mit | 8,340 | 0.001439
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id$"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import DistutilsSetupError
from distutils.ccompiler import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib=', 'b',
"directory to build C/C++ libraries to"),
('build-temp=', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
`library` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
            if not isinstance(lib, tuple) or len(lib) != 2:
                raise DistutilsSetupError, \
                      "each element of 'libraries' must be a 2-tuple"
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if not isinstance(build_info, dict):
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
|
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
            include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
            self.compiler.create_static_lib(objects, lib_name,
                                            output_dir=self.build_clib,
                                            debug=self.debug)
pandastream/panda_client_python | panda/models.py | Python | mit | 5,728 | 0.005237
import json
import logging
from functools import wraps
logger = logging.getLogger(__name__)
class PandaError(Exception):
pass
def error_check(func):
@wraps(func)
def check(*args, **kwargs):
try:
res = func(*args, **kwargs)
if "error" in res:
logger.error(res["message"])
raise PandaError(res["message"])
except Exception as e:
logger.error(e)
raise
return res
return check
class Retriever(object):
def __init__(self, panda, model_type, path = None):
self.panda = panda
self.model_type = model_type
if path:
self.path = path
else:
self.path = model_type.path
class GroupRetriever(Retriever):
@error_check
def _all(self, **kwargs):
json_data = self.panda.get("{0}.json".format(self.path), kwargs)
return json.loads(json_data)
@error_check
def new(self, *args, **kwargs):
return self.model_type(self.panda, *args, **kwargs)
@error_check
def create(self, *args, **kwargs):
return self.new(*args, **kwargs).create(**kwargs)
@error_check
def find(self, val, **kwargs):
json_data = self.panda.get("{0}/{1}.json".format(self.path, val), **kwargs)
return self.model_type(self.panda, **json.loads(json_data))
def all(self, **kwargs):
return [self.model_type(self.panda, **json_attr) for json_attr in self._all(**kwargs)]
def where(self, pred, **kwargs):
return [self.model_type(self.panda, **json_attr) for json_attr in self._all(**kwargs) if pred(json_attr)]
class SingleRetriever(Retriever):
@error_check
def get(self, **kwargs):
json_data = self.panda.get("{0}.json".format(self.path), **kwargs)
return self.model_type(self.panda, json.loads(json_data))
@error_check
def post(self, **kwargs):
json_data = self.panda.post("{0}.json".format(self.path), **kwargs)
return self.model_type(self.panda, json.loads(json_data))
class PandaDict(dict):
def __init__(self, panda, *arg, **kwarg):
self.panda = panda
super(PandaDict, self).__init__(*arg, **kwarg)
def to_json(self, *args, **kwargs):
return json.dumps(self, *args, **kwargs)
class PandaModel(PandaDict):
def dup(self):
copy = self.copy()
if "id" in copy:
copy["id"]
return copy
def reload(self):
json_data = self.panda.get("{0}/{1}.json".format(self.path, self["id"]))
self.clear()
parsed = json.loads(json_data)
self.update(parsed)
@error_check
def create(self, **kwargs):
json_data = self.panda.post("{0}.json".format(self.path), kwargs)
return self.__class__(self.panda, json.loads(json_data))
@error_check
def delete(self, **kwargs):
json_data = self.panda.delete("{0}/{1}.json".format(self.path, self["id"]), kwargs)
return self.__class__(self.panda, json.loads(json_data))
class UpdatablePandaModel(PandaModel):
changed_values = {}
@error_check
def save(self):
put_path = "{0}/{1}.json".format(self.path, self["id"])
ret = type(self)(self.panda, json.loads(self.panda.put(put_path, self.changed_values)))
if "error" not in ret:
self.changed_values = {}
return ret
def __setitem__(self, key, val):
self.changed_values[key] = val
super(UpdatablePandaModel, self).__setitem__(key, val)
# http://stackoverflow.com/a/2588648/1542900
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
# http://stackoverflow.com/a/2588648/1542900
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
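# Added sketch of the change-tracking flow above (hypothetical field and instance):
#     profile['name'] = 'h264-hi'   # __setitem__ records the change in changed_values
#     profile.save()                # PUTs only changed_values, cleared again on success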
class Video(PandaModel):
path = "/videos"
def encodings(self):
return GroupRetriever(self.panda, Encoding, "/videos/{0}/encodings".format(self["id"])).all()
def metadata(self):
return SingleRetriever(self.panda, Metadata, "/videos/{0}/metadata".format(self["id"])).get()
class Cloud(UpdatablePandaModel):
path = "/clouds"
class Encoding(PandaModel):
path = "/encodings"
def video(self):
return SingleRetriever(self.panda, Video, "/videos/{0}".format(self["video_id"])).get()
def profile(self):
key = self["profile_name"] or self["profile_id"]
return SingleRetriever(self.panda, Video, "/profiles/{0}".format(key)).get()
def cancel(self):
return SingleRetriever(self.panda, PandaDict, "/encodin
|
gs/{0}/cancel.json".format(self["id"])).post()
def retry(self):
return SingleRetriever(self.panda, PandaDict, "/encodings/{0}/retry.json".format(self["id"])).post()
class Profile(UpdatablePandaModel):
path = "/profiles"
class Notifications(UpdatablePandaModel):
path = "/notifications"
@error_check
def save(self):
tmp = dict(self)
for event in tmp["events"]:
tmp["events"][event] = str(tmp["events"][event]).lower()
return Notifications(self.panda, json.loads(self.panda.put("/notifications.json", tmp)))
def delete(self):
raise AttributeError("Notification instance has no attribute 'delete'")
def reload(self):
json_data = self.panda.get("/notifications.json")
self.clear()
self.update(json.loads(json_data))
class Metadata(PandaDict):
pass
caseyrollins/osf.io | addons/wiki/migrations/0010_migrate_node_wiki_pages.py | Python | apache-2.0 | 23,077 | 0.003467
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-22 20:39
from __future__ import unicode_literals
import time
import logging
import progressbar
from django.db import connection, migrations
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from bulk_update.helper import bulk_update
from addons.wiki.models import WikiPage, WikiVersion
from osf.models import Comment, Guid
logger = logging.getLogger(__name__)
def reverse_func(state, schema):
"""
Reverses NodeWikiPage migration. Repoints guids back to each NodeWikiPage,
repoints comment_targets, comments_viewed_timestamps, and deletes all WikiVersions and WikiPages
"""
NodeWikiPage = state.get_model('addons_wiki', 'nodewikipage')
    AbstractNode = state.get_model('osf', 'AbstractNode')
nwp_content_type_id = ContentType.objects.get_for_model(NodeWikiPage).id
nodes = AbstractNode.objects.exclude(wiki_pages_versions={})
progress_bar = progressbar.ProgressBar(maxval=nodes.count() or 100).start()
for i, node in enumerate(nodes, 1):
progress_bar.update(i)
for wiki_key, version_list in node.wiki_pages_versions.items():
if version_list:
for index, version in enumerate(version_list):
nwp = NodeWikiPage.objects.filter(former_guid=version).include(None)[0]
# All NodeWikiPages associated with a certain wiki key on a node point to the same WikiPage.
wp = WikiPage.load(version)
guid = migrate_guid_referent(Guid.load(version), nwp, nwp_content_type_id)
guid.save()
nwp = guid.referent
# Moved only for last item in wiki_pages_versions array for every page_name, NWP->WP is a many-to-one mapping. NWP->WV is a one-to-one mapping.
move_comment_target(Guid.load(wp._id), nwp)
update_comments_viewed_timestamp(node, wp._id, nwp)
progress_bar.finish()
WikiVersion.objects.all().delete()
WikiPage.objects.all().delete()
logger.info('NodeWikiPages restored and WikiVersions and WikiPages removed.')
def move_comment_target(current_guid, desired_target):
"""
Move the comment's target from the current target to the desired target
Specifically for repointing WikiPage comments -> NodeWikiPage comments
"""
desired_target_guid_id = Guid.load(desired_target.former_guid).id
if Comment.objects.filter(Q(root_target=current_guid) | Q(target=current_guid)).exists():
Comment.objects.filter(root_target=current_guid).update(root_target_id=desired_target_guid_id)
Comment.objects.filter(target=current_guid).update(target_id=desired_target_guid_id)
return
def update_comments_viewed_timestamp(node, current_wiki_guid, desired_wiki_object):
"""Replace the current_wiki_object keys in the comments_viewed_timestamp dict with the desired wiki_object_id """
users_pending_save = []
# We iterate over .contributor_set instead of .contributors in order
# to take advantage of .include('contributor__user')
for contrib in node.contributor_set.all():
user = contrib.user
if user.comments_viewed_timestamp.get(current_wiki_guid, None):
timestamp = user.comments_viewed_timestamp[current_wiki_guid]
user.comments_viewed_timestamp[desired_wiki_object._id] = timestamp
del user.comments_viewed_timestamp[current_wiki_guid]
users_pending_save.append(user)
if users_pending_save:
bulk_update(users_pending_save, update_fields=['comments_viewed_timestamp'])
return users_pending_save
def migrate_guid_referent(guid, desired_referent, content_type_id):
"""
Point the guid towards the desired_referent.
Pointing the NodeWikiPage guid towards the WikiPage will still allow links to work.
"""
guid.content_type_id = content_type_id
guid.object_id = desired_referent.id
return guid
def migrate_node_wiki_pages(state, schema):
create_wiki_pages_sql(state, schema)
create_guids(state, schema)
create_wiki_versions_and_repoint_comments_sql(state, schema)
migrate_comments_viewed_timestamp_sql(state, schema)
migrate_guid_referent_sql(state, schema)
def create_wiki_pages_sql(state, schema):
NodeWikiPage = state.get_model('addons_wiki', 'nodewikipage')
then = time.time()
logger.info('Starting migration of WikiPages [SQL]:')
wikipage_content_type_id = ContentType.objects.get_for_model(WikiPage).id
nodewikipage_content_type_id = ContentType.objects.get_for_model(NodeWikiPage).id
with connection.cursor() as cursor:
cursor.execute(
"""
CREATE TEMPORARY TABLE temp_wikipages
(
node_id INTEGER,
user_id INTEGER,
page_name_key TEXT,
latest_page_name_guid TEXT,
first_page_name_guid TEXT,
page_name_display TEXT,
created TIMESTAMP,
modified TIMESTAMP
)
ON COMMIT DROP;
-- Flatten out the wiki_page_versions json keys
INSERT INTO temp_wikipages (node_id, page_name_key)
SELECT
oan.id AS node_id
, jsonb_object_keys(oan.wiki_pages_versions) as page_name_key
FROM osf_abstractnode AS oan;
-- Retrieve the latest guid for the json key
UPDATE temp_wikipages AS twp
SET
latest_page_name_guid = (
SELECT trim(v::text, '"')
FROM osf_abstractnode ioan
, jsonb_array_elements(oan.wiki_pages_versions->twp.page_name_key) WITH ORDINALITY v(v, rn)
WHERE ioan.id = oan.id
ORDER BY v.rn DESC
LIMIT 1
)
FROM osf_abstractnode AS oan
WHERE oan.id = twp.node_id;
-- Retrieve the first guid for the json key
UPDATE temp_wikipages AS twp
SET
first_page_name_guid = (
SELECT trim(v::text, '"')
FROM osf_abstractnode ioan
, jsonb_array_elements(oan.wiki_pages_versions->twp.page_name_key) WITH ORDINALITY v(v, rn)
WHERE ioan.id = oan.id
ORDER BY v.rn ASC
LIMIT 1
)
FROM osf_abstractnode AS oan
WHERE oan.id = twp.node_id;
-- Remove any json keys that reference empty arrays (bad data? e.g. abstract_node id=232092)
DELETE FROM temp_wikipages AS twp
WHERE twp.latest_page_name_guid IS NULL;
-- Retrieve page_name nodewikipage field for the latest wiki page guid
UPDATE temp_wikipages AS twp
SET
page_name_display = anwp.page_name
FROM osf_guid AS og INNER JOIN addons_wiki_nodewikipage AS anwp ON (og.object_id = anwp.id AND og.content_type_id = %s)
WHERE og._id = twp.latest_page_name_guid;
-- Retrieve user_id, created, and modified nodewikipage field for the first wiki page guid
UPDATE temp_wikipages AS twp
SET
user_id = anwp.user_id
, created = anwp.date
, modified = anwp.modified
FROM osf_guid AS og INNER JOIN addons_wiki_nodewikipage AS anwp ON (og.object_id = anwp.id AND og.content_type_id = %s)
WHERE og._id = twp.first_page_name_guid;
-- Populate the wikipage table
INSERT INTO addons_wiki_wikipage (node_id, user_id, content_type_pk, page_name, created, modified)
SELECT
twp.node_id
, twp.user_id
, %s
, twp.page_name_display
, twp.created
, twp.modified
FROM temp_wikipages AS twp;
""", [nodewikipage_content_type_id, nodewikipage_content_type_id, wikipage_content_type_id]
)
now = time.time()
logger.info('Finished migration of WikiPages [SQL]: {:.5} seconds'.format(now - then))
def create_guids(state, schema):
eli261/jumpserver | apps/audits/urls/api_urls.py | Python | gpl-2.0 | 319 | 0
# ~*~ coding: utf-8 ~*~
from __future__ import unicode_literals
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .. import api
app_name = "audits"
router = DefaultRouter()
router.register(r'ftp-log', api.FTPLogViewSet, 'ftp-log')
urlpatterns = [
]
urlpatterns += router.urls
keithhackbarth/clowder_python_client | tests.py | Python | gpl-2.0 | 6,224 | 0
# -*- coding: utf-8 -*-
import datetime
import unittest
import clowder
import mock
# import psutil
class BaseClowderTestCase(unittest.TestCase):
"""Base class for all clowder test cases."""
def assert_send_contains_data(self, send_mock, key, value):
"""Assert that the given send mock was called with the given key and
value pair.
:param send_mock: A mock
:type send_mock: mock.MagicMock
:param key: A key
:type key: hashable
:param value: The expected value
:type value: mixed
"""
self.assertIn(key, send_mock.call_args[0][0])
self.assertEqual(value, send_mock.call_args[0][0][key])
class TestCleanFrequency(unittest.TestCase):
def test_should_return_value_if_int_given(self):
self.assertEqual(100, clowder._clean_frequency(100))
def test_should_return_total_seconds_if_timedelta_given(self):
fixture = datetime.timedelta(hours=1)
self.assertEqual(
fixture.total_seconds(), clowder._clean_frequency(fixture)
)
def test_should_raise_error_if_any_other_type_value_given(self):
self.assertRaisesRegexp(
ValueError,
"Invalid frequency 'hello'",
clowder._clean_frequency,
"hello"
)
class TestValidateData(unittest.TestCase):
def test_should_succeed_if_only_valid_keys_given(self):
clowder._validate_data({
'name': 'my-test',
'url': clowder.CLOWDER_API_URL,
'value': 123,
'status': 1,
'frequency': 1098123098
})
def test_should_raise_error_if_invalid_data_given(self):
self.assertRaisesRegexp(
ValueError,
"Invalid data keys 'herp, derp'",
clowder._validate_data,
{'name': 'Hey', 'status': 1, 'herp': 123, 'derp': 456}
)
def test_should_raise_error_if_missing_keys(self):
self.assertRaisesRegexp(
ValueError,
"Missing keys 'name'",
clowder._validate_data,
{'value': 1}
)
class TestFail(BaseClowderTestCase):
def test_should_raise_error_if_status_given(self):
self.assertRaisesRegexp(
AttributeError,
"Status should not be provided to fail",
clowder.fail,
{'status': 'should fail'}
)
@mock.patch('clowder._send')
def test_should_send_value_provided_along(self, send):
clowder.fail({'name': 'Invalid stuff'})
send.assert_called_once()
self.assert_send_contains_data(send, 'name', 'Invalid stuff')
@mock.patch('clowder._send')
def test_should_send_status_of_negative_one(self, send):
clowder.fail({'value': "Invalid stuff"})
send.assert_called_once()
self.assert_send_contains_data(send, 'status', -1)
class TestOk(BaseClowderTestCase):
def test_should_raise_error_if_status_given(self):
self.assertRaisesRegexp(
AttributeError,
"Status should not be provided to ok",
clowder.ok,
{'name': 'Test', 'status': 'should fail'}
)
@mock.patch('clowder._send')
def test_should_send_value_provided_along(self, send):
clowder.ok({'value': 'Invalid stuff'})
send.assert_called_once()
self.assert_send_contains_data(send, 'value', 'Invalid stuff')
@mock.patch('clowder._send')
def test_should_send_status_of_one(self, send):
clowder.ok({'value': "Invalid stuff"})
send.assert_called_once()
self.assert_send_contains_data(send, 'status', 1)
class TestDelete(BaseClowderTestCase):
@mock.patch('clowder._send')
def test_should_use_correct_delete_url(self, send):
clowder.delete('test')
send.assert_called_once()
self.assert_send_contains_data(send, 'url', clowder.CLOWDER_DELETE_URL)
class TestSubmit(BaseClowderTestCase):
def test_should_raise_error_if_alert_not_given(self):
self.assertRaisesRegexp(
ValueError,
"Alert required",
clowder.submit,
name='Hello',
value=123
)
def test_should_raise_error_if_value_not_given(self):
self.assertRaisesRegexp(
ValueError,
"Value required",
clowder.submit,
name='Test',
alert=lambda x: (x > 10)
)
@mock.patch('clowder.fail')
def test_should_call_fail_if_predicate_returns_true(self, fail):
clowder.submit(alert=lambda x: x > 10, value=15)
fail.assert_called_once()
@mock.patch('clowder.ok')
def test_should_call_ok_if_predicate_returns_false(self, ok):
clowder.submit(alert=lambda x: x > 10, value=10)
ok.assert_called_once()
class TestSend(BaseClowderTestCase):
def setUp(self):
super(TestSend, self).setUp()
self.fixture = {'name': 'hello', 'status': 1}
@mock.patch('requests.post')
def test_should_use_default_clowder_api_url(self, post):
clowder._send(self.fixture)
post.assert_called_once()
args = post.call_args[0]
url = args[0]
self.assertEqual(url, clowder.CLOWDER_API_URL)
@mock.patch('requests.post')
def test_should_contain_provided_data(self, post):
clowder._send(self.fixture)
        post.assert_called_once()
kwargs = post.call_args[1]
self.assertIn('data', kwargs)
self.assertEqual(kwargs['data'], self.fixture)
def test_should_raise_error_if_invalid_data_given(self):
self.assertRaisesRegexp(
            ValueError,
"Invalid data keys 'herp'",
clowder._send,
{'name': 'Test', 'herp': 123}
)
def test_should_raise_error_if_missing_keys(self):
self.assertRaisesRegexp(
ValueError,
"Missing keys 'name'",
clowder._send,
{'value': 1}
)
# clowder.ok({
# 'name': 'CPU Percent',
# 'value': psutil.cpu_percent(interval=1),
# 'frequency': datetime.timedelta(minutes=0.5)
# })
# clowder.ok({
# 'name': 'Memory Utilization',
# 'value': psutil.phymem_usage().percent
# })
jctoledo/ligandneighbours | ligandneighbours.py | Python | mit | 10,769 | 0.023865
# Copyright (c) 2013 Jose Cruz-Toledo
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Command-line application for creating NTriples representation of a ligand neighbourhood of a pdb file.
This program takes as input a directory of pdb files and searches those structures for any residues found in a known dictionary of ligands:
(https://docs.google.com/spreadsheet/pub?key=0AnGgKfZdJasrdC00bUxHcVRXaFloSnJYb3VmYkwyVnc&single=true&gid=0&output=csv). If a ligand is found
in a structure, an NTriples file is generated that includes details about the neighbourhood members.
Usage:
$ python ligandneighbours.py -dir /path/to/loca/dir/with/pdb/files --radius 4.8 --out /path/to/output
"""
import os
import sys
import argparse
import urllib2
import csv
import re
import hashlib
import random
from Bio.PDB import *
from collections import defaultdict
#parser for command-line arguments
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-dir','--input_dir', help='a local direcory containing PDB structures (in the .pdb format) for which you wish to find the ligand neighbourhood', required=True)
parser.add_argument('-out', '--output_file', help='the file where the output will be stored as CSV', required=True)
parser.add_argument('--radius', nargs='?', const=5.0, type=float, default=5.0)
pdb_to_ligand_list_url = 'https://docs.google.com/spreadsheet/pub?key=0AnGgKfZdJasrdC00bUxHcVRXaFloSnJYb3VmYkwyVnc&single=true&gid=0&output=csv'
base_uri = 'http://bio2rdf.org'
rdfs = 'http://www.w3.org/2000/01/rdf-schema#'
rdf = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
def main(argv):
#parse the command-line flags.
flags = parser.parse_args(argv[1:])
local_dir = flags.input_dir
    output_dir = flags.output_file
radius = flags.radius
#fetch a list of all the pdb files in the input directory
    filepaths = fetchPdbFilePaths(local_dir)
#fetch the ligand list
ligands = fetchLigandList(pdb_to_ligand_list_url)
for fp in filepaths:
#get the file name and extension of the pdb file
fn, fe = os.path.splitext(fp)
pdbId = fn.rsplit('/')[-1]
# dict of ligands (PDB.Residue) to residues (PDB.Residue)
ln = findNeighbours(fp, ligands, radius)
if ln:
#now we can generate a list of uris for each ligand neighbor
luri = makeURIHood(ln)
hoodNTriples = makeHoodNTriplesAnnotation(luri, pdbId, radius)
#write an N3 file as output
writeN3Hood(hoodNTriples, pdbId, output_dir)
#Creates a ligand neighborhood
def makeHoodNTriplesAnnotation(ligand_uri_dict, aPdbId, aRadius):
rm = ''
#make a hood uri
hood_uri = base_uri+'/lighood_resource:'+hashlib.sha224(str(aPdbId)+str(aRadius)+str(random.random())).hexdigest()
#type the hood
rm += "<"+hood_uri+"> <"+rdf+"type> <"+base_uri+"/lighood_vocabulary:ligand_neighbourhood> .\n"
rm += "<"+base_uri+"/lighood_vocabulary:ligand_neighbourhood> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2000/01/rdf-schema#Class> .\n"
rm += "<"+hood_uri+"> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#NamedIndividual> .\n"
#link it to the pdb structure
rm += "<"+base_uri+"/pdb:"+aPdbId+"> <"+base_uri+"/lighood_vocabulary:has_neighborhood> <"+hood_uri+"> .\n"
#add the radius
radius_uri = base_uri+'/lighood_resource:'+hashlib.sha224(str(aRadius)+str(random.random())).hexdigest()
rm += "<"+hood_uri+"> <"+base_uri+"/lighood_vocabulary:has_attribute> <"+radius_uri+">. \n"
rm += "<"+radius_uri+"> <"+rdf+"type> <"+base_uri+"/lighood_vocabulary:radius> .\n"
rm += "<"+base_uri+"/lighood_vocabulary:radius> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2000/01/rdf-schema#Class> .\n"
rm += "<"+radius_uri+"> <"+"<http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#NamedIndividual> .\n"
rm += "<"+radius_uri+"> <"+base_uri+"/lighood_vocabulary:has_value> \""+str(aRadius)+"\". \n"
for (ligand_uri, res_uri) in ligand_uri_dict.items():
#add ligand
rm += "<"+hood_uri+"> <"+base_uri+"/lighood_vocabulary:has_member> <"+ligand_uri+"> .\n"
#type the ligand
rm += "<"+ligand_uri+"> <"+rdf+"type> <"+base_uri+"/lighood_vocabulary:ligand> .\n"
rm += "<"+base_uri+"/lighood_vocabulary:ligand> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2000/01/rdf-schema#Class> .\n"
rm += "<"+ligand_uri+"> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#NamedIndividual> .\n"
for aru in res_uri:
#add parts
rm += "<"+hood_uri+"> <"+base_uri+"/lighood_vocabulary:has_member> <"+aru+"> .\n"
#link ligand to neighbors
rm += "<"+ligand_uri+"> <"+base_uri+"/lighood_vocabulary:has_neighbor> <"+aru+"> .\n"
#type the neighbors
rm += "<"+aru+"> <"+rdf+"type> <"+base_uri+"/lighood_vocabulary:neighbor> .\n"
rm += "<"+base_uri+"/lighood_vocabulary:neighbor> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2000/01/rdf-schema#Class> .\n"
rm += "<"+aru+"> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#NamedIndividual> .\n"
return rm
#creates an N3 file with aPdbId in the specified anOutputDirectory
# by parsing the ligand_uri_dict
def writeN3Hood(someNTriples, aPdbId, anOutputDirectory):
if someNTriples:
f = open(anOutputDirectory+'/'+aPdbId+'-ligand-neighborhood.nt','w')
f.write(someNTriples)
f.close()
#returns a defaultdict(list) where the key is the Bio2RDF URI of
# the ligand residue in this structure and the value is a list of
# residue URIs that are in the radius of the given ligand
def makeURIHood(aLigandDictList):
rm = defaultdict(list)
for (ligand, hood) in aLigandDictList.items():
ligfi = ligand.get_full_id()
#build ligand uri
ligand_uri = base_uri+'/pdb_resource:'+ligfi[0]+'/chemicalComponent_'+ligfi[2]+str(ligfi[3][1])
residue_uris = []
for aResidue in hood:
fi = aResidue.get_full_id()
res_pdbid = fi[0]
res_chain = fi[2]
res_position = fi[3][1]
res_uri = base_uri+'/pdb_resource:'+res_pdbid+'/chemicalComponent_'+res_chain+str(res_position)
residue_uris.append(res_uri)
        if ligand_uri not in rm:
rm[ligand_uri] = residue_uris
return rm
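# Added example of the URI scheme used above (hypothetical entry): residue 42 in
# chain A of PDB entry 1ABC maps to
#     http://bio2rdf.org/pdb_resource:1ABC/chemicalComponent_A42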
# compute the ligand neighbourhood for the given pdb file.
# someVerifiedLigands is the contents of the spreadsheet with known ligands
# aRadius is the threshold underwhich the comparison will be made
# this method returns a defaultdict(list) where the key is a ligand
# and the value is a list of PDB.Residue that exist within aRadius of the given
# ligand
def findNeighbours(aPdbFilePath, someVerifiedLigands, aRadius):
rm = defaultdict(list)
fn, fe = os.path.splitext(aPdbFilePath)
pdbId = fn.rsplit('/')[-1]
match = re.match('^\w{4}$', pdbId)
if match:
p = PDBParser(PERMISSIVE=1, QUIET=1)
structure = p.get_structure(pdbId, aPdbFilePath)
models = structure.get_list()
#iterate over the models
for aModel in models:
chains = aModel.get_list()
#get all the atoms ('A') in this model
model_atoms = Selection.unfold_entities(aModel,'A')
#create a neighbor search
ns = NeighborSearch(model_atoms)
            #search the chains for any known ligands
cigroup-ol/metaopt | metaopt/plugin/visualization/best_fitness.py | Python | bsd-3-clause | 1,969 | 0
# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Standard Library
from datetime import datetime
# Third Party
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D # Load 3d plots capabilities
# First Party
from metaopt.plugin.plugin import Plugin
NUMBER_OF_SAMPLES = 200
COLORMAP = cm.jet
REVERSED_COLORMAP = cm.jet_r
class VisualizeBestFitnessPlugin(Plugin):
"""Visualize optimization progess"""
def __init__(self):
self.best_fitnesses = []
self.timestamps = []
self.start_time = None
self.current_best = None
self.return_spec = None
def setup(self, f, param_spec, return_spec):
del f, param_spec
self.return_spec = return_spec
if not self.start_time:
self.start_time = datetime.now()
def on_result(self, invocation):
fitness = invocation.current_result
if self.current_best is None or fitness < self.current_best:
self.current_best = fitness
self.best_fitnesses.append(self.current_best.raw_values)
time_delta = datetime.now() - self.start_time
self.timestamps.append(time_delta.total_seconds())
def show_fitness_invocations_plot(self):
"""Show a fitness--invocations plot"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("Number of Invocations")
ax.set_ylabel(self.get_y_label())
ax.plot(self.best_fitnesses)
plt.show()
def show_fitness_time_plot(self):
"""Show a fitness--time plot"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel("Time")
        ax.set_ylabel(self.get_y_label())
ax.plot(self.timestamps, self.best_fitnesses)
plt.show()
def get_y_label(self):
return self.return_spec.return_values[0]["name"]
st3f4n/latex-sanitizer | sanitize.py | Python | gpl-3.0 | 1,706 | 0
#!/usr/bin/env python3
import fileinput
import string
import sys
DELETE = ''
REPLACE = {'“': '``',
'”': '\'\'',
'’': '\'',
'\\': '\\textbackslash ',
'*': '\\textasteriskcentered ',
'_': '\\_',
'#': '\\#',
'$': '\\$',
'%': '\\%',
'{': '\\{',
'}': '\\}',
'&': '\\&',
'…': '\\dots ',
'~': '\\~{}',
'^': '\\^{}'}
def main():
all_deleted, all_replaced, all_specials = set(), set(), set()
for line in fileinput.input():
line, deleted = delete(line, DELETE)
all_deleted.update(deleted)
line, replaced = replace(line, REPLACE)
all_replaced.update(replaced)
specials = special_characters(line)
all_specials.update(specials)
sys.stdout.write(line)
print('Deleted characters: {}'.format(' '.join(sorted(all_deleted))),
file=sys.stderr)
print('Replaced characters: {}'.format(' '.join(sorted(all_replaced))),
file=sys.stderr)
prtxt = 'Remaining special characters: {}'
print(prtxt.format(' '.join(sorted(all_specials))),
file=sys.stderr)
def delete(text, illegals):
deleted = {char for char in illegals if char in text}
table = {char: None for char in illegals}
text = text.translate(str.maketrans(table))
return text, deleted
def replace(text, table):
replaced = {char for char in table if char in text}
text = text.translate(str.maketrans(table))
return text, replaced
def special_characters(text):
return {char for char in text if char not in string.printable}
if __name__ == '__main__':
main()
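# Added usage example (hypothetical input):
#     echo 'A & B ~ 100%' | python3 sanitize.py
# writes 'A \& B \~{} 100\%' to stdout and reports the replaced characters
# ('%', '&', '~') on stderr.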
MOOCworkbench/MOOCworkbench | marketplace/migrations/0013_auto_20170605_1359.py | Python | mit | 601 | 0.001664
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-05 13:59
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0012_auto_20170604_1335'),
]
operations = [
migrations.AlterField(
model_name='package',
name='name',
            field=models.CharField(max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[a-z]*$', 'Only lowercase letters are allowed.')]),
),
]
vegastrike/Assets-Production | modules/XGUI.py | Python | gpl-2.0 | 2,379 | 0.008407
import Base
import VS
import GUI
import XGUITypes
import XGUIDebug
XGUIRootSingleton = None
XGUIPythonScriptAPISingleton = None
"""----------------------------------------------------------------"""
""" """
""" XGUIRoot - root management interface for the XML-GUI framework."""
""" """
"""----------------------------------------------------------------"""
class XGUIRoot:
def __init__(self):
self.templates = {}
def getTemplate(self,type,name):
if type in self.templates and name in self.templates[type]:
return self.templates[type][name]
else:
return None
def addTemplate(self,tpl):
type = tpl.getType()
name = tpl.getName()
if not type in self.templates:
XGUIDebug.trace(1,"XGUI: Initializing template category \"" + str(type) + "\"\n")
self.templates[type] = {}
XGUIDebug.trace(2,"XGUI: Loading template \"" + str(name) + "\" into category \"" + str(type) + "\"\n")
self.templates[type][name] = tpl
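    # Added usage sketch (hypothetical template object 'tpl'):
    #     root = XGUIRoot()
    #     root.addTemplate(tpl)               # filed under (tpl.getType(), tpl.getName())
    #     root.getTemplate('button', 'ok')    # -> the template, or None if unknown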
class XGUIPythonScript:
def __init__(self,code,filename):
code = code.replace("\r\n","\n")
code += "\n"
self.code = compile(code,filename,'exec')
def execute(self,context):
        exec(self.code, context)
return context
"""----------------------------------------------------------------"""
""" """
""" XGUIPythonScriptAPI - through this class, all PythonScript """
""" API calls are routed. """
""" """
"""-----------------------
|
-----------------------------------------"""
class XGUIPythonScriptAPI:
def __init__(self,layout,room):
self.layout = layout
self.room = room
"""----------------------------------------------------------------"""
""" """
""" XGUI global initialization """
""" """
"""----------------------------------------------------------------"""
def XGUIInit():
    global XGUIRootSingleton
    XGUIRootSingleton = XGUIRoot()
HBPNeurorobotics/nest-simulator | pynest/nest/tests/test_quantal_stp_synapse.py | Python | gpl-2.0 | 4,353 | 0
# -*- coding: utf-8 -*-
#
# test_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script compares the two variants of the Tsodyks/Markram synapse in NEST.
import nest
import numpy
import unittest
@nest.check_stack
class QuantalSTPSynapseTestCase(unittest.TestCase):
"""Compare quantal_stp_synapse with its deterministic equivalent."""
    def test_QuantalSTPSynapse(self):
"""Compare quantal_stp_synapse with its deterministic equivalent"""
nest.ResetKernel()
nest.set_verbosity(100)
n_syn = 12 # number of synapses in a connection
n_trials = 50 # number of measurement trials
# parameter set for facilitation
fac_params = {"U": 0.03, "u": 0.03,
"tau_fac": 500., "tau_rec": 200., "weight": 1.}
dep_params = {"U": 0.5, "u": 0.5, "tau_fac": 15.,
"tau_rec": 670., "weight": 1.}
lin_params = {"U": 0.3, "u": 0.3, "tau_fac": 330.,
"tau_rec": 330., "weight": 1.}
# Here we assign the parameter set to the synapse models
t1_params = fac_params # for tsodyks2_synapse
        t2_params = t1_params.copy()  # for fuhrmann_synapse
t2_params['n'] = n_syn
t2_params['weight'] = 1. / n_syn
nest.SetDefaults("tsodyks2_synapse", t1_params)
nest.SetDefaults("quantal_stp_synapse", t2_params)
nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3., 'tau_m': 70.})
source = nest.Create('spike_generator')
nest.SetStatus(
source,
{
'spike_times': [
30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330., 360., 390., 900.]
}
)
parrot = nest.Create('parrot_neuron')
neuron = nest.Create("iaf_psc_exp", 2)
# We must send spikes via parrot because devices cannot
# connect through plastic synapses
# See #478.
nest.Connect(source, parrot)
nest.Connect(parrot, neuron[:1], syn_spec="tsodyks2_synapse")
nest.Connect(parrot, neuron[1:], syn_spec="quantal_stp_synapse")
voltmeter = nest.Create("voltmeter", 2)
nest.SetStatus(voltmeter, {"withgid": False, "withtime": True})
t_plot = 1000.
t_tot = 1500.
# the following is a dry run trial so that the synapse dynamics is
        # identical in all subsequent trials.
nest.Simulate(t_tot)
# Now we connect the voltmeters
nest.Connect([voltmeter[0]], [neuron[0]])
nest.Connect([voltmeter[1]], [neuron[1]])
for t in range(n_trials):
t_net = nest.GetKernelStatus('time')
nest.SetStatus(source, {'origin': t_net})
nest.Simulate(t_tot)
nest.Simulate(.1) # flush the last voltmeter events from the queue
vm = numpy.array(nest.GetStatus([voltmeter[1]], 'events')[0]['V_m'])
vm_reference = numpy.array(nest.GetStatus(
[voltmeter[0]], 'events')[0]['V_m'])
vm.shape = (n_trials, t_tot)
vm_reference.shape = (n_trials, t_tot)
vm_mean = numpy.array([numpy.mean(vm[:, i])
for i in range(int(t_tot))])
vm_ref_mean = numpy.array(
[numpy.mean(vm_reference[:, i]) for i in range(int(t_tot))])
error = numpy.sqrt((vm_ref_mean[:t_plot] - vm_mean[:t_plot])**2)
self.assertTrue(numpy.max(error) < 4.0e-4)
def suite():
suite = unittest.makeSuite(QuantalSTPSynapseTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
andydandy74/ClockworkForDynamo | nodes/2.x/python/FamilyInstance.FlipFromToRoom.py | Python | mit | 612 | 0.011438
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
faminstances = UnwrapElement(IN[0])
booleans = []
TransactionManager.Instance.EnsureInTransaction(doc)
for item in faminstances:
try:
item.FlipFromToRoom()
booleans.append(True)
except:
booleans.append(False)
TransactionManager.Instance.TransactionTaskDone()
OUT = (faminstances,booleans)
|
antoinecarme/pyaf
|
tests/periodicities/Minute/Cycle_Minute_1600_T_7.py
|
Python
|
bsd-3-clause
| 82
| 0.04878
|
import tests.periodicities.period_test as per
per.buildModel((7 , 'T' , 1600));
|
LBenzahia/cltk
|
cltk/lemmatize/french/french.py
|
Python
|
mit
| 2,931
| 0.006188
|
import re
"""Rules are based on Brunot & Bruneau (1949).
"""
estre_replace = [('^sereient$|^fussions$|^fussiens$|^sereies$|^sereiet$|^serïens$|^seriiez$|^fussiez$|^fussent$|^ierent$|^fustes$|^furent$|^ierent$|^sereie$|^seroie$|^sereit$|^seiens$|^seient$|^fusses$|^fussez$|^estant$|^seiens$|^somes$|^estes$|^ieres$|^ieres$|^eiens$|^eriez$|^erent$|^fumes$|^irmes$|^ertes$|^seies$|^seiet$|^seiez$|^fusse$|^seies$|^seiez$|^suis$|^sont$|^iere$|^eres$|^eret$|^iers$|^iert$|^seie$|^seit$|^fust$|^esté$|^ies$|^est$|^ere$|^ert$|^fui$|^fus$|^ier$|^ert$|^es$|^fu$', 'estre')]
avoir_replace = [('^avreient$|^avroient$|^eüssions$|^eüssiens$|^avrarai$|^avreies$|'
'^avroies$|^avreiet$|^avroiet$|^avrïens$|^avrïons$|^avriiez$|'
                  '^eüssiez$|^eüssent$|^eüstes$|^óurent$|^avrons$|^avront$|^avreie$|'
'^avrïez$|^eüsses$|^eüssez$|^avons$|^eümes$|^orent$|^avrai$|'
'^avras$|^avrez$|^aiens$|^ayons$|^aient$|^eüsse$|^avez$|^avra$|'
'^arai$|^aies$|^aiet$|^aiez$|^ayez$|^eüst$|^ont$|^eüs$|'
'^oüs$|^óut$|^oiz$|^aie$|^ait$|^ai$|^as$|^at$|^oi$|'
'^ot$|^oü$|^eü$|^a$', 'avoir')]
auxiliary_rules = estre_replace+avoir_replace
first_conj_rules = [('es$|e$|ons$|ez$|ent$|z$|(e)ai$|(e)as$|(e)a$|(e)at$|(e)ames$|(e)astes$|(e)erent$|(e)asse$|é$', 'er')]
i_type_rules = [('i$|is$|it$|imes$|istes$|irent$|isse$', 'ir')]
u_type_rules = [('ui$|us$|ut$|umes$|ustes$|urent$|usse$', 'oir')]
verbal_rules = u_type_rules+i_type_rules+first_conj_rules
regime_rules = [('on$|ain$', 'e')]
plural_rules = [('ales$|aux$|aus$', 'al'),
('s$', '')]
masc_to_fem_rules = [('se$', 'x'),
('ive$', 'if'),
('ee$', 'e')]
french_nominal_rules = regime_rules+plural_rules+masc_to_fem_rules
misc_rules = [('x$', 'l'),
('z$', 't'),
('un$', 'on'),
('eus$', 'os'),
('^e$', 'et')]
determiner_rules= [('^li$|^lo$|^le$|^la$|^les$', 'le'),
('^del$|^du$', 'de le'),
('^al$|^au$', 'a le'),
('^as$|^aus$|^aux$', "a les"),
('^uns$|^une$|^unes$', 'un')]
reduction_rules = [("d'", 'de'),
("m'", 'me'),
("t'", 'te'),
("l'", 'le'),
("qu'", "que")]
patterns = determiner_rules+misc_rules+auxiliary_rules+verbal_rules+french_nominal_rules+reduction_rules
def build_match_and_apply_functions(pattern, replace):
def matches_rule(word):
return re.search(pattern, word)
def apply_rule(word):
return re.sub(pattern, replace, word)
return (matches_rule, apply_rule)
rules = [build_match_and_apply_functions(pattern, replace)
for (pattern, replace) in patterns]
def regex(token):
for matches_rule, apply_rule in rules:
if matches_rule(token):
return apply_rule(token)
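
# Illustrative usage (not part of the original module; the expected outputs
# follow from the rule tables above):
if __name__ == '__main__':
    print(regex('avons'))  # -> 'avoir' (auxiliary verb rule)
    print(regex('fumes'))  # -> 'estre' (auxiliary verb rule)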
|
hgdeoro/GarnishMyPic
|
gmp/dnd.py
|
Python
|
gpl-3.0
| 4,062
| 0.003693
|
# -*- coding: utf-8 -*-
#===============================================================================
#
# Copyright 2013 Horacio Guillermo de Oro <hgdeoro@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
import datetime
import os
import wx
from gmp.garnisher import do_garnish, BORDER_SIZE_BOTTOM
from gmp.utils import GMP_OUTPUT_DIR, GMP_AUTHOR, GMP_FONT, \
GMP_DEFAULT_FONT_SIZE, GMP_OUTPUT_QUALITY, GMP_BORDER, GMP_COLOR, \
GMP_DEFAULT_MAX_SIZE, GMP_TITLE, GMP_TITLE_IMAGE, GMP_EXIF_COPYRIGHT
class MyFileDropTarget(wx.FileDropTarget):
def __init__(self, window):
wx.FileDropTarget.__init__(self)
self.window = window
def OnDropFiles(self, x, y, filenames):
"""
When files are dropped, write where they were dropped and then
the file paths themselves
"""
self.window.SetInsertionPointEnd()
self.window.clearText()
for filepath in filenames:
self.window.updateText(filepath + '\n')
self.window.updateText(" + Procesing " + os.path.normpath(os.path.abspath(filepath)) + "...")
self.window.refreshWindow()
exit_status = do_garnish(filepath, GMP_OUTPUT_DIR,
author=GMP_AUTHOR,
overwrite=True,
font_file=GMP_FONT,
font_size=GMP_DEFAULT_FONT_SIZE,
output_quality=GMP_OUTPUT_QUALITY,
border_size=GMP_BORDER,
border_color=GMP_COLOR,
border_size_bottom=BORDER_SIZE_BOTTOM,
max_size=[int(x) for x in GMP_DEFAULT_MAX_SIZE.split('x')],
title=GMP_TITLE,
title_img=GMP_TITLE_IMAGE,
year=datetime.date.today().year,
technical_info=True,
exif_copyright=GMP_EXIF_COPYRIGHT,
rotate=0,
)
self.window.updateText(" OK\n")
self.window.refreshWindow()
self.window.updateText("\nFinished!\n")
class DnDPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent)
file_drop_target = MyFileDropTarget(self)
lbl = wx.StaticText(self, label="Drag file to process here:")
self.fileTextCtrl = wx.TextCtrl(self,
style=wx.TE_MULTILINE | wx.HSCROLL | wx.TE_READONLY)
self.fileTextCtrl.SetDropTarget(file_drop_target)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(lbl, 0, wx.ALL, 5)
sizer.Add(self.fileTextCtrl, 1, wx.EXPAND | wx.ALL, 5)
self.SetSizer(sizer)
def SetInsertionPointEnd(self):
"""
Put insertion point at end of text control to prevent overwriting
"""
self.fileTextCtrl.SetInsertionPointEnd()
def updateText(self, text):
"""
Write text to the text control
"""
self.fileTextCtrl.WriteText(text)
def clearText(self):
self.fileTextCtrl.Clear()
def refreshWindow(self):
self.Refresh()
self.Update()
self.UpdateWindowUI()
class DnDFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, title="DnD for GMP")
panel = DnDPanel(self)
self.Show()
if __name__ == "__main__":
app = wx.App(False)
frame = DnDFrame()
app.MainLoop()
|
OpusVL/odoo
|
addons/account/wizard/account_change_currency.py
|
Python
|
agpl-3.0
| 3,683
| 0.003801
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_change_currency(osv.osv_memory):
_name = 'account.change.currency'
_description = 'Change Currency'
_columns = {
'currency_id': fields.many2one('res.currency', 'Change to', required=True, help="Select a currency to apply on the invoice"),
}
def view_init(self, cr , uid , fields_list, context=None):
obj_inv = self.pool.get('account.invoice')
if context is None:
context = {}
if context.get('active_id',False):
if obj_inv.browse(cr, uid, context['active_id']).state != 'draft':
raise osv.except_osv(_('Error!'), _('You can only change currency for Draft Invoice.'))
pass
def change_currency(self, cr, uid, ids, context=None):
obj_inv = self.pool.get('account.invoice')
obj_inv_line = self.pool.get('account.invoice.line')
obj_currency = self.pool.get('res.currency')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
new_currency = data.currency_id.id
invoice = obj_inv.browse(cr, uid, context['active_id'], context=context)
if invoice.currency_id.id == new_currency:
return {}
rate = obj_currency.browse(cr, uid, new_currency, context=context).rate
for line in invoice.invoice_line:
new_price = 0
if invoice.company_id.currency_id.id == invoice.currency_id.id:
new_price = line.price_unit * rate
if new_price <= 0:
raise osv.except_osv(_('Error!'), _('New currency is not configured properly.'))
if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id == new_currency:
old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
new_price = line.price_unit / old_rate
            if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id != new_currency:
                old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
new_price = (line.price_unit / old_rate ) * rate
obj_inv_line.write(cr, uid, [line.id], {'price_unit': new_price})
obj_inv.write(cr, uid, [invoice.id], {'currency_id': new_currency}, context=context)
return {'type': 'ir.actions.act_window_close'}
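
# Worked example (illustrative): with company and invoice both in EUR and a
# target currency whose rate is 1.1, a line priced at 100.0 is rewritten to
# 110.0 by the first branch above before the invoice currency is switched.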
|
teddywing/pubnub-python
|
python-tornado/examples/here-now.py
|
Python
|
mit
| 1,031
| 0.007759
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from pubnub import PubnubTornado as Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'hello_world'
# Asynchronous usage
def callback(message):
print(message)
pubnub.here_now(channel, callback=callback, error=callback)
pubnub.start()
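
# Example invocation (using PubNub's public 'demo' keys, which are also the
# defaults above):
#   python here-now.py demo demo demo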
|
metabrainz/acousticbrainz-server
|
webserver/utils.py
|
Python
|
gpl-2.0
| 860
| 0.002326
|
import string
import random
import webserver.views.api.exceptions
def generate_string(length):
    """Generates a random string with the specified length."""
return ''.join([random.SystemRandom().choice(
string.ascii_letters + string.digits
) for _ in range(length)])
def reformat_date(value, fmt="%b %d, %Y"):
return value.strftime(fmt)
def reformat_datetime(value, fmt="%b %d, %Y, %H:%M %Z"):
return value.strftime(fmt)
def validate_offset(offset):
"""Validate the offset.
If the offset is None, return 0, otherwise interpret it as a number. If it is
not a number, raise 400.
"""
if offset:
try:
offset = int(offset)
except ValueError:
raise webserver.views.api.exceptions.APIBadRequest("Offset must be an integer value")
else:
offset = 0
return offset
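
# Illustrative usage (not part of the original module):
#   generate_string(8)      # e.g. 'aZ3kQ9Lf' (random on each run)
#   validate_offset("20")   # -> 20
#   validate_offset(None)   # -> 0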
|
rsignell-usgs/notebook
|
HOPS/hops_velocity.py
|
Python
|
mit
| 830
| 0.03253
|
# coding: utf-8
# ## Plot velocity from non-CF HOPS dataset
# In[5]:
get_ipython().magic(u'matplotlib inline')
import netCDF4
import matplotlib.pyplot as plt
# In[6]:
url='http://geoport.whoi.edu/thredds/dodsC/usgs/data2/rsignell/gdrive/nsf-alpha/Data/MIT_MSEAS/MSEAS_Tides_20160317/mseas_tides_2015071612_2015081612_01h.nc'
# In[8]:
nc = netCDF4.Dataset(url)
# In[9]:
ncv = nc.variables
# In[ ]:
# extract lon,lat variables from vgrid2 variable
lon = ncv['vgrid2'][:,:,0]
lat = ncv['vgrid2'][:,:,1]
# In[20]:
# extract u,v variables from vbaro variable
itime = -1
u = ncv['vbaro'][itime,:,:,0]
v = ncv['vbaro'][itime,:,:,1]
# In[30]:
n=10
fig = plt.figure(figsize=(12,8))
plt.quiver(lon[::n,::n],lat[::n,::n],u[::n,::n],v[::n,::n])
#plt.axis([-70.6,-70.4,41.2,41.4])
# In[ ]:
# In[ ]:
# In[ ]:
|
npapier/sbf
|
src/sbfDoxygen.py
|
Python
|
gpl-3.0
| 6,169
| 0.041984
|
# SConsBuildFramework - Copyright (C) 2013, Nicolas Papier.
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation.
# Author Nicolas Papier
import os
from src.sbfRsync import createRsyncAction
from src.SConsBuildFramework import stringFormatter
# To be able to use SConsBuildFramework.py without SCons
import __builtin__
try:
from SCons.Script import *
except ImportError as e:
if not hasattr(__builtin__, 'SConsBuildFrameworkQuietImport'):
print ('sbfWarning: unable to import SCons.[Environment,Options,Script]')
### special doxygen related targets : dox_build dox_install dox dox_clean dox_mrproper ###
def printDoxygenBuild( target, source, localenv ) :
return '\n' + stringFormatter( localenv, "Build documentation with doxygen" )
def printDoxygenInstall( target, source, localenv ) :
return '\n' + stringFormatter( localenv, "Install doxygen documentation" )
# Creates a custom doxyfile
def doxyfileAction( target, source, env ) :
sbf = env.sbf
# Compute inputList, examplePath and imagePath parameters of doxyfile
inputList = ''
examplePath = ''
imagePath = ''
for projectName in sbf.myParsedProjects :
localenv = sbf.myParsedProjects[projectName]
projectPathName = localenv['sbf_projectPathName']
newPathEntry = os.path.join(projectPathName, 'include') + ' '
if os.path.exists( newPathEntry ) :
inputList += newPathEntry
newPathEntry = os.path.join(projectPathName, 'src') + ' '
if os.path.exists( newPathEntry ) :
inputList += newPathEntry
newPathEntry = os.path.join(projectPathName, 'doc') + ' '
if os.path.exists( newPathEntry ) :
inputList += newPathEntry
newPathEntry = os.path.join(projectPathName, 'doc', 'example') + ' '
if os.path.exists( newPathEntry ) :
examplePath += newPathEntry
newPathEntry = os.path.join(projectPathName, 'doc', 'image') + ' '
if os.path.exists( newPathEntry ) :
imagePath += newPathEntry
# Create a custom doxyfile
import shutil
targetName = str(target[0])
sourceName = str(source[0])
print 'Generating {}'.format( targetName )
shutil.copyfile(sourceName, targetName) # or env.Execute( Copy(targetName, sourceName) )
with open( targetName, 'a' ) as file:
file.write( '\n### Added by SConsBuildFramework\n' )
file.write( 'LAYOUT_FILE = "%s"\n' % os.path.join(sbf.mySCONS_BUILD_FRAMEWORK, 'DoxygenLayout.xml') )
file.write( 'PROJECT_NAME = "%s"\n' % sbf.myProject )
file.write( 'PROJECT_NUMBER = "%s generated at %s"\n' % (sbf.myVersion, sbf.myDateTime) )
file.write( 'OUTPUT_DIRECTORY = "%s"\n' % (targetName + '_build') )
file.write( 'INPUT = %s\n' % inputList )
#FIXME: FILE_PATTERNS, EXCLUDE, EXCLUDE_PATTERNS
file.write( 'EXAMPLE_PATH = %s\n' % examplePath )
file.write( 'IMAGE_PATH = %s\n' % imagePath )
file.write( 'ENABLED_SECTIONS = %s\n' % sbf.myProject )
# Synchronizes files from source to target.
# target should be yourDestinationPath/dummy.out
# Recursively copy the entire directory tree rooted at source to the destination directory (named by os.path.dirname(target)).
# Remark : the destination directory would be removed before the copying occurs (even if not empty, so be careful).
def syncAction( target, source, env ) :
import shutil
sourcePath = str(source[0])
destinationPath = os.path.dirname(str(target[0]))
print 'Copying %s at %s' % (sourcePath, destinationPath)
if ( os.path.ismount(destinationPath) ) :
        print 'sbfError: Try to use %s as an installation/uninstallation directory. Stop action to prevent any unwanted file destruction' % destinationPath
return None
shutil.rmtree( destinationPath, True )
if ( os.path.isdir( os.path.dirname(destinationPath) ) == False ):
os.makedirs( os.path.dirname(destinationPath) )
shutil.copytree( sourcePath, destinationPath )
def configureDoxTarget( env ):
# @todo improves output message
sbf = env.sbf
if ( ('dox_build' in sbf.myBuildTargets) or
('dox_install' in sbf.myBuildTargets) or
('dox' in sbf.myBuildTargets) or
('dox_clean' in sbf.myBuildTargets) or
('dox_mrproper' in sbf.myBuildTargets) ):
if ( ('dox_clean' in sbf.myBuildTargets) or
('dox_mrproper' in sbf.myBuildTargets) ):
env.SetOption('clean', 1)
#@todo use other doxyfile(s). see doxInputDoxyfile
doxInputDoxyfile = os.path.join(sbf.mySCONS_BUILD_FRAMEWORK, 'doxyfile')
doxOutputPath = os.path.join(sbf.myBuildPath, 'doxygen', sbf.myProject, sbf.myVersion )
doxOutputCustomDoxyfile = os.path.join(doxOutputPath, 'doxyfile.sbf')
doxBuildPath = os.path.join(doxOutputPath, 'doxyfile.sbf_build')
doxInstallPath = os.path.join(sbf.myInstallDirectory, 'doc', sbf.myProject, sbf.myVersion)
# target dox_build
commandGenerateDoxyfile = env.Command( doxOutputCustomDoxyfile, doxInputDoxyfile, Action(doxyfileAction, printDoxygenBuild) )
env.Alias( 'dox_build', commandGenerateDoxyfile )
commandCompileDoxygen = env.Command( 'dox_build.out', 'dummy.in', 'doxygen ' + doxOutputCustomDoxyfile )
env.Alias( 'dox_build', commandCompileDoxygen )
env.AlwaysBuild( [commandGenerateDoxyfile, commandCompileDoxygen] )
env.Depends( commandCompileDoxygen, commandGenerateDoxyfile )
# target dox_install
dox_install_cmd = env.Command( os.path.join(doxInstallPath,'dummy.out'), Dir(os.path.join(doxBuildPath, 'html')), Action(syncAction, printDoxygenInstall) )
env.Alias( 'dox_install', [ 'dox_build', dox_install_cmd ] )
env.AlwaysBuild( dox_install_cmd )
        env.Depends( dox_install_cmd, 'dox_build' )
# target dox
        env.Alias( 'dox', 'dox_install' )
if env['publishOn'] :
rsyncAction = createRsyncAction( env, 'doc_%s_%s' % (sbf.myProject, sbf.myVersion), Dir(os.path.join(doxBuildPath, 'html')), 'dox' )
env.Depends( rsyncAction, 'dox_install' )
# target dox_clean
env.Alias( 'dox_clean', 'dox' )
env.Clean( 'dox_clean', doxOutputPath )
# target dox_mrproper
env.Alias( 'dox_mrproper', 'dox_clean' )
env.Clean( 'dox_mrproper', doxInstallPath )
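
# Illustrative use from a project built with SConsBuildFramework:
#   scons dox          # generate and install the doxygen documentation
#   scons dox_clean    # remove the generated documentation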
|
jrmi/pypeman
|
pypeman/msgstore.py
|
Python
|
apache-2.0
| 9,722
| 0.00288
|
import os
import re
import asyncio
import logging
from collections import OrderedDict
from pypeman.message import Message
from pypeman.errors import PypemanConfigError
logger = logging.getLogger("pypeman.store")
DATE_FORMAT = '%Y%m%d_%H%M'
class MessageStoreFactory():
""" Message store factory class can generate Message store instance for specific store_id. """
def get_store(self, store_id):
"""
:param store_id: identifier of corresponding message store.
:return: A MessageStore corresponding to correct store_id.
"""
class MessageStore():
""" A MessageStore keep an history of processed messages. Mainly used in channels. """
async def start(self):
"""
Called at startup to initialize store.
"""
async def store(self, msg):
"""
Store a message in the store.
:param msg: The message to store.
:return: Id for this specific message.
"""
async def change_message_state(self, id, new_state):
"""
Change the `id` message state.
:param id: Message specific store id.
:param new_state: Target state.
"""
async def get(self, id):
"""
Return one message corresponding to given `id` with his status.
:param id: Message id. Message store dependant.
:return: A dict `{'id':<message_id>, 'state': <message_state>, 'message': <message_object>}`.
"""
async def search(self, start=0, count=10, order_by='timestamp'):
"""
Return a list of message with store specific `id` and processed status.
:param start: First element.
:param count: Count of elements since first element.
:param order_by: Message order. Allowed values : ['timestamp', 'status'].
:return: A list of dict `{'id':<message_id>, 'state': <message_state>, 'message': <message_object>}`.
"""
async def total(self):
"""
:return: total count of messages
"""
class NullMessageStoreFactory(MessageStoreFactory):
""" Return an NullMessageStore that do nothing at all. """
def get_store(self, store_id):
return NullMessageStore()
class NullMessageStore(MessageStore):
""" For testing purpose """
async def store(self, msg):
return None
async def get(self, id):
return None
async def search(self, **kwargs):
return None
async def total(self):
return 0
class FakeMessageStoreFactory(MessageStoreFactory):
""" Return an Fake message store """
def get_store(self, store_id):
return FakeMessageStore()
class FakeMessageStore(MessageStore):
""" For testing purpose """
async def store(self, msg):
logger.debug("Should store message %s", msg)
return 'fake_id'
async def get(self, id):
return {'id':id, 'state': 'processed', 'message': None}
async def search(self, **kwargs):
return []
async def total(self):
return 0
class MemoryMessageStoreFactory(MessageStoreFactory):
""" Return a Memory message store. All message are lost at pypeman stop. """
def __init__(self):
self.base_dict = {}
def get_store(self, store_id):
return MemoryMessageStore(self.base_dict, store_id)
class MemoryMessageStore(MessageStore):
""" Store messages in memory """
def __init__(self, base_dict, store_id):
super().__init__()
self.messages = base_dict.setdefault(store_id, OrderedDict())
async def store(self, msg):
msg_id = msg.uuid
self.messages[msg_id] = {'id': msg_id, 'state': Message.PENDING, 'timestamp': msg.timestamp, 'message': msg.to_dict()}
return msg_id
async def change_message_state(self, id, new_state):
self.messages[id]['state'] = new_state
async def get(self, id):
resp = dict(self.messages[id])
resp['message'] = Message.from_dict(resp['message'])
return resp
async def search(self, start=0, count=10, order_by='timestamp'):
if order_by.startswith('-'):
reverse = True
sort_key = order_by[1:]
else:
reverse = False
sort_key = order_by
result = []
for value in sorted(self.messages.values(), key=lambda x: x[sort_key], reverse=reverse):
resp = dict(value)
resp['message'] = Message.from_dict(resp['message'])
result.append(resp)
return result[start: start + count]
async def total(self):
return len(self.messages)
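
# Illustrative usage (hypothetical; the exact Message constructor is an
# assumption, not something this file defines):
#   store = MemoryMessageStoreFactory().get_store('demo')
#   msg_id = asyncio.get_event_loop().run_until_complete(store.store(msg))
#   entry = asyncio.get_event_loop().run_until_complete(store.get(msg_id))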
class FileMessageStoreFactory(MessageStoreFactory):
"""
Generate a FileMessageStore message store instance.
    Store a file in `<base_path>/<store_id>/<year>/<month>/<day>/` hierarchy.
"""
    # TODO add an option to regularly archive old files or delete them
def __init__(self, path):
        super().__init__()
        if path is None:
            raise PypemanConfigError('file message store requires a path')
        self.base_path = path
    def get_store(self, store_id):
return FileMessageStore(self.base_path, store_id)
class FileMessageStore(MessageStore):
""" Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy."""
# TODO file access should be done in another thread. Waiting for file backend.
def __init__(self, path, store_id):
super().__init__()
self.base_path = os.path.join(path, store_id)
# Match msg file name
self.msg_re = re.compile(r'^([0-9]{8})_([0-9]{2})([0-9]{2})_[0-9abcdef]*$')
try:
# Try to make dirs if necessary
os.makedirs(os.path.join(self.base_path))
except FileExistsError:
pass
self._total = 0
async def start(self):
self._total = await self.count_msgs()
async def store(self, msg):
""" Store a file in `<base_path>/<store_id>/<month>/<day>/` hierachy."""
# TODO implement a safer store to avoid broken messages
# The filename is the file id
filename = "{}_{}".format(msg.timestamp.strftime(DATE_FORMAT), msg.uuid)
dirs = os.path.join(str(msg.timestamp.year), "%02d" % msg.timestamp.month, "%02d" % msg.timestamp.day)
try:
# Try to make dirs if necessary
os.makedirs(os.path.join(self.base_path, dirs))
except FileExistsError:
pass
file_path = os.path.join(dirs, filename)
# Write message to file
with open(os.path.join(self.base_path, file_path), "w") as f:
f.write(msg.to_json())
await self.change_message_state(file_path, Message.PENDING)
self._total += 1
return file_path
async def change_message_state(self, id, new_state):
with open(os.path.join(self.base_path, id + '.meta'), "w") as f:
f.write(new_state)
async def get_message_state(self, id):
with open(os.path.join(self.base_path, id + '.meta'), "r") as f:
state = f.read()
return state
async def get(self, id):
if not os.path.exists(os.path.join(self.base_path, id)):
raise IndexError
with open(os.path.join(self.base_path, id), "rb") as f:
msg = Message.from_json(f.read().decode('utf-8'))
return {'id': id, 'state': await self.get_message_state(id), 'message': msg}
async def sorted_list_directories(self, path, reverse=True):
"""
:param path: Base path
:param reverse: reverse order
:return: List of directories in specified path ordered
"""
return sorted([d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))], reverse=reverse)
async def count_msgs(self):
"""
        Count messages by listing all directories. To be used at startup.
"""
count = 0
for year in await self.sorted_list_directories(os.path.join(self.base_path)):
for month in await self.sorted_list_directories(os.path.join(self.base_path, year)):
for day in await self.sorted_list_directories(os.path.join(self.base_path, year, month)):
|
payet-s/pyrser
|
pyrser/type_system/val.py
|
Python
|
gpl-3.0
| 1,017
| 0
|
# val for type checking (literal or ENUM style)
from pyrser import fmt
from pyrser.type_system.signature import *
from pyrser.type_system.type_name import *
class Val(Signature):
"""
Describe a value signature for the language
"""
nvalues = 0
valuniq = dict()
def __init__(self, value, tret: str):
if not isinstance(value, str):
value = str(value)
self.value = value
if not isinstance(tret, TypeName):
tret = TypeName(tret)
self.tret = tret
k = self.value + "$" + tret
idx = 0
if k not in Val.valuniq:
Val.nvalues += 1
Val.valuniq[k] = Val.nvalues
idx = Val.nvalues
        else:
            idx = Val.valuniq[k]
super().__init__('$' + str(idx))
def internal_name(self):
"""
Return the unique internal name
"""
        unq = super().internal_name()
if self.tret is not None:
unq += "_" + self.tret
return unq
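
# Illustrative usage (not part of the original module): equal value/type
# pairs share one internal index, so their internal names match.
#   v1 = Val(12, 'int')
#   v2 = Val(12, 'int')
#   assert v1.internal_name() == v2.internal_name()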
|
aqfaridi/Code-Online-Judge
|
web/env/Main1145/Main1145.py
|
Python
|
mit
| 90
| 0.011111
|
while(True):
    n = input()
if(n == 42):
break
else:
print n
|
imh/gnss-analysis
|
gnss_analysis/mk_sdiffs.py
|
Python
|
lgpl-3.0
| 4,406
| 0.006809
|
import pandas as pd
import numpy as np
from swiftnav.ephemeris import *
from swiftnav.single_diff import SingleDiff
from swiftnav.gpstime import *
def construct_pyobj_eph(eph):
return Ephemeris(
eph.tgd,
eph.crs, eph.crc, eph.cuc, eph.cus, eph.cic, eph.cis,
eph.dn, eph.m0, eph.ecc, eph.sqrta, eph.omega0, eph.omegadot, eph.w, eph.inc, eph.inc_dot,
eph.af0, eph.af1, eph.af2,
GpsTime(eph.toe_wn, eph.toe_tow), GpsTime(eph.toc_wn, eph.toc_tow),
eph['valid'], # this syntax is needed because the method .valid takes precedence to the field
eph.healthy,
eph.prn+1) # +1 temporarily, until i get the next dataset where this is fixed
def separate_ephs(ephs):
"""
Return a dictionary of prn to dataframe, where each dataframe is
the unique ephemerides (unique and first, as in fst . groupby) over
the time period the data was taken.
"""
sep_ephs_tuples = [(int(prn),ephs[ephs['prn'] == prn]) for prn in ephs['prn'].unique()]
sep_ephs = {}
for sep_eph_tuple in sep_ephs_tuples:
prn = sep_eph_tuple[0]+1 #temporarily, just for the dataset before i started storing them correctly TODO FIX
frame = pd.DataFrame(sep_eph_tuple[1].drop_duplicates().apply(construct_pyobj_eph, axis=1), columns=['ephemeris'])
# frame = pd.DataFrame(sep_eph_tuple[1].apply(construct_pyobj_eph, axis=1), columns=['ephemeris'])
frame['time'] = frame.index
sep_ephs[prn] = frame
return sep_ephs
def merge_into_sdiffs(ephs, sd):
"""
Taking ephemerides and observation data, this will merge them
together into a panel whose index is a sat, major axis is time,
and minor axis is everything needed for an sdiff struct.
It's super slow, so I left it all in pandas format, so we can
save it out in hdf5 and get it back all nicely processed.
"""
sep_ephs = separate_ephs(ephs)
sats = sd.items
num_sats = map(lambda x: int(x[1:]),sats)
sdiff_dict = {}
for sat in sats:
# sat = sats[0]
sat_ephs = sep_ephs[int(sat[1:])]
fst_eph = sat_ephs.ix[0].ephemeris
obs = sd[sat]
obs['time'] = obs.index
def make_single_diff(x):
if np.isnan(x.C1) or np.isnan(x.L1) or np.isnan(x.S1_1) or np.isnan(x.S1_2):
return pd.Series([np.nan]*11,
index=['C1', 'L1', 'D1', 'sat_pos_x', 'sat_pos_y', 'sat_pos_z',
'sat_vel_x', 'sat_vel_y', 'sat_vel_z', 'min_snr', 'prn'])
c1 = x.C1
l1 = x.L1
snr = min(x.S1_1, x.S1_2)
timestamp = x.time
            earlier_ephs = sat_ephs[sat_ephs['time'] <= timestamp]
if earlier_ephs.shape[0] >= 1:
eph = earlier_ephs.ix[-1].ephemeris
else:
eph = fst_eph
gpstime = datetime2gpst(timestamp)
            pos, vel, clock_err, clock_rate_err = calc_sat_pos(eph, gpstime)
return pd.Series([c1, l1, np.nan, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2], snr, int(sat[1:])],
index=['C1', 'L1', 'D1', 'sat_pos_x', 'sat_pos_y', 'sat_pos_z',
'sat_vel_x', 'sat_vel_y', 'sat_vel_z', 'min_snr', 'prn'])
sdiffs = obs.apply(make_single_diff,axis=1).dropna(how='all',axis=0)
sdiff_dict[sat] = sdiffs
return pd.Panel(sdiff_dict)
def main():
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("ephemeris",
help="the ephemeris file to process")
parser.add_argument("input",
help="the HDF5 file to process")
parser.add_argument("base_name", default=False,
help="the marker name of the base station")
parser.add_argument("rover_name", default=False,
help="the marker name of the rover")
args = parser.parse_args()
eph_file = pd.HDFStore(args.ephemeris)
eph = eph_file['eph']
h5 = pd.HDFStore(args.input)
sd_table = h5['sd_%s_%s' % (args.rover_name, args.base_name)]
output_table_name = 'sdiff_%s_%s' % (args.rover_name, args.base_name)
h5[output_table_name] = merge_into_sdiffs(eph, sd_table)
h5.close()
if __name__ == '__main__':
main()
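
# Example invocation (hypothetical file and marker names):
#   python mk_sdiffs.py ephemeris.h5 observations.h5 base rover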
|
snsokolov/contests
|
codeforces/556A_zeroes.py
|
Python
|
unlicense
| 2,674
| 0.000748
|
#!/usr/bin/env python3
# 556A_zeroes.py - Codeforces.com/problemset/problem/556/A Zeroes quiz by Sergey 2015
# Standard modules
import unittest
import sys
import re
# Additional modules
###############################################################################
# Zeroes Class
###############################################################################
class Zeroes:
""" Zeroes representation """
def __init__(self, args):
""" Default constructor """
self.list = args
def calculate(self):
""" Main calcualtion function of the class """
result = 0
for n in self.list:
result += 1 if n else -1
return str(abs(result))
###############################################################################
# Executable code
###############################################################################
def get_inputs(test_inputs=None):
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
""" Unit-testable input function wr
|
apper """
if it:
return next(it)
else:
return input()
# Getting string inputs
num = int(uinput())
ints = [int(n) for n in uinput()]
return ints
def calculate(test_inputs=None):
""" Base class calculate method wrapper """
return Zeroes(get_inputs(test_inputs)).calculate()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_sample_tests(self):
""" Quiz sample tests. Add \n to separate lines """
self.assertEqual(calculate("4\n1100"), "0")
self.assertEqual(calculate("5\n01010"), "1")
self.assertEqual(calculate("8\n11101111"), "6")
str = "1\n"
for i in range(2*pow(10, 5)):
str += "0"
self.assertEqual(calculate(str), "200000")
def test_get_inputs(self):
""" Input string decoding testing """
self.assertEqual(get_inputs("4\n1100"), [1, 1, 0, 0])
def test_Zeroes_class__basic_functions(self):
""" Zeroes class basic functions testing """
# Constructor test
d = Zeroes([1, 0, 0, 1])
self.assertEqual(d.list[0], 1)
self.assertEqual(d.calculate(), "0")
d.list = [1, 0, 0, 0]
self.assertEqual(d.calculate(), "2")
if __name__ == "__main__":
    # Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
# Print the result string
print(calculate())
|
sqaxomonophonen/worldmapsvg
|
svg/path/path.py
|
Python
|
cc0-1.0
| 15,350
| 0.001303
|
from __future__ import division
from math import sqrt, cos, sin, acos, degrees, radians, log
from collections import MutableSequence
# This file contains classes for the different types of SVG path segments as
# well as a Path object that contains a sequence of path segments.
MIN_DEPTH = 5
ERROR = 1e-12
def segment_length(curve, start, end, start_point, end_point, error, min_depth, depth):
"""Recursively approximates the length by straight lines"""
mid = (start + end) / 2
mid_point = curve.point(mid)
length = abs(end_point - start_point)
first_half = abs(mid_point - start_point)
second_half = abs(end_point - mid_point)
length2 = first_half + second_half
if (length2 - length > error) or (depth < min_depth):
# Calculate the length of each segment:
depth += 1
return (segment_length(curve, start, mid, start_point, mid_point,
error, min_depth, depth) +
segment_length(curve, mid, end, mid_point, end_point,
error, min_depth, depth))
# This is accurate enough.
return length2
class Line(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __repr__(self):
return 'Line(start=%s, end=%s)' % (self.start, self.end)
def __eq__(self, other):
if not isinstance(other, Line):
return NotImplemented
return self.start == other.start and self.end == other.end
def __ne__(self, other):
if not isinstance(other, Line):
return NotImplemented
return not self == other
def point(self, pos):
distance = self.end - self.start
return self.start + distance * pos
def length(self, error=None, min_depth=None):
distance = (self.end - self.start)
return sqrt(distance.real ** 2 + distance.imag ** 2)
class CubicBezier(object):
def __init__(self, start, control1, control2, end):
self.start = start
        self.control1 = control1
self.control2 = control2
self.end = end
def __repr__(self):
return 'CubicBezier(start=%s, control1=%s, control2=%s, end=%s)' % (
self.start, self.control1, self.control2, self.end)
def __eq__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return self.start == other.start and self.end == other.end and \
            self.control1 == other.control1 and self.control2 == other.control2
def __ne__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return not self == other
def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (previous.end - previous.control2))
else:
return self.control1 == self.start
def point(self, pos):
"""Calculate the x,y position at a certain position of the path"""
return ((1 - pos) ** 3 * self.start) + \
(3 * (1 - pos) ** 2 * pos * self.control1) + \
(3 * (1 - pos) * pos ** 2 * self.control2) + \
(pos ** 3 * self.end)
def length(self, error=ERROR, min_depth=MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
start_point = self.point(0)
end_point = self.point(1)
return segment_length(self, 0, 1, start_point, end_point, error, min_depth, 0)
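
# Illustrative check (not part of the original module): a degenerate cubic
# whose control points all lie on the segment from 0j to 1+0j is a straight
# line, so the recursive approximation converges to 1.0:
#   CubicBezier(0j, 0.25+0j, 0.75+0j, 1+0j).length()  # ~= 1.0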
class QuadraticBezier(object):
def __init__(self, start, control, end):
self.start = start
self.end = end
self.control = control
def __repr__(self):
return 'QuadraticBezier(start=%s, control=%s, end=%s)' % (
self.start, self.control, self.end)
def __eq__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return self.start == other.start and self.end == other.end and \
self.control == other.control
def __ne__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return not self == other
def is_smooth_from(self, previous):
"""Checks if this segment would be a smooth segment following the previous"""
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (previous.end - previous.control))
else:
return self.control == self.start
def point(self, pos):
return (1 - pos) ** 2 * self.start + 2 * (1 - pos) * pos * self.control + \
pos ** 2 * self.end
def length(self, error=None, min_depth=None):
# http://www.malczak.info/blog/quadratic-bezier-curve-length/
a = self.start - 2 * self.control + self.end
b = 2 * (self.control - self.start)
A = 4 * (a.real ** 2 + a.imag ** 2)
B = 4 * (a.real * b.real + a.imag * b.imag)
C = b.real ** 2 + b.imag ** 2
Sabc = 2 * sqrt(A + B + C)
A2 = sqrt(A)
A32 = 2 * A * A2
C2 = 2 * sqrt(C)
BA = B / A2
return (A32 * Sabc + A2 * B * (Sabc - C2) + (4 * C * A - B ** 2) *
log((2 * A2 + BA + Sabc) / (BA + C2))) / (4 * A32)
class Arc(object):
def __init__(self, start, radius, rotation, arc, sweep, end):
"""radius is complex, rotation is in degrees,
large and sweep are 1 or 0 (True/False also work)"""
self.start = start
self.radius = radius
self.rotation = rotation
self.arc = bool(arc)
self.sweep = bool(sweep)
self.end = end
self._parameterize()
def __repr__(self):
return 'Arc(start=%s, radius=%s, rotation=%s, arc=%s, sweep=%s, end=%s)' % (
self.start, self.radius, self.rotation, self.arc, self.sweep, self.end)
def __eq__(self, other):
if not isinstance(other, Arc):
return NotImplemented
return self.start == other.start and self.end == other.end and \
self.radius == other.radius and self.rotation == other.rotation and \
self.arc == other.arc and self.sweep == other.sweep
def __ne__(self, other):
if not isinstance(other, Arc):
return NotImplemented
return not self == other
def _parameterize(self):
# Conversion from endpoint to center parameterization
# http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
cosr = cos(radians(self.rotation))
sinr = sin(radians(self.rotation))
dx = (self.start.real - self.end.real) / 2
dy = (self.start.imag - self.end.imag) / 2
x1prim = cosr * dx + sinr * dy
x1prim_sq = x1prim * x1prim
y1prim = -sinr * dx + cosr * dy
y1prim_sq = y1prim * y1prim
rx = self.radius.real
rx_sq = rx * rx
ry = self.radius.imag
ry_sq = ry * ry
# Correct out of range radii
radius_check = (x1prim_sq / rx_sq) + (y1prim_sq / ry_sq)
if radius_check > 1:
rx *= sqrt(radius_check)
ry *= sqrt(radius_check)
rx_sq = rx * rx
ry_sq = ry * ry
t1 = rx_sq * y1prim_sq
t2 = ry_sq * x1prim_sq
c = sqrt(abs((rx_sq * ry_sq - t1 - t2) / (t1 + t2)))
if self.arc == self.sweep:
c = -c
cxprim = c * rx * y1prim / ry
cyprim = -c * ry * x1prim / rx
self.center = complex((cosr * cxprim - sinr * cyprim) +
((self.start.real + self.end.real) / 2),
(sinr * cxprim + cosr * cyprim) +
((self.start.imag + self.end.imag) / 2))
ux = (x1prim - cxprim) / rx
uy = (y1prim - cyprim) / ry
vx = (-x1prim - cxprim) / rx
vy = (-y1prim - cyprim) / ry
n = sqrt(ux * ux + uy * uy)
p = ux
theta = degrees(aco
|
kk6/onedraw
|
onedraw/tags/models.py
|
Python
|
mit
| 345
| 0
|
# -*- coding: utf-8 -*-
from django.db import models
from tweets.models import Tweet
class Tag(models.Model):
name = models.CharField(max_length=255, unique=True, db_index=True)
    is_hashtag = models.BooleanField(default=False)
tweets = models.ManyToManyField(Tweet, related_name='tags')
class Meta:
db_table = 'tags'
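
# Illustrative ORM usage (assumes an existing Tweet instance named `tweet`):
#   tag, _ = Tag.objects.get_or_create(name='#python', is_hashtag=True)
#   tag.tweets.add(tweet)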
|
shravan97/WordHunter
|
ImageSearch/image_searcher.py
|
Python
|
mit
| 775
| 0.023226
|
""" A simple module to get the links of first
10 images d
|
isplayed on google image search
"""
from googleapiclient.discovery import build
class GoogleImageSearch:
def __init__(self,api_key,cse_id):
        self.my_api_key = api_key
self.my_cse_id= cse_id
def search(self,search_term,**kwargs):
google_service = build("customsearch", "v1",
developerKey=self.my_api_key)
result = google_service.cse().list(q=search_term,
cx=self.my_cse_id, **kwargs).execute()
return result['items']
def get_image_links(self , search_term):
results = self.search(search_term , searchType='image')
links = [result['link'] for result in results]
return links
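
# Illustrative usage ('MY_API_KEY' and 'MY_CSE_ID' are placeholders, not
# values from the original project):
#   searcher = GoogleImageSearch('MY_API_KEY', 'MY_CSE_ID')
#   links = searcher.get_image_links('python logo')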
|
urashima9616/Leetcode_Python
|
Leet201_BitwiswAndRange.py
|
Python
|
gpl-3.0
| 1,001
| 0.006993
|
"""
Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.
For example, given the range [5, 7], you should return 4.
Credits:
Special thanks to @amrsaqr for adding this problem and creating all test cases.
"""
class Solution(object):
def rangeBitwiseAnd(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
ans = 0
gap = n-m
        max_i = n
        min_i = m
bits = [0 for _ in xrange(32)]
for i in xrange(32):
#take the i-th pos of max and min
a = max_i & 1
b = min_i & 1
max_i >>= 1
min_i >>= 1
if a == 0 or b == 0:
bits[i] = 0
else:
if gap >>i > 0:
bits[i] = 0
else:
bits[i] = 1
        for each in bits[::-1]:
            ans <<= 1
            ans |= each
        return ans
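
# Quick illustrative check (not part of the original file): for the sample
# range [5, 7] from the docstring, 5 & 6 & 7 == 0b100 == 4.
if __name__ == '__main__':
    print Solution().rangeBitwiseAnd(5, 7)  # expected: 4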
|
henriquesouza/toply
|
src/objects/GetText.py
|
Python
|
gpl-3.0
| 6,417
| 0.009512
|
# -*- coding: utf-8 -*-
class GetText():
_file_path = None
_body_list = None
_target = None
def __init__(self, file_path):
#self._file_path = open(file_path, "r+").read().replace("<br","\n<br")
self._file_path = file_path.replace("<br />", "<br />\n")
#self._file_path = (self._file_path.replace("\n",";;")).split(";;")
#self._file_path = file_path
#print(self._file_path)
self._file_path = ((self._file_path.replace("\n", "123")).replace(" ", "")).replace("> <", "")
self._file_path = (self._file_path).replace("<p", "\n<p")
#print (self._file_path)
self._body_list = self.get_body().split("123")
self.set_target_content(self.get_body())
self.get_beginning()
self.remove_tags()
#super(leGetText, self).__init__()
def get_html(self):
return self._file_path
def get_body(self):
return self.get_html().split("</head>", -1)[1]
def get_first_br_line(self):
br_index = 0
for i in self._body_list:
if(i.find("<br") > -1):
return (self._body_list.index(i))
else:
                br_index += 1
return br_index
def get_since_first_br(self):
since_first_br = self._body_list
del since_first_br[0:self.get_first_br_line()]
self.set_target_content(since_first_br)
return since_first_br
def set_target_content(self, content):
self._target = content
def get_target(self):
return self._target
def br_check(self, info):
if(info == "<br>"):
return True
elif(info == "<br />"):
return True
elif(info == "</ br>"):
return True
elif(info == "< br>"):
return True
else:
return False
def get_beginning(self):
# verifying whether there's any br in the next index
since_last_br = self.get_since_first_br()
#getting beginning of the lyrics
#print(since_last_br)
i = 0
for j in since_last_br:
if (
j.find("<br") > -1 and
since_last_br[i+1].find("<br") > -1 and
                since_last_br[i+2].find("<br") > -1 and
since_last_br[i+3].find("<br") > -1 and
self.br_check(since_last_br[i]) == False and
self.br_check(since_last_br[i+1]) == False and
self.br_check(since_last_br[i+2]) == False and
self.br_check(since_last_br[i+3]) == False
):
del since_last_br[0:i]
break
            else:
i = i +1
if (since_last_br[i].find("<br") > -1 and i+3< len(since_last_br) and self.br_check(since_last_br[i+3]) == False):
#print("i + 1 contains <br>")
#print(since_last_br[i])
del since_last_br[0:i]
# print (since_last_br)
break
self.set_target_content(since_last_br[0:200])
def remove_tags(self):
#removing tags
#removing b
html_file = "ç\n".join(self.get_target())
while(html_file.find("<b>") > -1):
#print("occur")
op = html_file.index("<b>")
cl = html_file.index("/b>")+3
html_file = list(html_file)
#for i in range(op, cl):
del html_file[op:cl]
html_file = "".join(html_file)
#removing [] (brackets) => from "[" to "\n"
while(html_file.find("[") > -1):
op = html_file.index("[")
cl = html_file.find("]")+1
bracket_line = html_file.split("ç")
l = 0
for k in bracket_line:
if(k.find("[") > -1):
break
l = l +1
del bracket_line[l]
html_file = ""
for k in bracket_line:
html_file = html_file + k+"ç"
'''
html_file = list(html_file)
#for i in range(op, cl):
del html_file[op:cl]
html_file = "".join(html_file)'''
self.set_target_content(html_file.split("ç"))
def get_end(self):
#getting the end of the lyrics (check if the next tag
#being opened is the same as the one being close
broken_since = "".join(self.get_target())
broken_since = broken_since.split("\n")
new_broken_since = [] #turning <p> into <br>
for x in broken_since:
la = x.replace("<p", "<br")
la = la.replace("</p>", "")
new_broken_since.append(la)
broken_since = new_broken_since
#checking if we are still in the lyrics block
until_the_end = []
l = 0
for k in broken_since:
kk = list(k)
if len(k) > 0:
'''
print("\n\n")
print(broken_since[l+1].find("<br"))
print(broken_since[l+1])
print("< l1 \n l2 >")
print(broken_since[l + 2].find("<br"))
print("\n\n")'''
if(l < 3 or k[0] != "<" or k[1] == "b"
or (broken_since[l+1].find("<br") > -1 and broken_since[l+2].find("<br"))
):
if (k.find("><br") == -1):
#print(k)
until_the_end.append("\n"+k)
else:
break
else:
#print("\n\n")
break
l = l + 1
#removing tags
final = ""
block = False
for j in until_the_end:
i = 0
moral = list(j)
for i in range(0, len(moral)):
if(moral[i] == "<"):
block = True
elif(moral[i] == ">"):
block = False
if(block==False and moral[i]!="<" and moral[i]!=">"):
final=final+moral[i]
return final
'''
oo = open("../../tmp/lrc", "r").read()
#print(oo)
get_text = _GetText(oo)
#print(get_text.get_target())
final = get_text.get_end()
final = final.encode("latin1").decode("utf-8")
'''
#print(final)
'''
#rs = open("../../tmp/lrc", "w+")
#rs.write(final)'
'''
|
munkireport/munkireport-php
|
public/assets/client_installer/payload/usr/local/munkireport/munkilib/reportcommon.py
|
Python
|
mit
| 17,507
| 0.000914
|
#!/usr/local/munkireport/munkireport-python2
# encoding: utf-8
from . import display
from . import prefs
from . import constants
from . import FoundationPlist
from munkilib.purl import Purl
from munkilib.phpserialize import *
import subprocess
import pwd
import sys
import hashlib
import platform
from urllib import urlencode
import re
import time
import os
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from Foundation import NSArray, NSDate, NSMetadataQuery, NSPredicate
from Foundation import CFPreferencesAppSynchronize
from Foundation import CFPreferencesCopyAppValue
from Foundation import CFPreferencesCopyKeyList
from Foundation import CFPreferencesSetValue
from Foundation import kCFPreferencesAnyUser
from Foundation import kCFPreferencesCurrentUser
from Foundation import kCFPreferencesCurrentHost
from Foundation import NSHTTPURLResponse
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
# pylint: enable=E0611
# our preferences "bundle_id"
BUNDLE_ID = "MunkiReport"
class CurlError(Exception):
def __init__(self, status, message):
display_error(message)
finish_run()
def set_verbosity(level):
"""Set verbosity level."""
display.verbose = int(level)
def display_error(msg, *args):
"""Call display error msg handler."""
display.display_error("%s" % msg, *args)
def display_warning(msg, *args):
"""Call display warning msg handler."""
display.display_warning("%s" % msg, *args)
def display_detail(msg, *args):
"""Call display detail msg handler."""
display.display_detail("%s" % msg, *args)
def finish_run():
remove_run_file()
display_detail("## Finished run")
exit(0)
def remove_run_file():
touchfile = '/Users/Shared/.com.github.munkireport.run'
if os.path.exists(touchfile):
os.remove(touchfile)
def curl(url, values):
options = dict()
options["url"] = url
options["method"] = "POST"
options["content_type"] = "application/x-www-form-urlencoded"
options["body"] = urlencode(values)
options["logging_function"] = display_detail
options["connection_timeout"] = 60
if pref("UseMunkiAdditionalHttpHeaders"):
custom_headers = prefs.pref(constants.ADDITIONAL_HTTP_HEADERS_KEY)
if custom_headers:
options["additional_headers"] = dict()
for header in custom_headers:
m = re.search(r"^(?P<header_name>.*?): (?P<header_value>.*?)$", header)
if m:
options["additional_headers"][m.group("header_name")] = m.group(
"header_value"
)
else:
raise CurlError(
-1,
"UseMunkiAdditionalHttpHeaders defined, "
"but not found in Munki preferences",
)
# Build Purl with initial settings
connection = Purl.alloc().initWithOptions_(options)
connection.start()
try:
while True:
# if we did `while not connection.isDone()` we'd miss printing
# messages if we exit the loop first
if connection.isDone():
break
except (KeyboardInterrupt, SystemExit):
# safely kill the connection then re-raise
connection.cancel()
raise
except Exception, err: # too general, I know
# Let us out! ... Safely! Unexpectedly quit dialogs are annoying...
connection.cancel()
# Re-raise the error as a GurlError
raise CurlError(-1, str(err))
if connection.error != None:
# Gurl returned an error
display.display_detail(
"Download error %s: %s",
connection.error.code(),
connection.error.localizedDescription(),
)
if connection.SSLerror:
display_detail("SSL error detail: %s", str(connection.SSLerror))
display_detail("Headers: %s", connection.headers)
raise CurlError(
connection.error.code(), connection.error.localizedDescription()
)
if connection.response != None and connection.status != 200:
display.display_detail("Status: %s", connection.status)
display.display_detail("Headers: %s", connection.headers)
if connection.redirection != []:
display.display_detail("Redirection: %s", connection.redirection)
connection.headers["http_result_code"] = str(connection.status)
description = NSHTTPURLResponse.localizedStringForStatusCode_(connection.status)
connection.headers["http_result_description"] = description
if str(connection.status).startswith("2"):
return connection.get_response_data()
else:
# there was an HTTP error of some sort.
raise CurlError(
connection.status,
"%s failed, HTTP returncode %s (%s)"
% (
url,
connection.status,
connection.headers.get("http_result_description", "Failed"),
),
)
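
# Illustrative call (hypothetical URL and payload, not from this file):
#   data = curl('https://munkireport.example/report', {'serial': 'ABC123'})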
def get_hardware_info():
"""Uses system profiler to get hardware info for this machine."""
cmd = ["/usr/sbin/system_profiler", "SPHardwareDataType", "-xml"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, dummy_error) = proc.communicate()
try:
plist = FoundationPlist.readPlistFromString(output)
# system_profiler xml is an array
sp_dict = plist[0]
items = sp_dict["_items"]
sp_hardware_dict = items[0]
return sp_hardware_dict
except BaseException:
return {}
def get_long_username(username):
try:
long_name = pwd.getpwnam(username)[4]
except:
long_name = ""
return long_name.decode("utf-8")
def get_uid(username):
try:
uid = pwd.getpwnam(username)[2]
except:
uid = ""
return uid
def get_computername():
cmd = ["/usr/sbin/scutil", "--get", "ComputerName"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_cpuinfo():
cmd = ["/usr/sbin/sysctl", "-n", "machdep.cpu.brand_string"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_buildversion():
cmd = ["/usr/bin/sw_vers", "-buildVersion"]
proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
output = output.strip()
return output.decode("utf-8")
def get_uptime():
cmd = ["/usr/sbin/sy
|
sctl", "-n", "kern.boottime"]
    proc = subprocess.Popen(
cmd,
shell=False,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(output, unused_error) = proc.communicate()
sec = int(re.sub(".*sec = (\d+),.*", "\\1", output))
up = int(time.time() - sec)
return up if up > 0 else -1
def set_pref(pref_name, pref_value):
"""Sets a preference, See prefs.py for details."""
CFPreferencesSetValue(
pref_name,
pref_value,
BUNDLE_ID,
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
)
CFPreferencesAppSynchronize(BUNDLE_ID)
print "set pref"
try:
CFPreferencesSetValue(
pref_name,
pref_value,
BUNDLE_ID,
kCFPreferencesAnyUser,
kCFPreferencesCurrentHost,
)
CFPreferencesAppSynchronize(BUNDLE_ID)
except Exception:
|
Lisergishnu/LTXKit
|
uStripDesign.py
|
Python
|
gpl-2.0
| 5,581
| 0.03064
|
# -*- coding: utf-8 -*-
# @Author: Marco Benzi <marco.benzi@alumnos.usm.cl>
# @Date: 2015-06-07 19:44:12
# @Last Modified 2015-06-09
# @Last Modified time: 2015-06-09 16:07:05
# ==========================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
lic License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ==========================================================================
import math
"""
Speed of light constant
"""
c = 3E8
"""
Vacuum permittivity
"""
e0 = 8.8541E-12
"""
Vacuum permeability
"""
u0 = 4E-7*math.pi
def getEffectivePermitivity(WHratio, er):
"""
Returns the effective permitivity for a given W/H ratio.
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `er` : Relative permitivity of the dielectric.
"""
if WHratio <= 1:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5) + 0.04*(1-WHratio)**2)*(er -1)/2
else:
return (er + 1)/2 + ((1 + 12/WHratio)**(-0.5))*(er -1)/2
def getAuxVarA(Zo,er):
"""
Returns the auxiliary variable
A = (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (Zo)/60 * math.sqrt((er + 1)/2) + (er-1)/(er+1)*(0.23+0.11/er)
def getAuxVarB(Zo,er):
"""
Returns the auxiliary variable
B = (377*math.pi)/(2*Zo*math.sqrt(er))
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
return (377*math.pi)/(2*Zo*math.sqrt(er))
def getWHRatioA(Zo,er):
"""
    Returns the W/H ratio for W/H < 2. If the result is > 2, the other method
    should be used.
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
A = getAuxVarA(Zo,er)
return (8*math.e**A)/(math.e**(2*A) - 2)
def getWHRatioB(Zo,er):
"""
    Returns the W/H ratio for W/H > 2. If the result is < 2, the other method
    should be used.
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
B = getAuxVarB(Zo,er)
return (2/math.pi)*(B-1 - math.log(2*B - 1) + (er - 1)*(math.log(B-1) + 0.39 - 0.61/er)/(2*er))
def getCharacteristicImpedance(WHratio, ef):
"""
Returns the characteristic impedance of the medium, based on the effective
permitivity and W/H ratio.
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `WHratio` : W/H ratio.
- `ef` : Effective permitivity of the dielectric.
"""
if WHratio <= 1:
return (60/math.sqrt(ef))*math.log(8/WHratio + WHratio/4)
else:
return (120*math.pi/math.sqrt(ef))/(WHratio + 1.393 + 0.667*math.log(WHratio +1.444))
def getWHRatio(Zo,er):
"""
    Returns the W/H ratio, trying both candidate solutions (W/H < 2 and
    W/H > 2). When no solution is found, returns zero.
    This function assumes that the thickness of conductors is insignificant.
Parameters:
- `Zo` : Real impedance of the line.
- `er` : Relative permitivity of the dielectric.
"""
efa = er
efb = er
Zoa = Zo
Zob = Zo
while 1:
rA = getWHRatioA(Zoa,efa)
rB = getWHRatioB(Zob,efb)
if rA < 2:
return rA
if rB > 2:
return rB
Zoa = math.sqrt(efa)*Zoa
Zob = math.sqrt(efb)*Zob
def getCorrectedWidth(W,H,t):
"""
For significant conductor thickness, this returns the corrected width.
    Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
"""
if t < H and t < W/2:
if W/H <= math.pi/2:
return W + (1 + math.log(2*H/t))*(t/math.pi)
else:
return W + (1 + math.log(4*math.pi*H/t))*(t/math.pi)
else:
print "The conductor is too thick!!"
def getConductorLoss(W,H,t,sigma,f,Zo):
"""
Returns the conductor loss in [Np/m].
Parameters:
- `W` : Width
- `H` : Height
- `t` : Conductor thickness
    - `sigma` : Conductivity of the conductor
- `f` : Operating frequency
- `Zo` : Characteristic impedance
"""
We = getCorrectedWidth(W,H,t)
P = 1 - (We/4/H)**2
Rs = math.sqrt((math.pi*f*u0)/sigma)
Q = 1 + H/We + (math.log((2*H)/t)-t/W)*H/(We*math.pi)
if W/H <= 1/(2*math.pi):
        return (1 + H/We + (math.log(4*math.pi*W/t) + t/W)*H/(math.pi*We))*(8.68*Rs*P)/(2*math.pi*Zo*H)
elif W/H <= 2:
return (8.68*Rs*P*Q)/(2*math.pi*Zo*H)
else:
        return ((8.68*Rs*Q)/(Zo*H))*(We/H + (We/math.pi/H)/(We/2/H + 0.94))*((H/We + 2*math.log(We/2/H + 0.94)/math.pi)**(-2))
def getDielectricLoss(er,ef,tanD,f):
"""
Returns the dielectric loss in [dB/cm].
    Parameters:
    - `er` : Relative permittivity of the dielectric
    - `ef` : Effective permittivity
    - `tanD` : Loss tangent (tan delta)
- `f` : Operating frequency
"""
lam = c/math.sqrt(ef)/f
return 27.3*(er*(ef-1)*tanD)/(lam*math.sqrt(er)*(er-1))
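# --- Illustrative usage sketch (not part of the original module) ---
# Size a 50 ohm microstrip line; er = 4.4 is an assumed, FR-4-like value.
if __name__ == '__main__':
    Zo, er = 50.0, 4.4
    ratio = getWHRatioB(Zo, er)        # branch valid for W/H > 2
    if ratio <= 2:
        ratio = getWHRatioA(Zo, er)    # fall back to the W/H < 2 branch
    ef = getEffectivePermitivity(ratio, er)
    print("W/H = %.2f, effective permittivity = %.2f, Zo check = %.1f ohm"
          % (ratio, ef, getCharacteristicImpedance(ratio, ef)))
    # Expected (approximately): W/H = 1.91, ef = 3.33, Zo check ~ 50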
clbarnes/hiveplotter | test/simple_tests.py | Python | bsd-3-clause | 882 | 0
from hiveplotter import HivePlot
import networkx as nx
import random
from unittest import TestCase
SEED = 1
NTYPES = ['A', 'B', 'C']
class SimpleCase(TestCase):
def make_graph(self):
G = nx.fast_gnp_random_graph(30, 0.2, seed=SEED)
for node, data in G.nodes_iter(data=True):
data['ntype'] = random.choice(NTYPES)
        for src, tgt, data in G.edges_iter(data=True):
data['weight'] = random.random()
return G
def test_simple(self):
G = self.make_graph()
H = HivePlot(G, node_class_attribute='ntype')
H.draw()
H.save_plot('./output/main.pdf')
def test_dump_cfg(self):
G = self.make_graph()
H = HivePlot(G, node_class_attribute='ntype')
H.draw()
print(H.dump_config())
if __name__ == '__main__':
tests = SimpleCase()
tests.test_simple()
macarthur-lab/xbrowse | xbrowse_server/base/views/account_views.py | Python | agpl-3.0 | 4,556 | 0.004829
import json
import logging
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.core.mail import send_mail
from django.template import RequestContext
from django.template.loader import render_to_string
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.contrib.admin.views.decorators import staff_member_required
from settings import LOGIN_URL
from xbrowse_server.base.forms import LoginForm, SetUpAccountForm
from xbrowse_server.base.models import UserProfile
from xbrowse_server.base.utils import get_projects_for_user
from xbrowse_server.decorators import log_request
def landing_page(request):
return render(request, 'landing_page.html', {})
@csrf_exempt
def errorlog(request):
logger = logging.getLogger(__name__)
    logger.error('xbrowse JS error', extra={'request': request})
return HttpResponse(json.dumps({'success': True}))
@log_request('home')
def home(request):
if request.user.is_anonymous():
return landing_page(request)
projects = get_projects_for_user(request.user)
return render(request, 'home.html', {
'user': request.user,
'projects': projects,
'new_page_url': '/dashboard',
})
@log_request('login')
def login_view(request):
logout(request)
next = request.GET.get('next')
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
user = form.user
login(request, user)
if next and '.wsgi' not in next:
return redirect(next)
else:
return redirect('/')
else:
form = LoginForm()
return render(request, 'login.html', {
'form': form,
'next': next,
})
def logout_view(request):
logout(request)
return redirect('home')
@log_request('set_password')
def set_password(request):
error = None
token = request.GET.get('token')
if not token or len(token) < 1:
return HttpResponse('Invalid')
profile = get_object_or_404(UserProfile, set_password_token=token)
if request.method == 'POST':
form = SetUpAccountForm(request.POST)
if form.is_valid():
user = profile.user
user.set_password(form.cleaned_data['password1'])
user.save()
profile.set_password_token = ''
profile.display_name = form.cleaned_data['name']
profile.save()
u = authenticate(username=profile.user.username, password=form.cleaned_data['password1'])
login(request, u)
return redirect('home')
else:
form = SetUpAccountForm()
return render(request, 'set_password.html', {
'form': form,
'error': error,
})
def forgot_password(request):
error = None
if request.method == 'POST':
        email = (request.POST.get('email') or '').lower()
if email is None or email == "":
error = "Please enter an email."
elif not User.objects.filter(email__iexact=email).exists():
error = "This email address is not valid."
else:
user = User.objects.get(email__iexact=email)
profile = user.profile
profile.set_password_token = User.objects.make_random_password(length=30)
profile.save()
email_content = render_to_string(
'emails/reset_password.txt',
{'user': user, 'BASE_URL': settings.BASE_URL },
)
send_mail('Reset your xBrowse password', email_content, settings.FROM_EMAIL, [email,], fail_silently=False )
return redirect('forgot_password_sent')
return render(request, 'forgot_password.html', {
'error': error,
'new_page_url': '/users/forgot_password',
})
def forgot_password_sent(request):
return render(request, 'forgot_password_sent.html', {
})
def style_css(request):
return render(request, 'css/style.css', {
}, content_type="text/css")
@log_request('user_summary')
@staff_member_required(login_url=LOGIN_URL)
def user_summary(request, username):
user = User.objects.get(username=username)
return render(request, 'user_summary.html', {
'user': user,
'projects': get_projects_for_user(user),
})
loehnertz/rattlesnake | rattlesnake.py | Python | mit | 15,227 | 0.001773
import sys
import math
import wave
import struct
import curses
import pyaudio
import numpy as np
import matplotlib.pyplot as plt
# 'curses' configuration
stdscr = curses.initscr()
stdscr.nodelay(True)
curses.noecho()
curses.cbreak()
# PyAudio object variable
pa = pyaudio.PyAudio()
# The mode the user chose with a script argument
MODE = sys.argv[1]
# Size of each read-in chunk
CHUNK = 1
# Amount of channels of the live recording
CHANNELS = 2
# Sample width of the live recording
WIDTH = 2
# Sample rate in Hz of the live recording
SAMPLE_RATE = 44100
# Set how often data for the result will be saved (every nth CHUNK)
if MODE != '-p' and MODE != '--playback':
try:
NTH_ITERATION = int(sys.argv[3])
except (ValueError, IndexError):
        print('The third argument has to be a number')
sys.exit()
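# Illustrative invocation sketch (derived from the argument handling above and
# the readin() calls in each mode; these are the flags the script checks):
#   python rattlesnake.py --file --decibel 100 music.wav   # file mode
#   python rattlesnake.py --live --waves 100               # live mode
#   python rattlesnake.py --playback music.wav             # playback mode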
def main():
# Execute the chosen mode
if MODE == '--file' or MODE == '-f':
file_mode()
elif MODE == '--live' or MODE == '-l':
live_mode()
elif MODE == '--playback' or MODE == '-p':
playback_mode()
else:
print('Please either choose file-mode, live-mode or playback-mode with the first argument')
def file_mode():
# Read in the given file
(waveform, stream) = readin(sys.argv[4])
# Give some feedback
stdscr.addstr('Now noise-cancelling the file')
# Collecting the volume levels in decibels in a list
decibel_levels = []
# Collecting the waves into lists
total_original = []
total_inverted = []
total_difference = []
# Counting the iterations of the while-loop
iteration = 0
# Determines the ratio of the mix
ratio = 1.0
# Determines if the noise-cancellation is active
active = True
# Read a first chunk and continue to do so for as long as there is a stream to read in
original = waveform.readframes(CHUNK)
while original != b'':
try:
# Capture if a key was pressed
pressed_key = stdscr.getch()
# If the 'o' key was pressed toggle the 'active' variable
if pressed_key == 111:
active = not active
                # While the noise-cancellation is not activated the ratio should be 100% towards the original audio
if not active:
ratio = 2.0
else:
ratio = 1.0
# Increase the ratio of the mix
elif pressed_key == 43:
ratio += 0.01
# Decrease the ratio of the mix
elif pressed_key == 45:
ratio -= 0.01
# If the 'x' key was pressed abort the loop
elif pressed_key == 120:
break
# Invert the original audio
inverted = invert(original)
# Play back a mixed audio stream of both, original source and the inverted one
if active:
mix = mix_samples(original, inverted, ratio)
stream.write(mix)
            # While the noise-cancellation is temporarily turned off, only play the original audio source
else:
stream.write(original)
# On every nth iteration append the difference between the level of the source audio and the inverted one
if iteration % NTH_ITERATION == 0:
# Clear the terminal before outputting the new value
stdscr.clear()
# Calculate the difference of the source and the inverted audio
difference = calculate_difference(original, inverted)
# Print the current difference
stdscr.addstr('Difference (in dB): {}\n'.format(difference))
# Append the difference to the list used for the plot
decibel_levels.append(difference)
# Calculate the waves for the graph
int_original, int_inverted, int_difference = calculate_wave(original, inverted, ratio)
total_original.append(int_original)
total_inverted.append(int_inverted)
total_difference.append(int_difference)
# Read in the next chunk of data
original = waveform.readframes(CHUNK)
# Add up one to the iterations
iteration += 1
except (KeyboardInterrupt, SystemExit):
break
# Stop the stream after there is no more data to read
stream.stop_stream()
stream.close()
# Outputting feedback regarding the end of the file
print('Finished noise-cancelling the file')
# Plot the results
if sys.argv[2] == '--decibel' or sys.argv[2] == '-db':
plot_results(decibel_levels, NTH_ITERATION)
elif sys.argv[2] == '--waves' or sys.argv[2] == '-wv':
plot_wave_results(total_original, total_inverted, total_difference, NTH_ITERATION)
# Revert the changes from 'curses'
curses.endwin()
# Terminate PyAudio as well as the program
pa.terminate()
sys.exit()
def live_mode():
# Start live recording
stdscr.addstr('Now noise-cancelling live')
# Create a new PyAudio object using the preset constants
stream = pa.open(
format=pa.get_format_from_width(WIDTH),
channels=CHANNELS,
rate=SAMPLE_RATE,
frames_per_buffer=CHUNK,
input=True,
output=True
)
# Collecting the volume levels in decibels in a list
decibel_levels = []
# Collecting the waves into lists
total_original = []
total_inverted = []
total_difference = []
# Determines if the noise-cancellation is active
active = True
# Grab a chunk of data in iterations according to the preset constants
try:
for i in range(0, int(SAMPLE_RATE / CHUNK * sys.maxunicode)):
# Capture if a key was pressed
pressed_key = stdscr.getch()
# If the 'o' key was pressed toggle the 'active' variable
if pressed_key == 111:
active = not active
# If the 'x' key was pressed abort the loop
if pressed_key == 120:
break
# Read in a chunk of live audio on each iteration
original = stream.read(CHUNK)
# Invert the original audio
inverted = invert(original)
# Play back the inverted audio
stream.write(inverted, CHUNK)
# On every nth iteration append the difference between the level of the source audio and the inverted one
if i % NTH_ITERATION == 0:
# Clear the terminal before outputting the new value
stdscr.clear()
# Calculate the difference of the source and the inverted audio
difference = calculate_difference(original, inverted)
# Print the current difference
stdscr.addstr('Difference (in dB): {}'.format(difference))
# Append the difference to the list used for the plot
decibel_levels.append(difference)
# Calculate the waves for the graph
int_original, int_inverted, int_difference = calculate_wave(original, inverted)
total_original.append(int_original)
total_inverted.append(int_inverted)
total_difference.append(int_difference)
except (KeyboardInterrupt, SystemExit):
# Outputting feedback regarding the end of the file
print('Finished noise-cancelling the file')
# Plot the results
if sys.argv[2] == '--decibel' or sys.argv[2] == '-db':
plot_results(decibel_levels, NTH_ITERATION)
elif sys.argv[2] == '--waves' or sys.argv[2] == '-wv':
plot_wave_results(total_original, total_inverted, total_difference, NTH_ITERATION)
# Revert the changes from 'curses'
curses.endwin()
# Terminate the program
stream.stop_stream()
stream.close()
pa.terminate()
sys.exit()
def playback_mode():
# Read in the given file
(waveform, stream) = readin(sys.argv[2])
# Give some feedback
print('Now playing back the file')
ddurieux/alignak | alignak/sorteddict.py | Python | agpl-3.0 | 6,690 | 0.000149
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sorteddict.py
# Sorted dictionary (implementation for Python 2.x)
#
# Copyright (c) 2010 Jan Kaliszewski (zuo)
#
# The MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from bisect import bisect_left, insort
from itertools import izip, repeat
def dictdoc(method):
"A decorator making reuse of the ordinary dict's docstrings more concise."
dict_method = getattr(dict, method.__name__)
if hasattr(dict_method, '__doc__'):
method.__doc__ = dict_method.__doc__
return method
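# Illustrative note (not part of the original module): after decoration, the
# methods below report the plain dict docstrings, e.g. once SortedDict is
# defined, SortedDict.pop.__doc__ == dict.pop.__doc__ holds.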
class SortedDict(dict):
'''Dictionary with sorted keys.
    The interface is similar to the ordinary dict's one, but:
* methods: __repr__(), __str__(), __iter__(), iterkeys(), itervalues(),
iteritems(), keys(), values(), items() and popitem() -- return results
taking into consideration sorted keys order;
* new methods: largest_key(), largest_item(), smallest_key(),
smallest_item() added.
'''
def __init__(self, *args, **kwargs):
'''Like with the ordinary dict: from a mapping, from an iterable
of (key, value) pairs, or from keyword arguments.'''
dict.__init__(self, *args, **kwargs)
self._sorted_keys = sorted(dict.iterkeys(self))
@dictdoc
def __repr__(self):
return 'SortedDict({%s})' % ', '.join('%r: %r' % item
for item in self.iteritems())
@dictdoc
def __str__(self):
return repr(self)
@dictdoc
def __setitem__(self, key, value):
key_is_new = key not in self
dict.__setitem__(self, key, value)
if key_is_new:
insort(self._sorted_keys, key)
@dictdoc
def __delitem__(self, key):
dict.__delitem__(self, key)
del self._sorted_keys[bisect_left(self._sorted_keys, key)]
def __iter__(self, reverse=False):
'''D.__iter__() <==> iter(D) <==> D.iterkeys() -> an iterator over
sorted keys (add reverse=True for reverse ordering).'''
if reverse:
return reversed(self._sorted_keys)
else:
return iter(self._sorted_keys)
iterkeys = __iter__
def itervalues(self, reverse=False):
'''D.itervalues() -> an iterator over values sorted by keys
(add reverse=True for reverse ordering).'''
return (self[key] for key in self.iterkeys(reverse))
def iteritems(self, reverse=False):
'''D.iteritems() -> an iterator over (key, value) pairs sorted by keys
(add reverse=True for reverse ordering).'''
return ((key, self[key]) for key in self.iterkeys(reverse))
def keys(self, reverse=False):
'''D.keys() -> a sorted list of keys
(add reverse=True for reverse ordering).'''
return list(self.iterkeys(reverse))
def values(self, reverse=False):
'''D.values() -> a list of values sorted by keys
(add reverse=True for reverse ordering).'''
return list(self.itervalues(reverse))
def items(self, reverse=False):
'''D.items() -> a list of (key, value) pairs sorted by keys
(add reverse=True for reverse ordering).'''
return list(self.iteritems(reverse))
@dictdoc
def clear(self):
dict.clear(self)
del self._sorted_keys[:]
def copy(self):
'''D.copy() -> a shallow copy of D (still as a SortedDict).'''
return self.__class__(self)
@classmethod
@dictdoc
def fromkeys(cls, seq, value=None):
return cls(izip(seq, repeat(value)))
@dictdoc
def pop(self, key, *args, **kwargs):
if key in self:
del self._sorted_keys[bisect_left(self._sorted_keys, key)]
return dict.pop(self, key, *args, **kwargs)
def popitem(self):
'''D.popitem() -> (k, v). Remove and return a (key, value) pair with
the largest key; raise KeyError if D is empty.'''
try:
key = self._sorted_keys.pop()
except IndexError:
raise KeyError('popitem(): dictionary is empty')
else:
return key, dict.pop(self, key)
@dictdoc
def setdefault(self, key, default=None):
if key not in self:
insort(self._sorted_keys, key)
return dict.setdefault(self, key, default)
@dictdoc
def update(self, other=()):
if hasattr(other, 'keys') and hasattr(other, 'values'):
# mapping
newkeys = [key for key in other if key not in self]
else:
# iterator/sequence of pairs
other = list(other)
newkeys = [key for key, _ in other if key not in self]
dict.update(self, other)
for key in newkeys:
insort(self._sorted_keys, key)
def largest_key(self):
'''D.largest_key() -> the largest key; raise KeyError if D is empty.'''
try:
return self._sorted_keys[-1]
except IndexError:
raise KeyError('largest_key(): dictionary is empty')
def largest_item(self):
'''D.largest_item() -> a (key, value) pair with the largest key;
raise KeyError if D is empty.'''
key = self.largest_key()
return key, self[key]
def smallest_key(self):
'''D.smallest_key() -> the smallest key; raise KeyError if D is empty.'''
try:
return self._sorted_keys[0]
except IndexError:
raise KeyError('smallest_key(): dictionary is empty')
def smallest_item(self):
'''D.smallest_item() -> a (key, value) pair with the smallest key;
raise KeyError if D is empty.'''
key = self.smallest_key()
return key, self[key]
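# --- Illustrative usage sketch (not part of the original module; Python 2,
# matching the izip-based implementation above) ---
if __name__ == '__main__':
    d = SortedDict({'b': 2, 'c': 3})
    d['a'] = 1
    print d.keys()                # ['a', 'b', 'c'] -- always sorted
    print d.items(reverse=True)   # [('c', 3), ('b', 2), ('a', 1)]
    print d.popitem()             # ('c', 3) -- removes the largest key
    print d.smallest_item()       # ('a', 1)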
cliftonmcintosh/openstates | openstates/nh/__init__.py | Python | gpl-3.0 | 3,210 | 0.005296
import lxml.html
from .bills import NHBillScraper
from .legislators import NHLegislatorScraper
from .committees import NHCommitteeScraper
metadata = {
'abbreviation': 'nh',
'name': 'New Hampshire',
'capitol_timezone': 'America/New_York',
'legislature_name': 'New Hampshire General Court',
'legislature_url': 'http://www.gencourt.state.nh.us/',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{'name': '2011-2012', 'sessions': ['2011', '2012'],
'start_year': 2011, 'end_year': 2012},
{'name': '2013-2014', 'sessions': ['2013', '2014'],
'start_year': 2013, 'end_year': 2014},
{'name': '2015-2016', 'sessions': ['2015', '2016'],
'start_year': 2015, 'end_year': 2016},
{'name': '2017-2018', 'sessions': ['2017'],
         'start_year': 2017, 'end_year': 2018}
],
'session_details': {
'2011': {'display_name': '2011 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2011%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2011 Session',
},
'2012': {'display_name': '2012 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2012%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2012 Session',
},
'2013': {'display_name': '2013 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2013%20Session%20Bill%20Status%20Tables.zip',
# Their dump filename changed, probably just a hiccup.
'_scraped_name': '2013',
# '_scraped_name': '2013 Session',
},
'2014': {'display_name': '2014 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2014%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2014 Session',
},
'2015': {'display_name': '2015 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2015%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2015 Session',
},
'2016': {'display_name': '2016 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2016%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2016 Session',
},
'2017': {'display_name': '2017 Regular Session',
'_scraped_name': '2017 Session',
},
},
'feature_flags': ['subjects', 'influenceexplorer'],
'_ignored_scraped_sessions': ['2013 Session','2017 Session Bill Status Tables Link.txt'],
}
def session_list():
from billy.scrape.utils import url_xpath
zips = url_xpath('http://gencourt.state.nh.us/downloads/',
'//a[contains(@href, "Bill%20Status%20Tables")]/text()')
return [zip.replace(' Bill Status Tables.zip', '') for zip in zips]
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
return doc.xpath('//html')[0].text_content()
radiasoft/radtrack | radtrack/plot/contourf_demo.py | Python | apache-2.0 | 3,308 | 0.009069
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
origin = 'lower'
#origin = 'upper'
delta = 0.025
x = y = np.arange(-3.0, 3.01, delta)
X, Y = np.meshgrid(x, y)
Z1 = plt.mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = plt.mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z1 - Z2)
nr, nc = Z.shape
# put NaNs in one corner:
Z[-nr//6:, -nc//6:] = np.nan
# contourf will convert these to masked
Z = np.ma.array(Z)
# mask another corner:
Z[:nr//6, :nc//6] = np.ma.masked
# mask a circle in the middle:
interior = np.sqrt((X**2) + (Y**2)) < 0.5
Z[interior] = np.ma.masked
# We are using automatic selection of contour levels;
# this is usually not such a good idea, because they don't
# occur on nice boundaries, but we do it here for purposes
# of illustration.
CS = plt.contourf(X, Y, Z, 10, # [-1, -0.1, 0, 0.1],
#alpha=0.5,
cmap=plt.cm.bone,
origin=origin)
# Note that in the following, we explicitly pass in a subset of
# the contour levels used for the filled contours. Alternatively,
# we could pass in additional levels to provide extra resolution,
# or leave out the levels kwarg to use all of the original levels.
CS2 = plt.contour(CS, levels=CS.levels[::2],
colors = 'r',
origin=origin,
hold='on')
plt.title('Nonsense (3 masked regions)')
plt.xlabel('word length anomaly')
plt.ylabel('sentence length anomaly')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('verbosity coefficient')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
plt.figure()
# Now make a contour plot with the levels specified,
# and with the colormap generated automatically from a list
# of colors.
levels = [-1.5, -1, -0.5, 0, 0.5, 1]
CS3 = plt.contourf(X, Y, Z, levels,
colors = ('r', 'g', 'b'),
origin=origin,
extend='both')
# Our data range extends outside the range of levels; make
# data below the lowest contour level yellow, and above the
# highest level cyan:
CS3.cmap.set_under('yellow')
CS3.cmap.set_over('cyan')
CS4 = plt.contour(X, Y, Z, levels,
colors = ('k',),
linewidths = (3,),
origin = origin)
plt.title('Listed colors (3 masked regions)')
plt.clabel(CS4, fmt = '%2.1f', colors = 'w', fontsize=14)
# Notice that the colorbar command gets all the information it
# needs from the ContourSet object, CS3.
plt.colorbar(CS3)
# Illustrate all 4 possible "extend" settings:
extends = ["neither", "both", "min", "max"]
cmap = plt.cm.get_cmap("winter")
cmap.set_under("magenta")
cmap.set_over("yellow")
# Note: contouring simply excludes masked or nan regions, so
# instead of using the "bad" colormap value for them, it draws
# nothing at all in them. Therefore the following would have
# no effect:
#cmap.set_bad("red")
fig, axs = plt.subplots(2,2)
for ax, extend in zip(axs.ravel(), extends):
cs = ax.contourf(X, Y, Z, levels, cmap=cmap, extend=extend, origin=origin)
fig.colorbar(cs, ax=ax, shrink=0.9)
ax.set_title("extend = %s" % extend)
ax.locator_params(nbins=4)
plt.show()
bkolli/swift | test/unit/proxy/test_sysmeta.py | Python | apache-2.0 | 16,039 | 0
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
import unittest
import os
from tempfile import mkdtemp
import shutil
from swift.common.storage_policy import StoragePolicy
from swift.common.swob import Request
from swift.common.utils import mkdirs, split_path
from swift.common.wsgi import monkey_patch_mimetools, WSGIContext
from swift.obj import server as object_server
from swift.proxy import server as proxy
import swift.proxy.controllers
from test.unit import FakeMemcache, debug_logger, FakeRing, \
fake_http_connect, patch_policies
class FakeServerConnection(WSGIContext):
'''Fakes an HTTPConnection to a server instance.'''
def __init__(self, app):
super(FakeServerConnection, self).__init__(app)
self.data = ''
def getheaders(self):
return self._response_headers
def read(self, amt=None):
try:
result = next(self.resp_iter)
return result
except StopIteration:
return ''
def getheader(self, name, default=None):
result = self._response_header_value(name)
return result if result else default
def getresponse(self):
environ = {'REQUEST_METHOD': self.method}
req = Request.blank(self.path, environ, headers=self.req_headers,
body=self.data)
self.data = ''
self.resp = self._app_call(req.environ)
self.resp_iter = iter(self.resp)
if self._response_headers is None:
self._response_headers = []
status_parts = self._response_status.split(' ', 1)
self.status = int(status_parts[0])
self.reason = status_parts[1] if len(status_parts) == 2 else ''
return self
def getexpect(self):
class ContinueResponse(object):
status = 100
return ContinueResponse()
def send(self, data):
self.data += data
def close(self):
pass
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)
self.method = method
self.req_headers = headers
return self
def get_http_connect(account_func, container_func, object_func):
'''Returns a http_connect function that delegates to
entity-specific http_connect methods based on request path.
'''
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
a, c, o = split_path(path, 1, 3, True)
if o:
func = object_func
elif c:
func = container_func
else:
func = account_func
resp = func(ipaddr, port, device, partition, method, path,
headers=headers, query_string=query_string)
return resp
return http_connect
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(replicas=1))])
class TestObjectSysmeta(unittest.TestCase):
'''Tests object sysmeta is correctly handled by combination
of proxy server and object server.
'''
def _assertStatus(self, resp, expected):
self.assertEqual(resp.status_int, expected,
'Expected %d, got %s'
% (expected, resp.status))
def _assertInHeaders(self, resp, expected):
for key, val in expected.items():
self.assertTrue(key in resp.headers,
'Header %s missing from %s' % (key, resp.headers))
self.assertEqual(val, resp.headers[key],
'Expected header %s:%s, got %s:%s'
% (key, val, key, resp.headers[key]))
def _assertNotInHeaders(self, resp, unexpected):
for key, val in unexpected.items():
self.assertFalse(key in resp.headers,
'Header %s not expected in %s'
% (key, resp.headers))
def setUp(self):
self.app = proxy.Application(None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(replicas=1),
container_ring=FakeRing(replicas=1))
monkey_patch_mimetools()
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.obj_ctlr = object_server.ObjectController(
conf, logger=debug_logger('obj-ut'))
http_connect = get_http_connect(fake_http_connect(200),
fake_http_connect(200),
FakeServerConnection(self.obj_ctlr))
self.orig_base_http_connect = swift.proxy.controllers.base.http_connect
self.orig_obj_http_connect = swift.proxy.controllers.obj.http_connect
swift.proxy.controllers.base.http_connect = http_connect
swift.proxy.controllers.obj.http_connect = http_connect
def tearDown(self):
shutil.rmtree(self.tmpdir)
swift.proxy.controllers.base.http_connect = self.orig_base_http_connect
swift.proxy.controllers.obj.http_connect = self.orig_obj_http_connect
original_sysmeta_headers_1 = {'x-object-sysmeta-test0': 'val0',
'x-object-sysmeta-test1': 'val1'}
original_sysmeta_headers_2 = {'x-object-sysmeta-test2': 'val2'}
changed_sysmeta_headers = {'x-object-sysmeta-test0': '',
'x-object-sysmeta-test1': 'val1 changed'}
new_sysmeta_headers = {'x-object-sysmeta-test3': 'val3'}
original_meta_headers_1 = {'x-object-meta-test0': 'meta0',
'x-object-meta-test1': 'meta1'}
original_meta_headers_2 = {'x-object-meta-test2': 'meta2'}
changed_meta_headers = {'x-object-meta-test0': '',
'x-object-meta-test1': 'meta1 changed'}
new_meta_headers = {'x-object-meta-test3': 'meta3'}
bad_headers = {'x-account-sysmeta-test1': 'bad1'}
def test_PUT_sysmeta_then_GET(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
self._assertInHeaders(resp, self.original_sysmeta_headers_1)
self._assertInHeaders(resp, self.original_meta_headers_1)
self._assertNotInHeaders(resp, self.bad_headers)
def test_PUT_sysmeta_then_HEAD(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
req = Request.blank(path, environ=env, headers=hdrs, body='x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank(path, environ=env)
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
        self._assertInHeaders(resp, self.original_sysmeta_headers_1)
        self._assertInHeaders(resp, self.original_meta_headers_1)
        self._assertNotInHeaders(resp, self.bad_headers)
Tayamarn/socorro | socorro/cron/jobs/ftpscraper.py | Python | mpl-2.0 | 19,943 | 0.000301
from __future__ import print_function
import datetime
import sys
import re
import os
import json
import urlparse
import fnmatch
import functools
import mock
import lxml.html
import requests
from requests.adapters import HTTPAdapter
from configman import Namespace
from configman.converters import class_converter, str_to_list
from crontabber.base import BaseCronApp
from crontabber.mixins import (
as_backfill_cron_app,
with_postgres_transactions
)
from socorro.cron import buildutil
from socorro.app.socorro_app import App, main
from socorro.lib.datetimeutil import string_to_datetime
def memoize_download(fun):
cache = {}
@functools.wraps(fun)
def inner(self, url):
if url not in cache:
cache[url] = fun(self, url)
return cache[url]
return inner
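# Illustrative usage sketch (not part of the original module): the cache is
# keyed by URL and lives in the decorator's closure, so a method with a
# (self, url) signature does real work only once per distinct URL, shared
# across instances:
#
#     class DemoDownloader(object):
#         @memoize_download
#         def download(self, url):
#             return requests.get(url).text   # executed once per URL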
class ScrapersMixin(object):
"""
    Mixin that requires the host class to provide `self.download(some_url)`
    and `self.skip_json_file(json_url)`.
"""
def get_links(self, url, starts_with=None, ends_with=None):
results = []
content = self.download(url)
if not content:
return []
if not (starts_with or ends_with):
raise NotImplementedError(
                'get_links requires either `starts_with` or `ends_with`'
)
html = lxml.html.document_fromstring(content)
path = urlparse.urlparse(url).path
def url_match(link):
# The link might be something like "/pub/mobile/nightly/"
# but we're looking for a path that starts with "nightly".
# So first we need to remove what's part of the base URL
# to make a fair comparison.
if starts_with is not None:
# If the current URL is http://example.com/some/dir/
# and the link is /some/dir/mypage/ and the thing
# we're looking for is "myp" then this should be true
if link.startswith(path):
link = link.replace(path, '')
return link.startswith(starts_with)
elif ends_with:
return link.endswith(ends_with)
return False
for _, _, link, _ in html.iterlinks():
if url_match(link):
results.append(urlparse.urljoin(url, link))
return results
def parse_build_json_file(self, url, nightly=False):
content = self.download(url)
if content:
try:
kvpairs = json.loads(content)
kvpairs['repository'] = kvpairs.get('moz_source_repo')
if kvpairs['repository']:
kvpairs['repository'] = kvpairs['repository'].split(
'/', -1
)[-1]
kvpairs['build_type'] = kvpairs.get('moz_update_channel')
kvpairs['buildID'] = kvpairs.get('buildid')
# bug 1065071 - ignore JSON files that have keys with
# missing values.
if None in kvpairs.values():
self.config.logger.warning(
'warning, unsupported JSON file: %s', url
)
return kvpairs
# bug 963431 - it is valid to have an empty file
# due to a quirk in our build system
except ValueError:
self.config.logger.warning(
'Unable to JSON parse content %r',
content,
exc_info=True
)
def parse_info_file(self, url, nightly=False):
self.config.logger.debug('Opening %s', url)
content = self.download(url)
results = {}
bad_lines = []
if not content:
return results, bad_lines
contents = content.splitlines()
if nightly:
results = {'buildID': contents[0], 'rev': contents[1]}
if len(contents) > 2:
results['altrev'] = contents[2]
elif contents:
results = {}
for line in contents:
if line == '':
continue
try:
key, value = line.split('=')
results[key] = value
except ValueError:
bad_lines.append(line)
return results, bad_lines
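# Illustrative note (not from the original file): parse_info_file handles two
# layouts. Nightly info files are positional (buildID on line 1, revision on
# line 2, optional alternate revision on line 3); release info files are
# 'key=value' lines, and any line that does not split on '=' is collected
# into `bad_lines` instead of raising.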
def get_json_release(self, candidate_url, dirname):
version = dirname.split('-candidates')[0]
builds = self.get_links(candidate_url, starts_with='build')
if not builds:
return
latest_build = builds.pop()
version_build = os.path.basename(os.path.normpath(latest_build))
possible_platforms = (
'linux', 'mac', 'win', 'debug', # for Firefox
'android-api-16', 'android-api-15', 'android-x86', # for mobile
)
for platform in possible_platforms:
platform_urls = self.get_links(
latest_build,
starts_with=platform
)
for platform_url in platform_urls:
# We're only interested in going into depper directories.
# Inside a directory like 'firefox/candidates/45.3.0esr-candidates/build1/'
# there is likely to be regular files that match the
# 'possible_platforms' above. Skip those that aren't directories.
# This means we're much less likely to open URLs like
# '...45.3.0esr-candidates/build1/en-US/' which'll 404
if not platform_url.endswith('/'):
continue
platform_local_url = urlparse.urljoin(platform_url, 'en-US/')
json_files = self.get_links(
platform_local_url,
ends_with='.json'
)
for json_url in json_files:
if self.skip_json_file(json_url):
continue
kvpairs = self.parse_build_json_file(json_url)
if not kvpairs:
continue
kvpairs['version_build'] = version_build
yield (platform, version, kvpairs)
def get_json_nightly(self, nightly_url, dirname):
json_files = self.get_links(nightly_url, ends_with='.json')
for url in json_files:
if self.skip_json_file(url):
continue
basename = os.path.basename(url)
if '.en-US.' in url:
                pv, platform = re.sub('\.json$', '', basename).split('.en-US.')
elif '.multi.' in url:
pv, platform = re.sub('\.json$', '', basename).split('.multi.')
else:
continue
            version = pv.split('-')[-1]
repository = []
for field in dirname.split('-'):
# Skip until something is not a digit and once we've
# appended at least one, keep adding.
if not field.isdigit() or repository:
repository.append(field)
repository = '-'.join(repository).strip('/')
kvpairs = self.parse_build_json_file(url, nightly=True)
yield (platform, repository, version, kvpairs)
def get_release(self, candidate_url):
builds = self.get_links(candidate_url, starts_with='build')
if not builds:
self.config.logger.info('No build dirs in %s', candidate_url)
return
latest_build = builds.pop()
version_build = os.path.basename(os.path.normpath(latest_build))
info_files = self.get_links(latest_build, ends_with='_info.txt')
for info_url in info_files:
kvpairs, bad_lines = self.parse_info_file(info_url)
# os.path.basename works on URL looking things too
# and not just file path
platform = os.path.basename(info_url).split('_info.txt')[0]
# suppose the `info_url` is something like
# "https://archive.moz.../40.0.3-candidates/..11_info.txt"
# then look for the "40.0.3-candidates" part and remove
# "-candidate
nttks/edx-platform | biz/djangoapps/gx_org_group/tests/test_views.py | Python | agpl-3.0 | 34,785 | 0.003823
import json
from mock import patch
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from student.tests.factories import UserFactory
from biz.djangoapps.ga_invitation.tests.test_views import BizContractTestBase
from biz.djangoapps.ga_manager.tests.factories import ManagerFactory
from biz.djangoapps.gx_org_group.models import Group, Right, Parent, Child
from biz.djangoapps.gx_org_group.tests.factories import GroupUtil, GroupFactory
from biz.djangoapps.gx_member.tests.factories import MemberFactory
from biz.djangoapps.gx_member.models import Member
class OrgGroupListViewTest(BizContractTestBase):
"""
Test Class for gx_org_group
"""
def setUp(self):
super(BizContractTestBase, self).setUp()
self.user_gacco_staff = UserFactory(username='gacco_staff', is_staff=True, is_superuser=True)
self.user_tac_aggregator = UserFactory(username='tac_aggregator')
self.user_a_director = UserFactory(username='a_director')
self.user_manager1 = UserFactory(username='manager1')
self.user_manager2 = UserFactory(username='manager2')
self.org_a = self._create_organization(org_name='org_a', org_code='a', creator_org=self.gacco_organization)
self.manager_platformer = ManagerFactory.create(org=self.gacco_organization, user=self.user_gacco_staff,
permissions=[self.platformer_permission])
self.manager_manager1 = ManagerFactory.create(org=self.gacco_organization, user=self.user_manager1,
permissions=[self.manager_permission])
self.manager_manager2 = ManagerFactory.create(org=self.gacco_organization, user=self.user_manager2,
permissions=[self.manager_permission])
def _index_view(self):
"""
Returns URL of group list as index
:return:
"""
return reverse('biz:group:group_list')
def _delete_group(self):
"""
Returns URL of delete group
:return:
"""
return reverse('biz:group:delete_group')
def _upload_csv(self):
"""
Returns URL of file upload API
:return:
"""
return reverse('biz:group:upload_csv')
def _download_csv(self):
"""
Returns URL of group list download API
:return:
"""
return reverse('biz:group:download_csv')
def _download_headers_csv(self):
"""
Returns URL of group list download API
:return:
"""
return reverse('biz:group:download_headers_csv')
def _detail_view(self, selected_group_id):
"""
Returns URL of detail of group known access right settings
:param selected_group_id:
:return:
"""
return reverse('biz:group:detail', kwargs={'selected_group_id': selected_group_id})
def _accessible_user_list(self):
"""
Returns URL of accessible user list API
:return:
"""
return reverse('biz:group:accessible_user_list')
def _accessible_parent_list(self):
"""
Returns URL of parent group accessible user list API
:return:
"""
return reverse('biz:group:accessible_parent_list')
def _grant_right(self):
"""
Returns URL of access right grant API
:return:
"""
return reverse('biz:group:grant_right')
@property
def _csv_header(self):
return ",".join([
'Organization Group Code',
'Organization Group Name',
'Parent Organization Code',
'Parent Organization Name',
'notes'
]) + '\r\n'
@property
def _csv_data_first(self):
csv_data = "G01,G1,,,\r\n" \
"G01-01,G1-1,G01,G1,\r\n" \
"G01-01-01,G1-1-1,G01-01,G1-1,\r\n" \
"G01-01-02,G1-1-2,G01-01,G1-1,\r\n" \
"G01-02,G1-2,G01,G1,\r\n" \
"G02,G2,,,\r\n" \
"G02-01,G2-1,G02,G2,\r\n" \
"G02-01-01,G2-1-1,G02-01,G2-1,\r\n" \
"G02-01-02,G2-1-2,G02-01,G2-1,\r\n" \
"G02-02,G2-2,G02,G2,\r\n"
return csv_data
@property
def _csv_data_cir_err_master(self):
csv_data = "1000,group1,,,\r\n" \
"1000aaa,group3,1000,group1,\r\n" \
"1001,group4,,,\r\n" \
"1002,group3,1000,group1,\r\n" \
"1003,group3,1000,group1,\r\n" \
"1005,group5,,,\r\n" \
"1006,group6,,,\r\n" \
"1007,group7,1009,group9,\r\n" \
"1008,group8,,,\r\n" \
"1009,group9,,,\r\n" \
"aaaaaaaaabbbbbbbbbcc,group3,1000,group1,\r\n"
return csv_data
@property
def _csv_data_cir_err_tran(self):
csv_data = "1000,group6,1000,,\r\n"
return csv_data
def _test_upload_cir_err_master(self):
csv_header = self._csv_header
csv_data = self._csv_data_cir_err_master
csv_content = (csv_header + csv_data).encode('cp932')
upload_file = SimpleUploadedFile("org_group.csv", csv_content)
with self.skip_check_course_selection(current_organization=self.gacco_organization):
response = self.client.post(self._upload_csv(), {'organization': '', 'org_group_csv': upload_file})
self.assertEqual(200, response.status_code)
def _test_upload_first(self):
csv_header = self._csv_header
csv_data = self._csv_data_first
csv_content = (csv_header + csv_data).encode('cp932')
upload_file = SimpleUploadedFile("org_group.csv", csv_content)
with self.skip_check_course_selection(current_organization=self.gacco_organization):
response = self.client.post(self._upload_csv(), {'organization': '', 'org_group_csv': upload_file})
self.assertEqual(200, response.status_code)
self._test_group('G01', 'G1', '', '', '', 0, [],['G01-01', 'G01-02', 'G01-01-01', 'G01-01-02'])
self._test_group('G01-01', 'G1-1', 'G01', 'G1', '', 1, ['G01'], ['G01-01-01', 'G01-01-02'])
self._test_group('G01-01-01', 'G1-1-1', 'G01-01', 'G1-1', '', 2, ['G01', 'G01-01'], [])
self._test_group('G01-01-02', 'G1-1-2', 'G01-01', 'G1-1', '', 2, ['G01', 'G01-01'], [])
self._test_group('G01-02', 'G1-2', 'G01', 'G1', '', 1, ['G01'], [])
self._test_group('G02', 'G2','', '', '', 0, [], ['G02-01', 'G02-02', 'G02-01-01', 'G02-01-02'])
self._test_group('G02-01', 'G2-1', 'G02', 'G2', '', 1, ['G02'], ['G02-01-01', 'G02-01-02'])
self._test_group('G02-01-01', 'G2-1-1', 'G02-01', 'G2-1', '', 2, ['G02', 'G02-01'], [])
self._test_group('G02-01-02', 'G2-1-2', 'G02-01', 'G2-1', '', 2, ['G02', 'G02-01'], [])
self._test_group('G02-02', 'G2-2', 'G02', 'G2', '', 1, ['G02'],[])
def _test_upload_second(self):
csv_header = self._csv_header
csv_data = "G02,G02underG1,G01,G1,moved to under G1\r\n"
csv_content = (csv_header + csv_data).encode('cp932')
upload_file = SimpleUploadedFile("org_group.csv", csv_content)
with self.skip_check_course_selection(current_organization=self.gacco_organization):
response = self.client.post(self._upload_csv(), {'organization': '', 'org_group_csv': upload_file})
self.assertEqual(200, response.status_code)
self._test_group('G02', 'G02underG1', 'G01', 'G1', 'moved to under G1', 1, ['G01'], ['G02-01', 'G02-02', 'G02-01-01', 'G02-01-02'])
def _test_upload_third(self):
csv_header = self._csv_header
csv_data = "G03,G3,G01,G1,connect to under G1\r\n" \
"G03-01,G3-1,G03,G3,\r\n" \
"G03-01-01,G3-1-1,G03-01,G3-1,\r\n" \
"G03-01-02,G3-1-2,G03-
Debaq/Triada | CP_Marcha/TUG.py | Python | gpl-3.0 | 8,883 | 0.014643
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import serial
import time
import serial.tools.list_ports
#import json
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
from matplotlib.gridspec import GridSpec
#from mpl_toolkits.mplot3d import Axes3D
#import threading
import numpy as np
import datetime
import os
import sys
from scipy.interpolate import splrep, splev
from termcolor import colored, cprint
TIEMPO_PRUEBA = 5 ## TEST DURATION IN SECONDS
DELAY = 1 ## TEST DELAY IN SAMPLES, TO REMOVE START-UP ARTIFACTS
SUAVIZADO = 10 ## SMOOTHING OF THE CURVES
MARKER_SIZE = 5 ## MARKER SIZE
BAR_SIZE = 0.4 ## BAR WIDTH
## RAW DATA
vx0 = [] # time
yaw0 = [] # YAW
pitch0 = [] # PITCH
roll0 = [] # ROLL
## SMOOTHED
s_yaw0 = []
s_pitch0 = []
s_roll0 = []
## PEAK POINTS
yaw0_max = []
pitch0_max = []
roll0_max = []
vx_yaw0 = []
vx_pitch0 = []
vx_roll0 = []
## HERTZ
hz_pitch0 = []
hz_roll0 = []
hz_yaw0 = []
## CONDITIONS
cond1 = False
cond2 = False
cond3 = False
cond4 = False
state_test = 1
state_loop = 1
###############
## FILES
name_folder = "none"
## MAIN
def main():
profilePatient()
ok = True
while (ok == True):
loop()
##LOOP
def loop():
global state_loop
if (state_loop == 1):
ardu()
reset()
selectTest()
#smooth(SUAVIZADO)
#buscar()
#hertz_state()
graph()
borrarDatos()
if (state_loop == 2):
logo()
estado = input("Nuevo usuario? y/n: ")
if (estado == 'y'):
limpiar()
reset()
state_loop = 1
main()
else:
limpiar()
print(Salir)
exit()
if(state_loop == 3):
ardu()
reset()
selectTest()
graph()
borrarDatos()
## PATIENT DATA ENTRY
def profilePatient():
    global name_folder, TIEMPO_PRUEBA
    logo()
    number_input = ''
    try:
        TIEMPO_PRUEBA = int(input("Evaluation time in seconds [20]: ") or 20)
    except:
        print("only numbers are allowed, please try again")
        time.sleep(1)
        profilePatient()
    while number_input == '':
        number_input = input("Id: ")
        if number_input == '':
            print("an id must be assigned")
            time.sleep(1)
    logo()
    name_input = input("First name: ")
    lastname_input = input("Last name: ")
    age_input = input("Age: ")
    height_input = float(input("Height cm: "))
    weight_input = float(input("Weight kg: "))
    name_folder = number_input+"_"+name_input+"_"+lastname_input
    logo()
    print(("ID = ", colored(number_input, 'blue', attrs=['bold'])))
    print(("MAX TIME FOR THE TUG = ", colored(TIEMPO_PRUEBA, 'blue', attrs=['bold'])))
    print(("NAME = ", colored(name_input, 'blue', attrs=['bold']), colored(lastname_input, 'blue', attrs=['bold'])))
    print(("AGE = ", colored(age_input, 'blue', attrs=['bold'])))
    print(("HEIGHT = ", colored(height_input, 'blue', attrs=['bold'])))
    print(("WEIGHT = ", colored(weight_input, 'blue', attrs=['bold'])))
    IMC = round((weight_input)/((height_input/100)**2), 1)  # body mass index (BMI)
    if IMC < 16:
        colorIMC = 'red'
        resIMC = 'Severe malnutrition'
    elif IMC >= 16.1 and IMC <= 18.4:
        colorIMC = 'magenta'
        resIMC = 'Moderate malnutrition'
    elif IMC >= 18.5 and IMC <= 22:
        colorIMC = 'yellow'
        resIMC = 'Underweight'
    elif IMC >= 22.1 and IMC <= 24.0:
        colorIMC = 'green'
        resIMC = 'Normal weight'
    elif IMC >= 25 and IMC <= 29.9:
        colorIMC = 'yellow'
        resIMC = 'Overweight'
    elif IMC >= 30 and IMC <= 34.9:
        colorIMC = 'magenta'
        resIMC = 'Obesity type I'
    elif IMC >= 35 and IMC <= 39.9:
        colorIMC = 'red'
        resIMC = 'Obesity type II'
    elif IMC > 40:
        colorIMC = 'red'
        resIMC = 'Obesity type III'
    print(("IMC = ", colored(IMC, colorIMC, attrs=['bold']), '-', colored(resIMC, colorIMC, attrs=['bold'])))
    createPatient = input("Is the data correct? y/n: ")
    if createPatient.lower() == "y":
        limpiar()
        createFolder()
        createLog(number_input, name_input, lastname_input, age_input, str(height_input), str(weight_input))
    else:
        main()
## CREATE THE PATIENT FOLDER
def createFolder():
    try:
        global name_folder
        os.makedirs(name_folder)
        logo()
        creado = colored(centerify('created', 80), 'green', attrs=['reverse'])
        print(creado)
    except OSError:
        print("Data already created, please use another Id")
        main()
def selectTest():
global state_test
global vx0, yaw0, pitch0, roll0
global yaw0_max, pitch0_max, roll0_max, vx_yaw0, vx_pitch0, vx_roll0
global hz_pitch0, hz_roll0, hz_yaw0
    state_test = input("Press <enter> to begin: ")
if (cond1 == True):
vx0 = []
yaw0 = []
pitch0 = []
roll0 = []
s_yaw0 = []
s_pitch0 = []
s_roll0 = []
yaw0_max = []
pitch0_max = []
roll0_max = []
vx_yaw0 = []
vx_pitch0 = []
vx_roll0 = []
hz_pitch0 = []
hz_roll0 = []
hz_yaw0 = []
collect()
## CREATE LOG WITH PATIENT DATA
def createLog(number_input, name_input, lastname_input, age_input, height_input, weight_input):
name_Log_profile = number_input+"\n"+name_input+"\n"+lastname_input+"\n"+age_input+"\n"+height_input+"\n"+weight_input
Log_profile = name_folder+'.profile'
log = open(Log_profile, 'w')
log.write(name_Log_profile)
log.close()
## ARDUINO CONNECTION
def ardu():
#try:
global arduino
port = list(serial.tools.list_ports.comports())
device = port[0]
arduino = serial.Serial(device.device, 9600, timeout=1.)
#time.sleep(2)
#arduino.write(b'9')
print("Receptor Conectado")
# except IndexError:
# raw_input("Conecte y presione <enter> tecla para volver a intentar")
# ardu()
## ARDUINO RESET
def reset():
global arduino
arduino.setDTR(False)
time.sleep(1)
arduino.flushInput()
arduino.setDTR(True)
def borrarDatos():
global state_loop
ok = input("tomar otra muestra? y/n: ")
if ok.lower() == "y":
state_loop = 3
else:
state_loop = 2
## COLLECT THE DATA
def collect():
global vx0, yaw0, pitch0, roll0, cond1
cond1 = True
date=datetime.datetime.now()
i = 0
t = 0
conteo = TIEMPO_PRUEBA
try:
while i <= TIEMPO_PRUEBA:
if (i==0.2):
log_test = open(name_folder+'/'+"TUG"+str(date.day)+'-'+str(date.month)+'-'+str(date.year)+'_'+str(date.hour)+'.'+str(date.minute)+str(date.second)+'.tug', 'a')
data = []
data.append(arduino.readline())
data = [x.replace("\r\n","") for x in data]
for line in data:
Type = line.split(",")
a = Type[0]
b = Type[1]
c = Type[2]
d = Type[3]
e = Type[4]
f = Type[5]
g = Type[6]
Line = (a + "," + b + "," + c + "," + d + "," + e + "," + f + "," + g +"\r\n")
log_test.write(Line)
#log_test.close()
a = float(a)
b = float(b)
c = float(c)
d = float(d)
e = float(e)
f = float(f)
g = float(g)
if(len(vx0)==0):
t = t + d
d = d - t
if(len(vx0)>=1):
d = d -t
d = d/1000
limpiar()
print(Medir)
print(log_test.name)
print(d)
i = d
vx0.append(d)
yaw0.append(c)
pitch0.append(a)
roll0.append(b)
except ValueError:
#print"Error"
#raw_input("volver a intentar? ")
collect()
except IndexError:
#print"Error"
thisisshi/cloud-custodian | tools/c7n_azure/tests_azure/tests_resources/test_iot_hub.py | Python | apache-2.0 | 921 | 0
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template
class IoTHubTest(BaseTest):
def setUp(self):
super(IoTHubTest, self).setUp()
def test_iot_hub_schema_validate(self):
        with self.sign_out_patch():
p = self.load_policy({
'name': 'test-iot-hub-compliance',
'resource': 'azure.iothub'
}, validate=True)
self.assertTrue(p)
    @arm_template('iothub.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-iothub',
'resource': 'azure.iothub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cctest-iothub*'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
kavasoglu/ocl_web | ocl_web/libs/ocl/concept_class.py | Python | mpl-2.0 | 229 | 0
# from ..ocl import ApiResource
# class ConceptClass(ApiResource):
#     def __init__(self):
#         super(ConceptClass, self).__init__()
#         self.names = []
#         self.descriptions = []
#         self.sources = []
bleepbloop/Pivy | scons/scons-local-1.2.0.d20090919/SCons/Platform/hpux.py | Python | isc | 1,763 | 0.002836
"""engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py 4369 2009/09/19 15:58:29 scons"
import posix
def generate(env):
posix.generate(env)
#Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
env['MAXLINELENGTH'] = 2045000
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
codenote/chromium-test | tools/telemetry/telemetry/page/page_test_runner.py | Python | bsd-3-clause | 2,691 | 0.010777
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.DiscoverClasses(test_dir,
os.path.join(test_dir, '..'),
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, test, results)
  print '%i pages succeeded\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
  if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
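# Illustrative note (not from the original file): Main() is meant to be called
# from a small wrapper script, roughly
#   sys.exit(Main(test_dir, page_set_filenames))
# It discovers PageTest subclasses under test_dir and matches <test> naively
# against sys.argv before the full browser-option parse.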
privateip/ansible | lib/ansible/plugins/doc_fragments/shell_windows.py | Python | gpl-3.0 | 1,460 | 0.002055
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Windows shell documentation fragment
# FIXME: set_module_language don't belong here but must be set so they don't fail when someone
# get_option('set_module_language') on this plugin
DOCUMENTATION = """
options:
async_dir:
description:
- Directory in which ansible will keep async job information.
- Before Ansible 2.8, this was set to C(remote_tmp + "\\.ansible_async").
default: '%USERPROFILE%\\.ansible_async'
ini:
- section: powershell
key: async_dir
vars:
    - name: ansible_async_dir
version_added: '2.8'
remote_tmp:
description:
- Temporary directory to use on targets when copying files to the host.
default: '%TEMP%'
ini:
- section: powershell
key: remote_tmp
vars:
- name: ansible_remote_tmp
set_module_language:
description:
- Controls if we set the locale for modules when executing on the
target.
- Windows only supports C(no) as an option.
type: bool
default: 'no'
choices: ['no', False]
environment:
description:
- List of dictionaries of environment variables and their values to use when
executing commands.
type: list
default: [{}]
"""
|
deepmind/open_spiel
|
open_spiel/python/algorithms/psro_v2/meta_strategies.py
|
Python
|
apache-2.0
| 5,344
| 0.008046
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Meta-strategy solvers for PSRO."""
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.algorithms import projected_replicator_dynamics
import pyspiel
EPSILON_MIN_POSITIVE_PROBA = 1e-8
def uniform_strategy(solver, return_joint=False):
"""Returns a Random Uniform distribution on policies.
Args:
solver: GenPSROSolver instance.
return_joint: If true, returns joint probabilities as well as marginals.
Otherwise only marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
policy_lengths = [len(pol) for pol in policies]
result = [np.ones(pol_len) / pol_len for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def softmax_on_range(number_policies):
x = np.array(list(range(number_policies)))
x = np.exp(x-x.max())
x /= np.sum(x)
return x
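# For example (values rounded), softmax_on_range(3) weights later indices more
# heavily: exp([0, 1, 2] - 2) normalized gives roughly [0.090, 0.245, 0.665].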
def uniform_biased_strategy(solver, return_joint=False):
"""Returns a Biased Random Uniform distribution on policies.
The uniform distribution is biased to prioritize playing against more recent
policies (Policies that were appended to the policy list later in training)
instead of older ones.
Args:
solver: GenPSROSolver instance.
return_joint: If true, returns joint probabilities as well as marginals.
Otherwise only marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
if not isinstance(policies[0], list):
policies = [policies]
policy_lengths = [len(pol) for pol in policies]
result = [softmax_on_range(pol_len) for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def renormalize(probabilities):
"""Replaces all negative entries with zeroes and normalizes the result.
Args:
probabilities: probability vector to renormalize. Has to be one-dimensional.
Returns:
Renormalized probabilities.
"""
probabilities[probabilities < 0] = 0
probabilities = probabilities / np.sum(probabilities)
return probabilities
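# For example, renormalize(np.array([-0.2, 0.3, 0.9])) zeroes the negative
# entry and rescales the rest, yielding [0.0, 0.25, 0.75].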
def get_joint_strategy_from_marginals(probabilities):
"""Returns a joint strategy matrix from a list of marginals.
Args:
probabilities: list of probabilities.
Returns:
A joint strategy from a list of marginals.
"""
probas = []
for i in range(len(probabilities)):
probas_shapes = [1] * len(probabilities)
probas_shapes[i] = -1
probas.append(probabilities[i].reshape(*probas_shapes))
result = np.product(probas)
return result.reshape(-1)
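# For example, marginals of lengths 2 and 3 are reshaped to (2, 1) and (1, 3);
# their broadcast product is the 2x3 joint matrix, flattened to length 6.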
def nash_strategy(solver, return_joint=False):
"""Returns nash distribution on meta game matrix.
This method only works for two player zero-sum games.
Args:
solver: GenPSROSolver instance.
return_joint: If true, returns joint probabilities as well as marginals.
Otherwise only marginals.
Returns:
Nash distribution on strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
meta_games = [x.tolist() for x in meta_games]
if len(meta_games) != 2:
raise NotImplementedError(
"nash_strategy solver works only for 2p zero-sum"
"games, but was invoked for a {} player game".format(len(meta_games)))
nash_prob_1, nash_prob_2, _, _ = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(*meta_games)))
result = [
renormalize(np.array(nash_prob_1).reshape(-1)),
renormalize(np.array(nash_prob_2).reshape(-1))
]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def prd_strategy(solver, return_joint=False):
"""Computes Projected Replicator Dynamics strategies.
Args:
solver: GenPSROSolver instance.
return_joint: If true, returns joint probabilities as well as marginals.
Otherwise only marginals.
Returns:
PRD-computed strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, lis
|
t):
meta_games = [meta_games, -meta_games]
kwargs = solver.get_kwargs()
result = projected_replicator_dynamics.projected_replicator_dynamics(
meta_games, **kwargs)
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
META_STRATEGY_METHODS = {
"uniform_biased": uniform_biased_strategy,
"uniform": uniform_strategy,
"nash": nash_strategy,
"prd": prd_strategy,
}
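# Illustrative lookup (the solver object is assumed to exist, e.g. a
# GenPSROSolver as described in the docstrings above):
#
#   mss = META_STRATEGY_METHODS["nash"]
#   marginals, joint = mss(solver, return_joint=True)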
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-nistats/package.py
|
Python
|
lgpl-2.1
| 1,272
| 0.002358
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNistats(PythonPackage):
"""Modeling and Statistical analysis of fMRI data in Python."""
homepage = "https://github.com/nilearn/nistats"
pypi = "nistats/nistats-0.0.1rc0.tar.gz"
version('0.0.1rc0', sha256='dcc4c4e410f542fd72e02e12b3b6531851bae2680d08ad29658b272587ef2f98')
version('0.0.1b2', sha256='a853149087bafbf1bed12664ed8889a63ff15dde1fb7a9d51e8a094afc8d695d')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-numpy@1.11:', type=('build', 'run'))
depends_on('py-scipy@0.17:', type=('build', 'run'))
depends_on('py-scikit-learn@0.18:', type=('build', 'run'))
depends_on('py-nibabel@2.0.2:', type=('build', 'run'))
# needs +plotting to avoid ModuleNotFoundError:
# 'nilearn.plotting.js_plotting_utils' when importing nistats.reporting
# Functionality has been incorporated into py-nilearn@0.7:
depends_on('py-nilearn+plotting@0.4:0.6', type=('build', 'run'))
depends_on('py-pandas@0.18:', type=('build', 'run'))
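# Illustrative install command (standard Spack usage; the version is optional):
#
#   spack install py-nistats@0.0.1rc0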
|
hirofumi0810/tensorflow_end2end_speech_recognition
|
models/encoders/core/student_cnn_xe.py
|
Python
|
mit
| 4,732
| 0.000634
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Student CNN encoder for XE training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models.encoders.core.cnn_util import conv_layer, max_pool, batch_normalization
############################################################
# Architecture: (feature map, kernel(f*t), stride(f,t))
# CNN1: (128, 9*9, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (3,1)
# CNN2: (256, 3*4, (1,1)) * 1 layers
# Batch normalization
# ReLU
# Max pool (1,1)
# fc: 2048 (ReLU) * 4 layers
############################################################
class StudentCNNXEEncoder(object):
"""Student CNN encoder for XE training.
Args:
input_size (int): the dimensions of input vectors.
This is expected to be num_channels * 3 (static + Δ + ΔΔ)
splice (int): frames to splice
num_stack (int): the number of frames to stack
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
name (string, optional): the name of encoder
"""
def __init__(self,
input_size,
splice,
num_stack,
parameter_init,
name='cnn_student_xe_encoder'):
assert input_size % 3 == 0
self.num_channels = (input_size // 3) // num_stack // splice
self.splice = splice
self.num_stack = num_stack
self.parameter_init = parameter_init
self.name = name
def __call__(self, inputs, keep_prob, is_training):
"""Construct model graph.
Args:
inputs (placeholder): A tensor of size
`[B, input_size (num_channels * splice * num_stack * 3)]`
keep_prob (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
is_training (bool):
Returns:
outputs: Encoder states.
if time_major is True, a tensor of size `[T, B, output_dim]`
otherwise, `[B, output_dim]`
"""
# inputs: 2D tensor `[B, input_dim]`
batch_size = tf.shape(inputs)[0]
input_dim = inputs.shape.as_list()[-1]
# NOTE: input_dim: num_channels * splice * num_stack * 3
# for debug
# print(input_dim) # 1200
# print(self.num_channels) # 40
# print(self.splice) # 5
# print(self.num_stack) # 2
assert input_dim == self.num_channels * self.splice * self.num_stack * 3
# Reshape to 4D tensor `[B, num_channels, splice * num_stack, 3]`
inputs = tf.reshape(
inputs,
shape=[batch_size, self.num_channels, self.splice * self.num_stack, 3])
# NOTE: filter_size: `[H, W, C_in, C_out]`
with tf.variable_scope('CNN1'):
inputs = conv_layer(inputs,
filter_size=[9, 9, 3, 128],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[3, 1],
stride=[3, 1],
name='max_pool')
with tf.variable_scope('CNN2'):
inputs = conv_layer(inputs,
filter_size=[3, 4, 128, 256],
stride=[1, 1],
parameter_init=self.parameter_init,
activation='relu')
inputs = batch_normalization(inputs, is_training=is_training)
inputs = max_pool(inputs,
pooling_size=[1, 1],
stride=[1, 1],
name='max_pool')
# Reshape to 2D tensor `[B, new_h * new_w * C_out]`
outputs = tf.reshape(
inputs, shape=[batch_size, np.prod(inputs.shape.as_list()[-3:])])
for i in range(1, 5, 1):
with tf.variable_scope('fc%d' % (i)) as scope:
outputs = tf.contrib.layers.fully_connected(
inputs=outputs,
num_outputs=2048,
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
return outputs
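# Illustrative graph construction (a sketch; shapes follow the docstring above
# and the placeholder names are assumptions):
#
#   encoder = StudentCNNXEEncoder(input_size=1200, splice=5, num_stack=2,
#                                 parameter_init=0.1)
#   inputs_pl = tf.placeholder(tf.float32, shape=[None, 1200])
#   keep_prob_pl = tf.placeholder(tf.float32)
#   outputs = encoder(inputs_pl, keep_prob_pl, is_training=True)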
|
carpedm20/fbchat
|
tests/threads/test_page.py
|
Python
|
bsd-3-clause
| 669
| 0.001495
|
import fbchat
from fbchat import PageData
def test_page_from_graphql(session):
data = {
"id": "123456",
"name": "Some school",
"profile_picture": {"uri": "https://scontent-arn2-1.xx.fbcdn.net/v/..."},
"url": "https://www.facebook.com/some-school/",
"category_type": "SCHOOL",
"city": None,
}
assert PageData(
session=session,
id="123456",
photo=fbchat.Image(url="https://scontent-arn2-1.xx.fbcdn.net/v/..."),
name="Some school",
url="https://www.facebook.com/some-school/",
city=None,
category="SCHOOL",
) == PageData._from_graphql(session, data)
|
Atom1c/home
|
board/board/wsgi.py
|
Python
|
unlicense
| 385
| 0.002597
|
"""
WSGI config for board project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "board.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
networks-lab/tidyextractors
|
tidyextractors/tidymbox/mbox_to_pandas.py
|
Python
|
gpl-3.0
| 6,462
| 0.002787
|
# *********************************************************************************************
# Copyright (C) 2017 Joel Becker, Jillian Anderson, Steve McColl and Dr. John McLevey
#
# This file is part of the tidyextractors package developed for Dr John McLevey's Networks Lab
# at the University of Waterloo. For more information, see
# http://tidyextractors.readthedocs.io/en/latest/
#
# tidyextractors is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# tidyextractors is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with tidyextractors.
# If not, see <http://www.gnu.org/licenses/>.
# *********************************************************************************************
import os
import re
import tqdm
import mailbox
import warnings
import pandas as pd
import email.utils as email
import email.header as header
# Adapted from Phil Deutsch's "mbox-analysis" https://github.com/phildeutsch/mbox-analysis
def clean_addresses(addresses):
"""
Cleans email address.
:param addresses: List of strings (email addresses)
:return: List of strings (cleaned email addresses)
"""
if addresses is None:
return []
addresses = addresses.replace("\'", "")
address_list = re.split('[,;]', addresses)
clean_list = []
for address in address_list:
temp_clean_address = clean_address(address)
clean_list.append(temp_clean_address)
return clean_list
def clean_address(address):
"""
Cleans a single email address.
:param address: String (email address)
:return: String (clean email address)
"""
if isinstance(address, header.Header):
return clean_address(address.encode('ascii'))
elif isinstance(address, str):
address = address.replace("<", "")
address = address.replace(">", "")
address = address.replace("\"", "")
address = address.replace("\n", " ")
address = address.replace("MAILER-DAEMON", "")
address = address.lower().strip()
email = None
for word in address.split(' '):
email_regex = re.compile(
"^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$"
)
email = re.match(email_regex, word)
if email is not None:
clean_email = email.group(0)
if email is None:
if address.split(' ')[-1].find('@') > -1:
clean_email = address.split(' ')[-1].strip()
elif address.split(' ')[-1].find('?') > -1:
clean_email = 'n/a'
else:
clean_email = address
return clean_email
elif address is None:
return None
else:
raise ValueError('An unexpected type was given to clean_address. Address was {}'.format(address))
return None
def get_body(message):
"""
Extracts body text from an mbox message.
:param message: Mbox message
:return: String
"""
try:
sm = str(message)
body_start = sm.find('iamunique', sm.find('iamunique')+1)
body_start = sm.find('Content-Transfer-Encoding', body_start+1)
body_start = sm.find('\n', body_start+1)+1
body_end = sm.find('From: ', body_start + 1)
if body_end == -1:
body_end = sm.find('iamunique', body_start + 1)
body_end = sm.find('\n', body_end - 25)
body = sm[body_start:body_end]
body = body.replace("=20\n", "")
body = body.replace("=FC", "ü")
body = body.replace("=F6", "ö")
body = body.replace("=84", "\"")
body = body.replace("=94", "\"")
body = body.replace("=96", "-")
body = body.replace("=92", "\'")
body = body.replace("=93", "\"")
body = body.replace("=E4", "ä")
body = body.replace("=DF", "ss")
body = body.replace("=", "")
body = body.replace("\"", "")
body = body.replace("\'", "")
except:
body = None
return body
def write_table(mboxfile, mailTable):
"""
Takes a list and extends it with lists of data, which is
extracted from mbox messages.
:param mboxfile: Mbox file name/path
:param mailTable: A list (of lists)
:return: An extended list of lists
"""
mail_box_contents = mailbox.mbox(mboxfile)
m_pbar = tqdm.tqdm(range(0,len(mail_box_contents)))
m_pbar.set_description('Extracting mbox messages...')
count = 0
update_interval = min(50,len(mail_box_contents))
for message in mail_box_contents:
count += 1
if count % update_interval == 0:
m_pbar.update(update_interval)
clean_from = clean_address(message['From'])
clean_to = clean_addresses(message['To'])
clean_cc = clean_addresses(message['Cc'])
try:
clean_date = email.parsedate_to_datetime(message['Date'])
except:
clean_date = None
mailTable.append([
clean_from,
clean_to,
clean_cc,
clean_date,
message['Subject'],
get_body(message)
])
def mbox_to_pandas(mbox_path):
"""
Extracts all mbox messages from mbox files in mbox_path.
:param mbox_path: Path to an mbox file OR a directory containing mbox files.
:return: A Pandas DataFrame with messages as rows/observations.
"""
if os.path.isfile(mbox_path):
mbox_files = [mbox_path]
else:
mbox_files = [os.path.join(dirpath, f) for dirpath, dirnames, files in
os.walk(mbox_path) for f in files if f.endswith('mbox')]
mail_table = []
f_pbar = tqdm.tqdm(range(0,len(mbox_files)))
f_pbar.set_description('Extracting mbox files...')
for mbox_file in mbox_files:
write_table(mbox_file, mail_table)
f_pbar.update(1)
df_out = pd.DataFrame(mail_table)
df_out.columns = ['From', 'To', 'Cc', 'Date', 'Subject', 'Body']
df_out['NumTo'] = df_out['To'].map(lambda i: len(i))
df_out['NumCC'] = df_out['Cc'].map(lambda i: len(i))
return df_out
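# Illustrative usage (the path is a placeholder):
#
#   df = mbox_to_pandas('archive.mbox')
#   print(df[['From', 'Date', 'Subject', 'NumTo']].head())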
|
missionpinball/mpf
|
mpf/tests/test_DropTargets.py
|
Python
|
mit
| 18,577
| 0.001453
|
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from unittest.mock import MagicMock, patch
from mpf.tests.MpfTestCase import MpfTestCase
class TestDropTargets(MpfTestCase):
def get_config_file(self):
return 'test_drop_targets.yaml'
def get_machine_path(self):
return 'tests/machine_files/drop_targets/'
def get_platform(self):
return 'smart_virtual'
def test_reset_and_playfield_active(self):
self.mock_event("playfield_active")
self.hit_switch_and_run("switch1", 1)
self.hit_switch_and_run("switch2", 1)
# playfield should be active when drop targets are shot down
self.assertEventCalled("playfield_active")
self.mock_event("playfield_active")
self.assertTrue(self.machine.drop_targets["left1"].complete)
self.assertTrue(self.machine.drop_targets["left2"].complete)
self.assertFalse(self.machine.drop_targets["left3"].complete)
# reset the bank. this should not trigger playfield_active
self.machine.drop_target_banks["left_bank"].reset()
self.advance_time_and_run(1)
self.assertEventNotCalled("playfield_active")
self.assertFalse(self.machine.drop_targets["left1"].complete)
self.assertFalse(self.machine.drop_targets["left2"].complete)
self.assertFalse(self.machine.drop_targets["left3"].complete)
def test_drop_target_bank(self):
self.assertIn('left1', self.machine.drop_targets)
self.assertIn('left2', self.machine.drop_targets)
self.assertIn('left3', self.machine.drop_targets)
self.assertIn('left_bank', self.machine.drop_target_banks)
self.machine.coils["coil1"].pulse = MagicMock(return_value=200)
self.assertFalse(self.machine.drop_targets["left1"].complete)
self.assertFalse(self.machine.drop_targets["left2"].complete)
self.assertFalse(self.machine.drop_targets["left3"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank"].complete)
self.hit_switch_and_run("switch1", 1)
self.hit_switch_and_run("switch2", 1)
self.assertTrue(self.machine.drop_targets["left1"].complete)
self.assertTrue(self.machine.drop_targets["left2"].complete)
self.assertFalse(self.machine.drop_targets["left3"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank"].complete)
assert not self.machine.coils["coil1"].pulse.called
self.hit_switch_and_run("switch3", .5)
self.assertTrue(self.machine.drop_targets["left1"].complete)
self.assertTrue(self.machine.drop_targets["left2"].complete)
self.assertTrue(self.machine.drop_targets["left3"].complete)
self.assertTrue(self.machine.drop_target_banks["left_bank"].complete)
assert not self.machine.coils["coil1"].pulse.called
# it should reset after 1s
self.advance_time_and_run(.5)
self.machine.coils["coil1"].pulse.assert_called_once_with(max_wait_ms=100)
# after another 100ms the switches release
self.release_switch_and_run("switch1", 0)
self.release_switch_and_run("switch2", 0)
self.release_switch_and_run("switch3", 1)
self.assertFalse(self.machine.drop_targets["left1"].complete)
self.assertFalse(self.machine.drop_targets["left2"].complete)
self.assertFalse(self.machine.drop_targets["left3"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank"].complete)
# check that the bank does not reset if already down
self.machine.coils["coil1"].pulse = MagicMock(return_value=100)
self.machine.drop_target_banks['left_bank'].reset()
assert not self.machine.coils["coil1"].pulse.called
# reset should work with one target down
self.hit_switch_and_run("switch1", 1)
self.machine.drop_target_banks['left_bank'].reset()
self.machine.coils["coil1"].pulse.assert_called_once_with(max_wait_ms=100)
def test_knockdown_and_reset(self):
self.mock_event("unexpected_ball_on_playfield")
self.machine.coils["coil2"].pulse = MagicMock(wraps=self.machine.coils["coil2"].pulse)
self.machine.coils["coil3"].pulse = MagicMock(wraps=self.machine.coils["coil3"].pulse)
self.assertFalse(self.machine.drop_targets["left6"].complete)
# knock it down
self.post_event("knock_knock")
self.advance_time_and_run(.3)
assert not self.machine.coils["coil2"].pulse.called
self.machine.coils["coil3"].pulse.assert_called_once_with(max_wait_ms=100)
# ignore ms means the state is not updated yet
self.assertFalse(self.machine.drop_targets["left6"].complete)
self.advance_time_and_run(.3)
# and now it is
self.assertTrue(self.machine.drop_targets["left6"].complete)
# reset it
self.machine.coils["coil3"].pulse.reset_mock()
self.post_event("reset_target")
self.advance_time_and_run(.3)
assert not self.machine.coils["coil3"].pulse.called
self.machine.coils["coil2"].pulse.assert_called_once_with(max_wait_ms=100)
# ignore ms means the state is not updated yet
self.assertTrue(self.machine.drop_targets["left6"].complete)
self.advance_time_and_run(6)
# and now it is
self.assertFalse(self.machine.drop_targets["left6"].complete)
self.assertEventNotCalled("unexpected_ball_on_playfield")
def test_drop_targets_in_mode(self):
self.machine.modes['mode1'].start()
self.advance_time_and_run()
self.machine.coils["coil2"].pulse = MagicMock(return_value=30)
self.assertFalse(self.machine.drop_targets["left4"].complete)
self.assertFalse(self.machine.drop_targets["left5"].complete)
self.assertFalse(self.machine.drop_targets["left6"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank_2"].complete)
self.hit_switch_and_run("switch4", 1)
self.hit_switch_and_run("switch5", 1)
self.assertTrue(self.machine.drop_targets["left4"].complete)
self.assertTrue(self.machine.drop_targets["left5"].complete)
self.assertFalse(self.machine.drop_targets["left6"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank_2"].complete)
self.machine.modes['mode1'].stop()
self.advance_time_and_run()
self.assertTrue(self.machine.drop_targets["left4"].complete)
self.assertTrue(self.machine.drop_targets["left5"].complete)
self.assertFalse(self.machine.drop_targets["left6"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank_2"].complete)
# should not complete the bank
self.hit_switch_and_run("switch6", .1)
self.assertTrue(self.machine.drop_targets["left4"].complete)
self.assertTrue(self.machine.drop_targets["left5"].complete)
self.assertTrue(self.machine.drop_targets["left6"].complete)
self.assertFalse(self.machine.drop_target_banks["left_bank_2"].complete)
self.post_event("reset_target")
self.machine.modes['mode1'].start()
self.advance_time_and_run()
# mode is running again. should complete
self.hit_switch_and_run("switch4", .1)
self.hit_switch_and_run("switch5", .1)
self.hit_switch_and_run("switch6", .1)
self.assertTrue(self.machine.drop_targets["left4"].complete)
self.assertTrue(self.machine.drop_targets["left5"].complete)
self.assertTrue(self.machine.drop_targets["left6"].complete)
self.assertTrue(self.machine.drop_target_banks["left_bank_2"].complete)
def test_drop_target_reset(self):
target = self.machine.drop_targets["left6"]
self.machine.coils["coil2"].pulse = MagicMock()
self.machine.coils["coil3"].pulse = MagicMock()
self.assertSwitchState("switch6", 0)
# target up. it should not reset
target.reset()
self.advance_time_and_run()
assert not self.machine.coils["coil2"].pulse.called
assert not self.machine.coils["coil3"].pulse.calle
|
wasit7/PythonDay
|
django/mysite6/mysite6/wsgi.py
|
Python
|
bsd-3-clause
| 391
| 0
|
"""
WSGI config for mysite6 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite6.settings")
application = get_wsgi_application()
|
rosenvladimirov/addons
|
product_barcodes_bg/__init__.py
|
Python
|
agpl-3.0
| 992
| 0.001008
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2015 ERP|OPEN (www.erpopen.nl).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
|
chalbersma/persist_transaction
|
archive.py
|
Python
|
gpl-3.0
| 3,154
| 0.045656
|
#!/usr/bin/env python3
from configparser import ConfigParser
from colorama import Fore, Back, Style
import sys
import time
import argparse
import ast
import pymysql
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="JSON Config File with our Storage Info", required=True)
parser.add_argument("-V", "--verbose", action="store_true", help="Enable Verbose Mode")
parser._optionals.title = "DESCRIPTION "
# Parser Args
args = parser.parse_args()
# Grab Variables
CONFIG=args.config
VERBOSE=args.verbose
def archive_collections(CONFIG, VERBOSE) :
# Process Config
try:
# Read Our INI with our data collection rules
config = ConfigParser()
config.read(CONFIG)
except Exception as e: # pylint: disable=broad-except, invalid-name
sys.exit('Bad configuration file {}'.format(e))
# Grab me Collections Items Turn them into a Dictionary
config_items=dict()
# Collection Items
for section in config :
config_items[section]=dict()
for item in config[section]:
config_items[section][item] = config[section][item]
if VERBOSE:
print("Config Items: ", config_items)
do_archive = True
try :
# Note that autocommit is on
db_conn = pymysql.connect(host=config_items["db"]["dbhostname"], port=int(config_items["db"]["dbport"]), \
user=config_items["db"]["dbus
|
er"], passwd=config_items
|
["db"]["dbpassword"], \
db=config_items["db"]["dbname"], autocommit=True )
except Exception as e :
# Error
print("Error Connecting to Datbase with error: ", str(e) )
do_archive = False
if do_archive == True :
# Set Archive Time
ARCHIVE_TIME = int(time.time())
if VERBOSE:
print("Archive Time: " , str(ARCHIVE_TIME))
# Create Query Strings
grab_delete_ids = "select id from trked_trans where active = False and lastChecked < FROM_UNIXTIME(" + str(ARCHIVE_TIME) +" ) - interval 7 DAY ;"
remove_trked_trans_sql = "DELETE FROM trked_trans where id = %s ; "
remove_attempt_sql = "DELETE FROM attempts where fk_trked_trans_id = %s ; "
cur = db_conn.cursor()
if VERBOSE:
print(grab_delete_ids)
print(remove_trked_trans_sql)
print(remove_attempt_sql)
success = True
try:
cur.execute(grab_delete_ids)
to_delete_ids=cur.fetchall()
except Exception as e :
if VERBOSE:
print(Fore.RED, "Trouble with id grabbing query ", str(grab_delete_ids) , " error : ", str(e), Style.RESET_ALL)
success = False
else :
# Worked So Do the
try :
# executemany runs the DELETE once per (id,) row fetched above
cur.executemany(remove_trked_trans_sql, to_delete_ids)
trans_removed = cur.rowcount
cur.executemany(remove_attempt_sql, to_delete_ids)
attempts_removed = cur.rowcount
except Exception as e :
if VERBOSE:
print(Fore.RED, "Trouble with removal queries error : ", str(e), Style.RESET_ALL)
success = False
if success == True :
print(Fore.GREEN, "Long Transaction Archived", str(trans_removed), " | Attempt records removed ", str(attempts_removed), Style.RESET_ALL)
else :
print(Fore.RED, "Archiving has failed" , Style.RESET_ALL)
if __name__ == "__main__":
archive_collections(CONFIG, VERBOSE)
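# For reference, a minimal config sketch (illustrative values) with the [db]
# keys this script reads:
#
#   [db]
#   dbhostname = localhost
#   dbport = 3306
#   dbuser = archiver
#   dbpassword = secret
#   dbname = transactions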
|
shaunwbell/FOCI_Analysis
|
temp/griddata.py
|
Python
|
mit
| 3,221
| 0.005899
|
# griddata.py - 2010-07-11 ccampo
import numpy as np
def griddata(x, y, z, binsize=0.01, retbin=True, retloc=True):
"""
Place unevenly spaced 2D data on a grid by 2D binning (nearest
neighbor interpolation).
Parameters
----------
x : ndarray (1D)
The independent data x-axis of the grid.
y : ndarray (1D)
The independent data y-axis of the grid.
z : ndarray (1D)
The dependent data in the form z = f(x,y).
binsize : scalar, optional
The full width and height of each bin on the grid. If each
bin is a cube, then this is the x and y dimension. This is
the step in both directions, x and y. Defaults to 0.01.
retbin : boolean, optional
Function returns `bins` variable (see below for description)
if set to True. Defaults to True.
retloc : boolean, optional
Function returns `wherebins` variable (see below for description)
if set to True. Defaults to True.
Returns
-------
grid : ndarray (2D)
The evenly gridded data. The value of each cell is the median
value of the contents of the bin.
bins : ndarray (2D)
A grid the same shape as `grid`, except the value of each cell
is the number of points in that bin. Returns only if
`retbin` is set to True.
wherebin : list (2D)
A 2D list the same shape as `grid` and `bins` where each cell
contains the indices of `z` which contain the values stored
in the particular bin.
Revisions
---------
2010-07-11 ccampo Initial version
"""
# get extrema values.
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
# make coordinate arrays.
xi = np.arange(xmin, xmax+binsize, binsize)
yi = np.arange(ymin, ymax+binsize, binsize)
xi, yi = np.meshgrid(xi,yi)
# make the grid.
grid = np.zeros(xi.shape, dtype=x.dtype)
nrow, ncol = grid.shape
if retbin: bins = np.copy(grid)
# create list in same shape as grid to store indices
if retloc:
wherebin = np.copy(grid)
wherebin = wherebin.tolist()
# fill in the grid.
for row in range(nrow):
for col in range(ncol):
xc = xi[row, col] # x coordinate.
yc = yi[row, col] # y coordinate.
# find the position that xc and yc correspond to.
posx = np.abs(x - xc)
posy = np.abs(y - yc)
ibin = np.logical_and(posx < binsize/2., posy < binsize/2.)
ind = np.where(ibin == True)[0]
# fill the bin.
bin = z[ibin]
if retloc: wherebin[row][col] = ind
if retbin: bins[row, col] = bin.size
if bin.size != 0:
binval = np.median(bin)
grid[row, col] = binval
else:
grid[row, col] = np.nan # fill empty bins with nans.
# return the grid
if retbin:
if retloc:
return grid, bins, wherebin
else:
return grid, bins
else:
if retloc:
return grid, wherebin
else:
return grid
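# Illustrative usage (synthetic data; the values are assumptions):
#
#   x = np.random.rand(1000)
#   y = np.random.rand(1000)
#   z = x + y
#   grid, bins, wherebin = griddata(x, y, z, binsize=0.1)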
|
reflectometry/direfl
|
direfl/gui/simulation_page.py
|
Python
|
mit
| 41,666
| 0.002328
|
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: James Krycka
"""
This module implements the SimulationPage class that provides the simulation
feature of the application. It creates simulated reflectometry data files from
the user's model description and user specified parameter settings which are
then used to perform a direct inversion to generate a scattering length density
profile of the sample.
"""
#==============================================================================
from __future__ import print_function
import os
import sys
import time
import numpy as np
import wx
from wx.lib import delayedresult
import matplotlib
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Toolbar
# The Figure object is used to create backend-independent plot representations.
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
# For use in the matplotlib toolbar.
from matplotlib.widgets import Slider, Button, RadioButtons
# Wx-Pylab magic for displaying plots within an application's window.
from matplotlib import _pylab_helpers
from matplotlib.backend_bases import FigureManagerBase
#from matplotlib import pyplot as plt
import pylab
from ..api.resolution import bins, binwidths
from ..api.simulate import Simulation
from .utilities import example_data
from .input_list import InputListPanel
from .instrument_params import InstrumentParameters
from .wx_utils import (popup_error_message, popup_warning_message,
StatusBarInfo, ExecuteInThread, WorkInProgress)
# Text strings for use in file selection dialog boxes.
DATA_FILES = "Data files (*.dat)|*.dat"
TEXT_FILES = "Text files (*.txt)|*.txt"
ALL_FILES = "All files (*.*)|*.*"
# Resource files.
DEMO_MODEL1_DESC = "demo_model_1.dat"
DEMO_MODEL2_DESC = "demo_model_2.dat"
DEMO_MODEL3_DESC = "demo_model_3.dat"
# Custom colors.
WINDOW_BKGD_COLOUR = "#ECE9D8"
PALE_YELLOW = "#FFFFB0"
# Other constants
NEWLINE = "\n"
NEWLINES_2 = "\n\n"
DATA_ENTRY_ERRMSG = """\
Please correct any highlighted field in error,
then retry the operation.\n
Yellow indicates an input value is required.
Red means the input value has incorrect syntax."""
INSTR_PARAM_ERRMSG = """\
Please edit the instrument data to supply missing
required parameters needed to compute resolution for
the simulated datasets."""
INSTR_CALC_RESO_ERRMSG = """\
Please specify an instrument to be used for calculating
resolution for the simulated datasets, or disable this
calculation by answering 'No' to the 'With Resolution'
question at the bottom of the page."""
SIM_HELP1 = """\
Edit parameters then click Compute to generate a density profile \
from your model."""
#==============================================================================
class SimulationPage(wx.Panel):
"""
This class implements phase reconstruction and direct inversion analysis
of two simulated surround variation data sets (generated from a model)
to produce a scattering length density profile of the sample.
"""
def __init__(self, parent, id=wx.ID_ANY, colour="", fignum=0, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.fignum = fignum
self.SetBackgroundColour(colour)
self.sbi = StatusBarInfo()
self.sbi.write(1, SIM_HELP1)
# Split the panel into parameter and plot subpanels.
sp = wx.SplitterWindow(self, style=wx.SP_3D|wx.SP_LIVE_UPDATE)
if wx.Platform == "__WXMAC__": # workaround to set sash position on
sp.SetMinimumPaneSize(300) # frame.Show() to desired initial value
else:
sp.SetMinimumPaneSize(100)
# Create display panels as children of the splitter.
self.pan1 = wx.Panel(sp, wx.ID_ANY, style=wx.SUNKEN_BORDER)
self.pan1.SetBackgroundColour(colour)
self.pan2 = wx.Panel(sp, wx.ID_ANY, style=wx.SUNKEN_BORDER)
self.pan2.SetBackgroundColour("WHITE")
# Initialize the left and right panels.
self.init_param_panel()
self.init_plot_panel()
# Attach the child panels to the splitter.
sp.SplitVertically(self.pan1, self.pan2)
sp.SetSashPosition(300) # on Mac needs to be set after frame.Show()
sp.SetSashGravity(0.2) # on resize grow mostly on right side
# Put the splitter in a sizer attached to the main panel of the page.
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(sp, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Fit(self)
def init_param_panel(self):
"""Initializes the parameter input panel of the SimulationPage."""
# Determine the border size for widgets placed inside a StaticBox.
# On the Mac, a generous minimum border is provided that is sufficient.
if wx.Platform == "__WXMAC__":
SBB = 0
else:
SBB = 5
#----------------------------
# Section 1: Model Parameters
#----------------------------
# Note that a static box must be created before creating the widgets
# that appear inside it (box and widgets must have the same parent).
sbox1 = wx.StaticBox(self.pan1, wx.ID_ANY, "Model Parameters")
# Create instructions for using the model description input box.
line1 = wx.StaticText(self.pan1, wx.ID_ANY,
label="Define the Surface, Sample, and Substrate")
line2 = wx.StaticText(self.pan1, wx.ID_ANY,
label="layers of your model (one layer per line):")
demo_model_params = \
"# SLDensity Thickness Roughness" + \
NEWLINES_2 + NEWLINES_2 + NEWLINES_2 + NEWLINE
# Create an input box to enter and edit the model description and
# populate it with a header but no layer information.
# Note that the number of lines determines the height of the box.
self.model = wx.TextCtrl(self.pan1, wx.ID_ANY, value=demo_model_params,
style=wx.TE_MULTILINE|wx.TE_WORDWRAP|wx.RAISED_BORDER)
self.model.SetBackgroundColour(WINDOW_BKGD_COLOUR)
# Group model parameter widgets into a labeled section and
# manage them with a static box sizer.
sbox1_sizer = wx.StaticBoxSizer(sbox1, wx.VERTICAL)
sbox1_sizer.Add(line1, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, border=SBB)
sbox1_sizer.Add(line2, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, border=SBB)
sbox1_sizer.Add((-1, 4), 0, wx.EXPAND|wx.LEFT|wx.RIGHT, border=SBB)
sbox1_sizer.Add(self.model, 1, wx.EXPAND|wx.BOTTOM|wx.LEFT|wx.RIGHT,
border=SBB)
#---------------------------------
# Section 2: Instrument Parameters
#---------------------------------
sbox2 = wx.StaticBox(self.pan1, wx.ID_ANY, "Resolution Parameters")
# Instantiate object that manages and stores instrument metadata.
self.instr_param = InstrumentParameters()
# Create a panel for gathering instrument parameters.
|
coreycb/horizon
|
openstack_dashboard/dashboards/project/images/utils.py
|
Python
|
apache-2.0
| 3,845
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import glance
def get_available_images(request, project_id=None, images_cache=None):
"""Returns a list of images that are public or owned by the given
project_id. If project_id is not specified, only public images
are returned.
:param images_cache: An optional dict-like object in which to
cache public and per-project id image metadata.
"""
if images_cache is None:
images_cache = {}
public_images = images_cache.get('public_images', [])
images_by_project = images_cache.get('images_by_project', {})
if 'public_images' not in images_cache:
public = {"is_public": True,
"status": "active"}
try:
images, _more, _prev = glance.image_list_detailed(
request, filters=public)
public_images.extend(images)
images_cache['public_images'] = public_images
except Exception:
exceptions.handle(request,
_("Unable to retrieve public images."))
# Preempt if we don't have a project_id yet.
if project_id is None:
images_by_project[project_id] = []
if project_id not in images_by_project:
owner = {"property-owner_id": project_id,
"status": "active"}
try:
owned_images, _more, _prev = glance.image_list_detailed(
request, filters=owner)
images_by_project[project_id] = owned_images
except Exception:
owned_images = []
exceptions.handle(request,
_("Unable to retrieve images for "
"the current project."))
else:
owned_images = images_by_project[project_id]
if 'images_by_project' not in images_cache:
images_cache['images_by_project'] = images_by_project
images = owned_images + public_images
# Remove duplicate images
image_ids = []
final_images = []
for image in images:
if image.id not in image_ids:
image_ids.append(image.id)
final_images.append(image)
return [image for image in final_images
if image.container_format not in ('aki', 'ari')]
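# Illustrative usage (request and the cache dict are assumed):
#
#   cache = {}
#   images = get_available_images(request, request.user.project_id,
#                                 images_cache=cache)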
def image_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all images.
Generates a sorted list of available images and returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include an empty tuple at the front of
the list
:return: list of (id, name) tuples
"""
try:
images = get_available_images(request, request.user.project_id)
except Exception:
exceptions.handle(request, _('Unable to retrieve images'))
images.sort(key=lambda c: c.name)
images_list = [('', _('Select Image'))] if include_empty_option else []
for image in images:
image_label = u"{} ({})".format(image.name, filesizeformat(image.size))
images_list.append((image.id, image_label))
if not images:
return [("", _("No images available")), ]
return images_list
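# Illustrative usage inside a Django view (request is assumed to be an
# authenticated HttpRequest):
#
#   choices = image_field_data(request, include_empty_option=True)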
|
tensorflow/tpu
|
models/official/efficientnet/eval_ckpt_main.py
|
Python
|
apache-2.0
| 4,717
| 0.003604
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval checkpoint driver.
This is an example evaluation script for users to understand the EfficientNet
model checkpoints on CPU. To serve EfficientNet, please consider to export a
`SavedModel` from checkpoints and use tf-serving to serve.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import model_builder_factory
import preprocessing
import utils
flags.DEFINE_string('model_name', 'efficientnet-b0', 'Model name to eval.')
flags.DEFINE_string('runmode', 'examples', 'Running mode: examples or imagenet')
flags.DEFINE_string(
'imagenet_eval_glob', None, 'Imagenet eval image glob, '
'such as /imagenet/ILSVRC2012*.JPEG')
flags.DEFINE_string(
'imagenet_eval_label', None, 'Imagenet eval label file path, '
'such as /imagenet/ILSVRC2012_validation_ground_truth.txt')
flags.DEFINE_string('ckpt_dir', '/tmp/ckpt/', 'Checkpoint folders')
flags.DEFINE_boolean('enable_ema', True, 'Enable exponential moving average.')
flags.DEFINE_string('export_ckpt', None, 'Exported ckpt for eval graph.')
flags.DEFINE_string('example_img', '/tmp/panda.jpg',
'Filepath for a single example image.')
flags.DEFINE_string('labels_map_file', '/tmp/labels_map.txt',
'Labels map from label id to its meaning.')
flags.DEFINE_bool('include_background_label', False,
'Whether to include background as label #0')
flags.DEFINE_bool('advprop_preprocessing', False,
'Whether to use AdvProp preprocessing.')
flags.DEFINE_integer('num_images', 5000,
'Number of images to eval. Use -1 to eval all images.')
class EvalCkptDriver(utils.EvalCkptDriver):
"""A driver for running eval inference."""
def build_model(self, features, is_training):
"""Build model with input features."""
tf.logging.info(self.model_name)
model_builder = model_builder_factory.get_model_builder(self.model_name)
if self.advprop_preprocessing:
# AdvProp uses Inception preprocessing.
features = features * 2.0 / 255 - 1.0
else:
features -= tf.constant(
model_builder.MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(
model_builder.STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
logits, _ = model_builder.build_model(
features, self.model_name, is_training)
probs = tf.nn.softmax(logits)
probs = tf.squeeze(probs)
return probs
def get_preprocess_fn(self):
"""Build input dataset."""
return preprocessing.preprocess_image
def get_eval_driver(model_name,
include_background_label=False,
advprop_preprocessing=False):
"""Get a eval driver."""
image_size = model_builder_factory.get_model_input_size(model_name)
return EvalCkptDriver(
model_name=model_name,
batch_size=1,
image_size=image_size,
include_background_label=include_background_label,
advprop_preprocessing=advprop_preprocessing)
# FLAGS should not be used before main.
FLAGS = flags.FLAGS
def main(unused_argv):
logging.set_verbosity(logging.ERROR)
driver = get_eval_driver(FLAGS.model_name, FLAGS.include_background_label,
FLAGS.advprop_preprocessing)
if FLAGS.runmode == 'examples':
# Run inference for an example image.
driver.eval_example_images(FLAGS.ckpt_dir, [FLAGS.example_img],
FLAGS.labels_map_file, FLAGS.enable_ema,
FLAGS.export_ckpt)
elif FLAGS.runmode == 'imagenet':
# Run inference for imagenet.
driver.eval_imagenet(FLAGS.ckpt_dir, FLAGS.imagenet_eval_glob,
FLAGS.imagenet_eval_label, FLAGS.num_images,
FLAGS.enable_ema, FLAGS.export_ckpt)
else:
print('must specify runmode: examples or imagenet')
if __name__ == '__main__':
app.run(main)
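# Illustrative invocation (paths are placeholders) using the flags defined
# above:
#
#   python eval_ckpt_main.py --model_name=efficientnet-b0 --runmode=examples \
#     --ckpt_dir=/tmp/ckpt/ --example_img=/tmp/panda.jpg \
#     --labels_map_file=/tmp/labels_map.txt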
|
12019/cyberflex-shell
|
cards/cardos_card.py
|
Python
|
gpl-2.0
| 4,862
| 0.005965
|
import utils, TLV_utils
from iso_7816_4_card import *
import building_blocks
class CardOS_Card(ISO_7816_4_Card,building_blocks.Card_with_ls):
DRIVER_NAME = ["CardOS"]
ATRS = [
("3bf2180002c10a31fe58c80874", None),
]
APDU_LIFECYCLE = C_APDU("\x00\xCA\x01\x83\x00")
APDU_PHASE_CONTROL = C_APDU("\x80\x10\x00\x00\x00")
APDU_LIST_X = C_APDU("\x80\x16\x01\x00\x00")
LIST_X_DF = 0
LIST_X_EF = 1
LS_L_SIZE_TAG = 0x80
CARDOS_LIFE_CYCLE_STATUS_BYTE_DESCRIPTIONS = [
(0x10, "operational"),
(0x20, "Administration"),
(0x23, "Personalization"),
(0x26, "Initialisation"),
(0x34, "Manufacturing"),
(0x3F, "Death"),
(0x29, "Erase in Progress"),
]
STATUS_WORDS = ( {
"6283": "File is deactivated",
"6300": "Authentication failed",
"6581": "EEPROM error, command aborted",
"6700": "LC invalid",
"6881": "Logical channel not supported",
"6981": "Command can not be used for file structure",
"6982": "Required access right not granted",
"6983": "BS object blocked",
"6984": "BS object has invalid format",
"6985": "No random number available",
"6986": "No current EF selected",
"6987": "Key object for SM not found",
"6988": "Key object used for SM has invalid format",
"6A80": "Invalid parameters in data field",
"6A81": "Function/mode not supported",
"6A82": "File not found",
"6A83": "Record/object not found",
"6A84": "Not enough memory in file / in file system available",
"6A85": "LC does not fit the TLV structure of the data field",
"6A86": "P1/P2 invalid",
"6A87": "LC does not fit P1/P2",
"6A88": "Object not found (GET DATA)",
"6C00": "LC does not fit the data to be sent (e.g. SM)",
"6D00": "INS invalid",
"6E00": "CLA invalid (Hi nibble)",
"6F00": "Technical error:\n + It was tried to create more than 254 records in a file\n + Package uses SDK version which is not compatible to API version\n + Package contains invalid statements (LOAD EXECUTABLE)",
"6F81": "File is invalidated b
|
ecause of checksum error (prop.)",
"6F82": "Not enough memory available in XRAM",
"6F83": "Transaction error (i.e. command must not be used in transaction)",
"6F84": "General protection fault (prop.)",
"6F85": "Internal failure of PK-API (e.g. wrong CCMS format)",
"6F86": "Key Object not found",
"6F87": "Chaini
|
ng error",
"6FFF": "Internal assertion (invalid internal error)\n + This error is no runtime error, but an internal error which can occur because of a programming error only.",
"9000": "Command executed correctly",
"9001": "Command exectued correctly; EEPROM weakness detected (EEPROM written with second trial; the EEPROM area overwritten has a limited lifetime only)",
"9850": "Overflow using INCREASE / underflow using DECREASE"
} )
def list_x(self, x):
"Get a list of x objects, where x is one of 0 (DFs) or 1 (EFs) or 2 (DFs and EFs)"
## FIXME I just guessed this information
result = self.send_apdu(C_APDU(self.APDU_LIST_X, p1=x))
files = []
unpacked = TLV_utils.unpack(result.data)
for tag, length, value in unpacked:
if isinstance(value, list):
for tag, length, value in value:
if tag == 0x86:
files.append(value)
else:
if tag == 0x86:
files.append(value)
return files
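# For example (illustrative), list_x(0) returns DF names and list_x(1)
# returns EF names; cmd_listdirs and cmd_listfiles below use exactly these.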
def cmd_listdirs(self):
"List DFs in current DF"
result = self.list_x(0)
print "DFs: " + ", ".join([utils.hexdump(a, short=True) for a in result])
def cmd_listfiles(self):
"List EFs in current DF"
result = self.list_x(1)
print "EFs: " + ", ".join([utils.hexdump(a, short=True) for a in result])
def cmd_lifecycle(self):
"Check the current lifecycle"
result = self.send_apdu(C_APDU(self.APDU_LIFECYCLE))
#status = binascii.b2a_hex(result.data)
for hex, mes in self.CARDOS_LIFE_CYCLE_STATUS_BYTE_DESCRIPTIONS:
if (int(binascii.b2a_hex(result.data), 16) == hex):
print "Satus: " + mes
break
def cmd_phase_control(self):
"change lifecycle between Administration and Operational"
result = self.send_apdu(C_APDU(self.APDU_PHASE_CONTROL))
COMMANDS = {
"list_dirs": cmd_listdirs,
"list_files": cmd_listfiles,
"ls": building_blocks.Card_with_ls.cmd_list,
"check_lifecycle": cmd_lifecycle,
"phase_control": cmd_phase_control,
}
|