Dataset columns (each sample below begins with a pipe-delimited row of the first six fields, followed by its prefix, middle, and suffix text):

| column | dtype | values |
|---|---|---|
| repo_name | string | 5 to 100 chars |
| path | string | 4 to 231 chars |
| language | string | 1 distinct value |
| license | string | 15 distinct values |
| size | int64 | 6 to 947k |
| score | float64 | 0 to 0.34 |
| prefix | string | 0 to 8.16k chars |
| middle | string | 3 to 512 chars |
| suffix | string | 0 to 8.17k chars |
| nivbend/bdd_bot | bddbot/test/constants.py | Python | mit | 367 | 0 |
"""Constant definitions for testing purposes."""
from bddbot.config import TEST_COMMAND
BANK_PATH_1 = "banks/first.bank"
BANK_PATH_2 = "banks/second.bank"
FEATURE_PATH_1 = BANK_PATH_1.replace("bank", "feature")
FEATURE_PATH_2 = BANK_PATH_2.replace("bank", "feature")
(HOST, PORT) = ("bank_server", 0xBDD)
CLIENT = "client"
DEFAULT_TEST_COMMANDS = [TEST_COMMAND,
]
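The row above illustrates the prefix/middle/suffix split described in the schema: the file's first two characters land in `prefix` and the rest is divided between `middle` and `suffix`. A minimal sketch of reassembling such a record (field values abridged from this row, not the full file):

```python
# Rebuild the original file text from one record's three spans.
record = {
    "repo_name": "nivbend/bdd_bot",
    "path": "bddbot/test/constants.py",
    "prefix": '""',
    "middle": '"Constant definitions for testing purposes."""\n...',
    "suffix": "\n]\n",
}

def reassemble(rec):
    """Concatenate prefix + middle + suffix back into the file contents."""
    return rec["prefix"] + rec["middle"] + rec["suffix"]

print(reassemble(record).splitlines()[0])
# """Constant definitions for testing purposes."""
```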
| atombrella/django-rest-framework | tests/test_model_serializer.py | Python | bsd-2-clause | 43,267 | 0.00141 |
"""
The `ModelSerializer` and `HyperlinkedModelSerializer` classes are essentially
shortcuts for automatically creating serializers based on a given model class.
These tests deal with ensuring that we correctly map the model fields onto
an appropriate set of serializer fields for each case.
"""
from __future__ import unicode_literals
import decimal
from collections import OrderedDict
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import (
MaxValueValidator, MinLengthValidator, MinValueValidator
)
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
from rest_framework.compat import set_many, unicode_repr
def dedent(blocktext):
return '\n'.join([line[12:] for line in blocktext.splitlines()[1:-1]])
# Tests for regular field mappings.
# ---------------------------------
class CustomField(models.Field):
"""
A custom model field simply for testing purposes.
"""
pass
class OneFieldModel(models.Model):
char_field = models.CharField(max_length=100)
class RegularFieldsModel(models.Model):
"""
A model class for testing regular flat fields.
"""
auto_field = models.AutoField(primary_key=True)
big_integer_field = models.BigIntegerField()
boolean_field = models.BooleanField(default=False)
char_field = models.CharField(max_length=100)
comma_separated_integer_field = models.CommaSeparatedIntegerField(max_length=100)
date_field = models.DateField()
datetime_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=3, decimal_places=1)
email_field = models.EmailField(max_length=100)
float_field = models.FloatField()
integer_field = models.IntegerField()
null_boolean_field = models.NullBooleanField()
positive_integer_field = models.PositiveIntegerField()
positive_small_integer_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField(max_length=100)
small_integer_field = models.SmallIntegerField()
text_field = models.TextField(max_length=100)
file_field = models.FileField(max_length=100)
time_field = models.TimeField()
url_field = models.URLField(max_length=100)
custom_field = CustomField()
file_path_field = models.FilePathField(path='/tmp/')
def method(self):
return 'method'
COLOR_CHOICES = (('red', 'Red'), ('blue', 'Blue'), ('green', 'Green'))
DECIMAL_CHOICES = (('low', decimal.Decimal('0.1')), ('medium', decimal.Decimal('0.5')), ('high', decimal.Decimal('0.9')))
class FieldOptionsModel(models.Model):
value_limit_field = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
length_limit_field = models.CharField(validators=[MinLengthValidator(3)], max_length=12)
blank_field = models.CharField(blank=True, max_length=10)
null_field = models.IntegerField(null=True)
default_field = models.IntegerField(default=0)
descriptive_field = models.IntegerField(help_text='Some help text', verbose_name='A label')
choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
class ChoicesModel(models.Model):
choices_field_with_nonstandard_args = models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES, verbose_name='A label')
class Issue3674ParentModel(models.Model):
title = models.CharField(max_length=64)
class Issue3674ChildModel(models.Model):
parent = models.ForeignKey(Issue3674ParentModel, related_name='children', on_delete=models.CASCADE)
value = models.CharField(primary_key=True, max_length=64)
class UniqueChoiceModel(models.Model):
CHOICES = (
('choice1', 'choice 1'),
('choice2', 'choice 1'),
)
name = models.CharField(max_length=254, unique=True, choices=CHOICES)
class TestModelSerializer(TestCase):
def test_create_method(self):
class TestSerializer(serializers.ModelSerializer):
non_model_field = serializers.CharField()
class Meta:
model = OneFieldModel
fields = ('char_field', 'non_model_field')
serializer = TestSerializer(data={
'char_field': 'foo',
'non_model_field': 'bar',
})
serializer.is_valid()
with self.assertRaises(TypeError) as excinfo:
serializer.save()
msginitial = 'Got a `TypeError` when calling `OneFieldModel.objects.create()`.'
assert str(excinfo.exception).startswith(msginitial)
def test_abstract_model(self):
"""
Test that trying to use ModelSerializer with Abstract Models
throws a ValueError exception.
"""
class AbstractModel(models.Model):
afield = models.CharField(max_length=255)
class Meta:
abstract = True
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = AbstractModel
fields = ('afield',)
serializer = TestSerializer(data={
'afield': 'foo',
})
with self.assertRaises(ValueError) as excinfo:
serializer.is_valid()
msginitial = 'Cannot use ModelSerializer with Abstract Models.'
assert str(excinfo.exception).startswith(msginitial)
class TestRegularFieldMappings(TestCase):
def test_regular_fields(self):
"""
Model fields should map to their equivalent serializer fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = '__all__'
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
big_integer_field = IntegerField()
boolean_field = BooleanField(required=False)
char_field = CharField(max_length=100)
comma_separated_integer_field = CharField(max_length=100, validators=[<django.core.validators.RegexValidator object>])
date_field = DateField()
datetime_field = DateTimeField()
decimal_field = DecimalField(decimal_places=1, max_digits=3)
email_field = EmailField(max_length=100)
float_field = FloatField()
integer_field = IntegerField()
null_boolean_field = NullBooleanField(required=False)
positive_integer_field = IntegerField()
positive_small_integer_field = IntegerField()
slug_field = SlugField(max_length=100)
small_integer_field = IntegerField()
text_field = CharField(max_length=100, style={'base_template': 'textarea.html'})
file_field = FileField(max_length=100)
time_field = TimeField()
url_field = URLField(max_length=100)
custom_field = ModelField(model_field=<tests.test_model_serializer.CustomField: custom_field>)
file_path_field = FilePathField(path='/tmp/')
""")
self.assertEqual(unicode_repr(TestSerializer()), expected)
def test_field_options(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = FieldOptionsModel
fields = '__all__'
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
value_limit_field = IntegerField(max_value=10, min_value=1)
length_limit_field = CharField(max_length=12, min_length=3)
blank_field = CharField(allow_blank=True, max_length=10, required=False)
null_field = IntegerField(allow_null=True, required=False)
default_field = IntegerField(required=False)
descriptive_field = IntegerField(help_text='Some help text', label='A label')
choices_field = ChoiceField(choices=(('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')))
""")
| google/cyanobyte | test/sampleData/circuitpython/BH1750FVI.py | Python | apache-2.0 | 4,207 | 0.001426 |
# Copyright (C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for BH1750FVI v0.1.0.
# Generated from peripherals/BH1750FVI.yaml using Cyanobyte Codegen v0.1.0
"""
Class for BH1750FVI
"""
from adafruit_bus_device.i2c_device import I2CDevice
I2C_ADDRESS_35 = 35
I2C_ADDRESS_92 = 92
class BH1750FVI:
"""
Rohm Light Sensor
"""
REGISTER_CONTINUOUSHRES2MODE = 17
REGISTER_CONTINUOUSHRESMODE = 16
REGISTER_CONTINUOUSLYLRESMODE = 19
REGISTER_ONCEHRES2MODE = 33
REGISTER_ONCEHRESMODE = 32
REGISTER_ONCELRESMODE = 35
REGISTER_POWERDOWN = 0
REGISTER_POWERON = 1
REGISTER_RESET = 7
def __init__(self, i2c, address):
# Initialize connection to peripheral
self.i2c_device = I2CDevice(i2c, address)
self.device_address = address
self._lifecycle_begin()
def set_continuoushres2mode(self):
"""
Start measurement at 0.5lx resolution. Typically 120ms.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_continuoushresmode(self):
"""
Start measurement at 1lx resolution. Typically 120ms.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_continuouslylresmode(self):
"""
Start measurement at 4lx resolution. Typically 16ms.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_oncehres2mode(self):
"""
Start measurement at 0.5lx resolution. Typically 120ms.
Power Down after measurement.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_oncehresmode(self):
"""
Start measurement at 1lx resolution. Typically 120ms.
Power Down after measurement.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_oncelresmode(self):
"""
Start measurement at 4lx resolution. Typically 16ms.
Power Down after measurement.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_powerdown(self):
"""
No active state
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_poweron(self):
"""
Waiting for measurement command
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def set_reset(self):
"""
Reset data register value. Not accepted in Power Down mode.
"""
buffer = bytearray(0)
with self.i2c_device as i2c:
i2c.write(buffer)
def _lifecycle_begin(self):
"""
Sends a POWER ON cmd to device
"""
self.set_poweron()
def command_powerdown(self):
"""
Things you can do to device
"""
self.set_powerdown()
def command_reset(self):
"""
Things you can do to device
"""
self.set_poweron()
self.set_reset()
def read_lightintensity(self):
"""
Read light intensity from device
"""
intensity = None # Variable declaration
with self.i2c_device as i2c:
_byte_list = bytearray(2)
i2c.readinto(_byte_list)
intensity = 0
intensity = intensity << 8 | _byte_list[0]
intensity = intensity << 8 | _byte_list[1]
return intensity
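A hedged usage sketch for the generated driver above. It assumes a CircuitPython board exposing the default I2C bus and the `adafruit_bus_device` package; the value returned is the sensor's raw 16-bit data register, not a converted lux figure:

```python
import board  # CircuitPython board module, assumed available on the target device

i2c = board.I2C()                        # default SCL/SDA pins
sensor = BH1750FVI(i2c, I2C_ADDRESS_35)  # the constructor also powers the sensor on
sensor.set_continuoushresmode()          # 1 lx resolution, ~120 ms per measurement
raw = sensor.read_lightintensity()       # two bytes combined into one integer
print(raw)
```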
| hizni/vod-systems | vod_systems/vod/urls.py | Python | mit | 6,067 | 0.008076 |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from vod import user_views
from vod.alias_id_views import AliasIdListView, AliasIdCreateView, AliasIdUpdateView, AliasIdRetireView
from vod.datatype_views import DataTypeListView, DataTypeCreateView, DataTypeUpdateView, DataTypeRetireView
from vod.institution_views import InstitutionListView, InstitutionCreateView, InstitutionUpdateView, \
InstitutionRetireView
from vod.patient_views import PatientListView, PatientCreateView, PatientUpdateView, PatientRetireView, \
PatientIdentifiersDetailView, PatientAliasCreateView, PatientTransplantCreateView
from vod.transplant_views import TransplantListView, TransplantCreateView, TransplantUpdateView, TransplantRetireView
from vod.data_views import RawDataListView, RawDataProcessingView, DataAnalysisDetailView
from vod.cleansing_views import DataCleansingTemplatesListView, DataCleansingTemplateCreateView, DataCleansingTemplateFieldsUpdateView
from vod.upload_views import UploadListView
from vod.user_views import UserListView, UserCreateView, UserUpdateView, UserRetireView, LoginView
from vod import helper_views
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'login', LoginView.as_view(), name='vod-login'),
url(r'logout', user_views.logout, name='vod-logout'),
# url routes for superuser (admin) related views
url(r'^user/list/$', login_required(UserListView.as_view()), name='user-list'),
url(r'^user/create/$', login_required(UserCreateView.as_view()), name='user-create'),
url(r'^user/update/(?P<id>\d+)/$', login_required(UserUpdateView.as_view()), name='user-update'),
url(r'^user/delete/(?P<id>\d+)/$', login_required(UserRetireView.as_view()), name='user-retire'),
url(r'^institution/list/$', login_required(InstitutionListView.as_view()), name='institution-list'),
url(r'^institution/create/$', login_required(InstitutionCreateView.as_view()), name='institution-create'),
url(r'^institution/update/(?P<id>\d+)/$', login_required(InstitutionUpdateView.as_view()), name='institution-update'),
url(r'^institution/delete/(?P<id>\d+)/$', login_required(InstitutionRetireView.as_view()), name='institution-retire'),
url(r'^aliasid/list/$', login_required(AliasIdListView.as_view()), name='alias-id-list'),
url(r'^aliasid/create/$', login_required(AliasIdCreateView.as_view()), name='alias-id-create'),
url(r'^aliasid/update/(?P<id>\d+)/$', login_required(AliasIdUpdateView.as_view()), name='alias-id-update'),
url(r'^aliasid/delete/(?P<id>\d+)/$', login_required(AliasIdRetireView.as_view()), name='alias-id-retire'),
url(r'^datatype/list/$', login_required(DataTypeListView.as_view()), name='datatype-list'),
url(r'^datatype/create/$', login_required(DataTypeCreateView.as_view()), name='datatype-create'),
url(r'^datatype/update/(?P<id>\d+)/$', login_required(DataTypeUpdateView.as_view()), name='datatype-update'),
url(r'^datatype/delete/(?P<id>\d+)/$', login_required(DataTypeRetireView.as_view()), name='datatype-retire'),
url(r'^transplant/list/$', login_required(TransplantListView.as_view()), name='transplant-list'),
url(r'^transplant/create/$', login_required(TransplantCreateView.as_view()), name='transplant-create'),
url(r'^transplant/update/(?P<id>\d+)/$', login_required(TransplantUpdateView.as_view()), name='transplant-update'),
url(r'^transplant/delete/(?P<id>\d+)/$', login_required(TransplantRetireView.as_view()), name='transplant-retire'),
# url routes for staff (normal user) related views
url(r'^upload/list/$', login_required(UploadListView.as_view()), name='upload-list'),
url(r'^patient/list/$', login_required(PatientListView.as_view()), name='patient-list'),
url(r'^patient/create/$', login_required(PatientCreateView.as_view()), name='patient-create'),
url(r'^patient/update/(?P<id>\d+)/$', login_required(PatientUpdateView.as_view()), name='patient-update'),
url(r'^patient/delete/(?P<id>\d+)/$', login_required(PatientRetireView.as_view()), name='patient-retire'),
url(r'^patient/create-alias/(?P<id>\d+)/$', login_required(PatientAliasCreateView.as_view()), name='patient-create-alias'),
url(r'^patient/create-transplant/(?P<id>\d+)/$', login_required(PatientTransplantCreateView.as_view()), name='patient-create-transplant'),
url(r'^patient/detail/(?P<id>\d+)/$', login_required(PatientIdentifiersDetailView.as_view()), name='patient-detail'),
# url routes to view data
url(r'^data/uploaded-raw/$', login_required(RawDataListView.as_view()), name='raw-data-list'),
# url(r'^data/uploaded-raw/complete/(?P<id>\d+)/$', login_required(RawDataProcessingView.as_view()), name='data-complete'),
# url(r'^data/uploaded-raw/valid/(?P<id>\d+)/$', login_required(RawDataProcessingView.as_view()), name='data-valid'),
url(r'^data/detail/(?P<id>\d+)/(?P<tid>\d+)/$', login_required(DataAnalysisDetailView.as_view()), name='data-analysis-detail'),
url(r'^data/cleansing-profile/$', login_required(DataCleansingTemplatesListView.as_view()), name='cleansing-profile-list'),
# url(r'^data/cleansing-profile/create/$', login_required(DataCleansingTemplateCreateView.as_view()), name='cleansing-profile-create'),
# url(r'^data/cleansing-profile/detail/(?P<id>\d+)/$', login_required(DataCleansingTemplateFieldsListView.as_view()), name='cleansing-profile-detail'),
url(r'^data/cleansing-profile/detail/update/(?P<id>\d+)/$', login_required(DataCleansingTemplateFieldsUpdateView.as_view()), name='cleansing-template-field-update'),
# route to helper views
url(r'^ajax/validate_username/$', helper_views.validate_username, name='validate_username'),
url(r'^ajax/cleansing-profile-detail/$', helper_views.dataCleansingTemplateFields_asJSON, name='ajax-cleansing-profile-detail'),
url(r'^ajax/models/$', helper_views.modelsInApp, name='app-models'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
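Since every route above is named, templates and views can link to them by name instead of hard-coding paths. A hedged sketch, assuming this module is the active URLconf and a Django 1.x import path to match the `django.conf.urls.url` style used here:

```python
from django.core.urlresolvers import reverse  # `django.urls.reverse` on Django >= 1.10

patient_url = reverse('patient-detail', kwargs={'id': 42})
# resolves against r'^patient/detail/(?P<id>\d+)/$' -> '/patient/detail/42/'
```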
| zestrada/nova-cs498cc | nova/virt/hyperv/snapshotops.py | Python | apache-2.0 | 4,875 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM snapshot operations.
"""
import os
from oslo.config import cfg
from nova.compute import task_states
from nova.image import glance
from nova.openstack.common import log as logging
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SnapshotOps(object):
def __init__(self):
self._pathutils = pathutils.PathUtils()
self._vmutils = vmutils.VMUtils()
self._vhdutils = vhdutils.VHDUtils()
def _save_glance_image(self, context, name, image_vhd_path):
(glance_image_service,
image_id) = glance.get_remote_image_service(context, name)
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
with self._pathutils.open(image_vhd_path, 'rb') as f:
glance_image_service.update(context, image_id, image_metadata, f)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
LOG.debug(_("Creating snapshot for instance %s"), instance_name)
snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_dir = None
try:
src_vhd_path = self._pathutils.get_vhd_path(instance_name)
LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
src_base_disk_path = self._vhdutils.get_vhd_parent_path(
src_vhd_path)
export_dir = self._pathutils.get_export_dir(instance_name)
dest_vhd_path = os.path.join(export_dir, os.path.basename(
src_vhd_path))
LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
locals())
self._pathutils.copyfile(src_vhd_path, dest_vhd_path)
image_vhd_path = None
if not src_base_disk_path:
image_vhd_path = dest_vhd_path
else:
basename = os.path.basename(src_base_disk_path)
dest_base_disk_path = os.path.join(export_dir, basename)
LOG.debug(_('Copying base disk %(src_vhd_path)s to '
'%(dest_base_disk_path)s'), locals())
self._pathutils.copyfile(src_base_disk_path,
dest_base_disk_path)
LOG.debug(_("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
"VHD %(dest_vhd_path)s"), locals())
self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
dest_base_disk_path)
LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
"diff disk %(dest_vhd_path)s"), locals())
self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
image_vhd_path = dest_base_disk_path
LOG.debug(_("Updating Glance image %(image_id)s with content from "
"merged disk %(image_vhd_path)s"), locals())
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._save_glance_image(context, name, image_vhd_path)
LOG.debug(_("Snapshot image %(image_id)s updated for VM "
"%(instance_name)s"), locals())
finally:
try:
LOG.debug(_("Removing snapshot %s"), name)
self._vmutils.remove_vm_snapshot(snapshot_path)
except Exception as ex:
LOG.exception(ex)
LOG.warning(_('Failed to remove snapshot for VM %s')
% instance_name)
if export_dir:
LOG.debug(_('Removing directory: %s'), export_dir)
self._pathutils.rmtree(export_dir)
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/wrkpool.py | Python | mit | 3,630 | 0 |
import Queue
import atexit
import logging
import threading
import traceback
class WorkerPool(object):
""" Pool of worker threads; grows as necessary. """
_lock = threading.Lock()
_pool = None # Singleton.
def __init__(self):
self._idle = [] # Queues of idle workers.
self._workers = {} # Maps queue to worker.
atexit.register(self.cleanup)
@staticmethod
def get_instance():
""" Return singleton instance. """
with WorkerPool._lock:
if WorkerPool._pool is None:
WorkerPool._pool = WorkerPool()
return WorkerPool._pool
@staticmethod
def cleanup():
""" Cleanup resources (worker threads). """
WorkerPool.get_instance()._cleanup()
def _cleanup(self):
""" Cleanup resources (worker threads). """
with self._lock:
for queue in self._workers:
queue.put((None, None, None, None))
self._workers[queue].join(1)
if self._workers[queue].is_alive():
logging.debug('WorkerPool: worker join timed-out.')
try:
self._idle.remove(queue)
except ValueError:
pass # Never released due to some other issue...
self._idle = []
self._workers = {}
@staticmethod
def get(one_shot=False):
"""
Get a worker queue from the pool. Work requests should be of the form:
``(callable, *args, **kwargs, reply_queue)``
Work replies are of the form:
``(queue, retval, exc, traceback)``
one_shot: bool
If True, the worker will self-release after processing one request.
"""
return WorkerPool.get_instance()._get(one_shot)
def _get(self, one_shot):
""" Get a worker queue from the pool. """
with self._lock:
try:
return self._idle.pop()
except IndexError:
queue = Queue.Queue()
worker = threading.Thread(target=self._service_loop,
args=(queue, one_shot))
worker.daemon = True
worker.start()
self._workers[queue] = worker
return queue
@staticmethod
def release(queue):
"""
Release a worker queue back to the pool.
queue: Queue
Worker queue previously obtained from :meth:`get`.
"""
return WorkerPool.get_instance()._release(queue)
def _release(self, queue):
""" Release a worker queue back to the pool. """
with self._lock:
self._idle.append(queue)
def _service_loop(self, request_q, one_shot):
""" Get (callable, args, kwargs) from request_q and queue result. """
while True:
callable, args, kwargs, reply_q = request_q.get()
if callable is None:
request_q.task_done()
return # Shutdown.
exc = None
trace = None
retval = None
try:
retval = callable(*args, **kwargs)
except Exception as exc:
# Sometimes we have issues at shutdown.
try:
trace = traceback.format_exc()
except Exception: # pragma no cover
return
request_q.task_done()
if reply_q is not None:
reply_q.put((request_q, retval, exc, trace))
if one_shot:
self._release(request_q)
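A short sketch of the request/reply protocol documented in `get()` above: a request is the 4-tuple `(callable, args, kwargs, reply_queue)` consumed by `_service_loop`, and the reply echoes the worker queue alongside the result:

```python
import Queue  # Python 2 standard library, matching the module above

reply_q = Queue.Queue()
worker_q = WorkerPool.get()
worker_q.put((sum, ([1, 2, 3],), {}, reply_q))  # run sum([1, 2, 3]) on a worker thread
queue, retval, exc, trace = reply_q.get()       # retval == 6, exc is None on success
WorkerPool.release(worker_q)                    # hand the worker back to the pool
```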
| jaeilepp/eggie | eggie.py | Python | bsd-2-clause | 1,903 | 0.017867 |
#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| google-research/google-research | direction_net/dataset.py | Python | apache-2.0 | 15,616 | 0.005635 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate the wide baseline stereo image dataset from the Matterport3D.
We generate the data by randomly sampling different perspective views from
panoramic images in Matterport3D to create a large-scale dataset with a wide
variety of motions. Each example contains a pair of perspective images labeled
with the relative rotation from camera 2 to camera 1, and the relative
translation direction in the frame of camera 1.
Matterport3D: https://niessner.github.io/Matterport/
https://arxiv.org/pdf/1709.06158.pdf
"""
import collections
import math
import numpy as np
from pano_utils import math_utils
from pano_utils import transformation
import tensorflow.compat.v1 as tf
def world_to_image_projection(p_world, intrinsics, pose_w2c):
"""Project points in the world frame to the image plane.
Args:
p_world: [HEIGHT, WIDTH, 3] points in the world's coordinate frame.
intrinsics: [3, 3] camera's intrinsic matrix.
pose_w2c: [3, 4] camera pose matrix (world to camera).
Returns:
[HEIGHT, WIDTH, 2] points in the image coordinate.
[HEIGHT, WIDTH, 1] the z depth.
"""
shape = p_world.shape.as_list()
height, width = shape[0], shape[1]
p_world_homogeneous = tf.concat([p_world, tf.ones([height, width, 1])], -1)
p_camera = tf.squeeze(
tf.matmul(pose_w2c[tf.newaxis, tf.newaxis, :],
tf.expand_dims(p_world_homogeneous, -1)), -1)
p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
p_image = tf.squeeze(tf.matmul(intrinsics[tf.newaxis, tf.newaxis, :],
tf.expand_dims(p_camera, -1)), -1)
z = p_image[:, :, -1:]
return tf.math.divide_no_nan(p_image[:, :, :2], z), z
def image_to_world_projection(depth, intrinsics, pose_c2w):
"""Project points on the image to the world frame.
Args:
depth: [HEIGHT, WIDTH, 1] the depth map contains the radial distance from
the camera eye to each point corresponding to each pixel.
intrinsics: [3, 3] camera's intrinsic matrix.
pose_c2w: [3, 4] camera pose matrix (camera to world).
Returns:
[HEIGHT, WIDTH, 3] points in the world's coordinate frame.
"""
shape = depth.shape.as_list()
height, width = shape[0], shape[1]
xx, yy = tf.meshgrid(tf.lin_space(0., width-1., width),
tf.lin_space(0., height-1., height))
p_pixel_homogeneous = tf.concat([tf.stack([xx, yy], axis=-1),
tf.ones([height, width, 1])], -1)
p_image = tf.squeeze(tf.matmul(
tf.matrix_inverse(intrinsics[tf.newaxis, tf.newaxis, :]),
tf.expand_dims(p_pixel_homogeneous, -1)), -1)
z = depth*tf.reduce_sum(
tf.math.l2_normalize(p_image, axis=-1)*tf.constant([[[0., 0., 1.]]]),
axis=-1,
keepdims=True)
p_camera = z*p_image
# convert to OpenGL coordinate system.
p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
p_camera_homogeneous = tf.concat(
[p_camera, tf.ones(shape=[height, width, 1])], -1)
# Convert camera coordinates to world coordinates.
p_world = tf.squeeze(
tf.matmul(pose_c2w[tf.newaxis, tf.newaxis, :],
tf.expand_dims(p_camera_homogeneous, -1)), -1)
return p_world
def overlap_mask(depth1, pose1_c2w, depth2, pose2_c2w, intrinsics):
"""Compute the overlap masks of two views using triangulation.
The masks have the same shape as the input images. A pixel value is True if the
corresponding point can be seen by both cameras.
Args:
depth1: [HEIGHT, WIDTH, 1] the depth map of the first view.
pose1_c2w: [3, 4] camera pose matrix (camera to world) of the first view.
pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
depth2: [HEIGHT, WIDTH, 1] the depth map of the second view.
pose2_c2w: [3, 4] camera pose matrix (camera to world) of the second view.
pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
intrinsics: [3, 3] camera's intrinsic matrix.
Returns:
[HEIGHT, WIDTH] two overlap masks of the two inputs respectively.
"""
pose1_w2c = tf.matrix_inverse(
tf.concat([pose1_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
pose2_w2c = tf.matrix_inverse(
tf.concat([pose2_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
p_world1 = image_to_world_projection(depth1, intrinsics, pose1_c2w)
p_image1_in_2, z1_c2 = world_to_image_projection(
p_world1, intrinsics, pose2_w2c)
p_world2 = image_to_world_projection(depth2, intrinsics, pose2_c2w)
p_image2_in_1, z2_c1 = world_to_image_projection(
p_world2, intrinsics, pose1_w2c)
shape = depth1.shape.as_list()
height, width = shape[0], shape[1]
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Error tolerance.
eps = 1e-4
# check the object seen by camera 2 is also projected to camera 1's image
# plane and in front of the camera 1.
mask_h2_in_1 = tf.logical_and(
tf.less_equal(p_image2_in_1[:, :, 1], height+eps),
tf.greater_equal(p_image2_in_1[:, :, 1], 0.-eps))
mask_w2_in_1 = tf.logical_and(
tf.less_equal(p_image2_in_1[:, :, 0], width+eps),
tf.greater_equal(p_image2_in_1[:, :, 0], 0.-eps))
# check the projected points are within the image boundaries and in front of
# the camera.
mask2_in_1 = tf.logical_and(
tf.logical_and(mask_h2_in_1, mask_w2_in_1), tf.squeeze(z2_c1, -1) > 0)
# check the object seen by camera 1 is also projected to camera 2's image
# plane and in front of the camera 2.
mask_h1_in_2 = tf.logical_and(
tf.less_equal(p_image1_in_2[:, :, 1], height+eps),
tf.greater_equal(p_image1_in_2[:, :, 1], 0.-eps))
mask_w1_in_2 = tf.logical_and(
tf.less_equal(p_image1_in_2[:, :, 0], width+eps),
tf.greater_equal(p_image1_in_2[:, :, 0], 0.-eps))
# check the projected points are within the image boundaries and in front of
# the camera.
mask1_in_2 = tf.logical_and(
tf.logical_and(mask_h1_in_2, mask_w1_in_2), tf.squeeze(z1_c2, -1) > 0)
return mask1_in_2, mask2_in_1
def overlap_ratio(mask1, mask2):
"""Compute the overlap ratio of two view masks.
The overlap ratio is the minimum, over the two views, of the ratio between the
area seen by both cameras and the total image area.
Args:
mask1: [HEIGHT, WIDTH] overlapping mask.
mask2: [HEIGHT, WIDTH] overlapping mask.
Returns:
A tf.float32 tensor.
"""
shape = mask1.shape.as_list()
height, width = shape[0], shape[1]
return tf.minimum(tf.reduce_sum(tf.cast(mask1, tf.float32))/(height * width),
tf.reduce_sum(tf.cast(mask2, tf.float32))/(height * width))
# This is written for Matterport3D's directory structure.
def generate_from_meta(meta_data_path,
pano_data_dir,
pano_height=1024,
pano_width=2048,
output_height=512,
output_width=512):
"""Generate the stereo image dataset from Matterport3D using the meta data.
Example call:
ds = generate_from_meta(
meta_data_path='matterport3d/saved_meta/R90_fov90/test_meta/',
pano_data_dir='matterport3d/pano/')
Args:
meta_data_path: (string) the path to the meta data files.
pano_data_dir: (string) the
| openstack/octavia | octavia/certificates/common/local.py | Python | apache-2.0 | 3,779 | 0 |
# Copyright (c) 2014 Rackspace US, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common classes for local filesystem certificate handling
"""
import os
from oslo_config import cfg
from octavia.certificates.common import cert
TLS_CERT_DEFAULT = os.environ.get(
'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem'
)
TLS_KEY_DEFAULT = os.environ.get(
'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key'
)
TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS')
TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT',
'insecure-key-do-not-use-this-key')
TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256')
TLS_STORAGE_DEFAULT = os.environ.get(
'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/'
)
certgen_opts = [
cfg.StrOpt('ca_certificate',
default=TLS_CERT_DEFAULT,
help='Absolute path to the CA Certificate for signing. Defaults'
' to env[OS_OCTAVIA_TLS_CA_CERT].'),
cfg.StrOpt('ca_private_key',
default=TLS_KEY_DEFAULT,
help='Absolute path to the Private Key for signing. Defaults'
' to env[OS_OCTAVIA_TLS_CA_KEY].'),
cfg.StrOpt('ca_private_key_passphrase',
default=TLS_PKP_DEFAULT,
help='Passphrase for the Private Key. Defaults'
' to env[OS_OCTAVIA_CA_KEY_PASS] or None.'),
cfg.StrOpt('server_certs_key_passphrase',
default=TLS_PASS_AMPS_DEFAULT,
help='Passphrase for encrypting Amphora Certificates and '
'Private Keys. Must be 32, base64(url) compatible, '
'characters long. Defaults to env[TLS_PASS_AMPS_DEFAULT] '
'or insecure-key-do-not-use-this-key',
regex=r'^[A-Za-z0-9\-_=]{32}$',
required=True),
cfg.StrOpt('signing_digest',
default=TLS_DIGEST_DEFAULT,
help='Certificate signing digest. Defaults'
' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'),
cfg.IntOpt('cert_validity_time',
default=30 * 24 * 60 * 60,
help="The validity time for the Amphora Certificates "
"(in seconds)."),
]
certmgr_opts = [
cfg.StrOpt('storage_path',
default=TLS_STORAGE_DEFAULT,
help='Absolute path to the certificate storage directory. '
'Defaults to env[OS_OCTAVIA_TLS_STORAGE].')
]
class LocalCert(cert.Cert):
"""Representation of a Cert for local storage."""
def __init__(self, certificate, private_key, intermediates=None,
private_key_passphrase=None):
self.certificate = certificate
self.intermediates = intermediates
self.private_key = private_key
self.private_key_passphrase = private_key_passphrase
def get_certificate(self):
return self.certificate
def get_intermediates(self):
return self.intermediates
def get_private_key(self):
return self.private_key
def get_private_key_passphrase(self):
return self.private_key_passphrase
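The `LocalCert` class above is a plain value object over PEM data. A hedged sketch of constructing one (the PEM strings are placeholders, not real key material):

```python
cert = LocalCert(
    certificate="-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n",
    private_key="-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
)
assert cert.get_intermediates() is None           # optional, defaults to None
assert cert.get_private_key_passphrase() is None  # likewise optional
```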
| sonjagruen/elephant | elephant/signal_processing.py | Python | bsd-3-clause | 9,104 | 0.00022 |
'''
Basic processing procedures for analog signals (e.g., performing a z-score of a signal, or filtering a signal).
:copyright: Copyright 2014-2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
'''
from __future__ import division, print_function
import numpy as np
import scipy.signal
import quantities as pq
import neo
def zscore(signal, inplace=True):
'''
Apply a z-score operation to one or several AnalogSignalArray objects.
The z-score operation subtracts the mean :math:`\\mu` of the signal, and
divides by its standard deviation :math:`\\sigma`:
.. math::
Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}
If an AnalogSignalArray containing multiple signals is provided, the
z-transform is always calculated for each signal individually.
If a list of AnalogSignalArray objects is supplied, the mean and standard
deviation are calculated across all objects of the list. Thus, all list
elements are z-transformed by the same values of :math:`\\mu` and
:math:`\\sigma`. For AnalogSignalArrays, each signal of the array is
treated separately across list elements. Therefore, the number of signals
must be identical for each AnalogSignalArray of the list.
Parameters
----------
signal : neo.AnalogSignalArray or list of neo.AnalogSignalArray
Signals for which to calculate the z-score.
inplace : bool
If True, the contents of the input signal(s) is replaced by the
z-transformed signal. Otherwise, a copy of the original
AnalogSignalArray(s) is returned. Default: True
Returns
-------
neo.AnalogSignalArray or list of neo.AnalogSignalArray
The output format matches the input format: for each supplied
AnalogSignalArray object a corresponding object is returned containing
the z-transformed signal with the unit dimensionless.
Use Case
--------
You may supply a list of AnalogSignalArray objects, where each object in
the list contains the data of one trial of the experiment, and each signal
of the AnalogSignalArray corresponds to the recordings from one specific
electrode in a particular trial. In this scenario, you will z-transform the
signal of each electrode separately, but transform all trials of a given
electrode in the same way.
Examples
--------
>>> a = neo.AnalogSignalArray(
... np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1)*mV,
... t_start=0*s, sampling_rate=1000*Hz)
>>> b = neo.AnalogSignalArray(
... np.transpose([[1, 2, 3, 4, 5, 6], [11, 12, 13, 14, 15, 16]])*mV,
... t_start=0*s, sampling_rate=1000*Hz)
>>> c = neo.AnalogSignalArray(
... np.transpose([[21, 22, 23, 24, 25, 26], [31, 32, 33, 34, 35, 36]])*mV,
... t_start=0*s, sampling_rate=1000*Hz)
>>> print zscore(a)
[[-1.46385011]
[-0.87831007]
[-0.29277002]
[ 0.29277002]
[ 0.87831007]
[ 1.46385011]] dimensionless
>>> print zscore(b)
[[-1.46385011 -1.46385011]
[-0.87831007 -0.87831007]
[-0.29277002 -0.29277002]
[ 0.29277002 0.29277002]
[ 0.87831007 0.87831007]
[ 1.46385011 1.46385011]] dimensionless
>>> print zscore([b,c]) # doctest: +NORMALIZE_WHITESPACE
[<AnalogSignalArray(array([[-1.11669108, -1.08361877],
[-1.0672076 , -1.04878252],
[-1.01772411, -1.01394628],
[-0.96824063, -0.97911003],
[-0.91875714, -0.94427378],
[-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>,
<AnalogSignalArray(array([[ 0.78170952, 0.84779261],
[ 0.86621866, 0.90728682],
[ 0.9507278 , 0.96678104],
[ 1.03523694, 1.02627526],
[ 1.11974608, 1.08576948],
[ 1.20425521, 1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>]
'''
# Transform input to a list
if type(signal) is not list:
signal = [signal]
# Calculate mean and standard deviation
m = np.mean(np.concatenate(signal), axis=0, keepdims=True)
s = np.std(np.concatenate(signal), axis=0, keepdims=True)
if not inplace:
# Create new signal instance
result = [sig.duplicate_with_new_array(
(sig.magnitude - m.magnitude) / s.magnitude) for sig in signal]
for sig in result:
sig /= sig.units
else:
# Overwrite signal
for sig in signal:
sig[:] = pq.Quantity(
(sig.magnitude - m.magnitude) / s.magnitude,
units=sig.units)
sig /= sig.units
result = signal
# Return single object, or list of objects
if len(result) == 1:
return result[0]
else:
return result
def butter(signal, highpass_freq=None, lowpass_freq=None, order=4,
filter_function='filtfilt', fs=1.0, axis=-1):
"""
Butterworth filtering function for neo.AnalogSignalArray. Filter type is
determined according to how values of `highpass_freq` and `lowpass_freq`
are given (see Parameters section for details).
Parameters
----------
signal : AnalogSignalArray or Quantity array or NumPy ndarray
Time series data to be filtered. When given as Quantity array or NumPy
ndarray, the sampling frequency should be given through the keyword
argument `fs`.
highpass_freq, lowpass_freq : Quantity or float
High-pass and low-pass cut-off frequencies, respectively. When given as
float, the given value is taken as frequency in Hz.
Filter type is determined depending on values of these arguments:
* highpass_freq only (lowpass_freq = None): highpass filter
* lowpass_freq only (highpass_freq = None): lowpass filter
* highpass_freq < lowpass_freq: bandpass filter
* highpass_freq > lowpass_freq: bandstop filter
order : int
Order of Butterworth filter. Default is 4.
filter_function : string
Filtering function to be used. Either 'filtfilt'
(`scipy.signal.filtfilt()`) or 'lfilter' (`scipy.signal.lfilter()`). In
most applications 'filtfilt' should be used, because it doesn't bring
about phase shift due to filtering. Default is 'filtfilt'.
fs : Quantity or float
The sampling frequency of the input time series. When given as float,
its value is taken as frequency in Hz. When the input is given as neo
AnalogSignalArray, its attribute is used to specify the sampling
frequency and this parameter is ignored. Default is 1.0.
axis : int
Axis along which filter is applied. Default is -1.
Returns
-------
filtered_signal : AnalogSignalArray or Quantity array or NumPy ndarray
Filtered input data. The shape and type is identical to those of the
input.
"""
def _design_butterworth_filter(Fs, hpfreq=None, lpfreq=None, order=4):
# set parameters for filter design
Fn = Fs / 2.
# - filter type is determined according to the values of cut-off
# frequencies
if lpfreq and hpfreq:
if hpfreq < lpfreq:
Wn = (hpfreq / Fn, lpfreq / Fn)
btype = 'bandpass'
else:
Wn = (lpfreq / Fn, hpfreq / Fn)
btype = 'bandstop'
elif lpfreq:
Wn = lpfreq / Fn
btype = 'lowpass'
elif hpfreq:
Wn = hpfreq / Fn
btype = 'highpass'
else:
raise ValueError(
"Either highpass_freq or lowpass_freq must be given"
)
# return filter coefficients
return scipy.signal.butter(order, Wn, btype=btype)
# design filter
Fs = signal.sampling_rate.rescale(pq.Hz).magnitude \
if hasattr(signal, 'sampling_rate') else fs
Fh = highpass_freq.rescale(pq.Hz).magnitude \
if isinstance(highpass_freq, pq.quantity.Quantity) else highpass_freq
Fl = lowpass_freq.rescale(pq.Hz).magnitude \
if isinstance(lowpass_freq, pq.quantity.Quantity) else lowpass_freq
| KKfo/captcha_solver | experiment.py | Python | gpl-3.0 | 7,350 | 0.011837 |
import requests
from PIL import Image, ImageEnhance, ImageChops, ImageFilter
from io import BytesIO, StringIO
import time
import sys, os
import codecs
url = 'http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net'
imgurl = url + '/captcha.php'
headers = { 'Host' : 'd1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net',
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip, deflate',
'DNT' : '1',
'Referer' : 'http://http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net/',
'Cookie' : 'PHPSESSID=',#erased
'Authorization' : 'Basic ',#erased
# 'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded' }
def recognize(img, bounds):
# read dataset of images for each letter
imgs = {}
datfile = open("ads.dat", "rt")
line = datfile.readline()
while line!="":
key = line[0]
if key not in imgs:
imgs[key] = []
imgs[key].append(Image.open(BytesIO(bytes.fromhex(line[2:-1]))))  # decode the hex-encoded BMP stored in ads.dat
line = datfile.readline()
datfile.close()
# calculate difference with dataset for each boundbox
word = ""
for bound in bounds:
guess = []
total = (img.crop(bound).size)[0]*(img.crop(bound).size)[1]*1.0
for key in imgs:
for pattern in imgs[key]:
diff = ImageChops.difference(img.crop(bound), pattern.resize(img.crop(bound).size, Image.NEAREST))
pixels = list(diff.getdata())
samePixCnt = sum(i==0 for i in pixels)
guess.append([samePixCnt, key])
guess.sort(reverse=True)
word = word+guess[0][1]
print(total, guess[0:3], guess[0][0]/total, guess[1][0]/total, guess[2][0]/total)
print(word)
return word.replace("_", "")
def separate(img):
# count number of pixels for each column
colPixCnts = []
for col in range(img.size[0]):
pixels = list(img.crop([col, 0, col+1, img.size[1]]).getdata())
colPixCnts.append(sum(i==0 for i in pixels))
print (colPixCnts)
print("\n")
# average out pixel counts for trough column
for i in range(3, len(colPixCnts)-3, 2):
if colPixCnts[i-3]>4 and colPixCnts[i+3]>4:
colPixCnts[i-2:i+3] = [j+10 for j in colPixCnts[i-2:i+3]]
print(colPixCnts)
print("\n")
# calculate all bounding boxes of all letters
bounds = []
left = 0
right = 0
for col in range(img.size[0]): # slice all letters per column
if left==0 and colPixCnts[col]>20: # if (begin not set) and (col has letter)
left = col # then letter begin
if left!=0 and colPixCnts[col]<=20: # if (begin is set) and (col no letter)
right = col # then letter end
if right-left>8: # if (the letter is wide enough)
##############################################
print((right-left))
top = -1
bottom = -1
prev = -1
curr = -1
for row in range(img.size[1]): # slice single letter per row
pixels = list(img.crop([left, row, right, row+1]).getdata())
rowPixCnt = sum(i==255 for i in pixels)
if rowPixCnt==(right-left): # if (row no letter)
curr = row
if (curr-prev)>(bottom-top): # if (the letter is tall enough)
top = prev
bottom = curr
prev = curr
if (img.size[1]-prev)>(bottom-top): # if (the letter align to bottom)
top = prev
bottom = img.size[1]
##############################################
bounds.append([left, top+1, right, bottom]) # top row should has letter
left = 0
right = 0
print(bounds)
return bounds
def prepare(im):
im2 = Image.new("P",im.size,255)
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# im2 = im2.convert("RGB")
im2 = im2.resize((im2.size[0]*8, im2.size[1]*8), Image.BILINEAR)
# im2 = im2.resize((int(im2.size[0] / 2), int(im2.size[1] / 2)), Image.ANTIALIAS)
# im2 = ImageEnhance.Contrast(im2).enhance(1.4)
# im2 = ImageEnhance.Sharpness(im2).enhance(5)
# im2 = ImageChops.invert(im2)
# im2 = im2.filter(ImageFilter.MedianFilter(3))
# im2 = im2.convert('P')
return im2
def _train(img, bounds):
datfile = open("ads.dat", "rt")
lines = datfile.readlines()
datfile.close()
datfile = open("ads.dat", "at")
for bound in bounds:
img.crop(bound).show()
letter = input("Type in the letters you see in the image above (ENTER to skip): ")
bmpfile = BytesIO()
img.crop(bound).save(bmpfile, format='BMP')
# g = codecs.encode(bmpfile.getvalue(), 'hex_codec')
s = codecs.encode(bmpfile.getvalue(), 'hex')
s = codecs.decode(s)
line = letter+"|"+s+"\n"
if (letter!="") and (line not in lines): # if (not skipped) and (not duplicated)
datfile.write(line)
print(line)
bmpfile.close()
datfile.close()
def vertical_cut(im):
im = im.convert("P")
im2 = Image.new("P",im.size,255)
im = im.convert("P")
temp = {}
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
temp[pix] = pix
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# new code starts here
inletter = False
foundletter=False
start = 0
end = 0
letters = []
for y in range(im2.size[0]): # slice across
for x in range(im2.size[1]): # slice down
pix = im2.getpixel((y,x))
if pix != 255:
inletter = True
if foundletter == False and inletter == True:
foundletter = True
start = y
if foundletter == True and inletter == False:
foundletter = False
end = y
letters.append((start,end))
inletter=False
bounds = []
for letter in letters:
bounds.append([ letter[0] , 0, letter[1], im2.size[1] ])
print(bounds)
return bounds
if __name__=="__main__":
# if len(sys.argv) < 2:
# print(("usage: %s image" % (sys.argv[0])))
# sys.exit(2)
# file_name = sys.argv[1]
# img = Image.open(file_name).convert('P')
i = 0
while i < 3 :
response = requests.get(imgurl, headers = headers)
the_page = response.content
file = BytesIO(the_page)
img = Image.open(file)
# img = prepare(img)
img = img.resize((img.size[0]*4, img.size[1]*4), Image.BILINEAR)
img.show()
# bounds = separate(img)
bounds = vertical_cut(img)
_train(img, bounds)
i = i + 1
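A hedged sketch of the matching rule `recognize()` above relies on: two crops are scored by counting pixels whose difference is zero, so identical crops score their full pixel count:

```python
from PIL import Image, ImageChops

a = Image.new("L", (8, 8), 0)
b = Image.new("L", (8, 8), 0)
diff = ImageChops.difference(a, b)
same_pixels = sum(p == 0 for p in diff.getdata())
print(same_pixels)  # 64: every pixel matches, the best possible score for an 8x8 crop
```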
| paradiseOffice/Bash_and_Cplus-plus | CPP/full_examples/pyqt/chap03/item.py | Python | gpl-2.0 | 3,660 | 0.005738 |
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
"""Provides the Item example classes.
"""
class Item(object):
def __init__(self, artist, title, year=None):
self.__artist = artist
self.__title = title
self.__year = year
def artist(self):
return self.__artist
def setArtist(self, artist):
self.__artist = artist
def title(self):
return self.__title
def setTitle(self, title):
self.__title = title
def year(self):
return self.__year
def setYear(self, year):
self.__year = year
def __str__(self):
year = ""
if self.__year is not None:
year = " in {0}".format(self.__year)
return "{0} by {1}{2}".format(self.__title, self.__artist, year)
class Painting(Item):
def __init__(self, artist, title, year=None):
super(Painting, self).__init__(artist, title, year)
class Sculpture(Item):
def __init__(self, artist, title, year=None, material=None):
super(Sculpture, self).__init__(artist, title, year)
self.__material = material
def material(self):
return self.__material
def setMaterial(self, material):
self.__material = material
def __str__(self):
materialString = ""
if self.__material is not None:
materialString = " ({0})".format(self.__material)
return "{0}{1}".format(super(Sculpture, self).__str__(),
materialString)
class Dimension(object):
def __init__(self, width, height, depth=None):
self.__width = width
self.__height = height
self.__depth = depth
def width(self):
return self.__width
def setWidth(self, width):
self.__width = width
def height(self):
return self.__height
def setHeight(self, height):
self.__height = height
def depth(self):
return self.__depth
def setDepth(self, depth):
self.__depth = depth
def area(self):
raise NotImplementedError
def volume(self):
raise NotImplementedError
if __name__ == "__main__":
items = []
items.append(Painting("Cecil Collins", "The Poet", 1941))
items.append(Painting("Cecil Collins", "The Sleeping Fool", 1943))
items.append(Painting("Edvard Munch", "The Scream", 1893))
items.append(Painting("Edvard Munch", "The Sick Child", 1896))
items.append(Painting("Edvard Munch", "The Dance of Life", 1900))
items.append(Sculpture("Auguste Rodin", "Eternal Springtime", 1917,
"plaster"))
items.append(Sculpture("Auguste Rodin", "Naked Balzac", 1917,
"plaster"))
items.append(Sculpture("Auguste Rodin", "The Secret", 1925,
"bronze"))
uniquematerials = set()
for item in items:
print(item)
if hasattr(item, "material"):
uniquematerials.add(item.material())
print("Sculptures use {0} unique materials".format(
len(uniquematerials)))
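A hedged sketch of how the `Dimension` base class above is meant to be extended: a subclass (the name here is illustrative) supplies the `area()` and `volume()` hooks that the base leaves unimplemented:

```python
class BoxDimension(Dimension):
    def area(self):
        return self.width() * self.height()

    def volume(self):
        return self.area() * (self.depth() or 0)

print(BoxDimension(2, 3, 4).volume())  # 2 * 3 * 4 == 24
```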
| stormi/tsunami | src/primaires/salle/masques/coordonnees2d/__init__.py | Python | bsd-3-clause | 3,189 | 0.0044 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the <coords2d> mask."""
import re
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
# Constants
RE_COORDS = re.compile(r"^-?[0-9]+\.-?[0-9]+$")
class Coordonnees2D(Masque):
"""Mask <coordonnees2d>.
Expects 2D coordinates as a parameter, in the form x.y
"""
nom = "coords2d"
nom_complet = "coordonnées 2D"
def init(self):
"""Initialize the attributes."""
self.coords = (None, None)
def repartir(self, personnage, masques, commande):
"""Parse this mask out of the command."""
str_coordonnees = liste_vers_chaine(commande).lstrip()
str_coordonnees = str_coordonnees.split(" ")[0]
if not str_coordonnees:
raise ErreurValidation(
"Précisez des coordonnées.", False)
if not RE_COORDS.search(str_coordonnees):
raise ErreurValidation(
"Ceci ne sont pas des coordonnées valides.", False)
self.a_interpreter = str_coordonnees
commande[:] = commande[len(str_coordonnees):]
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validate the mask."""
Masque.valider(self, personnage, dic_masques)
coordonnees = self.a_interpreter
coordonnees = tuple(int(e) for e in coordonnees.split("."))
self.coords = coordonnees
return True
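A small sketch of the coordinate format the mask above accepts: `RE_COORDS` matches two signed integers joined by a dot, and `valider()` then splits on the dot to build the tuple:

```python
import re

RE_COORDS = re.compile(r"^-?[0-9]+\.-?[0-9]+$")  # same pattern as in the module above

assert RE_COORDS.search("3.-2")                   # accepted: x = 3, y = -2
assert not RE_COORDS.search("3,-2")               # rejected: comma instead of a dot
assert tuple(int(e) for e in "3.-2".split(".")) == (3, -2)
```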
| datastreaming/mflow_nodes | tests/helpers.py | Python | gpl-3.0 | 1,389 | 0.0036 |
import json
from mflow_nodes.processors.base import BaseProcessor
from mflow_nodes.stream_node import get_processor_function, get_receiver_function
from mflow_nodes.node_manager import NodeManager
def setup_file_writing_receiver(connect_address, output_filename):
"""
Set up a node that writes the message headers into an output file for later inspection.
:param connect_address: Address the node connects to.
:param output_filename: Output file.
:return: Configured NodeManager instance.
"""
# Format the output file.
with open(output_filename, 'w') as output_file:
output_file.write("[]")
def process_message(message):
with open(output_filename, 'r') as input_file:
test_data = json.load(input_file)
test_data.append(message.get_header())
with open(output_filename, 'w') as output:
output.write(json.dumps(test_data, indent=4))
processor = BaseProcessor()
processor.process_message = process_message
receiver = NodeManager(processor_function=get_processor_function(processor=processor,
connection_address=connect_address),
receiver_function=get_receiver_function(connection_address=connect_address),
processor_instance=processor)
return receiver
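A hedged usage sketch for the helper above (address and output path are placeholders; starting the returned `NodeManager` is left to the caller, since that API is not shown in this sample):

```python
receiver = setup_file_writing_receiver(connect_address="tcp://127.0.0.1:40000",
                                        output_filename="/tmp/headers.json")
# Every message the node processes appends its header to /tmp/headers.json.
```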
| cvubrugier/rtslib-fb | rtslib/alua.py | Python | apache-2.0 | 15,637 | 0.001151 |
'''
Implements the RTS ALUA Target Port Group class.
This file is part of RTSLib.
Copyright (c) 2016 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from .node import CFSNode
from .utils import RTSLibError, RTSLibALUANotSupported, fread, fwrite
import six
alua_rw_params = ['alua_access_state', 'alua_access_status',
'alua_write_metadata', 'alua_access_type', 'preferred',
'nonop_delay_msecs', 'trans_delay_msecs',
'implicit_trans_secs', 'alua_support_offline',
'alua_support_standby', 'alua_support_transitioning',
'alua_support_active_nonoptimized',
'alua_support_unavailable', 'alua_support_active_optimized']
alua_ro_params = ['tg_pt_gp_id', 'members', 'alua_support_lba_dependent']
alua_types = ['None', 'Implicit', 'Explicit', 'Implicit and Explicit']
alua_statuses = ['None', 'Altered by Explicit STPG', 'Altered by Implicit ALUA']
class ALUATargetPortGroup(CFSNode):
"""
ALUA Target Port Group interface
"""
def __repr__(self):
return "<ALUA TPG %s>" % self.name
def __init__(self, storage_object, name, tag=None):
"""
@param storage_object: backstore storage object to create ALUA group for
@param name: name of ALUA group
@param tag: target port group id. If not passed in, try to look
up existing ALUA TPG with the same name
"""
if storage_object.alua_supported is False:
raise RTSLibALUANotSupported("Backend does not support ALUA setup")
# default_tg_pt_gp takes tag 1
if tag is not None and (tag > 65535 or tag < 1):
raise RTSLibError("The TPG Tag must be between 1 and 65535")
super(ALUATargetPortGroup, self).__init__()
self.name = name
self.storage_object = storage_object
self._path = "%s/alua/%s" % (storage_object.path, name)
if tag is not None:
try:
self._create_in_cfs_ine('create')
except OSError as msg:
raise RTSLibError(msg)
try:
fwrite("%s/tg_pt_gp_id" % self._path, tag)
except IOError as msg:
self.delete()
raise RTSLibError("Cannot set id to %d: %s" % (tag, str(msg)))
else:
try:
self._create_in_cfs_ine('lookup')
except OSError as msg:
raise RTSLibError(msg)
# Public
def delete(self):
"""
Delete ALUA TPG and unmap from LUNs
"""
self._check_self()
# default_tg_pt_gp created by the kernel and cannot be deleted
if self.name == "default_tg_pt_gp":
raise RTSLibError("Can not delete default_tg_pt_gp")
# This will reset the ALUA tpg to default_tg_pt_gp
super(ALUATargetPortGroup, self).delete()
def _get_alua_access_state(self):
self._check_self()
path = "%s/alua_access_state" % self.path
return int(fread(path))
def _set_alua_access_state(self, newstate):
self._check_self()
path = "%s/alua_access_state" % self.path
try:
fwrite(path, str(int(newstate)))
except IOError as e:
raise RTSLibError("Cannot change ALUA state: %s" % e)
def _get_alua_access_status(self):
self._check_self()
path = "%s/alua_access_status" % self.path
status = fread(path)
return alua_statuses.index(status)
def _set_alua_access_status(self, newstatus):
self._check_self()
path = "%s/alua_access_status" % self.path
try:
fwrite(path, str(int(newstatus)))
except IOError as e:
raise RTSLibError("Cannot change ALUA status: %s" % e)
def _get_alua_access_type(self):
self._check_self()
path = "%s/alua_access_type" % self.path
alua_type = fread(path)
return alua_types.index(alua_type)
def _set_alua_access_type(self, access_type):
self._check_self()
path = "%s/alua_access_type" % self.path
try:
fwrite(path, str(int(access_type)))
except IOError as e:
raise RTSLibError("Cannot change ALUA access type: %s" % e)
def _get_preferred(self):
self._check_self()
path = "%s/preferred" % self.path
return int(fread(path))
def _set_preferred(self, pref):
self._check_self()
path = "%s/preferred" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set preferred: %s" % e)
def _get_alua_write_metadata(self):
self._check_self()
path = "%s/alua_write_metadata" % self.path
return int(fread(path))
def _set_alua_write_metadata(self, pref):
self._check_self()
path = "%s/alua_write_metadata" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set alua_write_metadata: %s" % e)
def _get_alua_support_active_nonoptimized(self):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
return int(fread(path))
def _set_alua_support_active_nonoptimized(self, enabled):
self._check_self()
path = "%s/alua_su
|
pport_active_nonoptimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_nonoptimized: %s" % e)
def _get_alua_support_active_optimized(self):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
return int(fread(path))
def _set_alua_support_active_optimized(self, enabled):
self._check_self()
path = "%s/alua_suppo
|
rt_active_optimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_optimized: %s" % e)
def _get_alua_support_offline(self):
self._check_self()
path = "%s/alua_support_offline" % self.path
return int(fread(path))
def _set_alua_support_offline(self, enabled):
self._check_self()
path = "%s/alua_support_offline" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_offline: %s" % e)
def _get_alua_support_unavailable(self):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
return int(fread(path))
def _set_alua_support_unavailable(self, enabled):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_unavailable: %s" % e)
def _get_alua_support_standby(self):
self._check_self()
path = "%s/alua_support_standby" % self.path
return int(fread(path))
def _set_alua_support_standby(self, enabled):
self._check_self()
path = "%s/alua_support_standby" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_standby: %s" % e)
def _get_alua_support_transitioning(self):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
return int(fread(path))
def _set_alua_support_transitioning(self, enabled):
|
classam/threepanel
|
threepanel/dashboard/management/commands/clear_cache.py
|
Python
|
agpl-3.0
| 258
| 0.007752
|
from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache
class Command(BaseCommand):
help = 'Clears the cache'
    def handle(self, *args, **options):
print("Clearing cache!")
cache.clear()
|
SocialCognitiveSystems/PRIMO
|
primo2/tests/Inference_test.py
|
Python
|
lgpl-3.0
| 13,455
| 0.012337
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PRIMO2 -- Probabilistic Inference Modules.
# Copyright (C) 2013-2017 Social Cognitive Systems Group,
# Faculty of Technology, Bielefeld University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
from primo2.networks import BayesianNetwork
from primo2.nodes import DiscreteNode
from primo2.io import XMLBIFParser
from primo2.inference.order import Orderer
from primo2.inference.exact import VariableElimination
from primo2.inference.exact import FactorTree
class EliminationOderTest(unittest.TestCase):
def test_min_degree_elimination_order(self):
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
order = Orderer.get_min_degree_order(bn)
        #Test against all possible/equivalent orders, since the actual order is
        #not deterministic due to the randomized hashing in Python 3
potentialOrders = [["slippery_road", "wet_grass", "sprinkler", "winter", "rain"],
["slippery_road", "wet_grass", "sprinkler", "rain", "winter"],
["slippery_road", "wet_grass", "rain", "sprinkler", "winter"],
["slippery_road", "wet_grass", "rain", "winter", "sprinkler"],
["slippery_road", "wet_grass", "winter", "rain", "sprinkler"],
["slippery_road", "wet_grass", "winter", "sprinkler", "rain"],
["slippery_road", "winter", "sprinkler", "wet_grass", "rain"],
["slippery_road", "winter", "sprinkler", "rain", "wet_grass"],
["slippery_road", "winter", "rain", "sprinkler", "wet_grass"],
["slippery_road", "winter", "rain", "wet_grass", "sprinkler"],
["slippery_road", "winter", "wet_grass", "sprinkler", "rain"],
["slippery_road", "winter", "wet_grass", "rain", "sprinkler"],
["slippery_road", "sprinkler", "winter", "wet_grass", "rain"],
["slippery_road", "s
|
prinkler", "winter", "rain", "wet_grass"],
["slippery_road", "sprinkler", "wet_grass", "winte
|
r", "rain"],
["slippery_road", "sprinkler", "wet_grass", "rain", "winter"],
["slippery_road", "sprinkler", "rain", "winter", "wet_grass"],
["slippery_road", "sprinkler", "rain", "wet_grass", "winter"],
["slippery_road", "rain", "wet_grass", "sprinkler", "winter"],
["slippery_road", "rain", "wet_grass", "winter", "sprinkler"],
["slippery_road", "rain", "winter", "wet_grass", "sprinkler"],
["slippery_road", "rain", "winter", "sprinkler", "wet_grass"],
["slippery_road", "rain", "sprinkler", "wet_grass", "winter"],
["slippery_road", "rain", "sprinkler", "winter", "wet_grass"]]
self.assertTrue(order in potentialOrders)
"""
TODO BETTER TEST WITH CERTAIN ORDER!
"""
#Check error handling
with self.assertRaises(TypeError) as cm:
Orderer.get_min_degree_order("Not a Bayesian Network.")
self.assertEqual(str(cm.exception), "Only Bayesian Networks are currently supported.")
def test_random_elimination_order(self):
bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
order = Orderer.get_random_order(bn)
variables = ["slippery_road", "winter", "rain", "sprinkler", "wet_grass"]
self.assertEqual(len(order), len(variables))
for v in variables:
self.assertTrue(v in order)
#Check error handling
with self.assertRaises(TypeError) as cm:
Orderer.get_min_degree_order("Not a Bayesian Network.")
self.assertEqual(str(cm.exception), "Only Bayesian Networks are currently supported.")
class VariableEliminationTest(unittest.TestCase):
def setUp(self):
self.bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
def test_empty_cpt(self):
bn = BayesianNetwork()
from primo2.nodes import DiscreteNode
n1 = DiscreteNode("a")
n2 = DiscreteNode("b")
bn.add_node(n1)
bn.add_node(n2)
bn.add_edge(n1,n2)
res = VariableElimination.naive_marginals(bn, ["a"])
np.testing.assert_array_almost_equal(res.get_potential(), np.array([0.0, 0.0]))
def test_naive_marginals(self):
resFactor = VariableElimination.naive_marginals(self.bn, ["winter"])
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.6, 0.4]))
def test_naive_marginal_evidence_trivial(self):
resFactor = VariableElimination.naive_marginals(self.bn, ["rain"], {"winter": "true"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.8, 0.2]))
def test_naive_marginal_evidence_trivial_multiple_evidence(self):
resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"sprinkler": "true", "rain": "false"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.1, 0.9]))
def test_naive_marginal_evidence(self):
resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"winter": "true"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.668, 0.332]))
def test_naive_marginal_evidence_multiple_evidence(self):
resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"winter": "true", "rain": "false"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.02, 0.98]))
def test_bucket_marginals(self):
resFactor = VariableElimination.bucket_marginals(self.bn, ["winter"])
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.6, 0.4]))
#
def test_bucket_marginal_evidence_trivial(self):
resFactor = VariableElimination.bucket_marginals(self.bn, ["rain"], {"wet_grass": "false"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.158858, 0.841142]))
def test_bucket_marginal_evidence_trivial_multiple_evidence(self):
resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"sprinkler": "true", "rain": "false"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.1, 0.9]))
def test_bucket_marginal_evidence(self):
resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"winter": "true"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.668, 0.332]))
def test_bucket_marginal_evidence_multiple_evidence(self):
resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"winter": "true", "rain": "false"})
np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.02, 0.98]))
### TODO check multiple marginals
# def test_bucket_multiple_marginals(self):
# resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass", "rain"], {"winter": "true", "slippery_road": "false"})
|
FAB4D/humanitas
|
data_collection/ts/climate/get_climate_data.py
|
Python
|
bsd-3-clause
| 3,498
| 0.006861
|
#---------------------------------
#Joseph Boyd - joseph.boyd@epfl.ch
#---------------------------------
from bs4 import BeautifulSoup
from urllib2 import urlopen
import csv
BASE_URL = 'http://www.tutiempo.net'
PAGE_1 = '/en/Climate/India/IN.html'
PAGE_2 = '/en/Climate/India/IN_2.html'
headings = ['Location', 'Year', 'Month', 'T', 'TM', 'Tm', 'SLP', 'H', 'PP', 'VV', 'V', 'VM', 'VG', 'RA', 'SN', 'TS', 'FG']
MAX_ROWS = 100000
FIRST_YEAR = 1999
def get_links(url):
html = urlopen(url).read()
soup = BeautifulSoup(html, 'lxml')
location_links = soup.find('div', id='ListadosV4')
locations_links = [BASE_URL + li.a['href'] for li in location_links.findAll('li')]
return locations_links
def write_log(message):
f_log = open("log.txt", 'a')
f_log.write(message)
f_log.close()
def main():
links = get_links(BASE_URL + PAGE_1)
links.extend(get_links(BASE_URL + PAGE_2))
csvfile = open('climate_data_1.csv', 'wb')
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headings)
num_rows = 0; num_files = 1
for link in links:
print ('Retrieving data from %s ...\n'%(link))
html = urlopen(link).read()
soup = BeautifulSoup(html, 'lxml')
year_list = soup.find('div', id='SelectYear')
title = link.split('/')[-2]
print ('Location: %s\n'%(title))
if year_list is None:
continue
for li in year_list.findAll('li'):
year = int(','.join(li.findAll(text=True)))
print (str(year) + '\n')
if year >= FIRST_YEAR:
html = urlopen(BASE_URL + li.a['href']).read()
soup = BeautifulSoup(html, 'lxml')
month_list = soup.find('div', id='SelectMes')
if month_list is None:
month_list = soup.find('div','ListasLeft')
if month_list is None:
continue
for month in month_list.findAll('li'):
month_name = ','.join(month.findAll(text=True))
if month_name[0:10] == 'Historical':
month_name = month_name.split(" ")[1]
print (month_name + '\n')
html = urlopen(BASE_URL + month.a['href']).read()
soup = BeautifulSoup(html, 'lxml')
climate_table = soup.find('table', 'TablaClima')
if climate_table is None:
continue
                    climate_rows = climate_table.findAll('tr')
for row in climate_rows[1:-2]:
data = row.findAll('td')
print_line = [title, year, month_name]
for datum in data:
a = ','.join(datum.findAll(text=True))
print_line.append(a.encode('utf8'))
csv_writer.writerow(print_line)
num_rows += 1
if num_rows == MAX_ROWS:
csvfile.close()
num_files += 1
csvfile = open('climate_data_%s.csv'%(num_files), 'wb')
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headings)
num_rows = 0
csvfile.close()
if __name__ == '__main__':
main()
|
benoitsteiner/tensorflow-xsmm
|
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
|
Python
|
apache-2.0
| 6,537
| 0.001836
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
    matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return linear_operator_util.matmul_with_broadcast(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _to_dense(self):
return self._matrix
|
saullocastro/pyNastran
|
pyNastran/op2/dev/op2.py
|
Python
|
lgpl-3.0
| 118,219
| 0.000068
|
# -*- coding: utf-8 -*-
"""
Some Python tools for reading select data from Nastran .op2 files.
Converted from the Yeti version.
Can read files in big or little endian format.
@author: Tim Widrick
"""
from __future__ import print_function
import sys
import struct
import itertools as it
import warnings
from six import PY2
import numpy as np
import pyNastran.op2.dev.n2y as n2y
# Notes on the op2 format.
#
# DATA BLOCK:
# All data blocks (including the header) start with a 3-element header:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record
# - endrec = reclen
#
# DATA SET, can be multiple records:
# Next is [reclen, data, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes
# in data
# - data = reclen bytes long, variable format; may be part of
# a data set or the complete set
# - endrec = reclen
#
# Next is info about whether we're done with current data set:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes
# in key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in
# next record; if 0, done with data set
# - endrec = reclen
#
# If not done, we have [reclen, data, endrec] for part 2 (and
# so on) for the record.
#
# Once data set is complete, we have: [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record (I think ... not useful?)
# - endrec = reclen
#
# Then: [reclen, rec_type, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# rec_type (either 4 or 8)
# - rec_type = 0 if table (4 or 8 bytes)
# - endrec = reclen
#
# Then, info on whether we're done with data block:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record; if 0, done with data block
# - endrec = reclen
#
# If not done, we have [reclen, data, endrec] for record 2 and so
# on, until data block is read in.
def expand_dof(ids, pvgrids):
"""
Expands vector of ids to [id, dof].
Parameters
----------
ids : 1d array-like
Vector of node ids
pvgrids : 1d array-like
True/False vector same length as `ids`. The True entries
indicate which elements in `ids` are grids; those will get all
6 DOF while all other ids will just get 0 for the DOF.
Returns
-------
dof : 2d ndarray
2 column matrix: [id, dof]
Examples
--------
>>> import numpy as np
>>> import op2
>>> ids = [1, 2, 3, 4]
>>> pvgrids = [True, False, False, True]
>>> expand_dof(ids, pvgrids)
array([[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[2, 0],
[3, 0],
[4, 1],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6]])
"""
ids, pvgrids = np.atleast_1d(ids, pvgrids)
n = len(ids)
dof = np.zeros((n, 6), int)
dof[pvgrids] = np.arange(1, 7)
V = np.zeros((n, 6), bool)
V[:, 0] = True
V[pvgrids, 1:] = True
expids = np.reshape(ids, (-1, 1)) * V
V = V.flatten()
expids = expids.flatten()
dof = dof.flatten()
return np.vstack((expids[V], dof[V])).T
class OP2(object):
"""Class for reading Nastran op2 files and nas2cam data files."""
def __init__(self, filename=None):
self._fileh = None
self._CodeFuncs = None
if isinstance(filename, str):
self._op2_open(filename)
def __del__(self):
if self._fileh:
self._fileh.close()
self._fileh = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self._fileh:
self._fileh.close()
self._fileh = None
return False
@property
def CodeFuncs(self):
"""See :func:`_check_code`."""
if self._CodeFuncs is None:
def func1(item_code):
if item_code // 1000 in [2, 3, 6]:
return 2
return 1
def func2(item_code):
return item_code % 100
def func3(item_code):
return item_code % 1000
def func4(item_code):
return item_code // 10
def func5(item_code):
return item_code % 10
def func6(item_code):
warnings.warn('Function code 6 method not verified',
RuntimeWarning)
if item_code & 8:
return 0
return 1
def func7(item_code):
v = item_code // 1000
if v in [0, 2]:
return 0
if v in [1, 3]:
return 1
return 2
def funcbig(func_code, item_code):
return item_code & (func_code & 65535)
self._CodeFuncs = {
1: func1, 2: func2, 3: func3, 4: func4,
5: func5, 6: func6, 7: func7,
'big': funcbig,
}
return self._CodeFuncs
def _op2_open(self, filename):
"""
Open op2 file in correct endian mode.
Sets these class variables:
_fileh : file handle
Value returned by open().
_swap : bool
True if bytes must be swapped to correct endianness.
_bit64 : True or False
True if 'key' integers are 64-bit.
_endian : string
Will be '=' if `swap` is False; otherwise, either '>' or '<'
            for big-endian and little-endian, respectively.
_intstr : string
Either `endian` + 'i4' or `endian` + 'i8'.
_ibytes : integer
Either 4 or 8 (corresponds to `intstr`)
_int32str : string
`endian` + 'i4'.
_label : string
The op2 header label or, if none, None.
_date : vector
Three element date vector, or None.
_nastheader : string
Nastran header for file, or None.
_postheaderpos : integer
File position after header.
dbnames : dictionary
See :func:`directory` for description. Contains data block
names, bytes in file, file positions, and for matrices, the
matrix size.
dblist : list
See :func:`directory` for description. Contains same info
as dbnames, but in a list of ordered and formatted strings.
_Str4 : struct.Struct object
Precompiled for reading 4 byte integers (corresponds to
`int32str`).
_Str : struct.Struct object
Precompiled for reading 4 or 8 byte integers (corresponds
to `intstr`).
File is positioned after the header label (at `postheaderpos`).
"""
self._fileh = open(filename, 'rb')
self.dbnames = []
self.dblist = []
reclen = struct.unpack('i', self._fileh.read(4))[0]
self._fileh.seek(0)
reclen = np.array(reclen, dtype=np.int32)
if not np.any(reclen == [4, 8]):
self._swap = True
reclen = reclen.byteswap()
if not np.any(reclen == [4, 8]):
self._fileh.close()
self._fileh = None
                raise RuntimeError('Could not decipher file. First '
                                   '4-byte integer should be 4 or 8.')
if sys.byteorder == 'little':
self._endian = '>'
else:
|
JohanComparat/nbody-npt-functions
|
bin/bin_SMHMr/MD10-pie-plot.py
|
Python
|
cc0-1.0
| 5,119
| 0.034186
|
import time
t0 = time.time()
import os
import numpy as n
import sys
import glob
import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
L_box = 1000./0.6777
tracer_names = n.array(['S8_ELG', 'S8_BG1', 'S8_BG2', 'S5_GAL', 'S8_QSO', 'S6_AGN', 'S5_BCG'])
marker_dict={'S5_BCG':'1', 'S5_GAL':'2', 'S6_AGN':'3', 'S8_BG1':',', 'S8_BG2':',', 'S8_ELG':',', 'S8_QSO':'x'}
color_dict ={'S5_BCG':'r', 'S5_GAL':'r', 'S6_AGN':'m', 'S8_BG1':'k', 'S8_BG2':'g', 'S8_ELG':'b', 'S8_QSO':'g'}
p0 = n.array([[-1., -1.]])
points = {'S5_BCG':p0, 'S5_GAL':p0, 'S6_AGN':p0, 'S8_BG1':p0, 'S8_BG2':p0, 'S8_ELG':p0, 'S8_QSO':p0}
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
zs = n.arange(0.,4,0.001)
dc_2_z = interp1d(cosmoMD.comoving_distance(zs),zs)
import astropy.io.fits as fits
sf = fits.open(os.path.join(os.environ['MD10'],'output_MD_1.0Gpc.fits'))[1].data
plot_dir = '/afs/mpe/www/people/comparat/eRoMok/pie_plots/'
work_dir = os.path.join(os.environ['MD10'],'work_agn')
# redshift loop
#ii = 0
def get_slice(cpickle_dump_file, x_observer=0., y_observer=0., z_observer = 0., x_shift=0., y_shift=0., z_shift=0., slice_z_min=0., slice_z_max = 10., distance_min=0., distance_max = L_box):
snap_selection = (sf['comoving_distance']<distance_max)&(sf['comoving_distance']>distance_min)
snaps = sf[snap_selection]
z_all = sf['redshift'][snap_selection]
z_boundaries = n.hstack((dc_2_z(distance_min), (z_all[1:]+z_all[:-1])/2., dc_2_z(distance_max)))
for ii, el in enumerate(snaps): # in range(len(z_all)):
z_min, z_max = z_boundaries[ii], z_boundaries[ii+1]
r_min, r_max = cosmoMD.comoving_distance(z_min).value, cosmoMD.comoving_distance(z_max).value
position_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_?.fits')))
position_files.sort()
# position file loop
print r_min, r_max
for index in range(len(position_files)):
print time.time()-t0
print position_files[index]
positions = fits.open(position_files[index])[1].data
tracer_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_'+str(index)+'_4MOST_*.fits')))
tracer_files.sort()
            # tracer loop
#path_2_tracer_file = tracer_files[0]
            for path_2_tracer_file in tracer_files:
print path_2_tracer_file
spl_bn = os.path.basename(path_2_tracer_file)[:-5].split('_')
tracer_name = spl_bn[-2]+'_'+spl_bn[-1]
ids = fits.open(path_2_tracer_file)[1].data['line_number']
x_i = positions['x'][ids]/0.6777 - x_observer + x_shift
y_i = positions['y'][ids]/0.6777 - y_observer + y_shift
z_i = positions['z'][ids]/0.6777 - z_observer + z_shift
shell = (x_i*x_i + y_i*y_i + z_i*z_i < r_max**2.) & (x_i*x_i + y_i*y_i + z_i*z_i > r_min**2.)
slice = (shell) & (z_i>slice_z_min) &(z_i<slice_z_max)
points[tracer_name] = n.vstack(( points[tracer_name], n.transpose([x_i[slice], y_i[slice]]) ))
f=open(cpickle_dump_file, 'w')
cPickle.dump(points,f)
f.close()
return points
points_1 = get_slice(os.path.join(work_dir, 'slice_1_Lbox.pkl'))
points_2 = get_slice(os.path.join(work_dir, 'slice_2_Lbox.pkl'), x_shift = L_box, distance_min=L_box, distance_max = 2*L_box)
points_3 = get_slice(os.path.join(work_dir, 'slice_3_Lbox.pkl'), x_shift = 2*L_box, distance_min=2*L_box, distance_max = 3*L_box)
points_4 = get_slice(os.path.join(work_dir, 'slice_4_Lbox.pkl'), x_shift = 3*L_box, distance_min=3*L_box, distance_max = 4*L_box)
points_1 = cPickle.load(open(os.path.join(work_dir, 'slice_1_Lbox.pkl'),'r'))
points_2 = cPickle.load(open(os.path.join(work_dir, 'slice_2_Lbox.pkl'),'r'))
points_3 = cPickle.load(open(os.path.join(work_dir, 'slice_3_Lbox.pkl'),'r'))
points_4 = cPickle.load(open(os.path.join(work_dir, 'slice_4_Lbox.pkl'),'r'))
def plot_slice(points, name='slice_1_Lbox.png', lims=(0,L_box)) :
p.figure(0, ((6,6)))
p.axes([0.17,0.17,0.78,0.78])
for tracer in tracer_names:
x_pos, y_pos = points[tracer].T
p.plot(x_pos, y_pos,marker=marker_dict[tracer],color=color_dict[tracer],rasterized=True,ls='None',label=tracer)
p.legend(loc=0, frameon=False, fontsize=9)
p.xlabel('Mpc')
p.ylabel('Mpc')
p.xlim(lims)
p.ylim((0,L_box))
p.title(str(n.round(dc_2_z(lims[0]),2))+'<z<'+str(n.round(dc_2_z(lims[1]),2)) )
p.savefig(os.path.join(plot_dir, name))
p.clf()
plot_slice(points_1, name='slice_1_Lbox.png', lims=(0*L_box,1*L_box))
plot_slice(points_2, name='slice_2_Lbox.png', lims=(1*L_box,2*L_box))
plot_slice(points_3, name='slice_3_Lbox.png', lims=(2*L_box,3*L_box))
plot_slice(points_4, name='slice_4_Lbox.png', lims=(3*L_box,4*L_box))
sys.exit()
p.figure(0, ((6,6)))
p.axes([0.17,0.17,0.78,0.78])
for tracer in tracer_names:
x_pos, y_pos = points_2[tracer].T
p.plot(x_pos, y_pos,marker=marker_dict[tracer],color=color_dict[tracer],rasterized=True,ls='None',label=tracer)
p.legend(loc=0, frameon=False, fontsize=9)
p.xlabel('Mpc')
p.ylabel('Mpc')
p.xlim(lims)
p.ylim((0.,L_box))
p.savefig(os.path.join(plot_dir, 'slice_2_Lbox.png'))
p.clf()
|
sissaschool/xmlschema
|
xmlschema/namespaces.py
|
Python
|
mit
| 10,509
| 0.001713
|
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains classes for managing maps related to namespaces.
"""
import re
from typing import Any, Container, Dict, Iterator, List, Optional, MutableMapping, \
Mapping, TypeVar
from .exceptions import XMLSchemaValueError, XMLSchemaTypeError
from .helpers import local_name
from .aliases import NamespacesType
###
# Base classes for managing namespaces
class NamespaceResourcesMap(MutableMapping[str, Any]):
"""
Dictionary for storing information about namespace resources. The values are
lists of objects. Setting an existing value appends the object to the value.
Setting a value with a list sets/replaces the value.
"""
__slots__ = ('_store',)
def __init__(self, *args: Any, **kwargs: Any):
self._store: Dict[str, List[Any]] = {}
self.update(*args, **kwargs)
def __getitem__(self, uri: str) -> Any:
return self._store[uri]
def __setitem__(self, uri: str, value: Any) -> None:
if isinstance(value, list):
self._store[uri] = value[:]
else:
try:
self._store[uri].append(value)
except KeyError:
self._store[uri] = [value]
def __delitem__(self, uri: str) -> None:
del self._store[uri]
def __iter__(self) -> Iterator[str]:
return iter(self._store)
def __len__(self) -> int:
return len(self._store)
def __repr__(self) -> str:
return repr(self._store)
def clear(self) -> None:
self._store.clear()
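# Editor's illustration (not part of the original module) of the append-vs-replace
# semantics described in the NamespaceResourcesMap docstring above; the function
# name is hypothetical and exists only for this sketch.
def _namespace_resources_example() -> List[Any]:
    resources = NamespaceResourcesMap()
    resources['tns'] = 'schema1.xsd'     # first assignment creates ['schema1.xsd']
    resources['tns'] = 'schema2.xsd'     # a plain value appends -> ['schema1.xsd', 'schema2.xsd']
    resources['tns'] = ['schema3.xsd']   # assigning a list replaces the stored value
    return resources['tns']              # -> ['schema3.xsd']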
class NamespaceMapper(MutableMapping[str, str]):
"""
A class to map/unmap namespace prefixes to URIs. The mapped namespaces are
automatically registered when set. Namespaces can be updated overwriting
the existing registration or inserted using an alternative prefix.
:param namespaces: initial data with namespace prefixes and URIs. \
The provided dictionary is bound with the instance, otherwise a new \
empty dictionary is used.
:param strip_namespaces: if set to `True` uses name mapping methods that strip \
namespace information.
"""
__slots__ = '_namespaces', 'strip_namespaces', '__dict__'
_namespaces: NamespacesType
def __init__(self, namespaces: Optional[NamespacesType] = None,
strip_namespaces: bool = False):
if namespaces is None:
self._namespaces = {}
else:
self._namespaces = namespaces
self.strip_namespaces = strip_namespaces
def __setattr__(self, name: str, value: str) -> None:
if name == 'strip_namespaces':
if value:
self.map_qname = self.unmap_qname = self._local_name # type: ignore[assignment]
elif getattr(self, 'strip_namespaces', False):
self.map_qname = self._map_qname # type: ignore[assignment]
self.unmap_qname = self._unmap_qname # type: ignore[assignment]
super(NamespaceMapper, self).__setattr__(name, value)
def __getitem__(self, prefix: str) -> str:
return self._namespaces[prefix]
def __setitem__(self, prefix: str, uri: str) -> None:
self._namespaces[prefix] = uri
def __delitem__(self, prefix: str) -> None:
del self._namespaces[prefix]
def __iter__(self) -> Iterator[str]:
return iter(self._namespaces)
def __len__(self) -> int:
return len(self._namespaces)
@property
def namespaces(self) -> NamespacesType:
return self._namespaces
@property
def default_namespace(self) -> Optional[str]:
return self._namespaces.get('')
def clear(self) -> None:
self._namespaces.clear()
def insert_item(self, prefix: str, uri: str) -> None:
"""
A method for setting an item that checks the prefix before inserting.
In case of collision the prefix is changed adding a numerical suffix.
"""
if not prefix:
if '' not in self._namespaces:
self._namespaces[prefix] = uri
return
elif self._namespaces[''] == uri:
return
prefix = 'default'
while prefix in self._namespaces:
if self._namespaces[prefix] == uri:
return
match = re.search(r'(\d+)$', prefix)
if match:
index = int(match.group()) + 1
prefix = prefix[:match.span()[0]] + str(index)
else:
prefix += '0'
self._namespaces[prefix] = uri
def _map_qname(self, qname: str) -> str:
"""
Converts an extended QName to the prefixed format. Only registered
namespaces are mapped.
:param qname: a QName in extended format or a local name.
:return: a QName in prefixed format or a local name.
"""
try:
if qname[0] != '{' or not self._namespaces:
return qname
namespace, local_part = qname[1:].split('}')
except IndexError:
return qname
except ValueError:
raise XMLSchemaValueError("the argument 'qname' has a w
|
rong format: %r" % qname)
except TypeError:
raise XMLSchemaTypeError("the argument 'qname' must be a string-like object")
for prefix, uri in sorted(self._namespaces.items(), reverse=True):
if uri == namespace:
return '%s:%s' % (prefix, local_part) if prefix else local_part
else:
return qname
map_qname = _map_qname
def _unmap_qname(self, qname: str,
                     name_table: Optional[Container[Optional[str]]] = None) -> str:
"""
Converts a QName in prefixed format or a local name to the extended QName format.
Local names are converted only if a default namespace is included in the instance.
If a *name_table* is provided a local name is mapped to the default namespace
only if not found in the name table.
:param qname: a QName in prefixed format or a local name
:param name_table: an optional lookup table for checking local names.
:return: a QName in extended format or a local name.
"""
try:
if qname[0] == '{' or not self._namespaces:
return qname
prefix, name = qname.split(':')
except IndexError:
return qname
except ValueError:
if ':' in qname:
raise XMLSchemaValueError("the argument 'qname' has a wrong format: %r" % qname)
if not self._namespaces.get(''):
return qname
elif name_table is None or qname not in name_table:
return '{%s}%s' % (self._namespaces.get(''), qname)
else:
return qname
except (TypeError, AttributeError):
raise XMLSchemaTypeError("the argument 'qname' must be a string-like object")
else:
try:
uri = self._namespaces[prefix]
except KeyError:
return qname
else:
return '{%s}%s' % (uri, name) if uri else name
unmap_qname = _unmap_qname
@staticmethod
def _local_name(qname: str, *_args: Any, **_kwargs: Any) -> str:
return local_name(qname)
def transfer(self, namespaces: NamespacesType) -> None:
"""
Transfers compatible prefix/namespace registrations from a dictionary.
Registrations added to namespace mapper instance are deleted from argument.
:param namespaces: a dictionary containing prefix/namespace registrations.
"""
transferred = []
for k, v in namespaces.items():
if k in self._namespaces:
if v != self._namespaces[k]:
continue
else:
self[k] = v
|
1uk/3tsqd
|
classes/Player.py
|
Python
|
mit
| 198
| 0.010101
|
import pygame
from pygame import event
class Player:
    def __init__(self, p_id):
self.points = None
self.p_id = p_id
def turn(self, nr):
return pygame.event.get()
|
alexkolar/home-assistant
|
homeassistant/components/ifttt.py
|
Python
|
mit
| 1,789
| 0
|
"""
homeassistant.components.ifttt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This component enables you to trigger Maker IFTTT recipes.
Check https://ifttt.com/maker for details.
Configuration:
To use Maker IFTTT you will need to add something like the following to your
config/configuration.yaml.
ifttt:
key: xxxxx-x-xxxxxxxxxxxxx
Variables:
key
*Required
Your api key
"""
import logging
import requests
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
DOMAIN = "ifttt"
SERVICE_TRIGGER = 'trigger'
ATTR_EVENT = 'event'
ATTR_VALUE1 = 'value1'
ATTR_VALUE2 = 'value2'
ATTR_VALUE3 = 'value3'
DEPENDENCIES = []
REQUIREMENTS = ['pyfttt==0.3']
def trigger(hass, event, value1=None, value2=None, value3=None):
""" Trigger a Maker IFTTT recipe """
data = {
ATTR_EVENT: event,
ATTR_VALUE1: value1,
ATTR_VALUE2: value2,
        ATTR_VALUE3: value3,
}
hass.services.call(DOMAIN, SERVICE_TRIGGER, data)
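# Editor's sketch (not part of the original component): how an automation script
# might fire a recipe through the helper above once `ifttt:` is configured as in
# the module docstring. The event name and value are placeholders for illustration.
def example_notify_maker(hass):
    """Fire a hypothetical 'door_opened' Maker event with one value."""
    trigger(hass, "door_opened", value1="front door")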
def setup(hass, config):
""" Setup the ifttt service component """
if not validate_config(config, {DOMAIN: ['key']}, _LOGGER):
return False
key = config[DOMAIN]['key']
def trigger_service(call):
""" Handle ifttt trigger service call
|
s. """
event = call.data.get(ATTR_EVENT)
value1 = call.data.get(ATTR_VALUE1)
value2 = call.data.get(ATTR_VALUE2)
value3 = call.data.get(ATTR_VALUE3)
if event is None:
return
try:
import pyfttt as pyfttt
pyfttt.send_event(key, event, value1, value2, value3)
except requests.exceptions.RequestException:
_LOGGER.exception("Error communicating with IFTTT")
hass.services.register(DOMAIN, SERVICE_TRIGGER, trigger_service)
return True
|
wenxichen/tensorflow_yolo2
|
src/imagenet/imagenet_train_darknet.py
|
Python
|
mit
| 4,911
| 0.001833
|
"""Train ILSVRC2017 Data using homemade scripts."""
import cv2
import os
import math
import tensorflow as tf
from multiprocessing import Process, Queue
import os
import sys
FILE_DIR = os.path.dirname(__file__)
sys.path.append(FILE_DIR + '/../')
import config as cfg
from img_dataset.ilsvrc2017_cls_multithread import ilsvrc_cls
from yolo2_nets.darknet import darknet19
from yolo2_nets.net_utils import get_ordered_ckpts
from utils.timer import Timer
slim = tf.contrib.slim
def get_validation_process(imdb, queue_in, queue_out):
"""Get validation dataset. Run in a child process."""
while True:
queue_in.get()
images, labels = imdb.get()
queue_out.put([images, labels])
imdb = ilsvrc_cls('train', data_aug=True, multithread=cfg.MULTITHREAD)
val_imdb = ilsvrc_cls('val', batch_size=64)
# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True) # start getting the first batch
CKPTS_DIR = cfg.get_ckpts_dir('darknet19', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
'darknet19', imdb.name)
input_data = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_data = tf.placeholder(tf.int32, None)
is_training = tf.placeholder(tf.bool)
logits = darknet19(input_data, is_training=is_training)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_data, logits=logits)
loss = tf.reduce_mean(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# train_op = tf.train.AdamOptimizer(0.0005).minimize(loss)
train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
correct_pred = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), label_data)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
######################
# Initialize Session #
######################
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(TENSORBOARD_TRAIN_DIR)
val_writer = tf.summary.FileWriter(TENSORBOARD_VAL_DIR)
# # initialize variables, assume all vars are new now
# init_op = tf.global_variables_initializer()
# sess.run(init_op)
# load previous models
ckpts = get_ordered_ckpts(sess, imdb, 'darknet19')
variables_to_restore = slim.get_variables_to_restore()
# # change optimizer
# print('Initializing variables for the new optimizer')
# optimzer_vars = [var for var in tf.global_variables()
# if "Momentum" in var.name]
# init_op = tf.variables_initializer(optimzer_vars)
# sess.run(init_op)
# for var in optimzer_vars:
# if var in variables_to_restore:
# variables_to_restore.remove(var)
print('Restoring model snapshots from {:s}'.format(ckpts[-1]))
old_saver = tf.train.Saver(variables_to_restore)
old_saver.restore(sess, str(ckpts[-1]))
print('Restored.')
fnames = ckpts[-1].split('_')
old_epoch = int(fnames[-1][:-5])
imdb.epoch = old_epoch + 1
# simple model saver
cur_saver = tf.train.Saver()
T = Timer()
for i in range(imdb.total_batch * 10 + 1):
T.tic()
images, labels = imdb.get()
_, loss_value, acc_value, train_summary = sess.run(
[train_op, loss, accuracy, merged], {input_data: images, label_data: labels, is_training: 1})
_time = T.toc(average=False)
print('epoch {:d}, iter {:d}/{:d}, training loss: {:.3}, training acc: {:.3}, take {:.2}s'
.format(imdb.epoch, (i + 1) % imdb.total_batch,
imdb.total_batch, loss_value, acc_value, _time))
if (i + 1) % 25 == 0:
T.tic()
val_images, val_labels = queue_out.get()
val_loss_value, val_acc_value, val_summary = sess.run(
[loss, accuracy, merged], {input_data: val_images, label_data: val_labels, is_training: 0})
_val_time = T.toc(average=False)
print('###validation loss: {:.3}, validation acc: {:.3}, take {:.2}s'
.format(val_loss_value, val_acc_value, _val_time))
queue_in.put(True)
global_step = imdb.epoch * imdb.total_batch + (i % imdb.total_batch)
train_writer.add_summary(train_summary, global_step)
val_writer.add_summary(val_summary, global_step)
if (i % (imdb.total_batch * 2) == 0):
save_path = cur_saver.save(sess, os.path.join(
CKPTS_DIR,
cfg.TRAIN_SNAPSHOT_PREFIX + '_epoch_' + str(imdb.epoch - 1) + '.ckpt'))
print("Model saved in file: %s" % save_path)
# terminate child processes
if cfg.MULTITHREAD:
imdb.close_all_processes()
queue_in.cancel_join_thread()
queue_out.cancel_join_thread()
val_data_process.terminate()
|
cournape/talkbox
|
scikits/talkbox/linpred/__init__.py
|
Python
|
mit
| 156
| 0.019231
|
__all__ = []
from common import *
import common
__all__ += common.__all__
from levinson_lpc import *
import levinson_lpc
__all__ += levinson_lpc.__all__
|
EnTeQuAk/pytest-django-casperjs
|
conftest.py
|
Python
|
bsd-3-clause
| 1,003
| 0
|
import os
import os.path
def pytest_configure(config):
test_db = os.environ.get('DB', 'sqlite')
os.environ['DJANGO_SETTINGS_MODULE'] = 'pytest_django_casperjs.tests.settings' # noqa
from django.conf import settings
if test_db == 'postgresql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'pytest_django_casperjs_test',
})
elif test_db == 'mysql':
import pymysql
pymysql.install_as_MySQLdb()
        settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'USER': 'root',
'NAME': 'pytest_django_casperjs_test',
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
            'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
else:
raise RuntimeError('Unsupported database configuration %s' % test_db)
|
nan86150/ImageFusion
|
src/main/fusion_dwb.py
|
Python
|
mit
| 2,018
| 0.002973
|
#!/usr/bin/env python
# encoding: utf-8
from ImageFusion import ImageFusion
from PIL import Image
import numpy as np
import pylab as plt
import pywt
class FusionDWB(ImageFusion):
""" Image Fusion based wavelet """
def __init__(self, imageNames = None, zt=2, ap=2, mp=0):
self._imageNames = imageNames
self._images = []
self._fusionImage = None
self._zt = zt # level num
self._ap = ap # 0-average, 1-min, 2-max
self._mp = mp # 0-average, 1-min, 2-max
def _load_images(self):
for name in self._imageNames:
self._images.append(np.array(Image.open(name), 'f'))
def fusion(self):
self._load_images()
coeffss = []
for image in self._images:
coeffss.append(pywt.wavedec2(image, 'db1', level=self._zt))
# low pass
if self._mp == 0:
cAF = coeffss[0][0]
for coeffs in coeffss[1:]:
cAF += coeffs[0]
            cAF = cAF / len(coeffss)  # average the approximation coefficients over all input images
# high pass
if self._ap == 2:
hipassF = coeffss[0][1:]
for coeffs in coeffss[1:]: # every image
for idxLevel, HVDs in enumerate(coeffs[1:]): # every level
for idxDirec, HVD in enumerate(HVDs):
maxMap = hipassF[idxLevel][idxDirec] < HVD
hipassF[idxLevel][idxDirec][maxMap] = HVD[maxMap]
coeffsFusion = [cAF,] + hipassF
        self._fusionImage = pywt.waverec2(coeffsFusion, 'db1')
return self._fusionImage
def plot(self):
plt.figure(0)
plt.gray()
plt.subplot(131)
plt.imshow(self._images[0])
plt.subplot(132)
plt.imshow(self._images[1])
        plt.subplot(133)
plt.imshow(self._fusionImage)
plt.show()
if __name__ == '__main__':
IMAGEPATH = "../../images/multifocus/"
imLists = [IMAGEPATH+"a01_1.tif",IMAGEPATH+"a01_2.tif"]
fu = FusionDWB(imLists)
fu.fusion()
fu.plot()
|
abarnert/pyinterval
|
test/test_basic.py
|
Python
|
bsd-3-clause
| 13,302
| 0.005488
|
# Copyright (c) 2008, Stefano Taschini <taschini@ieee.org>
# All rights reserved.
# See LICENSE for details.
import unittest
from interval import interval, fpu
class FpuTestCase(unittest.TestCase):
def test_third(self):
"Nearest rounding of 1/3 is downwards."
self.assertEqual(1/3.0, fpu.down(lambda: 1.0 / 3.0))
self.assertTrue(1/3.0 < fpu.up(lambda: 1.0 / 3.0))
self.assertEqual(-1/3.0, fpu.up(lambda: 1.0 / -3.0))
self.assertTrue(-1/3.0 > fpu.down(lambda: 1.0 / -3.0))
def test_fourth(self):
" 1/4 is exact."
self.assertEqual(1/4.0, fpu.down(lambda: 1.0 / 4.0))
self.assertEqual(1/4.0, fpu.up(lambda: 1.0 / 4.0))
self.assertEqual(-1/4.0, fpu.up(lambda: 1.0 / -4.0))
self.assertEqual(-1/4.0, fpu.down(lambda: 1.0 / -4.0))
def test_fifth(self):
"Nearest rounding of 1/5 is upwards."
self.assertEqual(1/5.0, fpu.up(lambda: 1.0 / 5.0))
self.assertTrue(1/5.0 > fpu.down(lambda: 1.0 / 5.0))
self.assertEqual(-1/5.0, fpu.down(lambda: 1.0 / -5.0))
self.assertTrue(-1/5.0 < fpu.up(lambda: 1.0 / -5.0))
def test_ieee754(self):
"fpu.float respect ieee754 semantics."
self.assertEqual(fpu.infinity + fpu.infinity, fpu.infinity)
self.assertTrue(fpu.isnan(fpu.nan))
self.assertTrue(fpu.isnan(0.0 * fpu.infinity))
self.assertTrue(fpu.isnan(fpu.infinity - fpu.infinity))
def test_float_coercion(self):
"Only real-number scalars should be able to coerce as fpu.float"
self.assertRaises(Exception, lambda: float(1,2))
self.assertRaises(Exception, lambda: float((1,2)))
self.assertRaises(Exception, lambda: float([1,2]))
self.assertRaises(Exception, lambda: float('a'))
self.assertRaises(Exception, lambda: float(1+1j))
def test_min(self):
"Verify corner cases with nan, -inf, +inf"
self.assertEqual(fpu.min((1.0, 2.0)), 1.0)
self.assertEqual(fpu.min((1.0, fpu.infinity)), 1.0)
self.assertEqual(fpu.min((1.0, -fpu.infinity)), -fpu.infinity)
self.assertTrue(fpu.isnan(fpu.min((1.0, -fpu.nan))))
def test_max(self):
"Verify corner cases with nan, -inf, +inf"
self.assertEqual(fpu.max((1.0, 2.0)), 2.0)
self.assertEqual(fpu.max((1.0, fpu.infinity)), fpu.infinity)
self.assertEqual(fpu.max((1.0, -fpu.infinity)), 1.0)
self.assertTrue(fpu.isnan(fpu.max((1.0, fpu.nan))))
def test_power(self):
x = 1/3.0
# The cube of one third should depend on the rounding mode
self.assertTrue(fpu.down(lambda: x*x*x) < fpu.up(lambda: x*x*x))
# But using the built-in power operator, it doesn't necessarily do it
# print fpu.down(lambda: x**3) < fpu.up(lambda: x**3))
# So we define an integer power methods that does
self.assertTrue(fpu.power_rd(x, 3) < fpu.power_ru(x, 3))
self.assertTrue(fpu.power_rd(-x, 3) < fpu.power_ru(-x, 3))
self.assertTrue(fpu.power_rd(x, 4) < fpu.power_ru(x, 4))
self.assertTrue(fpu.power_rd(-x, 4) < fpu.power_ru(-x, 4))
self.assertEqual(
(fpu.down(lambda: x*x*x), fpu.up(lambda: x*x*x)),
(fpu.power_rd(x, 3), fpu.power_ru(x, 3)))
class ModuleTestCase(unittest.TestCase):
def test_namespace(self):
import interval
self.assertEqual(
dir(interval),
['__builtins__', '__doc__', '__file__', '__name__', '__path__', 'fpu', 'imath', 'inf', 'interval'])
class IntervalTestCase(unittest.TestCase):
def test_trivial_constructor(self):
self.assertEqual(interval[1], ((1, 1),))
self.assertEqual(interval(1), ((1, 1),))
self.assertEqual(interval[1, 2], ((1, 2),))
self.assertEqual(interval(1, 2), ((1, 1), (2, 2)))
self.assertEqual(interval([1, 2], [3, 4]), ((1, 2), (3, 4)))
self.assertEqual(interval([1,2]), interval(interval([1, 2])))
def test_nan_constructor(self):
self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
self.assertEqual(interval(2, fpu.nan, 9), ((-fpu.infinity, fpu.infinity),))
def test_failing_constructor(self):
self.assertRaises(interval.ComponentError, lambda: interval[1, [2, 3]])
self.assertRaises(interval.ComponentError, lambda: interval[1, 2, 3])
self.assertRaises(interval.ComponentError, lambda: interval(0, [1, 2, 3]))
        self.assertRaises(interval.ComponentError, lambda: interval(0, [1, [2, 3]]))
self.assertRaises(interval.ComponentError, lambda: interval['a', 1])
def test_canonical_constructor(self):
self.assertEqual(interval([1, 3], [4, 6], [2, 5], 9), ((1, 6), (9, 9)))
self.assertEqual(interval[2 ** (52 + 1) - 1], interval[9007199254740991.0])
self.assertEqual(interval[2 ** (52 + 1) + 1], interval[4503599627370496 * 2.0, 4503599627370497 * 2.0])
self.assertEqual(interval[-2 ** (52 + 1) + 1], interval[-9007199254740991.0])
self.assertEqual(interval[-2 ** (52 + 1) - 1], interval[-4503599627370497 * 2.0, -4503599627370496 * 2.0])
self.assertEqual(interval[2 ** (52 + 2) + 1], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[2 ** (52 + 2) + 2], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[2 ** (52 + 2) + 3], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 1], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 2], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
self.assertEqual(interval[-2 ** (52 + 2) - 3], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
def test_unary(self):
self.assertEqual(interval[1, 2], +interval[1, 2])
self.assertEqual(interval[-2, -1], -interval[1, 2])
def test_sum(self):
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[fpu.infinity])
self.assertEqual(interval[4, 6], interval[1, 2] + interval[3, 4])
self.assertEqual(interval[3, fpu.infinity], interval[1, fpu.infinity] + interval[2])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity, -1] + interval[2, +fpu.infinity])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[8, +fpu.infinity])
self.assertEqual(interval([1, 2], [10, fpu.infinity]) + interval([1,9],[-2,-1]), interval([-1, 1], [2, fpu.infinity]))
self.assertEqual(interval[1, 9] + interval([1, 2], [10, fpu.infinity]), interval[2, fpu.infinity])
def test_sum_coercion(self):
self.assertEqual(interval[1,2] + 2, interval[3, 4])
self.assertRaises(TypeError, lambda: interval[1,2] + 1j)
self.assertEqual(1 + interval[4, 5], interval[5, 6])
self.assertRaises(TypeError, lambda: (1, 2) + interval[1,2])
self.assertEqual(fpu.infinity + interval[4, 5], interval[fpu.infinity])
def test_sub(self):
self.assertEqual(interval[1, 2] - interval[3, 4], interval[-3.0, -1.0])
self.assertEqual(interval[1, 2] - 0.5, interval[0.5, 1.5])
self.assertEqual(1.5 - interval[1, 2], interval[-0.5, 0.5])
def test_mul(self):
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], fpu.infinity * interval[0])
self.assertEqual(interval[+fpu.infinity], interval[+fpu.infinity] * interval[3])
self.assertEqual(interval[-8, +10], interval[1, 2] * interval[-4, 5])
self.assertEqual(interval[3, 8], interval[1, 2] * interval[3, 4])
self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[0,1 ] * interval[2, +fpu.infinity])
self.assertEqual(interval[2, fpu.infinity], interval[-fpu.infinity,-2] * interval[-fpu.infinity,-1])
self.assertEqual(interval([1, 2], [3, 4]) * interval[0.5, 2], interval[0.5, 8])
self.assertEqual(interval[1, 2] * 2, interval[2, 4])
def test_inverse(s
|
111pontes/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_clns_isis_datatypes.py
|
Python
|
apache-2.0
| 1,675
| 0.014328
|
""" Cisco_IOS_XR_clns_isis_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class IsisAddressFamilyEnum(Enum):
"""
IsisAddressFamilyEnum
Isis address family
.. data:: ipv4 = 0
IPv4
.. data:: ipv6 = 1
IPv6
"""
ipv4 = 0
ipv6 = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes as meta
return meta._meta_table['IsisAddressFamilyEnum']
class IsisInternalLevelEnum(Enum):
"""
IsisInternalLevelEnum
Isis internal level
.. data:: not_set = 0
Level not set
.. data:: level1 = 1
Level1
.. data:: level2 = 2
Level2
"""
not_set = 0
level1 = 1
level2 = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes as meta
return meta._meta_table['IsisInternalLevelEnum']
class IsisSubAddressFamilyEnum(Enum):
"""
IsisSubAddressFamilyEnum
Isis sub address family
.. data:: unicast = 0
Unicast
.. data:: multicast = 1
Multicast
"""
unicast = 0
multicast = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes as meta
return meta._meta_table['IsisSubAddressFamilyEnum']
|
ThreatConnect-Inc/tcex
|
tcex/api/tc/v2/notifications/notifications.py
|
Python
|
apache-2.0
| 3,187
| 0.000628
|
"""TcEx Notification Module"""
# standard library
import json
import logging
from typing import TYPE_CHECKING
# first-party
from tcex.exit.error_codes import handle_error
if TYPE_CHECKING:
# third-party
from requests import Session
# get tcex logger
logger = logging.getLogger('tcex')
class Notifications:
"""TcEx Notification Class"""
def __init__(self, session_tc: 'Session'):
"""Initialize the Class properties.
Args:
session_tc: A configured instance of requests.Session with TC API Auth.
"""
self.session_tc = session_tc
# properties
self._is_organization = False
self._notification_type = None
self._recipients = None
self._priority = 'Low'
self.log = logger
def recipients(self, notification_type, recipients, priority='Low'):
"""Set vars for the passed in data. Used for one or more recipient notification.
.. code-block:: javascript
{
"notificationType": notification_type,
"priority": priority
"isOrganization": false,
"recipients": recipients
}
Args:
notification_type (str): The type of notification being sent.
recipients (str): A comma delimited string of recipients.
priority (str): The priority: Low, Medium, High.
"""
self._notification_type = notification_type
self._recipients = recipients
self._priority = priority
self._is_organization = False
def org(self, notification_type, priority='Low'):
"""Set vars for the passed in data. Used for org notification.
.. code-block:: javascript
{
"notificationType": notification_type,
"priority": priority
"isOrganization": true
}
Args:
notification_type (str): The notification type.
priority (str): The priority: Low, Medium, High.
"""
self._notification_type = notification_type
self._recipients = None
self._priority = priority
self._is_organization = True
def send(self, message):
"""Send our message
Args:
message (str): The message to be sent.
Returns:
requests.models.Response: The response from the request.
"""
body = {
'notificationType': self._notification_type,
'priority': self._priority,
'isOrganization': self._is_organization,
'message': message,
}
if self._recipients:
body['recipients'] = self._recipients
self.log.debug(f'notification body: {json.dumps(body)}')
# create our tcex resource
r = self.session_tc.post('/v2/notifications', json=body)
if r.status_code == 400:
# specifically handle unknown users
self.log.error(f'Failed to send notification ({r.text})')
elif not r.ok: # pragma: no cover
handle_error(750, [r.status_code, r.text])
# return response body
return r.json()
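# Illustrative usage sketch (added, not part of the original module). The
# notification type and recipient below are placeholder values; session_tc is
# assumed to be a requests.Session already configured with TC API auth.
#
#   notifications = Notifications(session_tc)
#   notifications.recipients('Example Alert', 'user@example.com', priority='High')
#   response_body = notifications.send('Suspicious indicator observed.')
#
# For an organization-wide notification, notifications.org('Example Alert')
# would be used instead before calling send().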
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/__init__.py
|
Python
|
gpl-2.0
| 7,425
| 0.002559
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis` --- analysis of molecular simulations in python
=================================================================
MDAnalysis (https://www.mdanalysis.org) is a python toolkit to analyze
molecular dynamics trajectories generated by CHARMM, NAMD, Amber,
Gromacs, or LAMMPS.
It allows one to read molecular dynamics trajectories and access the
atomic coordinates through numpy arrays. This provides a flexible and
relatively fast framework for complex analysis tasks. In addition,
CHARMM-style atom selection commands are implemented. Trajectories can
also be manipulated (for instance, fit to a reference structure) and
written out. Time-critical code is written in C for speed.
Help is also available through the mailinglist at
http://groups.google.com/group/mdnalysis-discussion
Please report bugs and feature requests through the issue tracker at
https://github.com/MDAnalysis/mdanalysis/issues
Citation
--------
When using MDAnalysis in published work, please cite
R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
MDAnalysis: A Python package for the rapid analysis of molecular dynamics
simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
Python in Science Conference, pages 98-105, Austin, TX, 2016. SciPy,
doi:10.25080/majora-629e541a-00e
N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and
O. Beckstein. MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics
Simulations. J. Comput. Chem. 32 (2011), 2319--2327, doi:`10.1002/jcc.21787`_
https://www.mdanalysis.org
For citations of included algorithms and sub-modules please see the references_.
.. _`10.1002/jcc.21787`: http://dx.doi.org/10.1002/jcc.21787
.. _references: https://docs.mdanalysis.org/documentation_pages/references.html
Getting started
---------------
Import the package::
>>> import MDAnalysis
(note that not everything in MDAnalysis is imported right away; for
additional functionality you might have to import sub-modules
separately, e.g. for RMS fitting ``import MDAnalysis.analysis.align``.)
Build a "universe" from a topology (PSF, PDB) and a trajectory (DCD, XTC/TRR);
here we are assuming that PSF, DCD, etc contain file names. If you don't have
trajectories at hand you can play with the ones that come with MDAnalysis for
testing (see below under `Examples`_)::
>>> u = MDAnalysis.Universe(PSF, DCD)
Select the C-alpha atoms and store them as a group of atoms::
>>> ca = u.select_atoms('name CA')
>>> len(ca)
214
Calculate the centre of mass of the CA and of all atoms::
>>> ca.center_of_mass()
array([ 0.06873595, -0.04605918, -0.24643682])
>>> u.atoms.center_of_mass()
array([-0.01094035, 0.05727601, -0.12885778])
Calculate the CA end-to-end distance (in angstroem)::
>>> import numpy as np
>>> coord = ca.positions
>>> v = coord[-1] - coord[0] # last Ca minus first one
>>> np.sqrt(np.dot(v, v,))
10.938133
Define a function eedist():
>>> def eedist(atoms):
... coord = atoms.positions
... v = coord[-1] - coord[0]
... return np.sqrt(np.dot(v, v))
...
>>> eedist(ca)
10.938133
and analyze all timesteps *ts* of the trajectory::
>>> for ts in u.trajectory:
... print(eedist(ca))
10.9381
10.8459
10.4141
9.72062
....
See Also
--------
:class:`MDAnalysis.core.universe.Universe` for details
Examples
--------
MDAnalysis comes with a number of real trajectories for testing. You
can also use them to explore the functionality and ensure that
everything is working properly::
from MDAnalysis import *
from MDAnalysis.tests.datafiles import PSF,DCD, PDB,XTC
u_dims_adk = Universe(PSF,DCD)
u_eq_adk = Universe(PDB, XTC)
The PSF and DCD file are a closed-form-to-open-form transition of
Adenylate Kinase (from [Beckstein2009]_) and the PDB+XTC file are ten
frames from a Gromacs simulation of AdK solvated in TIP4P water with
the OPLS/AA force field.
.. [Beckstein2009] O. Beckstein, E.J. Denning, J.R. Perilla and T.B. Woolf,
Zipping and Unzipping of Adenylate Kinase: Atomistic Insights into the
Ensemble of Open <--> Closed Transitions. J Mol Biol 394 (2009), 160--176,
doi:10.1016/j.jmb.2009.09.009
"""
__all__ = ['Universe', 'Writer', 'fetch_mmtf',
'AtomGroup', 'ResidueGroup', 'SegmentGroup']
import logging
import warnings
logger = logging.getLogger("MDAnalysis.__init__")
from .version import __version__
try:
from .authors import __authors__
except ImportError:
logger.info('Could not find authors.py, __authors__ will be empty.')
__authors__ = []
# Registry of Readers, Parsers and Writers known to MDAnalysis
# Metaclass magic fills these as classes are declared.
_READERS = {}
_READER_HINTS = {}
_SINGLEFRAME_WRITERS = {}
_MULTIFRAME_WRITERS = {}
_PARSERS = {}
_PARSER_HINTS = {}
_SELECTION_WRITERS = {}
_CONVERTERS = {}
# Registry of TopologyAttributes
_TOPOLOGY_ATTRS = {} # {attrname: cls}
_TOPOLOGY_TRANSPLANTS = {} # {name: [attrname, method, transplant class]}
_TOPOLOGY_ATTRNAMES = {} # {lower case name w/o _ : name}
# custom exceptions and warnings
from .exceptions import (
SelectionError, NoDataError, ApplicationError, SelectionWarning,
MissingDataWarning, ConversionWarning, FileFormatWarning,
StreamWarning
)
from .lib import log
from .lib.log import start_logging, stop_logging
logging.getLogger("MDAnalysis").addHandler(log.NullHandler())
del logging
# only MDAnalysis DeprecationWarnings are loud by default
warnings.filterwarnings(action='once', category=DeprecationWarning,
module='MDAnalysis')
from . import units
# Bring some often used objects into the current namespace
from .core.universe import Universe, Merge
from .core.groups import AtomGroup, ResidueGroup, SegmentGroup
from .coordinates.core import writer as Writer
# After Universe import
from .coordinates.MMTF import fetch_mmtf
from . import converters
from .due import due, Doi, BibTeX
due.cite(Doi("10.25080/majora-629e541a-00e"),
description="Molecular simulation analysis library",
path="MDAnalysis", cite_module=True)
due.cite(Doi("10.1002/jcc.21787"),
description="Molecular simulation analysis library",
path="MDAnalysis", cite_module=True)
del Doi, BibTeX
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/samples/dataview/CustomRenderer.py
|
Python
|
mit
| 5,439
| 0.005516
|
import sys
import wx
import wx.dataview as dv
#import os; print('PID:'+str(os.getpid())); raw_input("Press enter...")
#----------------------------------------------------------------------
class MyCustomRenderer(dv.DataViewCustomRenderer):
def __init__(self, log, *args, **kw):
dv.DataViewCustomRenderer.__init__(self, *args, **kw)
self.log = log
self.value = None
def SetValue(self, value):
#self.log.write('MyCustomRenderer.SetValue: %s\n' % value)
self.value = value
return True
def GetValue(self):
#self.log.write('MyCustomRenderer.GetValue\n')
return self.value
def GetSize(self):
# Return the size needed to display the value. The renderer
# has a helper function we can use for measuring text that is
# aware of any custom attributes that may have been set for
# this item.
value = self.value if self.value else ""
size = self.GetTextExtent(value)
return size
def Render(self, rect, dc, state):
if state != 0:
self.log.write('Render: %s, %d\n' % (rect, state))
if not state & dv.DATAVIEW_CELL_SELECTED:
# we'll draw a shaded background to see if the rect correctly
# fills the cell
dc.SetBrush(wx.Brush('light grey'))
dc.SetPen(wx.TRANSPARENT_PEN)
rect.Deflate(1, 1)
dc.DrawRoundedRectangle(rect, 2)
# And then finish up with this helper function that draws the
# text for us, dealing with alignment, font and color
# attributes, etc
value = self.value if self.value else ""
self.RenderText(value,
4, # x-offset, to compensate for the rounded rectangles
rect,
dc,
state # wxDataViewCellRenderState flags
)
return True
# The HasEditorCtrl, CreateEditorCtrl and GetValueFromEditorCtrl
# methods need to be implemented if this renderer is going to
# support in-place editing of the cell value, otherwise they can
# be omitted.
def HasEditorCtrl(self):
self.log.write('HasEditorCtrl')
return True
def CreateEditorCtrl(self, parent, labelRect, value):
self.log.write('CreateEditorCtrl: %s' % labelRect)
ctrl = wx.TextCtrl(parent,
value=value,
pos=labelRect.Position,
size=labelRect.Size)
# select the text and put the caret at the end
ctrl.SetInsertionPointEnd()
ctrl.SelectAll()
return ctrl
def GetValueFromEditorCtrl(self, editor):
self.log.write('GetValueFromEditorCtrl: %s' % editor)
value = editor.GetValue()
return True, value
# The LeftClick and Activate methods serve as notifications
# letting you know that the user has either clicked or
# double-clicked on an item. Implementing them in your renderer
# is optional.
def LeftClick(self, pos, cellRect, model, item, col):
self.log.write('LeftClick')
return False
def Activate(self, cellRect, model, item, col):
self.log.write('Activate')
return False
#----------------------------------------------------------------------
# To help focus this sample on the custom renderer, we'll reuse the
# model class from another sample.
from IndexListModel import TestModel
class TestPanel(wx.Panel):
def __init__(self, parent, log, model=None, data=None):
self.log = log
wx.Panel.__init__(self, parent, -1)
# Create a dataview control
self.dvc = dv.DataViewCtrl(self, style=wx.BORDER_THEME
| dv.DV_ROW_LINES
#| dv.DV_HORIZ_RULES
| dv.DV_VERT_RULES
| dv.DV_MULTIPLE
)
# Create an instance of the model
if model is None:
self.model = TestModel(data, log)
else:
self.model = model
self.dvc.AssociateModel(self.model)
# Now we create some columns.
c0 = self.dvc.AppendTextColumn("Id", 0, width=40)
c0.Alignment = wx.ALIGN_RIGHT
c0.MinWidth = 40
# We'll use our custom renderer for these columns
for title, col, width in [ ('Artist', 1, 170),
('Title', 2, 260),
('Genre', 3, 80)]:
renderer = MyCustomRenderer(self.log, mode=dv.DATAVIEW_CELL_EDITABLE)
column = dv.DataViewColumn(title, renderer, col, width=width)
column.Alignment = wx.ALIGN_LEFT
self.dvc.AppendColumn(column)
# Layout
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.dvc, 1, wx.EXPAND)
#----------------------------------------------------------------------
def main():
from data import musicdata
app = wx.App()
frm = wx.Frame(None, title="CustomRenderer sample", size=(700,500))
pnl = TestPanel(frm, sys.stdout, data=musicdata)
frm.Show()
app.MainLoop()
if __name__ == '__main__':
main()
#----------------------------------------------------------------------
|
westernx/sgfs
|
sgfs/actions/create_structure.py
|
Python
|
bsd-3-clause
| 1,165
| 0.009442
|
from sgfs import SGFS
from sgactions.utils import notify, progress, alert
def run_create(**kwargs):
_run(False, **kwargs)
def run_preview(**kwargs):
_run(True, **kwargs)
def _run(dry_run, entity_type, selected_ids, **kwargs):
title='Preview Folders' if dry_run else 'Creating Folders'
verb = 'previewing' if dry_run else 'creating'
progress(message=('Previewing' if dry_run else 'Creating') + ' folders for %s %ss; please wait...' % (len(selected_ids), entity_type))
sgfs = SGFS()
entities = sgfs.session.merge([dict(type=entity_type, id=id_) for id_ in selected_ids])
heirarchy = sgfs.session.fetch_heirarchy(entities)
sgfs.session.fetch_core(heirarchy)
command_log = sgfs.create_structure(entities, dry_run=dry_run)
if command_log:
details = '\n'.join(command_log)
if dry_run:
alert(title='Folder Preview', message=details)
else:
notify(
message='Created folders for %s %ss.' % (len(selected_ids), entity_type),
details=details,
)
else:
notify(message='Folders are already up to date.')
|
boweeb/nhweb
|
config/settings/common.py
|
Python
|
bsd-3-clause
| 9,076
| 0.001102
|
# -*- coding: utf-8 -*-
"""
Django settings for nhweb project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('nhweb')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'nhweb.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'nhweb.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Jesse Butcher""", 'boweeb@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///nhweb"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuratio
|
gregbillock/Spectrum-Access-System
|
src/prop/model.py
|
Python
|
apache-2.0
| 6,985
| 0.013314
|
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module implements the combination of the eHata and ITM models
# according to the requirements developed in the Winnforum WG1 Propagation
# task group.
import math
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ehata import ehata
from itm import pytm
from geo import tropoClim
from geo import refractivity
from geo import ned_indexer
from geo import nlcd_indexer
from geo import land_use
from geo import vincenty
# f in MHz; d and h1/h2 all in meters
def FreeSpacePathLoss(f, d, h1, h2):
r = math.sqrt(d*d + (h1-h2)*(h1-h2))
return 20*math.log10(r) + 20*math.log10(f) - 27.56
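# Illustrative check (added comment, not in the original module): with
# f = 3625 MHz, d = 1000 m and equal antenna heights the slant range r equals
# d, so the loss is 20*log10(1000) + 20*log10(3625) - 27.56, about 103.6 dB,
# which matches the standard free-space path loss formula for distance in
# metres and frequency in MHz.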
class PropagationLossModel:
def __init__(self, itu_dir, ned_dir, nlcd_dir):
self.climIndx = tropoClim.ClimateIndexer(itu_dir)
self.refractivityIndx = refractivity.RefractivityIndexer(itu_dir)
self.nedIndx = ned_indexer.NedIndexer(ned_dir)
self.nlcdIndx = nlcd_indexer.NlcdIndexer(nlcd_dir)
# Calculate the ITM adjusted propagation loss given the
# assumptions on the ITM model.
def ITM_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, reliability):
dielectric_constant = 25.0 # good ground
soil_conductivity = 0.02 # good ground
polarization = 1
confidence = 0.5
# get surface refractivity and radio climate from path midpoint
dist, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
lat_c, lng_c, alpha2 = vincenty.to_dist_bear_vincenty(lat1, lng1, dist/2.0, bearing)
print 'Midpoint = %f, %f' % (lat_c, lng_c)
radio_climate = self.climIndx.TropoClim(lat_c, lng_c)
refractivity = self.refractivityIndx.Refractivity(lat_c, lng_c)
print 'Using climate %d' % radio_climate
print 'Using refractivity %f' % refractivity
print 'Using freq %f' % f
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
print profile[0], profile[1]
#print profile
print 'total distance is ', profile[0]*profile[1]
loss = pytm.point_to_point(profile, h1, h2,
dielectric_constant,
soil_conductivity,
refractivity,
f,
radio_climate,
polarization,
confidence,
reliability)
print 'ITM P2P is ', loss
return loss
# Adjusted propagation loss according to the adjustments in R2-SGN-04
# distance d, heights h1, h2 all in meters
# frequency f in MHz
def ExtendedHata_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, land_cat):
d, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
d = d*1000.0
print 'EHata distance=', d
if d <= 100.0:
# return FSPL
print 'FSPL'
return FreeSpacePathLoss(f, d, h1, h2)
if d > 100.0 and d <= 1000.0:
print 'interp FSPL and ehata'
# interpolate FSPL and ehata
fspl_loss = FreeSpacePathLoss(f, 100.0, h1, h2)
print ' fspl_loss=', fspl_loss
ehata_loss, abm = ehata.ExtendedHata_MedianBasicPropLoss(f, 1.0, h1, h2, land_cat)
print ' ehata_loss=', ehata_loss
print ' ( abm=', abm
return fspl_loss + (1.0 + math.log10(d/1000.0))*(ehata_loss - fspl_loss)
if d > 1000.0 and d < 80000.0:
# return eHata value without adjustment.
print 'EHata only for d=%f' % d
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
return ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile)
if d >= 80000.0:
print 'EHata for distance %f > 80km' % d
# Derive profile_80km
lat_80, lng_80, heading = vincenty.to_dist_bear_vincenty(lat1, lng1, 80.0, bearing)
print '80km point is %f %f' % (lat_80, lng_80)
profile_80km = self.nedIndx.Profile(lat1, lng1, lat_80, lng_80)
# Find J adjustment...
ehata_loss = ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile_80km)
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat_80, lng_80, h2, f, 0.5)
J = ehata_loss - itm_loss
print 'Got ehata=%f itm=%f J=%f' % (ehata_loss, itm_loss, J)
if J < 0.0:
J = 0.0
return self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5) + J
def LandClassification(self, lat, lng):
code = self.nlcdIndx.NlcdCode(lat, lng)
return self.nlcdIndx.NlcdLandCategory(code)
# This is the oracle for propagation loss from point 1 to point 2 at frequency f (Mhz).
def PropagationLoss(self, f, lat1, lng1, h1, lat2, lng2, h2, land_cat=''):
if land_cat == '':
code = self.nlcdIndx.NlcdCode(lat2, lng2)
if code == 11:
code = self.nlcdIndx.NlcdCode(lat1, lng1)
land_cat = land_use.NlcdLandCategory(code)
print 'Using land_cat =', land_cat
# Calculate effective heights of tx and rx:
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
h1eff, h2eff = EffectiveHeights(h1, h2, profile)
if land_cat == 'RURAL' or h1eff >= 200: # Only h1eff (CBSD effective height) counts
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
print 'Returning itm_loss for rural > 200: ', itm_loss
return itm_loss
else:
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
ehata_loss = self.ExtendedHata_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, land_cat)
if ehata_loss > itm_loss:
return ehata_loss
return itm_loss
# Run directly, takes args of "lat1, lng1, h1, lat2, lng2, h2, f" and prints the
# (median) propagation loss in dB.
if __name__ == '__main__':
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
ituDir = os.path.join(os.path.join(rootDir, 'data'), 'itu')
nedDir = os.path.join(os.path.join(rootDir, 'data'), 'ned')
nlcdDir = os.path.join(os.path.join(rootDir, 'data'), 'nlcd')
prop = PropagationLossModel(ituDir, nedDir, nlcdDir)
loss = prop.PropagationLoss(float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]),
float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]),
float(sys.argv[7]))
print 'Propagation Loss = ', loss, ' dB'
|
pytest-dev/pytest-bdd
|
tests/feature/test_outline.py
|
Python
|
mit
| 5,878
| 0.00051
|
"""Scenario Outline tests."""
import textwrap
from pytest_bdd.utils import collect_dumped_objects
from tests.utils import assert_outcomes
STEPS = """\
from pytest_bdd import parsers, given, when, then
from pytest_bdd.utils import dump_obj
@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers")
def given_cucumbers(start):
assert isinstance(start, int)
dump_obj(start)
return {"start": start}
@when(parsers.parse("I eat {eat:g} cucumbers"))
def eat_cucumbers(cucumbers, eat):
assert isinstance(eat, float)
dump_obj(eat)
cucumbers["eat"] = eat
@then(parsers.parse("I should have {left} cucumbers"))
def should_have_left_cucumbers(cucumbers, left):
assert isinstance(left, str)
dump_obj(left)
assert cucumbers["start"] - cucumbers["eat"] == int(left)
"""
def test_outlined(testdir):
testdir.makefile(
".feature",
outline=textwrap.dedent(
"""\
Feature: Outline
Scenario Outline: Outlined given, when, thens
Given there are <start> cucumbers
When I eat <eat> cucumbers
Then I should have <left> cucumbers
Examples:
| start | eat | left |
| 12 | 5 | 7 | # a comment
| 5 | 4 | 1 |
"""
),
)
testdir.makeconftest(textwrap.dedent(STEPS))
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import scenario
@scenario(
"outline.feature",
"Outlined given, when, thens",
)
def test_outline(request):
pass
"""
)
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=2)
# fmt: off
assert collect_dumped_objects(result) == [
12, 5.0, "7",
5, 4.0, "1",
]
# fmt: on
def test_unused_params(testdir):
"""Test parametrized scenario when the test function lacks parameters."""
testdir.makefile(
".feature",
outline=textwrap.dedent(
"""\
Feature: Outline
Scenario Outline: Outlined with unused params
Given there are <start> cucumbers
When I eat <eat> cucumbers
# And commented out step with <unused_param>
Then I should have <left> cucumbers
Examples:
| start | eat | left | unused_param |
| 12 | 5 | 7 | value |
"""
),
)
testdir.makeconftest(textwrap.dedent(STEPS))
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import scenario
@scenario("outline.feature", "Outlined with unused params")
def test_outline(request):
pass
"""
)
)
result = testdir.runpytest()
assert_outcomes(result, passed=1)
def test_outlined_with_other_fixtures(testdir):
"""Test outlined scenario also using other parametrized fixture."""
testdir.makefile(
".feature",
outline=textwrap.dedent(
"""\
Feature: Outline
Scenario Outline: Outlined given, when, thens
Given there are <start> cucumbers
When I eat <eat> cucumbers
Then I should have <left> cucumbers
Examples:
| start | eat | left |
| 12 | 5 | 7 |
| 5 | 4 | 1 |
"""
),
)
testdir.makeconftest(textwrap.dedent(STEPS))
testdir.makepyfile(
textwrap.dedent(
"""\
import pytest
from pytest_bdd import scenario
@pytest.fixture(params=[1, 2, 3])
def other_fixture(request):
return request.param
@scenario(
"outline.feature",
"Outlined given, when, thens",
)
def test_outline(other_fixture):
pass
"""
)
)
result = testdir.runpytest()
result.assert_outcomes(passed=6)
def test_outline_with_escaped_pipes(testdir):
"""Test parametrized feature example table with escaped pipe characters in input."""
testdir.makefile(
".feature",
outline=textwrap.dedent(
r"""\
Feature: Outline With Special characters
Scenario Outline: Outline with escaped pipe character
# Just print the string so that we can assert later what it was by reading the output
Given I print the <string>
Examples:
| string |
| bork |
| \|bork |
| bork \| |
| bork\|\|bork |
| \| |
| bork \\ |
| bork \\\| |
"""
),
)
testdir.makepyfile(
textwrap.dedent(
"""\
from pytest_bdd import scenario, given, parsers
from pytest_bdd.utils import dump_obj
@scenario("outline.feature", "Outline with escaped pipe character")
def test_outline_with_escaped_pipe_character(request):
pass
@given(parsers.parse("I print the {string}"))
def i_print_the_string(string):
dump_obj(string)
"""
)
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=7)
assert collect_dumped_objects(result) == [
r"bork",
r"|bork",
r"bork |",
r"bork||bork",
r"|",
r"bork \\",
r"bork \\|",
]
|
benpicco/mate-deskbar-applet
|
deskbar/handlers/calculator.py
|
Python
|
gpl-2.0
| 8,080
| 0.017946
|
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Callum McKenzie <callum@spooky-possum.org> - Original author
# Michael Hofmann <mh21@piware.de> - compatibility changes for deskbar 2.20
# Johannes Buchner <buchner.johannes@gmx.at> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#
from __future__ import division
from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _
import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module
import logging
import math
import re
LOGGER = logging.getLogger(__name__)
HANDLERS = ["CalculatorModule"]
def bin (n):
"""A local binary equivalent of the hex and oct builtins."""
if (n == 0):
return "0b0"
s = ""
if (n < 0):
while n != -1:
s = str (n & 1) + s
n >>= 1
return "0b" + "...111" + s
else:
while n != 0:
s = str (n & 1) + s
n >>= 1
return "0b" + s
# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the restricted eval (?).
def lenient_hex (c):
try:
return hex (c)
except TypeError:
return hex (int (c))
def lenient_oct (c):
try:
return oct (c)
except TypeError:
return oct (int (c))
def lenient_bin (c):
try:
return bin (c)
except TypeError:
return bin (int (c))
class CalculatorAction (CopyToClipboardAction):
def __init__ (self, text, answer):
CopyToClipboardAction.__init__ (self, answer, answer)
self.text = text
def get_verb(self):
return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")
def get_name(self, text = None):
"""Because the text variable for history entries contains the text
typed for the history search (and not the text of the original action),
we store the original text separately."""
result = CopyToClipboardAction.get_name (self, text)
result["origtext"] = self.text
return result
def get_tooltip(self, text=None):
return self._name
class CalculatorMatch (deskbar.interfaces.Match):
def __init__ (self, text, answer, **kwargs):
deskbar.interfaces.Match.__init__ (self, name = text,
icon = "gtk-add", category = "calculator", **kwargs)
self.answer = str (answer)
self.add_action (CalculatorAction (text, self.answer))
def get_hash (self):
return self.answer
class CalculatorModule (deskbar.interfaces.Module):
INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
"name": _("Calculator"),
"description": _("Calculate simple equations"),
"version" : VERSION,
"categories" : { "calculator" : { "name" : _("Calculator") }}}
def __init__ (self):
deskbar.interfaces.Module.__init__ (self)
self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
self.binre = re.compile ("0[bB][01_]*[01]")
def _number_parser (self, match, base):
"""A generic number parser, regardless of base. It also ignores the
'_' character so it can be used as a separator. Note how we skip
the first two characters since we assume it is something like '0x'
or '0b' and identifies the base."""
table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4,
'5' : 5, '6' : 6, '7' : 7, '8' : 8, '9' : 9,
'a' : 10, 'b' : 11, 'c' : 12, 'd' : 13,
'e' : 14, 'f' : 15 }
d = 0
for c in match.group()[2:]:
if c != "_":
d = d * base + table[c]
return str (d)
def _binsub (self, match):
"""Because python doesn't handle binary literals, we parse it
ourselves and replace it with a decimal representation."""
return self._number_parser (match, 2)
def _hexsub (self, match):
"""Parse the hex literal ourselves. We could let python do it, but
since we have a generic parser we use that instead."""
return self._number_parser (match, 16)
def run_query (self, query):
"""We evaluate the equation by first replacing hex and binary literals
with their decimal representation. (We need to check hex, so we can
distinguish 0x10b1 as a hex number, not 0x1 followed by 0b1.) We
severely restrict the eval environment. Any errors are ignored."""
restricted_dictionary = { "__builtins__" : None, "abs" : abs,
"acos" : math.acos, "asin" : math.asin,
"atan" : math.atan, "atan2" : math.atan2,
"bin" : lenient_bin,"ceil" : math.ceil,
"cos" : math.cos, "cosh" : math.cosh,
"degrees" : math.degrees,
"exp" : math.exp, "floor" : math.floor,
"hex" : lenient_hex, "int" : int,
"log" : math.log, "pow" : math.pow,
"log10" : math.log10, "oct" : lenient_oct,
"pi" : math.pi, "radians" : math.radians,
"round": round, "sin" : math.sin,
"sinh" : math.sinh, "sqrt" : math.sqrt,
"tan" : math.tan, "tanh" : math.tanh}
try:
scrubbedquery = query.lower()
scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery)
scrubbedquery = self.binre.sub (self._binsub, scrubbedquery)
for (c1, c2) in (("[", "("),
|
("{", "("), ("]", ")"), ("}", ")")):
scrubbedquery = scrubbedquery.replace (c1, c2)
answer = eval (scrubbedquery, restricted_dictionary)
# Try and avoid echoing back simple numbers. Note that this
# doesn't work well for floating point, e.g. '3.' behaves badly.
if str (answer) == query:
return None
# We need this check because the eval can return function objects
# when we are halfway through typing the expression.
if isinstance (answer, (float, int, long, str)):
return answer
else:
return None
except Exception, e:
LOGGER.debug (str(e))
return None
def query (self, query):
answer = self.run_query(query)
if answer != None:
result = [CalculatorMatch (query, answer)]
self._emit_query_ready (query, result)
return answer
else:
return []
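# Illustrative sketch (added, not part of the original handler): after the hex
# and binary substitutions above, a mixed-literal query is evaluated in the
# restricted environment, e.g.
#
#   module = CalculatorModule()
#   module.run_query("0xff + 0b10") # -> 257
#   module.run_query("sqrt(2) * sin(pi / 4)") # -> approximately 1.0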
|
OddBloke/moore
|
matches/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 12,520
| 0.006709
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Card'
db.create_table('matches_card', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateField')()),
('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.Promotion'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=127, null=True, blank=True)),
))
db.send_create_signal('matches', ['Card'])
# Adding model 'Role'
db.create_table('matches_role', (
('description', self.gf('django.db.models.fields.CharField')(max_length=255, primary_key=True)),
))
db.send_create_signal('matches', ['Role'])
# Adding model 'Participation'
db.create_table('matches_participation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.CardEvent'])),
('participant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wrestlers.WrestlingEntity'])),
('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.Role'])),
))
db.send_create_signal('matches', ['Participation'])
# Adding model 'EventType'
db.create_table('matches_eventtype', (
('description', self.gf('django.db.models.fields.CharField')(max_length=127, primary_key=True)),
))
db.send_create_signal('matches', ['EventType'])
# Adding model 'CardEvent'
db.create_table('matches_cardevent', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reviewed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('reviewed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('order', self.gf('django.db.models.fields.IntegerField')()),
('card', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.Card'])),
('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.EventType'])),
))
db.send_create_signal('matches', ['CardEvent'])
# Adding model 'MatchTypeAspect'
db.create_table('matches_matchtypeaspect', (
('description', self.gf('django.db.models.fields.CharField')(max_length=127, primary_key=True)),
))
db.send_create_signal('matches', ['MatchTypeAspect'])
# Adding model 'MatchType'
db.create_table('matches_matchtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=127)),
))
db.send_create_signal('matches', ['MatchType'])
# Adding M2M table for field aspects on 'MatchType'
db.create_table('matches_matchtype_aspects', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('matchtype', models.ForeignKey(orm['matches.matchtype'], null=False)),
('matchtypeaspect', models.ForeignKey(orm['matches.matchtypeaspect'], null=False))
))
db.create_unique('matches_matchtype_aspects', ['matchtype_id', 'matchtypeaspect_id'])
# Adding model 'Match'
db.create_table('matches_match', (
('cardevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['matches.CardEvent'], unique=True, primary_key=True)),
('match_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.MatchType'])),
('winner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='won_matches', null=True, to=orm['wrestlers.WrestlingEntity'])),
))
db.send_create_signal('matches', ['Match'])
def backwards(self, orm):
# Deleting model 'Card'
db.delete_table('matches_card')
# Deleting model 'Role'
db.delete_table('matches_role')
# Deleting model 'Participation'
db.delete_table('matches_participation')
# Deleting model 'EventType'
db.delete_table('matches_eventtype')
# Deleting model 'CardEvent'
db.delete_table('matches_cardevent')
# Deleting model 'MatchTypeAspect'
db.delete_table('matches_matchtypeaspect')
# Deleting model 'MatchType'
db.delete_table('matches_matchtype')
# Removing M2M table for field aspects on 'MatchType'
db.delete_table('matches_matchtype_aspects')
# Deleting model 'Match'
db.delete_table('matches_match')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharFiel
|
lesserwhirls/scipy-cwt
|
scipy/sparse/construct.py
|
Python
|
bsd-3-clause
| 12,885
| 0.006054
|
"""Functions to construct sparse matrices
"""
__docformat__ = "restructuredtext en"
__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand']
from warnings import warn
import numpy as np
from sputils import upcast
from csr import csr_matrix
from csc import csc_matrix
from bsr import bsr_matrix
from coo import coo_matrix
from lil import lil_matrix
from dia import dia_matrix
def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : format of the result (e.g. "csr")
By default (format=None) an appropriate sparse matrix
format is returned. This choice is subject to change.
See Also
--------
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
>>> diags = array([0,-1,2])
>>> spdiags(data, diags, 4, 4).todense()
matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format)
def identity(n, dtype='d', format=None):
"""Identity matrix in sparse format
Returns an identity matrix with shape (n,n) using a given
sparse format and dtype.
Parameters
----------
n : integer
Shape of the identity matrix.
dtype :
Data type of the matrix
format : string
Sparse format of the result, e.g. format="csr", etc.
Examples
--------
>>> identity(3).todense()
matrix([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> identity(3, dtype='int8', format='dia')
<3x3 sparse matrix of type '<type 'numpy.int8'>'
with 3 stored elements (1 diagonals) in DIAgonal format>
"""
if format in ['csr','csc']:
indptr = np.arange(n+1, dtype=np.intc)
indices = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
cls = eval('%s_matrix' % format)
return cls((data,indices,indptr),(n,n))
elif format == 'coo':
row = np.arange(n, dtype=np.intc)
col = np.arange(n, dtype=np.intc)
data = np.ones(n, dtype=dtype)
return coo_matrix((data,(row,col)),(n,n))
elif format == 'dia':
data = np.ones(n, dtype=dtype)
diags = [0]
return dia_matrix((data,diags), shape=(n,n))
else:
return identity(n, dtype=dtype, format='csr').asformat(format)
def eye(m, n, k=0, dtype='d', format=None):
"""eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
"""
m,n = int(m),int(n)
diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> A = csr_matrix(array([[0,2],[5,0]]))
>>> B = csr_matrix(array([[1,2],[3,4]]))
>>> kron(A,B).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> kron(A,[[1,2],[3,4]]).todense()
matrix([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
#B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix( output_shape )
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
#use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix( output_shape )
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
"""kronecker sum of sparse matrices A and B
Kronecker sum of two sparse matrices is a sum of two Kronecker
products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
and B has shape (n,n) and I_m and I_n are identity matrices
of shape (m,m) and (n,n) respectively.
Parameters
----------
A
square matrix
B
square matrix
format : string
format of the result (e.g. "csr")
Returns
-------
kronecker sum in a sparse matrix format
Examples
--------
"""
A = coo_matrix(A)
B = coo_matrix(B)
if A.shape[0] != A.shape[1]:
raise ValueError('A is not square')
if B.shape[0] != B.shape[1]:
raise ValueError('B is not square')
dtype = upcast(A.dtype, B.dtype)
L = kron(identity(B.shape[0],dtype=dtype), A, format=format)
R = kron(B, identity(A.shape[0],dtype=dtype), format=format)
return (L+R).asformat(format) #since L + R is not always same format
def hstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices horizontally (column wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
vstack : stack sparse matrices vertically (row wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5],[6]])
>>> hstack( [A,B] ).todense()
matrix([[1, 2, 5],
[3, 4, 6]])
"""
return bmat([blocks], format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
"""
Stack sparse matrices vertically (row wise)
Parameters
----------
blocks
sequence of sparse matrices with compatible shapes
format : string
sparse format of the result (e.g. "csr")
by default an appropriate sparse matrix format is returned.
This choice is subject to change.
See Also
--------
hstack : stack sparse matrices horizontally (column wise)
Examples
--------
>>> from scipy.sparse import coo_matrix, vstack
>>> A = coo_matrix([[1,2],[3,4]])
>>> B = coo_matrix([[5,6]])
>>> vstack( [A,B] ).todense()
matrix([[1, 2],
[3, 4],
[5, 6]])
"""
return bmat([ [b] for b in blocks ], format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
"""
Build a sparse matrix from sparse sub-blocks
Parameters
----------
blocks
grid of sparse matrices with compatible shapes
an entry of None implies an all-zero matrix
form
|
12019/pyscard
|
smartcard/wx/APDUHexValidator.py
|
Python
|
lgpl-2.1
| 2,088
| 0
|
# -*- coding: iso-8859-15 -*-
"""
A wxValidator that matches APDU in hexadecimal such as:
A4 A0 00 00 02
A4A0000002
__author__ = "http://www.gemalto.com"
Copyright 2001-2010 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import re
import string
import wx
# a regexp to match ATRs and APDUs
hexbyte = "[0-9a-fA-F]{1,2}"
apduregexp = re.compile("((%s)[ ]*)*" % hexbyte)
class APDUHexValidator(wx.PyValidator):
'''A wxValidator that matches APDU in hexadecimal such as:
A4 A0 00 00 02
A4A0000002'''
def __init__(self):
wx.PyValidator.__init__(self)
self.Bind(wx.EVT_CHAR, self.OnChar)
def Clone(self):
return APDUHexValidator()
def Validate(self, win):
tc = self.GetWindow()
        value = tc.GetValue()
if not apduregexp.match(value):
return False
return True
def OnChar(self, event):
key = event.GetKeyCode()
if wx.WXK_SPACE == key or chr(key) in string.hexdigits:
value = event.GetEventObject().GetValue() + chr(key)
if apduregexp.match(value):
event.Skip()
return
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
event.Skip()
return
if not wx.Validator_IsSilent():
wx.Bell()
return
|
openstack/ironic-inspector
|
ironic_inspector/plugins/local_link_connection.py
|
Python
|
apache-2.0
| 6,493
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic LLDP Processing Hook"""
import binascii
from construct import core
import netaddr
from openstack import exceptions
from oslo_config import cfg
from oslo_utils import netutils
from ironic_inspector.common import lldp_parsers
from ironic_inspector.common import lldp_tlvs as tlv
from ironic_inspector.plugins import base
from ironic_inspector import utils
LOG = utils.getProcessingLogger(__name__)
CONF = cfg.CONF
PORT_ID_ITEM_NAME = "port_id"
SWITCH_ID_ITEM_NAME = "switch_id"
LLDP_PROC_DATA_MAPPING =\
{lldp_parsers.LLDP_CHASSIS_ID_NM: SWITCH_ID_ITEM_NAME,
lldp_parsers.LLDP_PORT_ID_NM: PORT_ID_ITEM_NAME}
class GenericLocalLinkConnectionHook(base.ProcessingHook):
"""Process mandatory LLDP packet fields
Non-vendor specific LLDP packet fields processed for each NIC found for a
    baremetal node, port ID and chassis ID. These fields, if found and valid,
    will be saved into the local link connection info port id and switch id
fields on the Ironic port that represents that NIC.
"""
def _get_local_link_patch(self, tlv_type, tlv_value, port, node_info):
try:
data = bytearray(binascii.unhexlify(tlv_value))
except TypeError:
            LOG.warning("TLV value for TLV type %d not in correct "
                        "format, ensure TLV value is in "
                        "hexadecimal format when sent to "
                        "inspector", tlv_type, node_info=node_info)
return
item = value = None
if tlv_type == tlv.LLDP_TLV_PORT_ID:
try:
port_id = tlv.PortId.parse(data)
except (core.MappingError, netaddr.AddrFormatError) as e:
LOG.warning("TLV parse error for Port ID: %s", e,
node_info=node_info)
return
item = PORT_ID_ITEM_NAME
value = port_id.value.value if port_id.value else None
elif tlv_type == tlv.LLDP_TLV_CHASSIS_ID:
try:
chassis_id = tlv.ChassisId.parse(data)
except (core.MappingError, netaddr.AddrFormatError) as e:
LOG.warning("TLV parse error for Chassis ID: %s", e,
node_info=node_info)
return
# Only accept mac address for chassis ID
if 'mac_address' in chassis_id.subtype:
item = SWITCH_ID_ITEM_NAME
value = chassis_id.value.value
if item and value:
if (not CONF.processing.overwrite_existing and
item in port.local_link_connection):
return
return {'op': 'add',
'path': '/local_link_connection/%s' % item,
'value': value}
def _get_lldp_processed_patch(self, name, item, lldp_proc_data, port,
node_info):
if 'lldp_processed' not in lldp_proc_data:
return
value = lldp_proc_data['lldp_processed'].get(name)
if value:
# Only accept mac address for chassis ID
if (item == SWITCH_ID_ITEM_NAME and
not netutils.is_valid_mac(value)):
LOG.info("Skipping switch_id since it's not a MAC: %s", value,
node_info=node_info)
return
if (not CONF.processing.overwrite_existing and
item in port.local_link_connection):
return
return {'op': 'add',
'path': '/local_link_connection/%s' % item,
'value': value}
def before_update(self, introspection_data, node_info, **kwargs):
"""Process LLDP data and patch Ironic port local link connection"""
inventory = utils.get_inventory(introspection_data)
ironic_ports = node_info.ports()
for iface in inventory['interfaces']:
if iface['name'] not in introspection_data['all_interfaces']:
continue
mac_address = iface['mac_address']
port = ironic_ports.get(mac_address)
if not port:
LOG.debug("Skipping LLC processing for interface %s, matching "
"port not found in Ironic.", mac_address,
node_info=node_info, data=introspection_data)
continue
lldp_data = iface.get('lldp')
if lldp_data is None:
LOG.warning("No LLDP Data found for interface %s",
mac_address, node_info=node_info,
data=introspection_data)
continue
patches = []
# First check if lldp data was already processed by lldp_basic
# plugin which stores data in 'all_interfaces'
proc_data = introspection_data['all_interfaces'][iface['name']]
for name, item in LLDP_PROC_DATA_MAPPING.items():
patch = self._get_lldp_processed_patch(name, item,
proc_data, port,
node_info)
if patch is not None:
patches.append(patch)
# If no processed lldp data was available then parse raw lldp data
if not patches:
for tlv_type, tlv_value in lldp_data:
patch = self._get_local_link_patch(tlv_type, tlv_value,
port, node_info)
if patch is not None:
patches.append(patch)
try:
node_info.patch_port(port, patches)
except exceptions.BadRequestException as e:
LOG.warning("Failed to update port %(uuid)s: %(error)s",
{'uuid': port.id, 'error': e},
node_info=node_info)
|
miquelcampos/GEAR_mc
|
gear/xsi/curve.py
|
Python
|
lgpl-3.0
| 14,878
| 0.004772
|
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear.xsi.curve
# @author Jeremie Passerin
#
# @brief create, merge, split curves...
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import xsi, c, XSIMath, XSIFactory
import gear.xsi.utils as uti
import gear.xsi.transform as tra
##########################################################
# DRAW
##########################################################
# ========================================================
## Create a curve attached to given centers. One point per center.\n
# To do so we use a cluster center operator per point. We could use an envelope (the method to do so is in the code), but for a reason I can't remember it was better to use cluster centers.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param centers List of X3DObject or Collection - Object that will drive the curve.
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @return NurbCurve - The newly created curve.
def addCnsCurve(parent, name, centers, close=False, degree=1):
# convert collections to list
centers = [center for center in centers]
if degree == 3:
if len(centers) == 2:
centers.insert(0, centers[0])
centers.append(centers[-1])
elif len(centers) == 3:
centers.append(centers[-1])
points = []
for center in centers:
points.append(center.Kinematics.Global.Transform.PosX)
points.append(center.Kinematics.Global.Transform.PosY)
points.append(center.Kinematics.Global.Transform.PosZ)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
crv_geo = curve.ActivePrimitive.Geometry
for i, center in enumerate(centers):
cluster = crv_geo.AddCluster( c.siVertexCluster, "center_%s"%i, [i] )
xsi.ApplyOp( "ClusterCenter", cluster.FullName+";"+center.FullName, 0, 0, None, 2)
# Here is a method to replace the cluster centers with an envelope
# envelopeop = curve.ApplyEnvelope(cCenters)
#
# aWeights = []
# for i in range(cCenters.Count):
# for j in range(cCenters.Count):
# if i == j:
# aWeights.append(100)
# else:
# aWeights.append(0)
#
# envelopeop.Weights.Array = aWeights
return curve
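# Illustrative call (hypothetical scene objects): given a list of objects in
# 'centers', a linear constrained curve could be created with
#   crv = addCnsCurve(xsi.ActiveSceneRoot, "cns_crv", centers, close=False, degree=1)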
# ========================================================
## Create a NurbsCurve with a single subcurve.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions of the curve in a one dimension array [point0X, point0Y, point0Z, 1, point1X, point1Y, point1Z, 1, ...].
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the Null (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve(parent, name, points, close=False, degree=1, t=XSIMath.CreateTransform(), color=[0,0,0]):
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
# ========================================================
## Create a NurbsCurve with multiple subcurve.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions of the curve in a one dimension array [point0X, point0Y, point0Z, 1, point1X, point1Y, point1Z, 1, ...].
# @param ncp List of Double - See XSI SDK Docv for AddNurbsCurveList2.
# @param kn List of Double - See XSI SDK Docv for AddNurbsCurveList2.
# @param nkn List of Double - See XSI SDK Docv for AddNurbsCurveList2.
# @param close List of Boolean - True to close the curve.
# @param degree List of Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the Null (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve2(parent, name, points, ncp=[], kn=[], nkn=[], close=[], degree=[], t=XSIMath.CreateTransform(), color=[0,0,0]):
pointCount = len(ncp)
aPar = [c.siNonUniformParameterization for i in range(pointCount)]
curve = parent.AddNurbsCurveList2(pointCount, points, ncp, kn, nkn, close, degree, aPar, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
# ========================================================
## Create a NurbsCurve with a single subcurve from a list of position.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param positions List of SIVector3 - positions of the curve points.
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param knotsPara - knots parametrization in the curve
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the object (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurveFromPos(parent, name, positions, close=False, degree=1, knotsPara=c.siNonUniformParameterization, t=XSIMath.CreateTransform(), color=[0,0,0]):
points = []
for v in positions:
points.append(v.X)
points.append(v.Y)
points.append(v.Z)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, knotsPara, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
##########################################################
# SUBCURVES
##########################################################
# Merge Curves ===========================================
## Merge given curve in one unique curve.
# @param curve List of NurbsCurve - The curves to merge.
# @return NurbsCurve.
def mergeCurves(curves):
points = []
ncp = []
kn = []
nkn = []
closed = []
degree = []
for curve in curves:
curve_matrix = curve.Kinematics.Global.Transform.Matrix4
for nurbscrv in curve.ActivePrimitive.Geometry.Curves:
ncp.append(nurbscrv.ControlPoints.Count)
kn.extend(nurbscrv.Knots.Array)
nkn.append(len(nurbscrv.Knots.Array))
closed.append(isClosed(nurbscrv))
degree.append(nurbscrv.Degree)
for point in nurbscrv.ControlPoints:
point_pos = point.Position
point_pos.MulByMatrix4InPlace(curve_matrix)
points.extend([point_pos.X, point_pos.Y,point_pos.Z, 1])
if len(ncp) > 1:
curve = addCurve2(xsi.ActiveSceneRoot, "curve", points, ncp, kn, nkn, closed, degree)
else:
curve = addCurve(xsi.ActiveSceneRoot, "curve", points, closed[0], degree[0])
return curve
# Split Curves ===========================================
## Split the sub curve of given curve.
# @param curve NurbsCurve - The curves to split.
# @return List of Nu
|
sim1234/Odtwarzacz
|
prog.py
|
Python
|
gpl-3.0
| 4,653
| 0.012261
|
## Copyright (c) 2012 Szymon Zmilczak
##
##
## This file is part of Odtwarzacz.
##
## Odtwarzacz is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## Odtwarzacz is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Odtwarzacz; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx, os
from player import MusicPlayer
from explorator import LfileExplorer
from library import QueueUI
from timer import TimeKeeper, TimePicker
def config(filename):
f = open(filename)
c = {}
for l in f:
t = l.split("=")
if len(t[1]) > 0 and t[1][-1] == "\n":
t[1] = t[1][:-1]
c[t[0]] = t[1]
return c
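# Example config.txt contents (hypothetical values; 'paths' is a comma-separated
# list of directories and 'file_pattern' is a regexp used to filter files):
#   paths=D:\Muzyka,E:\Muzyka
#   file_pattern=.*\.(mp3|ogg)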
#MyFilePattern = r"\A.*\.(((m|M)(p|P)3)|((o|O)(g|G)(g|G)))\Z" #".*\.(((m|M)(p|P)3)|((m|M)(p|P)2)|((w|W)(m|M)(a|A))|((a|A)(c|C)3)|((o|O)(g|G)(g|G))|((a|A)(c|C)(c|C)))" #".*\.((mp3|mp2|wma|ac3|ogg|acc)"
class myframe(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title=u'Odtwarzacz', size = (800,600))
self.SetBackgroundColour((220,220,255))
self.SetMinSize((400, 300))
c = config("config.txt")
self.CreateStatusBar()
filemenu = wx.Menu()
menuAbout = filemenu.Append(wx.ID_ABOUT, u"O programie",u" Informacje o tym programie")
        menuExit = filemenu.Append(wx.ID_EXIT, u"Wyjście", u" Wychodzi z programu")
menuBar = wx.MenuBar()
menuBar.Append(filemenu, u"&Plik")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.onAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.onExit, menuExit)
self.Bind(wx.EVT_CLOSE, self.onExit)
#startPath = "D:\\Gas n' Metal"
sizer2 = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer3 = wx.BoxSizer(wx.VERTICAL)
self.te = wx.StaticText(self, -1, u"Biblioteka:", (0, 0))
f = self.te.GetFont()
f.SetPixelSize((10,25))
self.te.SetFont(f)
sizer3.Add(self.te, 0, wx.BOTTOM, 0)
self.d = LfileExplorer(self, (0,0), (500,600), c["paths"].split(","), c["file_pattern"], 1, self.OnFilePick)
sizer3.Add(self.d, 1, wx.ALL|wx.EXPAND, 0)
sizer.Add(sizer3, 1, wx.RIGHT|wx.EXPAND, 0)
sizer4 = wx.BoxSizer(wx.VERTICAL)
self.tq = wx.StaticText(self, -1, u"Kolejka:", (0, 0))
self.tq.SetFont(f)
sizer4.Add(self.tq, 0, wx.BOTTOM, 0)
self.q = QueueUI(self, c["paths"].split(","), c["file_pattern"], (505,0), (500,600))
sizer4.Add(self.q, 1, wx.ALL|wx.EXPAND, 0)
sizer.Add(sizer4, 1, wx.ALL|wx.EXPAND, 0)
sizer2.Add(sizer, 6, wx.ALL|wx.EXPAND, 0)
tp = TimePicker(self, wx.DefaultPosition)
tp.ShowModal()
self.lag = tp.GetLag()
tp.Destroy()
print "Lag set to", self.lag
self.tk = TimeKeeper("przerwy.txt", self.lag, self.OnTStart, self.OnTEnd, self.UpdateClock)
self.mp = MusicPlayer(self, self.OnAskNext, (0,450), (700,100))
self.mp.SetMinSize((200, 100))
sizer2.Add(self.mp, 0, wx.TOP|wx.EXPAND, 0)
self.SetSizer(sizer2)
self.SetAutoLayout(True)
def UpdateClock(self, time):
t = (time, 0)
self.GetStatusBar().SetFields(t[:1])
def onAbout(self, e):
d = wx.MessageDialog(self, u"Ten program został stworzony w celach edukacyjnych przez Sim1234", u"O programie", wx.OK)
d.ShowModal()
d.Destroy()
#e.Skip()
def onExit(self, e):
self.tk.stop()
self.mp.clean()
e.Skip()
def OnFilePick(self, path):
self.q.add(path)
def OnAskNext(self):
return self.q.next()
def OnTStart(self):
self.mp.next()
self.mp.epp(1)
print "Start"
def OnTEnd(self):
self.mp.epp(-1)
print "End"
def main():
app = wx.PySimpleApp()
frame = myframe()
frame.Show()
app.MainLoop()
if __name__ == '__main__':
main()
|
google/smbios-validation-tool
|
dmiparse/dmiparse_test.py
|
Python
|
apache-2.0
| 3,929
| 0.002036
|
# Lint as: python3
"""Unit tests for dmiparse."""
import os
import dmiparse
from google3.pyglib import resources
from google3.testing.pybase import googletest
TEST_PATH = 'google3/third_party/py/dmiparse/test_data'
class DmiParserTest(googletest.TestCase):
def setUp(self):
super(DmiParserTest, self).setUp()
data_path = os.path.join(TEST_PATH, 'less_compliant_smbios_records.txt')
self.data_file = resources.GetResourceFilename(data_path)
def testDmiParseNoDumpFileRaisesException(self):
with self.assertRaises(FileNotFoundError):
dmiparse.DmiParser('').parse()
def testDmiParseReturnsExpectedRecords(self):
records, _ = dmiparse.DmiParser(self.data_file).parse()
self.assertLen(records, 4)
self.assertIn('0x0002', records)
self.assertIn('0x0125', records)
self.assertIn('0x0126', records)
def testDmiParseReturnsValidBaseBoardRecord(self):
records, _ = dmiparse.DmiParser(self.data_file).parse()
self.assertIn('0x0002', records)
base_board_record = records['0x0002']
self.assertEqual('0x0002', base_board_record.handle_id)
self.assertEqual(2, base_board_record.type_id)
self.assertLen(base_board_record.props, 9)
self.assertIn('Product Name', base_board_record.props)
self.assertEqual('Magnesium', base_board_record.props['Product Name'].val)
self.assertEqual([], base_board_record.props['Product Name'].items)
self.assertIn('Version', base_board_record.props)
self.assertEqual('1234567890', base_board_record.props['Version'].val)
self.assertEqual([], base_board_record.props['Version'].items)
self.assertIn('UUID', base_board_record.props)
self.assertEqual('03000200-0400-0500-0006-000700080009',
base_board_record.props['UUID'].val)
self.assertEqual([], base_board_record.props['UUID'].items)
self.assertIn('Location In Chassis', base_board_record.props)
self.assertEqual('Riser1',
base_board_record.props['Location In Chassis'].val)
self.assertEqual([], base_board_record.props['Location In Chassis'].items)
self.assertIn('Chassis Handle', base_board_record.props)
self.assertEqual('0x0003', base_board_record.props['Chassis Handle'].val)
self.assertEqual([], base_board_record.props['Chassis Handle'].items)
self.assertIn('MAC Address', base_board_record.props)
self.assertEqual('00:1b:83:15:a3:24',
base_board_record.props['MAC Address'].val)
self.assertEqual([], base_board_record.props['MAC Address'].items)
self.assertIn('Contained Object Handles', base_board_record.props)
self.assertEqual('5',
base_board_record.props['Contained Object Handles'].val)
self.assertEqual(['0x009A', '0x009B', '0x009C', '0x009D', '0x009E'],
base_board_record.props['Contained Object Handles'].items)
self.assertIn('Characteristics', base_board_record.props)
self.assertEqual('', base_board_record.props['Characteristics'].val)
self.assertEqual([
'PCI is supported', 'BIOS is upgradeable', 'ACPI is supported',
'UEFI is supported'
], base_board_record.props['Characteristics'].items)
def testDmiParseIndentation(self):
records, _ = dmiparse.DmiParser(self.data_file).parse()
self.assertIn('0x0058', records)
oem_specific_record = records['0x0058']
self.assertIn('Strings', oem_specific_record.props)
    self.assertEqual([
        'WLYDCRB.86B.WR.64.2019.19.3.03.1837', '0. 0. 0', '4:2.1.21', 'N/A',
        'FRU: Ver 1.21', 'N/A', 'N/A'
    ], oem_specific_record.props['Strings'].items)
def testDmiParseReturnsValidGroups(self):
_, groups = dmiparse.DmiParser(self.data_file).parse()
self.assertIn(2, groups)
self.assertEqual(['0x0002'], groups[2])
self.assertIn(14, groups)
self.assertEqual(['0x0125', '0x0126'], groups[14])
if __name__ == '__main__':
googletest.main()
|
wcmckee/brobeurdotcom
|
cache/.mako.tmp/index.tmpl.py
|
Python
|
mit
| 5,371
| 0.003351
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1440369075.543512
_enable_loop = True
_template_filename = u'themes/monospace/templates/index.tmpl'
_template_uri = u'index.tmpl'
_source_encoding = 'utf-8'
_exports = [u'content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace(u'comments', context._clean_inheritance_tokens(), templateuri=u'comments_helper.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, u'comments')] = ns
ns = runtime.TemplateNamespace(u'helper', context._clean_inheritance_tokens(), templateuri=u'index_helper.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, u'helper')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'base.tmpl', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
date_format = context.get('date_format', UNDEFINED)
helper = _mako_get_namespace(context, 'helper')
messages = context.get('messages', UNDEFINED)
posts = context.get('posts', UNDEFINED)
_link = context.get('_link', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
comments = _mako_get_namespace(context, 'comments')
index_teasers = context.get('index_teasers', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
date_format = context.get('date_format', UNDEFINED)
helper = _mako_get_namespace(context, 'helper')
messages = context.get('messages', UNDEFINED)
posts = context.get('posts', UNDEFINED)
_link = context.get('_link', UNDEFINED)
def content():
return render_content(context)
comments = _mako_get_namespace(context, 'comments')
index_teasers = context.get('index_teasers', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n')
for post in posts:
__M_writer(u' <div class="postbox">\n <h1><a href="')
__M_writer(unicode(post.permalink()))
__M_writer(u'">')
__M_writer(unicode(post.title()))
__M_writer(u'</a></h1>\n <div class="meta" style="background-color: rgb(234, 234, 234); ">\n <span class="authordate">\n ')
__M_writer(unicode(messages("Posted:")))
__M_writer(u' <time class="published" datetime="')
__M_writer(unicode(post.date.isoformat()))
__M_writer(u'">')
__M_writer(unicode(post.formatted_date(date_format)))
__M_writer(u'</time>\n </span>\n <br>\n <span class="tags">Tags: \n')
if post.tags:
for tag in post.tags:
__M_writer(u' <a class="tag" href="')
__M_writer(unicode(_link('tag', tag)))
__M_writer(u'"><span>')
__M_writer(unicode(tag))
__M_writer(u'</span></a>\n')
__M_writer(u' </span>\n </div>\n ')
__M_writer(unicode(post.text(teaser_only=index_teasers)))
__M_writer(u'\n')
if not post.meta('nocomments'):
__M_writer(u' ')
__M_writer(unicode(comments.comment_link(post.permalink(), post.base_path)))
__M_writer(u'\n')
__M_writer(u' </div>\n')
__M_writer(u' ')
__M_writer(unicode(helper.html_pager()))
__M_writer(u'\n ')
__M_writer(unicode(comments.comment_link_script()))
__M_writer(u'\n\t')
__M_writer(unicode(helper.mathjax_script(posts)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"22": 3, "25": 2, "31": 0, "45": 2, "46": 3, "47": 4, "52": 31, "58": 5, "71": 5, "72": 6, "73": 7, "74": 8, "75": 8, "76": 8, "77": 8, "78": 11, "79": 11, "80": 11, "81": 11, "82": 11, "83": 11, "84": 15, "85": 16, "86": 17, "87": 17, "88": 17, "89": 17, "90": 17, "91": 20, "92": 22, "93": 22, "94": 23, "95": 24, "96":
|
24, "97": 24, "98": 26, "99": 28, "100": 28, "101": 28, "102"
|
: 29, "103": 29, "104": 30, "105": 30, "111": 105}, "uri": "index.tmpl", "filename": "themes/monospace/templates/index.tmpl"}
__M_END_METADATA
"""
|
beraldoleal/entendaobrasil
|
scripts/import.py
|
Python
|
gpl-2.0
| 642
| 0.007788
|
#!/usr/bin/env python
from api.camara.orgaos import *
from api.camara.deputados import *
from core.models import *
import django
django.setup()
## Orgaos Webservice
orgaos = OrgaosCamara()
orgaos.importar_tipos_orgaos()
orgaos.importar_orgaos()
orgaos.importar_cargos()
## Deputados Webservice
deputados = DeputadosCamara()
deputados.importar_partidos()
deputados.importar_deputados()
#deputados.importar_detalhes_deputados()
# Google Images download
#for parlamentar in Parlamentar.objects.all():
# parlamentar.download_photos()
# Wikipedia data
#for parlamentar in Parlamentar.objects.all():
# parlamentar.get_wikipedia_data()
|
staslev/beam
|
sdks/python/apache_beam/io/gcp/internal/clients/storage/storage_v1_messages.py
|
Python
|
apache-2.0
| 78,320
| 0.004941
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for storage version v1.
Stores and retrieves potentially large, immutable data objects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'storage'
class Bucket(_messages.Message):
"""A bucket.
Messages:
CorsValueListEntry: A CorsValueListEntry object.
LifecycleValue: The bucket's lifecycle configuration. See lifecycle
management for more information.
LoggingValue: The bucket's logging configuration, which defines the
destination bucket and optional name prefix for the current bucket's
logs.
OwnerValue: The owner of the bucket. This is always the project team's
owner group.
VersioningValue: The bucket's versioning configuration.
WebsiteValue: The bucket's website configuration.
Fields:
acl: Access controls on the bucket.
cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
defaultObjectAcl: Default access controls to apply to new objects when no
ACL is provided.
etag: HTTP 1.1 Entity tag for the bucket.
id: The ID of the bucket.
kind: The kind of item this is. For buckets, this is always
storage#bucket.
lifecycle: The bucket's lifecycle configuration. See lifecycle management
for more information.
location: The location of the bucket. Object data for objects in the
bucket resides in physical storage within this region. Defaults to US.
See the developer's guide for the authoritative list.
logging: The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
metageneration: The metadata generation of this bucket.
name: The name of the bucket.
owner: The owner of the bucket. This is always the project team's owner
group.
projectNumber: The project number of the project the bucket belongs to.
selfLink: The URI of this bucket.
storageClass: The bucket's storage class. This defines how objects in the
bucket are stored and determines the SLA and the cost of storage. Values
include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to
STANDARD. For more information, see storage classes.
timeCreated: The creation time of the bucket in RFC 3339 format.
updated: The modification time of the bucket in RFC 3339 format.
versioning: The bucket's versioning configuration.
website: The bucket's website configuration.
"""
class CorsValueListEntry(_messages.Message):
"""A CorsValueListEntry object.
Fields:
maxAgeSeconds: The value, in seconds, to return in the Access-Control-
Max-Age header used in preflight responses.
method: The list of HTTP methods on which to include CORS response
headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
of methods, and means "any method".
origin: The list of Origins eligible to receive CORS response headers.
Note: "*" is permitted in the list of origins, and means "any Origin".
responseHeader: The list of HTTP headers other than the simple response
headers to give permission for the user-agent to share across domains.
"""
maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
method = _messages.StringField(2, repeated=True)
origin = _messages.StringField(3, repeated=True)
responseHeader = _messages.StringField(4, repeated=True)
class LifecycleValue(_messages.Message):
"""The bucket's lifecycle configuration. See lifecycle management for more
information.
Messages:
RuleValueListEntry: A RuleValueListEntry object.
Fields:
rule: A lifecycle management rule, which is made of an action to take
and the condition(s) under which the action will be taken.
"""
class RuleValueListEntry(_messages.Message):
"""A RuleValueListEntry object.
Messages:
ActionValue: The action to take.
ConditionValue: The condition(s) under which the action will be taken.
Fields:
action: The action to take.
condition: The condition(s) under which the action will be taken.
"""
class ActionValue(_messages.Message):
"""The action to take.
Fields:
type: Type of the action. Currently, only Delete is supported.
"""
type = _messages.StringField(1)
class ConditionValue(_messages.Message):
"""The condition(s) under which the action will be taken.
Fields:
age: Age of an object (in days). This condition is satisfied when an
object reaches the specified age.
createdBefore: A date in RFC 3339 format with only the date part
(for instance, "2013-01-15"). This condition is satisfied when an
object is created before midnight of the specified date in UTC.
isLive: Relevant only for versioned objects. If the value is true,
this condition matches live objects; if the value is false, it
matches archived objects.
numNewerVersions: Relevant only for versioned objects. If the value
is N, this condition is satisfied when there are at least N
versions (including the live version) newer than this version of
the object.
"""
age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
createdBefore = extra_types.DateField(2)
isLive = _messages.BooleanField(3)
        numNewerVersions = _messages.IntegerField(4, variant=_messages.Variant.INT32)
action = _messages.MessageField('ActionValue', 1)
condition = _messages.MessageField('ConditionValue', 2)
rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
class LoggingValue(_messages.Message):
"""The bucket's logging configuration, which defines the
|
destination
bucket and optional name prefix for the current bucket's logs.
Fields:
logBucket: The destination bucket where the current bucket's logs should
be placed.
logObjectPrefix: A prefix for log object names.
"""
logBucket = _messages.StringField(1)
logObjectPrefix = _messages.StringField(2)
class OwnerValue(_messages.Message):
"""The owner of the bucket. This is always the project team's owner group.
Fields:
entity: The entity, in the form project-owner-projectId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
class VersioningValue(_messages.Message):
"""The bucket's versioning configuration.
Fields:
enabled: While set to true, versioning is fully enabled for this bucket.
"""
enabled = _messages.BooleanField(1)
class WebsiteValue(_messages.Message):
"""The bucket's website configuration.
Fields:
mainPageSuffix: Behaves as the bucket's directory index where missing
objects are treated as potential directories.
notFoundPage: The custom object to return when a requested resource is
not found.
"""
mainPageSuffix = _messa
|
Osmose/kitsune
|
kitsune/search/api.py
|
Python
|
bsd-3-clause
| 4,930
| 0.00142
|
from django.conf import settings
from rest_framework import serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from kitsune.products.models import Product
from kitsune.questions.models import Question, QuestionMappingType
from kitsune.questions.api import QuestionSerializer
from kitsune.search import es_utils
from kitsune.sumo.api_utils import GenericAPIException
from kitsune.wiki.api import DocumentDetailSerializer
from kitsune.wiki.models import Document, DocumentMappingType
def positive_integer(value):
if value < 0:
raise serializers.ValidationError('This field must be positive.')
def valid_product(value):
if not value:
return
if not Product.objects.filter(slug=value).exists():
raise serializers.ValidationError(
'Could not find product with slug "{0}".'.format(value)
)
def valid_locale(value):
if not value:
return
if value not in settings.SUMO_LANGUAGES:
if value in settings.NON_SUPPORTED_LOCALES:
fallback = settings.NON_SUPPORTED_LOCALES[value] or settings.WIKI_DEFAULT_LANGUAGE
raise serializers.ValidationError(
'"{0}" is not supported, but has fallback locale "{1}".'.format(
value, fallback))
else:
raise serializers.ValidationError(
'Could not find locale "{0}".'.format(value)
)
class SuggestSerializer(serializers.Serializer):
q = serializers.CharField(required=True)
locale = serializers.CharField(
required=False, default=settings.WIKI_DEFAULT_LANGUAGE,
validators=[valid_locale])
product = serializers.CharField(
required=False, default='',
validators=[valid_product])
max_questions = serializers.IntegerField(
required=False, default=10,
validators=[positive_integer])
max_documents = serializers.IntegerField(
required=False, default=10,
validators=[positive_integer])
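# Illustrative request (hypothetical endpoint path and values): a GET such as
#   /api/2/search/suggest?q=crash&locale=en-US&product=firefox&max_documents=3
# is validated by SuggestSerializer and answered with a JSON body of the form
#   {"questions": [...], "documents": [...]}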
@api_view(['GET', 'POST'])
def suggest(request):
if request.data and request.GET:
raise GenericAPIException(
400, 'Put all parameters either in the querystring or the HTTP request body.')
serializer = SuggestSerializer(data=(request.data or request.GET))
if not serializer.is_valid():
raise GenericAPIException(400, serializer.errors)
searcher = (
es_utils.AnalyzerS()
.es(urls=settings.ES_URLS)
        .indexes(es_utils.read_index('default')))
data = serializer.validated_data
return Response({
'questions': _question_suggestions(
searcher, data['q'], data['locale'], data['product'], data['max_questions']),
'documents': _document_suggestions(
searcher, data['q'], data['locale'], data['product'], data['max_documents']),
})
def _question_suggestions(searcher, text, locale, product, max_results):
if max_results <= 0:
return []
search_filter = es_utils.F(
model='questions_question',
question_is_archived=False,
question_is_locked=False,
question_is_solved=True)
if product:
search_filter &= es_utils.F(product=product)
if locale:
search_filter &= es_utils.F(question_locale=locale)
questions = []
searcher = _query(searcher, QuestionMappingType, search_filter, text, locale)
question_ids = [result['id'] for result in searcher[:max_results]]
questions = [
QuestionSerializer(instance=q).data
for q in Question.objects.filter(id__in=question_ids)
]
return questions
def _document_suggestions(searcher, text, locale, product, max_results):
if max_results <= 0:
return []
search_filter = es_utils.F(
model='wiki_document',
document_category__in=settings.SEARCH_DEFAULT_CATEGORIES,
document_locale=locale,
document_is_archived=False)
if product:
search_filter &= es_utils.F(product=product)
documents = []
searcher = _query(searcher, DocumentMappingType, search_filter, text, locale)
doc_ids = [result['id'] for result in searcher[:max_results]]
documents = [
DocumentDetailSerializer(instance=doc).data
for doc in Document.objects.filter(id__in=doc_ids)
]
return documents
def _query(searcher, mapping_type, search_filter, query_text, locale):
query_fields = mapping_type.get_query_fields()
query = {}
for field in query_fields:
for query_type in ['match', 'match_phrase']:
key = '{0}__{1}'.format(field, query_type)
query[key] = query_text
# Transform query to be locale aware.
query = es_utils.es_query_with_analyzer(query, locale)
return (searcher
.doctypes(mapping_type.get_mapping_type_name())
.filter(search_filter)
.query(should=True, **query))
|
nkoech/csacompendium
|
csacompendium/csa_practice/api/practicelevel/practicelevelviews.py
|
Python
|
mit
| 2,046
| 0.002444
|
from csacompendium.csa_practice.models import PracticeLevel
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PracticeLevelListFilter
from csacompendium.csa_practice.api.practicelevel.practicelevelserializers import practice_level_serializers
def practice_level_views():
"""
Practice level views
:return: All practice level views
:rtype: Object
"""
practice_level_serializer = practice_level_serializers()
class PracticeLevelCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated]
class PracticeLevelListAPIView(ListAPIView):
"""
API list view. Gets all records API.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = PracticeLevelListFilter
pagination_class = APILimitOffsetPagination
    class PracticeLevelDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = PracticeLevel.objects.all()
serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'slug'
return {
'PracticeLevelListAPIView': PracticeLevelListAPIView,
'PracticeLevelDetailAPIView': PracticeLevelDetailAPIView,
'PracticeLevelCreateAPIView': PracticeLevelCreateAPIView
}
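# Illustrative usage (hypothetical URL configuration): the returned classes can
# be wired into a urls.py, e.g.
#   views = practice_level_views()
#   url(r'^$', views['PracticeLevelListAPIView'].as_view(), name='practice_level_list')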
|
toonn/capselai
|
clustering/augmented_dendrogram.py
|
Python
|
bsd-2-clause
| 540
| 0.007407
|
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
def augmented_dendrogram(*args, **kwargs):
ddata = dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
for i, d in zip(ddata['icoord'], ddata['dcoord']):
x = 0.5 * sum(i[1:3])
y = d[1]
#plt.plot(x, y, 'ro')
            plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
textcoords='offset points',
va='top', ha='center')
return ddata
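# Illustrative usage (hypothetical data): given a linkage matrix Z, e.g. from
# scipy.cluster.hierarchy.linkage(observations), calling augmented_dendrogram(Z)
# draws the dendrogram and labels each merge with its height (d[1]).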
|
EvilDako/PyTraining
|
fixture/session.py
|
Python
|
gpl-2.0
| 1,421
| 0.003519
|
__author__ = 'dako'
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
    def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
|
ADEQUATeDQ/portalmonitor
|
odpw/core/model.py
|
Python
|
gpl-3.0
| 12,953
| 0.012738
|
from sqlalchemy import Column, String, Integer, ForeignKey, SmallInteger, TIMESTAMP, BigInteger, ForeignKeyConstraint, \
Boolean, func, select, Float, distinct
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
import structlog
log =structlog.get_logger()
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
tmp=''
tab_portals = tmp+'portals'
tab_portalevolution=tmp+'portalevolution'
tab_portalsnapshot=tmp+'portalsnapshot'
tab_portalsnapshotquality=tmp+'portalsnapshotquality'
tab_portalsnapshotdynamicity=tmp+'portalsnapshotdyn'
tab_portalsnapshotfetch=tmp+'portalsnapshotfetch'
tab_formatdist= tmp+"formatdist"
tab_licensedist = tmp+"licensedist"
tab_isodist= tmp+"licensedist"
tab_datasets=tmp+'datasets'
tab_datasetsquality=tmp+'datasetsquality'
tab_datasetsdata=tmp+'datasetsdata'
tab_resources=tmp+'metaresources'
tab_resourcesinfo=tmp+'resourcesinfo'
tab_resourcescrawllog=tmp+'resourcescrawllog'
tab_organisations=tmp+'organisations'
tab_organisationssnapshot=tmp+'organisationsnapshot'
tab_resourceshistory=tmp+'resourceshistory'
tab_resourcesfreshness=tmp+'resourcesfreshness'
class Portal(Base):
__tablename__ = tab_portals
id = Column(String, primary_key=True, index=True,nullable=False)
uri = Column(String, nullable=False)
apiuri = Column(String)
software = Column(String(12), nullable=False) # OpenDataSoft, CKAN, Socrata <13
iso = Column(String(2), nullable=False)
active = Column(Boolean, default=True,nullable=False)
snapshots = relationship("PortalSnapshot", back_populates="portal")
snapshotsquality = relationship("PortalSnapshotQuality", back_populates="portal")
@hybrid_property
def snapshot_count(self):
print len(self.snapshots)
return len(self.snapshots)
@snapshot_count.expression
def snapshot_count(cls):
return select([func.count(PortalSnapshot.snapshot)])\
.where(PortalSnapshot.portalid == cls.id).label("snapshot_count")
@hybrid_property
def first_snapshot(self):
print [s for s in self.snapshots]
return min([s.snapshot for s in self.snapshots])
@first_snapshot.expression
def first_snapshot(cls):
return select([func.min(PortalSnapshot.snapshot)])\
.where(PortalSnapshot.portalid == cls.id).label("first_snapshot")
@hybrid_property
def last_snapshot(self):
return max([s.snapshot for s in self.snapshots])
@last_snapshot.expression
def last_snapshot(cls):
return select([func.max(PortalSnapshot.snapshot)])\
.where(PortalSnapshot.portalid == cls.id).label("last_snapshot")
@hybrid_property
def datasetcount(self):
return self.snapshots.order_by(PortalSnapshot.snapshot.desc()).one().datasetcount
@datasetcount.expression
def datasetcount(cls):
q=select([PortalSnapshot.datasetcount])\
.where(PortalSnapshot.portalid == cls.id).order_by(PortalSnapshot.snapshot.desc()).limit(1).label("datasetcount")
return q
@hybrid_property
def resourcecount(self):
return self.snapshots.order_by(PortalSnapshot.snapshot.desc()).one().resourcecount
@resourcecount.expression
def resourcecount(cls):
q=select([PortalSnapshot.resourcecount])\
.where(PortalSnapshot.portalid == cls.id).order_by(PortalSnapshot.snapshot.desc()).limit(1).label("resourcecount")
return q
def __repr__(self):
return "<Portal(id=%s, uri='%s', apiuri='%s', software='%s', iso=%s)>" % (
self.id, self.uri, self.apiuri, self.software, self.iso)
class PortalSnapshot(Base):
__tablename__ = tab_portalsnapshot
portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True,nullable=False)
snapshot= Column( SmallInteger, primary_key=True)
portal = relationship("Portal", back_populates="snapshots")
start = Column(TIMESTAMP)
end = Column(TIMESTAMP)
status = Column(SmallInteger)
exc = Column(String)
datasetcount = Column(Integer)
datasetsfetched = Column(Integer)
resourcecount = Column(Integer)
@hybrid_property
def fetchtime(self):
return self.end-self.start
datasets = relationship("Dataset", back_populates="portalsnapshot")
def __repr__(self):
return "<PortalSnapshot(id=%s, snapshot=%s, start=%s, end=%s, status=%s,ds=%s,res=%s)>" % (
self.portalid, self.snapshot, self.start, self.end, self.status,self.datasetcount,self.resourcecount)
class Serializable(object):
__public__ = []
def to_dict(self):
d = {}
for field in self.__public__:
value = getattr(self, field)
if value:
d[field] = value
return d
class PortalSnapshotDynamicity(Base,Serializable):
__tablename__ = tab_portalsnapshotdynamicity
portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True,nullable=False)
snapshot= Column( SmallInteger, primary_key=True)
updated = Column(Integer)
added = Column(Integer)
deleted = Column(Integer)
static = Column(Integer)
intersected = Column(Integer)
dindex = Column(Integer)
changefrequ = Column(Float)
size = Column(Integer)
@hybrid_property
def dyratio(self):
return (self.added+self.deleted+self.updated)\
/(1.0* self.intersected) if self.intersected >0 else 0
@hybrid_property
def adddelratio(self):
return (self.added-self.deleted)\
/(1.0* (self.added+self.deleted))if ((self.added+self.deleted)) >0 else 0
@hybrid_property
def addRatio(self):
return (self.added) \
/ (1.0 * self.intersected)if self.intersected >0 else 0
@hybrid_property
def delRatio(self):
return (self.deleted) \
/ (1.0 * self.intersected)if self.intersected >0 else 0
@hybrid_property
def updatedRatio(self):
return ( self.updated) \
/ (1.0 * self.intersected)if self.intersected >0 else 0
@hybrid_property
def staticRatio(self):
return (self.static) \
/ (1.0 * self.intersected)if self.intersected >0 else 0
class PortalSnapshotQuality(Base):
__tablename__ = tab_portalsnapshotquality
portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True,nullable=False)
snapshot= Column( SmallInteger, primary_key=True)
portal = relationship("Portal", back_populates="snapshotsquality")
cocu = Column(Float)
cocuN = Column(Integer)
coce = Column(Float)
coceN = Column(Integer)
coda = Column(Float)
codaN = Column(Integer)
cofo = Column(Float)
cofoN = Column(Integer)
coli = Column(Float)
coliN = Column(Integer)
coac = Column(Float)
coacN = Column(Integer)
exda = Column(Float)
exdaN = Column(Integer)
exri = Column(Float)
exriN = Column(Integer)
expr = Column(Float)
exprN = Column(Integer)
exac = Column(Float)
exacN = Column(Integer)
exdi = Column(Float)
exdiN = Column(Integer)
exte = Column(Float)
exteN = Column(Integer)
exsp = Column(Float)
exspN = Column(Integer)
exco = Column(Float)
excoN = Column(Integer)
opfo = Column(Float)
opfoN = Column(Integer)
opma = Column(Float)
opmaN = Column(Integer)
opli = Column(Float)
opliN = Column(Integer)
datasets=Column(Integer)
def __repr__(self):
return "<PortalSnapshotQuality(id=%s, snapshot=%s, agg=%s)>" % (
self.portalid, self.snapshot, any([self.exda,self.coac,self.coce,self.cocu]))
class Dataset(Base):
__tablename__ = tab_datasets
id = Column( String, primary_key=True)
snapshot = Column( SmallInteger, primary_key=True, index=True)
portalid = Column( String, primary_key=True, index=True)
organisation = Column(String, index=True)
title = Column(String, index=
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/stc/package.py
|
Python
|
lgpl-2.1
| 1,737
| 0.000576
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Stc(AutotoolsPackage):
"""STC: The Swift-Turbine Compiler"""
homepage = 'http://swift-lang.org/Swift-T'
url = 'http://swift-lang.github.io/swift-t-downloads/stc-0.7.3.tar.gz'
version('0.7.3', '6bf769f406f6c33d1c134521373718d3')
depends_on('java')
depends_on('ant')
depends_on('turbine')
depends_on('zsh', type='run')
    def configure_args(self):
args = ['--with-turbine=' + self.spec['turbine'].prefix]
return args
|
babble/babble
|
include/jython/Lib/test/test_trace.py
|
Python
|
apache-2.0
| 21,728
| 0.005339
|
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else caluse) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away.
|
No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generated for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
def traceWithGenexp(self, frame, event, arg):
(o for o in [1])
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test
|
wieden-kennedy/autoscalebot
|
autoscalebot/tests.py
|
Python
|
bsd-3-clause
| 21,789
| 0.001056
|
from copy import copy
import datetime
import time
import urllib2
from nose.tools import assert_equals
from nose.plugins.skip import SkipTest
from autoscalebot import TOO_LOW, JUST_RIGHT, TOO_HIGH
from autoscalebot.conf import AutoscaleSettings
from autoscalebot.models import HerokuAutoscaler
class TestSettings(AutoscaleSettings):
pass
test_settings = TestSettings()
test_settings.HEROKU_APP_NAME = "test-app"
test_settings.HEROKU_API_KEY = "1234567"
test_settings.HEARTBEAT_INTERVAL_IN_SECONDS = 30
test_settings.HEARTBEAT_URL = 'http://www.google.com'
test_settings.MAX_RESPONSE_TIME_IN_MS = 1000
test_settings.MIN_RESPONSE_TIME_IN_MS = 400
test_settings.NUMBER_OF_FAILS_TO_SCALE_UP_AFTER = 3
test_settings.NUMBER_OF_PASSES_TO_SCALE_DOWN_AFTER = 5
test_settings.MAX_DYNOS = 3
test_settings.MIN_DYNOS = 1
test_settings.INCREMENT = 1
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_THRESHOLD = None
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_PERIOD_IN_MINUTES = None
test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = True
test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = True
test_settings.NOTIFICATION_BACKENDS = ["autoscalebot.backends.notification.TestBackend", ]
class MockHerokuProcesses:
def __init__(self):
self.current = 0
self._processes = [1, ]
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = [1, ]
return self._processes
def scale(self, new_num):
self._processes = [n + 1 for n in range(0, new_num)]
def __iter__(self):
return self
def next(self):
self.current += 1
if self.current > len(self.processes):
raise StopIteration
else:
return self.processes[self.current - 1]
class MockBrokenHerokuProcesses(MockHerokuProcesses):
def scale(self):
raise Exception
class MockHerokuApp:
def __init__(self, *args, **kwargs):
self.processes
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockHerokuProcesses(), }
return self._processes
class MockBrokenHerokuApp(MockHerokuApp):
@property
def processes(self):
if not hasattr(self, "_processes"):
self._processes = {'web': MockBrokenHerokuProcesses(), }
return self._processes
class MockHerokuAutoscaler(HerokuAutoscaler):
def __init__(self, *args, **kwargs):
super(MockHerokuAutoscaler, self).__init__(*args, **kwargs)
self.heroku_app
@property
def heroku_app(self):
if not hasattr(self, "_heroku_app"):
self._heroku_app = MockHerokuApp()
return self._heroku_app
def out_of_band_heroku_scale(self, num_dynos):
# Ugly mock out of band scale
self.heroku_app.processes["web"]._processes = [1, 2, 3, 4]
self._num_dynos = len([i for i in self.heroku_app.processes["web"]._processes])
class MockValidResponse:
def read(self, *args, **kwargs):
return "A"
class Mock500Response:
def read(self, *args, **kwargs):
raise Exception
def mock_valid_urlopen(self, *args, **kwargs):
time.sleep(0.5)
return MockValidResponse()
def mock_invalid_urlopen(self, *args, **kwargs):
return Mock500Response()
def mock_fast_urlopen(self, *args, **kwargs):
return MockValidResponse()
def mock_slow_urlopen(self, *args, **kwargs):
time.sleep(2)
return MockValidResponse()
class TestHerokuAutoscaler:
def setUp(self):
self.test_scaler
@property
def test_scaler(self):
if not hasattr(self, "_test_scaler"):
self._test_scaler = MockHerokuAutoscaler(test_settings)
return self._test_scaler
def test_heroku_scale(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.heroku_scale(3)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(5)
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.heroku_scale(2)
assert_equals(self.test_scaler.num_dynos, 2)
def test_num_dynos(self):
self.test_scaler.heroku_scale(3)
assert_equals(len([i for i in self.test_scaler.heroku_app.processes['web']]), 3)
def test_add_to_history(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(JUST_RIGHT)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_HIGH, JUST_RIGHT])
def test_add_to_history_caps_length(self):
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.results, [TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW])
def test_needs_scale_up_works(self):
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_up, False)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_up, True)
def test_needs_scale_down_works(self):
self.test_scaler.add_to_history(TOO_HIGH)
assert_equals(self.test_scaler.needs_scale_down, False)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_histor
|
y(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
self.test_scaler.add_to_hi
|
story(TOO_LOW)
self.test_scaler.add_to_history(TOO_LOW)
assert_equals(self.test_scaler.needs_scale_down, True)
def test_scale_up(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_up_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
def test_scale_down(self):
self.test_scaler.scale_up()
self.test_scaler.scale_up()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 2)
def test_scale_down_stops_at_limit(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_up()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
self.test_scaler.scale_down()
assert_equals(self.test_scaler.num_dynos, 1)
def test_do_autoscale_up_works(self):
assert_equals(self.test_scaler.num_dynos, 1)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 2)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.do_autoscale()
assert_equals(self.test_scaler.num_dynos, 3)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
self.test_scaler.add_to_history(TOO_HIGH)
|
vgamula/sp
|
server/accounts/forms.py
|
Python
|
mit
| 1,763
| 0
|
import trafaret as t
from server.c
|
ore.passwords import generate_password, check_password
from server.core.forms import TrafaretForm, TrafaretErro
|
r
class RegistrationForm(TrafaretForm):
fields = t.Dict({
t.Key('email'): t.Email(),
t.Key('password'): t.String(max_length=255),
t.Key('confirm'): t.String(max_length=255),
t.Key('accept_tos'): t.StrBool(),
})
async def extra_validation(self):
errors = {}
if self.data['confirm'] != self.data['password']:
errors['confirm'] = 'Passwords should match.'
if await self.db.users.find_one({'email': self.data['email']}):
errors['email'] = 'User with this email is already registered.'
if errors:
raise TrafaretError(errors)
async def save(self):
data = self.data
data_to_save = {
'email': data['email'],
'password': generate_password(data['password']),
}
result = await self.db.users.insert_one(data_to_save)
data_to_save['_id'] = result.inserted_id
return data_to_save
class LoginForm(TrafaretForm):
user = None
fields = t.Dict({
t.Key('email'): t.Email(),
t.Key('password'): t.String(max_length=255),
})
async def extra_validation(self):
errors = {}
user = await self.db.users.find_one({'email': self.data['email']})
if not user:
errors['email'] = 'User not found'
else:
if not check_password(self.data['password'], user.password):
errors['password'] = 'Password is not correct'
self.user = user
if errors:
raise TrafaretError(errors)
def get_user(self):
return self.user
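
# --- Editor's note: illustrative sketch, not part of the original module ---
# The forms above delegate per-field validation to trafaret Dict schemas before
# the async extra_validation() hook runs. The guarded snippet below shows that
# first step in isolation; it assumes only the trafaret package itself, not the
# project's TrafaretForm wrapper (which is defined elsewhere in the project).
if __name__ == '__main__':
    import trafaret as t

    schema = t.Dict({
        t.Key('email'): t.Email(),
        t.Key('password'): t.String(max_length=255),
    })

    # Valid data passes through check() unchanged
    print(schema.check({'email': 'user@example.com', 'password': 'secret'}))

    # Invalid data raises DataError carrying a field -> message mapping
    try:
        schema.check({'email': 'not-an-email', 'password': 'secret'})
    except t.DataError as exc:
        print(exc.as_dict())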
|
robwarm/gpaw-symm
|
gpaw/__init__.py
|
Python
|
gpl-3.0
| 10,531
| 0.001045
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""Main gpaw module."""
import os
import sys
try:
from distutils.util import get_platform
except ImportError:
modulepath = os.environ.get('GPAW_GET_PLATFORM')
if modulepath is None:
errmsg = ('Error: Could not get platform from distutils. '
'Set the GPAW_GET_PLATFORM environment variable to '
'the architecture string printed during build.')
raise ImportError(errmsg)
def get_platform():
return modulepath
from glob import glob
from os.path import join, isfile
import numpy as np
assert not np.version.version.startswith('1.6.0')
__all__ = ['GPAW', 'Calculator',
'Mixer', 'MixerSum', 'MixerDif', 'MixerSum2',
'CG', 'Davidson', 'RMM_DIIS', 'LCAO',
'PoissonSolver',
'FermiDirac', 'MethfesselPaxton',
'restart']
class ConvergenceError(Exception):
pass
class KohnShamConvergenceError(ConvergenceError):
pass
class PoissonConvergenceError(ConvergenceError):
pass
# Check for special command line arguments:
debug = False
trace = False
dry_run = 0
memory_estimate_depth = 2
parsize_domain = None
parsize_bands = None
sl_default = None
sl_diagonalize = None
sl_inverse_cholesky = None
sl_lcao = None
sl_lrtddft = None
buffer_size = None
extra_parameters = {}
profile = False
i = 1
while len(sys.argv) > i:
arg = sys.argv[i]
if arg.startswith('--gpaw-'):
# Found old-style gpaw command line argument:
arg = '--' + arg[7:]
raise RuntimeError('Warning: Use %s instead of %s.' %
(arg, sys.argv[i]))
if arg == '--trace':
trace = True
elif arg == '--debug':
debug = True
elif arg.startswith('--dry-run'):
dry_run = 1
if len(arg.split('=')) == 2:
dry_run = int(arg.split('=')[1])
elif arg.startswith('--memory-estimate-depth'):
memory_estimate_depth = -1
if len(arg.split('=')) == 2:
memory_estimate_depth = int(arg.split('=')[1])
elif arg.startswith('--domain-decomposition='):
parsize_domain = [int(n) for n in arg.split('=')[1].split(',')]
if len(parsize_domain) == 1:
parsize_domain = parsize_domain[0]
else:
|
assert len(parsize_domain) == 3
elif arg.startswith('--state-parallelization='):
parsize_bands = int
|
(arg.split('=')[1])
elif arg.startswith('--sl_default='):
# --sl_default=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_default=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_default = ['d'] * 3
else:
sl_default = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_default.append(int(sl_args[sl_args_index]))
else:
sl_default.append(sl_args[sl_args_index])
elif arg.startswith('--sl_diagonalize='):
# --sl_diagonalize=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_diagonalize=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_diagonalize = ['d'] * 3
else:
sl_diagonalize = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_diagonalize.append(int(sl_args[sl_args_index]))
else:
sl_diagonalize.append(sl_args[sl_args_index])
elif arg.startswith('--sl_inverse_cholesky='):
# --sl_inverse_cholesky=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_inverse_cholesky=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_inverse_cholesky = ['d'] * 3
else:
sl_inverse_cholesky = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_inverse_cholesky.append(int(sl_args[sl_args_index]))
else:
sl_inverse_cholesky.append(sl_args[sl_args_index])
elif arg.startswith('--sl_lcao='):
# --sl_lcao=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_lcao=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_lcao = ['d'] * 3
else:
sl_lcao = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_lcao.append(int(sl_args[sl_args_index]))
else:
sl_lcao.append(sl_args[sl_args_index])
elif arg.startswith('--sl_lrtddft='):
# --sl_lcao=nprow,npcol,mb,cpus_per_node
# use 'd' for the default of one or more of the parameters
# --sl_lcao=default to use all default values
sl_args = [n for n in arg.split('=')[1].split(',')]
if len(sl_args) == 1:
assert sl_args[0] == 'default'
sl_lrtddft = ['d'] * 3
else:
sl_lrtddft = []
assert len(sl_args) == 3
for sl_args_index in range(len(sl_args)):
assert sl_args[sl_args_index] is not None
                if sl_args[sl_args_index] != 'd':
assert int(sl_args[sl_args_index]) > 0
sl_lrtddft.append(int(sl_args[sl_args_index]))
else:
sl_lrtddft.append(sl_args[sl_args_index])
elif arg.startswith('--buffer_size='):
# Buffer size for MatrixOperator in MB
buffer_size = int(arg.split('=')[1])
elif arg.startswith('--gpaw='):
extra_parameters = eval('dict(%s)' % arg[7:])
elif arg == '--gpaw':
extra_parameters = eval('dict(%s)' % sys.argv.pop(i + 1))
elif arg.startswith('--profile='):
profile = arg.split('=')[1]
else:
i += 1
continue
# Delete used command line argument:
del sys.argv[i]
if debug:
np.seterr(over='raise', divide='raise', invalid='raise', under='ignore')
oldempty = np.empty
def empty(*args, **kwargs):
a = oldempty(*args, **kwargs)
try:
a.fill(np.nan)
except ValueError:
a.fill(-1000000)
return a
np.empty = empty
build_path = join(__path__[0], '..', 'build')
arch = '%s-%s' % (get_platform(), sys.version[0:3])
# If we are running the code from the source directory, then we will
# want to use the extension from the distutils build directory:
sys.path.insert(0, join(build_path, 'lib.' + arch))
def get_gpaw_python_path():
paths = os.environ['PATH'].split(os.pathsep)
paths.insert(0, join(build_path, 'bin.' + arch))
for path in paths:
if isfile(join(path, 'gpaw-python')):
return path
raise RuntimeError('Could not find gpaw-python!')
try:
setup_paths = os.environ['GPAW_SETUP_
|
xkollar/spacewalk
|
backend/server/rhnServer/search_notify.py
|
Python
|
gpl-2.0
| 1,394
| 0.001435
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-li
|
censes/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2
|
. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Sends notification to search-server that it should update server index
#
import xmlrpclib
from spacewalk.common.rhnLog import log_error
class SearchNotify:
def __init__(self, host="127.0.0.1", port="2828"):
self.addr = "http://%s:%s" % (host, port)
def notify(self, indexName="server"):
try:
client = xmlrpclib.ServerProxy(self.addr)
result = client.admin.updateIndex(indexName)
except Exception, e:
log_error("Failed to notify search service located at %s to update %s indexes"
% (self.addr, indexName), e)
return False
return result
if __name__ == "__main__":
search = SearchNotify()
result = search.notify()
print "search.notify() = %s" % (result)
|
AnyWi/py-ubnt-airviewer
|
airviewer.py
|
Python
|
bsd-2-clause
| 11,342
| 0.0082
|
#
# Copyright (c) 2016, AnyWi Technologies BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# UBNT airView alternative mockup for the Java client, with added functionality
# of storing data for later analytics
#
# Rick van der Zwet <rick.vanderzwet@anywi.com>
#
import requests
import telnetlib
import time
import sys
import numpy as np
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.ticker as plticker
# Latest firmware versions use HTTPS self-signed certificates by default
requests.packages.urllib3.disable_warnings()
#requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
#try:
# requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'
#except AttributeError:
# # no pyopenssl support used / needed / available
# pass
USERNAME = 'ubnt'
PASSWORD = 'ubnt'
HOST = "192.168.1.20"
PORT = 18888
TIMEOUT = 10
FRAME_SPEED = 1
BASE_URI = 'https://' + HOST + ':443'
def usage():
print(("Usage:" + sys.argv[0] + " <live|replay FILENAME>"))
print("")
print("Options:")
print(("\tlive \t=\tProcess live data from device " + HOST))
print("\treplay FILENAME \t=\tReplay FILENAME")
print("\trecord FILENAME \t=\tMake movie of FILENAME")
exit(128)
if len(sys.argv) == 2 and sys.argv[1] == 'live':
ACTION='live'
FILENAME = None
elif len(sys.argv) == 3 and sys.argv[1] == 'replay':
ACTION='replay'
FILENAME = sys.argv[2] # Stored data processing
FRAME_SPEED = 50
elif len(sys.argv) == 3 and sys.argv[1] == 'record':
ACTION='record'
FILENAME = sys.argv[2] # Stored data processing
FRAME_SPEED = 50
else:
usage()
def parse_get_frame_resp(line):
_,vals_raw = line.split(':')
vals = list(map(int, vals_raw.split(',')))
frame_nr = vals.pop(0)
return(frame_nr, vals)
# TODO: Parse this dynamically from the 'SCAN RANGE' response
scan_range_begin = 2402000000
scan_range_end = 2497000000
if not FILENAME:
print(("Enabling Ubiquiti airView at %s:%s@%s..." %(USERNAME, PASSWORD, HOST)))
# Request session cookie
s = requests.session()
s.get(BASE_URI + '/login.cgi', verify=False)
# Authenticate
r = s.post(BASE_URI + '/login.cgi',
{"username": USERNAME, "password": PASSWORD}, verify=False)
r.raise_for_status()
if 'Invalid credentials.' in r.text:
print("# CRIT: Username/password invalid!")
sys.exit(1)
# Enable airView
r = s.post(BASE_URI + '/airview.cgi',
{"start": 1}, verify=False)
r.raise_for_status()
print("Waiting for device to enter airView modus...")
# Allow device a few moments to settle
time.sleep(TIMEOUT)
print("Start scanning...")
tn = telnetlib.Telnet(HOST, PORT, timeout=TIMEOUT)
#tn.set_debuglevel(99)
# Storage on unique files
outfile = 'output-%s.dat' % int(time.time())
print(("Storing output at '%s'" % outfile))
fh = open(outfile, 'a')
def writeline(cmd):
""" Write line to device"""
ts = time.time()
tn.write(cmd.encode('ascii'))
print("Sending: %s", cmd.strip())
fh.write("%s\001%s" % (ts, cmd))
return ts
def getline():
"""Read line from device"""
line = tn.read_until(b"\n")
    print('Received: %s' % line.decode('ascii').strip())
fh.write("%s\001%s" % (time.time(), line.decode('ascii')))
return line.decode('ascii')
# Commands need to have a trailing space if no arguments are specified
writeline("CONNECT: \n")
getline()
#writeline("REQUEST RANGE: 240200
|
0000,2407000000\n") # 5 MHz
#writeline("REQUEST RANGE: 2402000000,2412000000\n") # 10 MHz
#writeline("REQUEST RANGE: 2402000000,2417000000\n") # 15 MHz
#writeline("REQUEST RANGE: 2402000000,2422000000\n") # 20 Mhz
#writeline("REQUEST RANGE: 2402000000,2477000000\n") # (ch 1-11 - US allocation)
#writeline("REQUEST RANGE: 2402000000,2487000000\n") #
|
(ch 1-13 - UK allocation)
#writeline("REQUEST RANGE: 2402000000,2497000000\n") # (ch 1-14)
writeline("REQUEST RANGE: 5150000000,5250000000\n") # 5.150-5.250 (U-NII Lower Band)
#writeline("REQUEST RANGE: 5250000000,5350000000\n") # 5.250-5.350 (U-NII Middle Band)
#writeline("REQUEST RANGE: 5470000000,5725000000\n") # 5.470-5.725 (U-NII Worldwide)
#writeline("REQUEST RANGE: 5150000000,5725000000\n") # (U-NII wide-spectrum)
getline()
writeline("START SCAN: \n")
getline()
print("Waiting for scan to start...")
time.sleep(2)
def get_frame(frame):
""" Get frame from device airView """
    # TODO: Requesting frames in order sometimes yields empty responses; maybe flush first?
#writeline("GET FRAME: %s\n" % frame)
ts = writeline("GET FRAME: \n")
line = getline()
return((ts,) + parse_get_frame_resp(line))
else:
# No need for logic since we are processing stored data
sh = open(FILENAME, 'r')
def get_frame(frame):
global scan_range_begin, scan_range_end
""" Perform replay data processing """
while True:
line = sh.readline()
if not line:
return(None, None, None)
ts_raw, a = line.split('\001', 1)
ts = float(ts_raw)
cmd, ret = a.split(':', 1)
if cmd == 'FRAME':
return((ts,) + parse_get_frame_resp(a))
elif cmd == 'SCAN RANGE':
scan_range_begin, scan_range_end = map(int, ret.split(','))
kHz = lambda x: float(x) / 1000
MHz = lambda x: kHz(x) / 1000
GHz = lambda x: MHz(x) / 1000
# Get initial frame number and bin sizes
_, frame_nr, vals = get_frame(None)
bin_size = len(vals)
bin_sample_khz = kHz(scan_range_end - scan_range_begin) / bin_size
print(("Bin size: %s" % bin_size))
print('Scan range: %s - %s MHz (delta: %s MHz)' % (MHz(scan_range_begin), MHz(scan_range_end), MHz(scan_range_end - scan_range_begin)))
# Start making picture
fig, ax = plt.subplots(figsize=(20,11))
fig.canvas.set_window_title('UBNT airView Client')
ax.set_ylabel('100ms units elapsed')
ax.set_xlabel('Frequency (sampled with bins of %s kHz)' % bin_sample_khz)
# Plotting 2.4GHz channels
#a = [2402,2412,2417,2422,2427,2432,2437,2442,2447,2452,2457,2462,2467,2472,2484,2497]
#channels = (np.array(a,dtype='float32') - 2402) / (bin_sample_khz / 1000)
# Plotting 5GHz channels
channels = list(range(32,68,4)) + list(range(100,148,4)) + list(range(149,169,4))
xticks = []
xticklabels = []
for channel in channels:
freq_mhz = 5000 + (channel * 5)
xtick = freq_mhz - MHz(scan_range_begin)
xticklabel = "%i (%s)" % (freq_mhz, channel)
xticks.append(xtick)
xticklabels.append(xticklabel)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
plt.xticks(rotation=45)
# Plo
|
youfou/wxpy
|
wxpy/ext/xiaoi.py
|
Python
|
mit
| 3,546
| 0.000897
|
# coding: utf-8
from __future__ import unicode_literals
# created by: Han Feng (https://github.com/hanx11)
import collections
import hashlib
import logging
import requests
from wxpy.api.messages import Message
from wxpy.ext.talk_bot_utils import get_context_user_id, next_topic
from wxpy.utils.misc import get_text_without_at_bot
from wxpy.utils import enhance_connection
logger = logging.getLogger(__name__)
from wxpy.compatible import *
class XiaoI(object):
"""
    The XiaoI (xiaoi.com) chat bot, deeply integrated with wxpy
"""
# noinspection SpellCheckingInspection
def __init__(self, key, secret):
"""
        | A key and secret are required; both can be obtained by registering
        | Free sign-up: http://cloud.xiaoi.com/
        :param key: the key you registered
        :param secret: the secret you registered
"""
self.key = key
self.secret = secret
self.realm = "xiaoi.com"
self.http_method = "POST"
self.uri = "/ask.do"
self.url = "http://nlp.xiaoi.com/ask.do?platform=custom"
xauth = self._make_http_header_xauth()
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
}
headers.update(xauth)
self.session = requests.Session()
self.session.headers.update(headers)
|
enhance_connection(self.session)
def _make_signature(self):
"""
        Generate the request signature
"""
        # 40 random digit characters
# nonce = "".join([str(randint(0, 9)) for _ in range(40)])
nonce = "4103657107305326101203516108016101205331"
sha1 = "{0}:{1}:{2}".format(self.key, self.realm, self.secret).encode("utf-8")
sha1 = hashlib.sha1(sha1).hexdigest()
sh
|
a2 = "{0}:{1}".format(self.http_method, self.uri).encode("utf-8")
sha2 = hashlib.sha1(sha2).hexdigest()
signature = "{0}:{1}:{2}".format(sha1, nonce, sha2).encode("utf-8")
signature = hashlib.sha1(signature).hexdigest()
ret = collections.namedtuple("signature_return", "signature nonce")
ret.signature = signature
ret.nonce = nonce
return ret
def _make_http_header_xauth(self):
"""
        Build the X-Auth request authentication header
"""
sign = self._make_signature()
ret = {
"X-Auth": "app_key=\"{0}\",nonce=\"{1}\",signature=\"{2}\"".format(
self.key, sign.nonce, sign.signature)
}
return ret
def do_reply(self, msg):
"""
        Reply to the message, and return the reply text
        :param msg: a Message object
        :return: the reply text
"""
ret = self.reply_text(msg)
msg.reply(ret)
return ret
def reply_text(self, msg):
"""
        Return the reply text only (without sending it)
        :param msg: a Message object, or message text
        :return: the reply text
"""
error_response = (
"主人还没给我设置这类话题的回复",
)
if isinstance(msg, Message):
user_id = get_context_user_id(msg)
question = get_text_without_at_bot(msg)
else:
user_id = "abc"
question = msg or ""
params = {
"question": question,
"format": "json",
"platform": "custom",
"userId": user_id,
}
resp = self.session.post(self.url, data=params)
text = resp.text
for err in error_response:
if err in text:
return next_topic()
return text
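
# --- Editor's note: illustrative sketch, not part of the original module ---
# Typical wiring of the XiaoI class above as a wxpy auto-reply handler. Bot,
# register() and join() follow wxpy's public API; the key/secret strings are
# placeholders for credentials obtained from http://cloud.xiaoi.com/.
if __name__ == '__main__':
    from wxpy import Bot

    bot = Bot()                                # scan the QR code to log in
    xiaoi = XiaoI('your-key', 'your-secret')   # placeholder credentials

    @bot.register()
    def auto_reply(msg):
        # reply_text() only fetches the answer; returning it lets wxpy send it
        return xiaoi.reply_text(msg)

    bot.join()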
|
Richert/BrainNetworks
|
BasalGanglia/stn_gpe_str_opt.py
|
Python
|
apache-2.0
| 14,060
| 0.004481
|
import os
import warnings
import numpy as np
from pyrates.utility.genetic_algorithm import CGSGeneticAlgorithm
from pandas import DataFrame, read_hdf
from copy import deepcopy
class CustomGOA(CGSGeneticAlgorithm):
def eval_fitness(self, target: list, **kwargs):
# define simulation conditions
worker_file = self.cgs_config['worker_file'] if 'worker_file' in self.cgs_config else None
param_grid = self.pop.drop(['fitness', 'sigma', 'results'], axis=1)
result_vars = ['r_e', 'r_p', 'r_a', 'r_m', 'r_f']
freq_targets = [0.0, np.nan, np.nan, np.nan, np.nan]
#param_grid, invalid_params = eval_params(param_grid)
conditions = [{}, # healthy control
{'k_pe': 0.2, 'k_ae': 0.2}, # AMPA blockade in GPe
{'k_pe': 0.2, 'k_ae': 0.2, 'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
'k_am': 0.2}, # AMPA blockade and GABAA blockade in GPe
{'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
'k_am': 0.2}, # GABAA blockade in GPe
{'k_pe': 0.0, 'k_ae': 0.0}, # STN blockade
{'k_ep': 0.2}, # GABAA blocker in STN
]
param_scalings = [
('delta_e', 'tau_e', 2.0),
('delta_p', 'tau_p', 2.0),
('delta_a', 'tau_a', 2.0),
('delta_m', 'tau_m', 2.0),
('delta_f', 'tau_f', 2.0),
('k_ee', 'delta_e', 0.5),
('k_ep', 'delta_e', 0.5),
('k_pe', 'delta_p', 0.5),
('k_pp', 'delta_p', 0.5),
('k_pa', 'tau_p', 0.5),
('k_pm', 'tau_p', 0.5),
('k_ae', 'tau_a', 0.5),
('k_ap', 'tau_a', 0.5),
('k_aa', 'tau_a', 0.5),
('k_am', 'tau_a', 0.5),
('k_mf', 'delta_m', 0.5),
('k_mm', 'delta_m', 0.5),
('k_fa', 'delta_f', 0.5),
('k_ff', 'delta_f', 0.5),
('eta_e', 'delta_e', 1.0),
('eta_p', 'delta_p', 1.0),
('eta_a', 'delta_a', 1.0),
('eta_m', 'delta_m', 1.0),
('eta_f', 'delta_f', 1.0),
]
chunk_size = [
60, # carpenters
100, # osttimor
60, # spanien
100, # animals
60, # kongo
60, # tschad
#100, # uganda
# 50, # tiber
#50, # giraffe
40, # lech
20, # rilke
12, # dinkel
#10, # rosmarin
#10, # mosambik
# 50, # compute servers
# 40,
# 30,
# 20,
# 10,
# 50,
# 40,
# 30,
# 20,
# 10,
# 50,
# 40,
# 30,
# 20,
# 10,
# 50,
# 40,
]
# perform simulations
if len(param_grid) > 0:
self.gs_config['init_kwargs'].update(kwargs)
res_file = self.cgs.run(
circuit_template=self.gs_config['circuit_template'],
param_grid=deepcopy(param_grid),
param_map=self.gs_config['param_map'],
simulation_time=self.gs_config['simulation_time'],
dt=self.gs_config['step_size'],
inputs=self.gs_config['inputs'],
outputs=self.gs_config['outputs'],
sampling_step_size=self.gs_config['sampling_step_size'],
permute=False,
chunk_size=chunk_size,
worker_file=worker_file,
worker_env=self.cgs_config['worker_env'],
gs_kwargs={'init_kwargs': self.gs_config['init_kwargs'], 'conditions': conditions,
'param_scalings': param_scalings},
worker_kwargs={'y': target, 'time_lim': 7200.0, 'freq_targets': freq_targets},
result_concat_axis=0)
results_tmp = read_hdf(res_file, key=f'Results/results
|
')
# calculate fitness
for gene_id in param_grid.index:
self.pop.at[gene_id, 'fitness'] = 1.0 / results_tmp.at[gene_id, 'fitness']
self.pop.at[gene_id, 'results'] = [results_tmp.at[gene_id, v]
|
for v in result_vars]
# set fitness of invalid parametrizations
#for gene_id in invalid_params.index:
# self.pop.at[gene_id, 'fitness'] = 0.0
# self.pop.at[gene_id, 'results'] = [0. for _ in result_vars]
def fitness(y, t):
y = np.asarray(y).flatten()
t = np.asarray(t).flatten()
diff = np.asarray([0.0 if np.isnan(t_tmp) else y_tmp - t_tmp for y_tmp, t_tmp in zip(y, t)]).flatten()
t[np.isnan(t)] = 1.0
t[t == 0] = 1.0
weights = 1 / np.abs(t)
return weights @ np.abs(diff)
if __name__ == "__main__":
warnings.filterwarnings("ignore")
pop_size = 1024
pop_genes = {
'k_ee': {'min': 0, 'max': 15, 'size': pop_size, 'sigma': 0.1, 'loc': 1.0, 'scale': 0.5},
'k_ae': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
'k_pe': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
'k_pp': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'k_ep': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
'k_ap': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'k_aa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'k_pa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'k_fa': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
'k_mm': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'k_am': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.8, 'loc': 40.0, 'scale': 4.0},
'k_pm': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.5, 'loc': 5.0, 'scale': 1.0},
'k_mf': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
'k_ff': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
'eta_e': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
'eta_p': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
'eta_a': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
'eta_m': {'min': -10, 'max': 0, 'size': pop_size, 'sigma': 0.2, 'loc': -3.0, 'scale': 0.5},
'eta_f': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
'delta_e': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.1, 'scale': 0.1},
'delta_p': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
'delta_a': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.4, 'scale': 0.1},
'delta_m': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
'delta_f': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
'tau_e': {'min': 12, 'max': 12, 'size': pop_size, 'sigma': 0.0, 'loc': 12.0, 'scale': 0.0},
'tau_p': {'min': 24, 'max': 24, 'size': pop_size, 'sigma': 0.0, 'loc': 24.0, 'scale': 0.0},
'tau_a': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
'tau_m': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
'tau_f': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
#'tau_ee_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 0.5, 'scale': 0.1},
# 'tau_ei': {'min': 3.0, 'max': 5.0, 'size': 1, 'sigma': 0.1, 'loc': 4.0, 'scale': 0.1},
#'tau_ei_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc':
|
Asana/python-asana
|
asana/resources/gen/webhooks.py
|
Python
|
mit
| 7,089
| 0.00227
|
# coding=utf-8
class _Webhooks:
def __init__(self, client=None):
self.client = client
def create_webhook(self, params=None, **options):
"""Establish a webhook
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. O
|
ther times requests retur
|
n more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks"
return self.client.post(path, params, **options)
def delete_webhook(self, webhook_gid, params=None, **options):
"""Delete a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.delete(path, params, **options)
def get_webhook(self, webhook_gid, params=None, **options):
"""Get a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.get(path, params, **options)
def get_webhooks(self, params=None, **options):
"""Get multiple webhooks
:param Object params: Parameters for the request
- workspace {str}: (required) The workspace to query for webhooks in.
- resource {str}: Only return webhooks for the given resource.
:param **options
- offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.'
- limit {int}: Results per page. The number of objects to return per page. The value must be between 1 and 100.
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks"
return self.client.get_collection(path, params, **options)
def update_webhook(self, webhook_gid, params=None, **options):
"""Update a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.put(path, params, **options)
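
# --- Editor's note: illustrative sketch, not part of the original module ---
# Typical calls through the python-asana client, which exposes an instance of the
# generated class above as client.webhooks. The access token, workspace id,
# resource id and target URL below are placeholders.
if __name__ == '__main__':
    import asana

    client = asana.Client.access_token('PERSONAL_ACCESS_TOKEN')

    # Establish a webhook on a resource (e.g. a project), pointing at a receiver URL
    new_webhook = client.webhooks.create_webhook({
        'resource': '12345',
        'target': 'https://example.com/receive-webhook/7654',
    })

    # Enumerate webhooks in a workspace, then remove them again
    for webhook in client.webhooks.get_webhooks({'workspace': '98765'}):
        client.webhooks.delete_webhook(webhook['gid'])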
|
GbalsaC/bitnamiP
|
venv/src/codejail/codejail/tests/test_safe_exec.py
|
Python
|
agpl-3.0
| 4,712
| 0
|
"""Test safe_exec.py"""
from cStringIO import StringIO
import os.path
import textwrap
import unittest
import zipfile
from nose.plugins.skip import SkipTest
from codejail import safe_exec
class SafeExecTests(unittest.TestCase):
"""The tests for `safe_exec`, to be mixed into specific test classes."""
# SafeExecTests is a TestCase so pylint understands the methods it can
# call, but it's abstract, so stop nose from running the tests.
__test__ = False
def safe_exec(self, *args, **kwargs):
"""The function under test.
This class will be mixed into subclasses that implement `safe_exec` to
give the tests something to test.
"""
raise NotImplementedError # pragma: no cover
def test_set_values(self):
globs = {}
self.safe_exec("a = 17", globs)
self.assertEqual(globs['a'], 17)
def test_files_are_copied(self):
globs = {}
self.safe_exec(
"a = 'Look: ' + open('hello.txt').read()", globs,
files=[os.path.dirname(__file__) + "/hello.txt"]
)
self.assertEqual(globs['a'], 'Look: Hello there.\n')
def test_python_path(self):
globs = {}
self.safe_exec(
"import module; a = module.const", globs,
python_path=[os.path.dirname(__file__) + "/pylib"]
)
self.assertEqual(globs['a'], 42)
def test_functions_calling_each_other(self):
globs = {}
self.safe_exec(textwrap.dedent("""\
def f():
return 1723
def g():
return f()
x = g()
"""), globs)
self.assertEqual(globs['x'], 1723)
def test_printing_stuff_when_you_shouldnt(self):
globs = {}
self.safe_exec("a = 17; print 'hi!'", globs)
self.assertEqual(globs['a'], 17)
def test_importing_lots_of_crap(self):
globs = {}
self.safe_exec(textwrap.dedent("""\
from numpy import *
a = 1723
"""), globs)
self.assertEqual(globs['a'], 1723)
def test_raising_exceptions(self):
globs = {}
with self.assertRaises(safe_exec.SafeExecException) as what_happened:
self.safe_exec(textwrap.dedent("""\
raise ValueError("That's not how you pour soup!")
"""), globs)
msg = str(what_happened.exception)
# The result may be repr'd or not, so the backslash needs to be
# optional in this match.
self.assertRegexpMatches(
msg,
r"ValueError: That\\?'s not how you pour soup!"
)
def test_extra_files(self):
globs = {}
extras = [
("extra.txt", "I'm extra!\n"),
("also.dat", "\x01\xff\x02\xfe"),
]
self.safe_exec(textwrap.dedent("""\
with open("extra.txt") as f:
extra = f.read()
with open("also.dat") as f:
also = f.read().encode("hex")
"""), globs, extra_files=extras)
self.assertEqual(globs['extra'], "I'm extra!\n")
self.assertEqual(globs['also'], "01ff02fe")
def test_extra_files_as_pythonpath_zipfile(self):
zipstring = StringIO()
zipf = zipfile.ZipFile(zipstring, "w")
zipf.writestr("zipped_module1.py", textwrap.dedent("""\
def func1(x):
return 2*x + 3
"""))
zipf.writestr("zipped_module2.py", textwrap.dedent("""\
def func2(s):
return "X" + s + s + "X"
"""))
zipf.close()
globs = {}
extras = [("code.zip", zipstring.getvalue())]
self.safe_exec(textwrap.dedent("""\
import zipped_module1 as zm1
import zipped_module2 as zm2
a = zm1.func1(10)
b = zm2.func2("hello")
"""), globs, python_path=["code.zip"], extra_files=extras)
self.assertEqual(globs['a'], 23)
self.assertEqual(globs['b'], "XhellohelloX")
class TestSafeExec(SafeExecTests, unittest.TestCase):
"""Run SafeExecTests, with the real safe_exec."""
__test__ = True
def safe_exec(self, *args, **kwargs):
safe_exec.safe_exec(*args, **kwargs)
class TestNotSafeExec(SafeExecTests, unittest.TestCase):
"""Run SafeExecTests, with not_safe_exec."""
__te
|
st__ = True
def setUp(self):
# If safe_exec is actually an alias to not_safe_exec, then there's no
# point running
|
these tests.
if safe_exec.UNSAFE: # pragma: no cover
raise SkipTest
def safe_exec(self, *args, **kwargs):
safe_exec.not_safe_exec(*args, **kwargs)
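
# --- Editor's note: illustrative sketch, not part of the original test module ---
# The call pattern exercised by the tests above, in isolation. It assumes a
# codejail installation whose sandbox is already configured (or which falls back
# to not_safe_exec); the globals dict carries json-able values in and results out.
if __name__ == '__main__':
    globs = {'r': 3}
    safe_exec.safe_exec("import math\narea = math.pi * r ** 2", globs)
    print(globs['area'])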
|
GearsAD/semisorted_arnerve
|
arnerve_bot/arnerve_bot/LCMRover.py
|
Python
|
mit
| 2,720
| 0.009926
|
'''
Created on Dec 3, 2014
@author: gearsad
'''
import sys
from roverpylot import rover
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
import lcm
# Try to start OpenCV for video
try:
import cv
except:
cv = None
class LCMRover(rover.Rover):
'''
A rover using LCM for control and camera feed upstream
'''
def Initialize(self, botname):
'''
Init the rover and store the name
'''
self.__botname = botname
self.__lcm = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__controlSubscription = self.__lcm.subscribe("ARNerve_Bot_Control_" + self.__botname, self.UpdateBotControlHandler)
self.__lightsOn = 0
self.__infraredOn = 0
def processVideo(self, jpegbytes):
#try:
camUpdate = bot_update_t()
camUpdate.name = self.__botname
camUpdate.numBytes_cameraFrameJpeg = len(jpegbytes)
camUpdate.cameraFrameJpeg = jpegbytes
# Get the battery health as well
battery = self.getBatteryPercentage()
camUpdate.batteryPercentage = battery
self.__lcm.publish("ARNerve_Bot_Update_" + self.__botname, camUpdate.encode())
#except:
# print "Exception", sys.exc_info()[0]
# pass
def Update(self):
'''
Update the LCM
'''
self.__lcm.handle()
def Disconnect(self):
self.lc.unsubscribe(self.__controlSubscription)
def UpdateBotControlHandler(self, channel, data):
'''
Get the updated bot parameters and send them to the bot.
'''
controlParams = bot_control_command_t.decode(data)
# Check if it is the right bot.
if self.__botname != controlParams.name:
return
self.setTreads(controlParams.botTreadVelLeft, controlParams.botTr
|
eadVelright)
print "Setting the treads to {0}, {1}".format(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
if self.__lightsOn != controlPa
|
rams.isLightsOn:
if controlParams.isLightsOn != 0:
self.turnLightsOn()
else:
self.turnLightsOff()
self.__lightsOn = controlParams.isLightsOn
if self.__infraredOn != controlParams.isInfraredOn:
if controlParams.isInfraredOn != 0:
self.turnInfraredOn()
else:
self.turnInfraredOff()
self.__infraredOn = controlParams.isInfraredOn
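
# --- Editor's note: illustrative sketch, not part of the original module ---
# What the controlling side might publish to drive the rover above. The channel
# name and field names mirror the ones read in UpdateBotControlHandler(), and the
# multicast URL is the one used in Initialize(); "rover01" is a placeholder name.
if __name__ == '__main__':
    controller = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
    command = bot_control_command_t()
    command.name = "rover01"
    command.botTreadVelLeft = 0.5
    command.botTreadVelright = 0.5   # note the lower-case 'r', as used above
    command.isLightsOn = 1
    command.isInfraredOn = 0
    controller.publish("ARNerve_Bot_Control_rover01", command.encode())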
|
tpmccauley/invenio-previewer-ispy
|
tests/__init__.py
|
Python
|
gpl-2.0
| 871
| 0.001148
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Previewer-ISPY
# Copyright (C) 2014 CERN
#
# Invenio-Previewer-ISPY is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Previewer-ISPY is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# a
|
long with Invenio-Previewer-ISPY; if not, write to the Free Softw
|
are Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio-Previewer-ISPY testsuite."""
|
jgeewax/gcloud-python
|
error_reporting/google/cloud/error_reporting/client.py
|
Python
|
apache-2.0
| 10,193
| 0
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Stackdriver Logging API"""
import traceback
import google.cloud.logging.client
import six
class HTTPContext(object):
"""HTTPContext defines an object that captures the parameter for the
httpRequest part of Error Reporting API
:type method: str
:param method: The type of HTTP request, such as GET, POST, etc.
:type url: str
:param url: The URL of the request
:type user_agent: str
:param user_agent: The user agent information that is provided with the
request.
:type referrer: str
:param referrer: The referrer information that is provided with the
request.
:type response_status_code: int
:param response_status_code: The HTTP response status code for the request.
:type remote_ip: str
:param remote_ip: The IP address from which the request originated. This
can be IPv4, IPv6, or a token which is derived from
the IP address, depending on the data that has been
provided in the error report.
"""
def __init__(self, method=None, url=None,
user_agent=None, referrer=None,
response_status_code=None, remote_ip=None):
self.method = method
self.url = url
        # intentionally camelCase, to match what the JSON API expects
# pylint: disable=invalid-name
self.userAgent = user_agent
self.referrer = referrer
self.responseStatusCode = response_status_code
self.remoteIp = remote_ip
class Client(object):
"""Error Reporting client. Currently Error Reporting is done by creating
a Logging client.
:type project: str
:param project: the project which the client acts on behalf of. If not
passed falls back to the default inferred from the
environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
:type service: str
:param service: An identifier of the service, such as the name of the
executable, job, or Google App Engine service name. This
field is expected to have a low number of values that are
relatively stable over time, as opposed to version,
which can be changed whenever new code is deployed.
:type version: str
:param version: Represents the source code version that the developer
provided, which could represent a version label or a Git
SHA-1 hash, for example. If the developer did not provide
a version, the value is set to default.
:raises: :class:`ValueError` if the project is neither passed in nor
set in the environment.
"""
def __init__(self, project=None,
credentials=None,
http=None,
service=None,
version=None):
self.logging_client = google.cloud.logging.client.Client(
project, credentials, http)
self.service = service if service else self.DEFAULT_SERVICE
self.version = version
DEFAULT_SERVICE = 'python'
def _send_error_report(self, message,
report_location=None, http_context=None, user=None):
"""Makes the call to the Error Reporting API via the log stream.
This is the lower-level interface to build the payload, generally
users will use either report() or report_exception() to automatically
gather the parameters for this method.
Currently this method sends the Error Report by formatting a structured
log message according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
:type message: str
:param message: The stack trace that was reported or logged by the
service.
:type report_location: dict
:param report_location: The location in the source code where the
decision was made to report the error, usually the place
where it was logged. For a logged exception this would be the
source line where the exception is logged, usually close to
the place where it was caught.
This should be a Python dict that contains the keys 'filePath',
'lineNumber', and 'functionName'
:type http_context: :class`google.cloud.error_reporting.HTTPContext`
:param http_context: The HTTP request which was processed when the
error was triggered.
:type user: str
:param user: The user who caused or was affected by the crash. This can
be a user ID, an email address, or an arbitrary token that
uniquely identifies the user. When sending an error
report, leave this field empty if the user was not
logged in. In this case the Error Reporting system will
use other data, such as remote IP address,
to distinguish affected users.
"""
payload = {
'serviceContext': {
'service': self.service,
|
},
'message': '{0}'.format(message)
}
if self.version:
paylo
|
ad['serviceContext']['version'] = self.version
if report_location or http_context or user:
payload['context'] = {}
if report_location:
payload['context']['reportLocation'] = report_location
if http_context:
http_context_dict = http_context.__dict__
# strip out None values
payload['context']['httpContext'] = {
key: value for key, value in six.iteritems(http_context_dict)
if value is not None
}
if user:
payload['context']['user'] = user
logger = self.logging_client.logger('errors')
logger.log_struct(payload)
def report(self, message, http_context=None, user=None):
""" Reports a message to Stackdriver Error Reporting
https://cloud.google.com/error-reporting/docs/formatting-error-messages
:type message: str
:param message: A user-supplied message to report
:type http_context: :class`google.cloud.error_reporting.HTTPContext`
:param http_context: The HTTP request which was processed when the
error was triggered.
:type user: str
:param user: The user who caused or was affected by the crash. This
can be a user ID, an email address, or an arbitrary
token that uniquely identifies the user. When sending
an error report, leave this field empty if the user
was not logged in. In this case the Error Reporting
system will use other dat
|
yonglong009/pycharmNoteBook
|
lean_python3/dive_into_p3/tanslate.py
|
Python
|
gpl-3.0
| 1,160
| 0.025862
|
#!/usr/bin/env python3
import urllib
|
import codecs
from bs4 import BeautifulSoup
from sys import argv
import re,time
class Translate:
def start(self):
self._get_html_sourse()
s
|
elf._get_content("enc")
self._remove_tag()
self.print_result()
def _get_html_sourse(self):
word=argv[1] if len(argv)>1 else ''
url="http://dict.baidu.com/s?wd=%s&tn=dict" % word
self.htmlsourse=urllib.unicode(urllib.urlopen(url).read(),"gb2312","ignore").encode("utf-8","ignore")
def _get_content(self,div_id):
soup=BeautifulSoup("".join(self.htmlsourse), "lxml")
self.data=str(soup.find("div",{"id":div_id}))
def _remove_tag(self):
soup=BeautifulSoup(self.data, "lxml")
self.outtext=''.join([element for element in soup.recursiveChildGenerator() if isinstance(element,unicode)])
def print_result(self):
for item in range(1,10):
self.outtext=self.outtext.replace(str(item),"\n%s" % str(item))
self.outtext=self.outtext.replace(" ","\n")
print(self.outtext)
# from outofmemory.cn
if __name__ == "__main__":
    Translate().start()
|
rajalokan/keystone
|
keystone/tests/unit/test_cli.py
|
Python
|
apache-2.0
| 57,490
| 0.00007
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import uuid
import fixtures
import mock
import oslo_config.fixture
from oslo_db.sqlalchemy import migration
from oslo_log import log
from six.moves import configparser
from six.moves import range
from testtools import matchers
from keystone.auth import controllers
from keystone.cmd import cli
from keystone.cmd.doctor import caching
from keystone.cmd.doctor import credential
from keystone.cmd.doctor import database as doc_database
from keystone.cmd.doctor import debug
from keystone.cmd.doctor import federation
from keystone.cmd.doctor import ldap
from keystone.cmd.doctor import security_compliance
from keystone.cmd.doctor import tokens
from keystone.cmd.doctor import tokens_fernet
from keystone.common import dependency
from keystone.common.sql import upgrades
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping as identity_mapping
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit.ksfixtures import ldapdb
CONF = keystone.conf.CONF
class CliTestCase(unit.SQLDriverOverrides, unit.TestCase):
def config_files(self):
config_files = super(CliTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def test_token_flush(self):
self.useFixture(database.Database())
self.load_backends()
cli.TokenFlush.main()
class CliNoConfigTestCase(unit.BaseTestCase):
def setUp(self):
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
self.useFixture(fixtures.MockPatch(
'oslo_config.cfg.find_config_files', return_value=[]))
super(CliNoConfigTestCase, self).setUp()
# NOTE(crinkle): the command call doesn't have to actually work,
# that's what the other unit tests are for. So just mock it out.
class FakeConfCommand(object):
def __init__(self):
self.cmd_class = mock.Mock()
self.useFixture(fixtures.MockPatchObject(
CONF, 'command', FakeConfCommand()))
self.logging = self.useFixture(fixtures.FakeLogger(level=log.WARN))
def test_cli(self):
expected_msg = 'Config file not found, using default configs.'
cli.main(argv=['keystone-manage', 'db_sync'])
self.assertThat(self.logging.output, matchers.Contains(expected_msg))
class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
self.useFixture(database.Database())
super(CliBootStrapTestCase, self).setUp()
def config_files(self):
self.config_fixture.register_cli_opt(cli.command_opt)
config_files = super(CliBootStrapTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def config(self, config_files):
CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex],
project='keystone',
default_config_files=config_files)
def test_bootstrap(self):
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
def _do_test_bootstrap(self, bootstrap):
bootstrap.do_bootstrap()
project = bootstrap.resource_manager.get_project_by_name(
bootstrap.project_name,
'default')
user = bootstrap.identity_manager.get_user_by_name(
bootstrap.username,
'default')
role = bootstrap.role_manager.get_role(bootstrap.role_id)
role_list = (
bootstrap.assignment_manager.get_roles_for_user_and_project(
user['id'],
project['id']))
self.assertIs(1, len(role_list))
self.assertEqual(role_list[0], role['id'])
# NOTE(morganfainberg): Pass an empty context, it isn't used by
# `authenticate` method.
bootstrap.identity_manager.authenticate(
self.make_request(),
user['id'],
bootstrap.password)
if bootstrap.region_id:
region = bootstrap.catalog_manager.get_region(bootstrap.region_id)
self.assertEqual(self.region_id, region['id'])
if bootstrap.service_id:
svc = bootstrap.catalog_manager.get_service(bootstrap.service_id)
self.assertEqual(self.service_name, svc['name'])
self.assertEqual(set(['admin', 'public', 'internal']),
set(bootstrap.endpoints))
urls = {'public': self.public_url,
'internal': self.internal_url,
'admin': self.admin_url}
for interface, url in urls.items():
endpoint_id = bootstrap.endpoints[interface]
endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id)
self.assertEqual(self.region_id, endpoint['region_id'])
self.assertEqual(url, endpoint['url'])
self.assertEqual(svc['id'], endpoint['service_id'])
self.assertEqual(interface, endpoint['interface'])
def test_bootstrap_is_idempotent_when_password_does_not_change(self):
# NOTE(morganfainberg): Ensure we can run bootstrap with the same
# configuration multiple times without erroring.
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
v3_token_controller = controllers.Auth()
v3_password_data = {
'identity': {
|
"methods": ["password"],
"password": {
"user": {
"name": bootstrap.username,
"password": bootstrap.password,
"domain": {
"id": CONF.identity.default_domain_id
|
}
}
}
}
}
auth_response = v3_token_controller.authenticate_for_token(
self.make_request(), v3_password_data)
token = auth_response.headers['X-Subject-Token']
self._do_test_bootstrap(bootstrap)
# build validation request
request = self.make_request(is_admin=True)
request.context_dict['subject_token_id'] = token
# Make sure the token we authenticate for is still valid.
v3_token_controller.validate_token(request)
def test_bootstrap_is_not_idempotent_when_password_does_change(self):
# NOTE(lbragstad): Ensure bootstrap isn't idempotent when run with
# different arguments or configuration values.
bootstrap = cli.BootStrap()
self._do_test_bootstrap(bootstrap)
v3_token_controller = controllers.Auth()
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"name": bootstrap.username,
"password": bootstrap.password,
"domain": {
"id": CONF.identity.default_domain_id
}
}
}
}
}
auth_response = v3_token_controller.authenticate_for_token(
self.make_request(), v3_password_data)
token = auth_response.headers['X-Subject-Token']
os.environ['OS_BOOTSTRAP_PASSWORD'] = uuid.uuid4().hex
self._do_test_bootstra
|
Tocknicsu/nctuoj_contest
|
backend/handler/index.py
|
Python
|
apache-2.0
| 195
| 0.010256
|
import tornado
im
|
port tornado.gen
from req import Service
from req import ApiRequestHandler
class Index(ApiRequestHandler):
@tornado.gen.coroutine
def get(self):
self.rend
|
er()
|
forScie/FrankenCipher
|
franken.py
|
Python
|
gpl-3.0
| 13,770
| 0.030719
|
#!/usr/bin/python
# FRANKEN CIPHER
# WRITTEN FOR ACADEMIC PURPOSES
#
# AUTHORED BY: Dan C and james@forscience.xyz
#
# THIS SCRIPT IS WRITTEN TO DEMONSTRATE A UNIQUE ENCRYPTION ALGORITHM THAT IS INSPIRED BY A NUMBER
# OF EXISTING ALGORITHMS.
# THE SCRIPT IS WRITTEN ENTIRELY FOR ACADEMIC PURPOSES. NO WARRANTY OR GUARANTEES ARE
# OFFERED BY THE AUTHORS IN RELATION TO THE USE OF THIS SCRIPT.
#
# Usage: franken.py <"-v" (verbose)> <"-d" (decrypt)> <"-k" (key phrase)> <"-m" (string to encrypt/decrypt)>
#
# indentation: TABS!
import sys
import getopt
import collections
import binascii
import hashlib
import itertools
# GLOBALS
# define -v and -d as false (-d defaults to encrypt mode)
verbose_opt = False
decrypt_opt = False
key_phrase = '' # clear text key phrase
key_hashed = '' # hashed key phrase
clear_text = '' # starting message input
pigpen_message = '' # message after pigpen stage
encrypted_message = '' # the encrypted message
decrypted_message = '' # the decrypted message
# GLOBALS
# pigpen dictionaries
pigpen_A = {'A':'ETL', 'B':'ETM', 'C':'ETR', 'D':'EML', 'E':'EMM', 'F':'EMR', 'G':'EBL', 'H':'EBM', 'I':'EBR', 'J':'DTL',
'K':'DTM', 'L':'DTR', 'M':'DML', 'N':'DMM', 'O':'DMR', 'P':'DBL', 'Q':'DBM', 'R':'DBR', 'S':'EXT', 'T':'EXL', 'U':'EXR',
'V':'EXB', 'W':'DXT', 'X':'DXL', 'Y':'DXR', 'Z':'DXB', ' ':'EPS', '.':'EPF', ',':'EPC', '!':'EPE', '?':'EPQ', '"':'EPD',
'@':'EPA','0':'NTL', '1':'NTM', '2':'NTR', '3':'NML', '4':'NMM', '5':'NMR', '6':'NBL', '7':'NBM', '8':'NBR','9':'NXT'}
pigpen_B = {'C':'ETL', 'D':'ETM', 'A':'ETR', 'B':'EML', 'G':'EMM', 'H':'EMR', 'E':'EBL', 'F':'EBM', 'K':'EBR', 'L':'DTL',
'I':'DTM', 'J':'DTR', 'O':'DML', 'P':'DMM', 'M':'DMR', 'N':'DBL', 'S':'DBM', 'T':'DBR', 'Q':'EXT', 'R':'EXL', 'W':'EXR',
'X':'EXB', 'U':'DXT', 'V':'DXL', ' ':'DXR', ',':'DXB', 'Y':'EPS', '!':'EPF', 'Z':'EPC', '.':'EPE', '@':'EPQ', '0':'EPD',
'?':'EPA','"':'NTL', '3':'NTM', '4':'NTR', '1':'NML', '2':'NMM', '7':'NMR', '8':'NBL', '9':'NBM', '5':'NBR', '6':'NXT'}
pigpen_C = {'K':'ETL', 'L':'ETM', 'M':'ETR', 'N':'EML', 'O':'EMM', 'P':'EMR', 'Q':'EBL', 'R':'EBM', 'S':'EBR', 'U':'DTL',
'V':'DTM', 'W':'DTR', 'X':'DML', 'Y':'DMM', 'Z':'DMR', ' ':'DBL', '.':'DBM', ',':'DBR', '!':'EXT', '"':'EXL', '?':'EXR',
'@':'EXB', '0':'DXT', '1':'DXL', '2':'DXR', '3':'DXB', '4':'EPS', '5':'EPF', '6':'EPC', '7':'EPE', '8':'EPQ', '9':'EPD',
'A':'EPA','B':'NTL', 'C':'NTM', 'D':'NTR', 'E':'NML', 'F':'NMM', 'G':'NMR', 'H':'NBL', 'I':'NBM', 'J':'NBR','T':'NXT'}
# creates hashes of the key phrase inputted by the user
# in order for it to be used as a key
# the clear text key phrase string is retained
def keyGenerate():
global key_hashed
# create the hashes of the key phrase string
md5_hash = hashlib.md5(key_phrase.encode())
sha256_hash = hashlib.sha256(key_phrase.encode())
sha512_hash = hashlib.sha512(key_phrase.encode())
# concatenate the hash digests into one key
key_hashed = md5_hash.hexdigest() + sha256_hash.hexdigest() + sha512_hash.hexdigest()
# hash the entire key (so far) one more time and concatenate to make 1024bit key
key_hashed_hash = hashlib.md5(key_hashed.encode())
key_hashed += key_hashed_hash.hexdigest()
	# verbose mode if verbose option is set
if verbose_opt:
print("[KEY GENERATION]: The key phrase is: \"" + key_phrase + "\"")
print("[KEY GENERATION]: \"" + key_phrase + "\" is independantly hashed 3 times using MD5, SHA256 and SHA512")
print("[KEY GENERATION]: The 3 hashes are concatenated with 1 more md5 hash, resulting in the 1024bit key:")
print("[KEY GENERATION]: \"" + key_hashed + "\"\n")
return
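# Worked size check (illustrative note, not part of the original script): the
# hexdigest lengths are md5 = 32, sha256 = 64 and sha512 = 128 hex characters,
# plus the final md5 of 32 characters, so len(key_hashed) == 32+64+128+32 == 256
# hex characters, i.e. 256 * 4 = 1024 bits -- matching the "1024bit key" above.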
# selects the appropriate pigpen dictionary based on summing all of the ascii
# values in the key phrase and modulating the sum of the integers by 3 in order to retrieve
# one of 3 values. Returns the appropriate dictionary
def selectDict():
# sum ASCII value of each character in the clear text key phrase
ascii_total = 0
for x in key_phrase:
ascii_total += ord(x)
# modulo 3 ascii_total to find 0-3 result to select pigpen dict
if ascii_total % 3 == 0:
pigpen_dict = pigpen_A
elif ascii_total % 3 == 1:
pigpen_dict = pigpen_B
elif ascii_total % 3 == 2:
pigpen_dict = pigpen_C
# return the dictionary
return pigpen_dict
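# Illustrative example (assumed key phrase, not from the original script): for
# the key phrase "abc" the ASCII sum is 97 + 98 + 99 = 294, and 294 % 3 == 0,
# so selectDict() would return pigpen_A.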
# convert message into pigpen alphabet. compare each letter to dict key.
# first makes all chars uppercase and ignores some punctuation.
# iterates through pigpen dict to find value based on clear message char as key
def pigpenForward():
global pigpen_message
# convert clear message to uppercase
message = clear_text.upper()
	# iterate through dict looking for chars
for letter in message:
if letter in selectDict():
pigpen_message += selectDict().get(letter)
# verbose mode if verbose option is set
if verbose_opt:
print("[ENCRYPTION - Phase 1]: The clear text is:")
print("[ENCRYPTION - Phase 1]: \"" + clear_text + "\"")
print("[ENCRYPTION - Phase 1]: 1 of 3 dictionaries is derived from the sum of the pre-hashed key ASCII values (mod 3)")
print("[ENCRYPTION - Phase 1]: The clear text is converted into pigpen cipher text using the selected dictionary:")
print("[ENCRYPTION - Phase 1]: \"" + pigpen_message + "\"\n")
return
# reverses the pigpen pr
|
ocess. takes a pigp
|
en string and converts it back to clear text
# first creates a list of each 3 values from the inputted string (each element has 3 chars)
# then compares those elements to the pigpen dictionary to create the decrypted string
def pigpenBackward():
global decrypted_message
# convert encrypted message (int array) back to a single ascii string
message = ''
try:
for i in decrypted_message:
message += chr(i)
except:
print("[ERROR]: Incorrect key. Cannot decrypt.")
usageText()
# retrieve each 3 chars (one pigpen value) and form a list
message_list = [message[i:i+3] for i in range(0, len(message), 3)]
# zero out decrypted message string in order to store pigpen deciphered characters
decrypted_message = ''
	# iterate through list elements and compare against pigpen dict
# to find correct key (clear text letter) and create decrypted string
for element in message_list:
for key, value in selectDict().iteritems():
if value == element:
decrypted_message += key
# verbose mode if verbose option is set
if verbose_opt:
print("[DECRYPTION - Phase 3]: 1 of 3 dictionaries is derived from the sum of the pre-hashed key ASCII values (mod 3)")
print("[DECRYPTION - Phase 3]: The values of the pigpen cipher text are looked up in the selected dictionary")
print("[DECRYPTION - Phase 3]: The pigpen cipher text is converted back into clear text:\n")
print("[DECRYPTION - COMPLETE]: \"" + decrypted_message + "\"\n")
return
# XORs an int value derived from the hashed key to each ascii int value of the message.
# The key value is looked up by using the value stored in that key array position to reference
# the array position that value points to. That value is then XOR'ed with the corresponding value of the message
# this occurs three times. Inspired by DES key sub key generation and RC4
def keyConfusion(message):
# create array of base10 ints from ascii values of chars in hashed key
key = []
for x in key_hashed:
key.append(ord(x))
# create a variable for cycling through the key array (in case the message is longer than key)
key_cycle = itertools.cycle(key)
# loop through the key and XOR the resultant value with the corresponding value in the message
for i in range(len(message)):
# find the value pointed to by the value of each element of the key (for each value in the message array)
key_pointer = key_cycle.next() % 128 # get the next key byte. mod 128 because 128 bytes in 1024bits
key_byte = key[key_pointer]
# XOR message byte with current key_byte
message[i] = message[i] ^ key_byte
# XOR message byte with the key byte pointed to by previous key byte value
key_byte = key[(key_byte % 128)]
message[i] = message[i] ^ key_byte
# once again XOR message byte with the next key byte pointed to by previous key byte value
key_byte = key[(key_byte % 128)]
message[i] = message[i] ^ key_byte
# verbose mode if verbose option is set
if verbose_opt:
|
rolapp/plugin.video.zattooboxExt.beta
|
resources/zattooDB.py
|
Python
|
bsd-2-clause
| 26,019
| 0.016529
|
# coding=utf-8
#
# copyright (C) 2017 Steffen Rolapp (github@rolapp.de)
#
# based on ZattooBoxExtended by Daniel Griner (griner.ch@gmail.com) license under GPL
#
# This file is part of ZattooHiQ
#
# zattooHiQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foun
|
dation, either version 3 of the Licens
|
e, or
# (at your option) any later version.
#
# zattooHiQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with zattooHiQ. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc, xbmcgui, xbmcaddon, os, xbmcplugin, datetime, time
import json
from zapisession import ZapiSession
__addon__ = xbmcaddon.Addon()
_listMode_ = __addon__.getSetting('channellist')
_channelList_=[]
localString = __addon__.getLocalizedString
local = xbmc.getLocalizedString
_umlaut_ = {ord(u'ä'): u'ae', ord(u'ö'): u'oe', ord(u'ü'): u'ue', ord(u'ß'): u'ss'}
REMOTE_DBG = False
# append pydev remote debugger
if REMOTE_DBG:
	# Make pydev debugger work for auto reload.
# Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
try:
import pysrc.pydevd as pydevd # with the addon script.module.pydevd, only use `import pydevd`
# stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
#pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True, suspend=False)
pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
except ImportError:
sys.stderr.write("Error: You must add org.python.pydev.debug.pysrc to your PYTHONPATH.")
sys.exit(1)
class ZattooDB(object):
def __init__(self):
self.conn = None
profilePath = xbmc.translatePath(__addon__.getAddonInfo('profile'))
if not os.path.exists(profilePath): os.makedirs(profilePath)
self.databasePath = os.path.join(profilePath, "zattoo.db")
self.connectSQL()
self.zapi=self.zapiSession()
def zapiSession(self):
zapiSession = ZapiSession(xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8'))
if zapiSession.init_session(__addon__.getSetting('username'), __addon__.getSetting('password')):
return zapiSession
else:
# show home window, zattooHiQ settings and quit
xbmc.executebuiltin('ActivateWindow(10000)')
xbmcgui.Dialog().ok(__addon__.getAddonInfo('name'), __addon__.getLocalizedString(31902))
__addon__.openSettings()
zapiSession.renew_session()
import sys
sys.exit()
@staticmethod
def adapt_datetime(ts):
# http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
@staticmethod
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
def connectSQL(self):
import sqlite3
sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
sqlite3.register_converter('timestamp', self.convert_datetime)
self.conn = sqlite3.connect(self.databasePath, detect_types=sqlite3.PARSE_DECLTYPES)
self.conn.execute('PRAGMA foreign_keys = ON')
self.conn.row_factory = sqlite3.Row
# check if DB exists
c = self.conn.cursor()
try: c.execute('SELECT * FROM showinfos')
except: self._createTables()
def _createTables(self):
import sqlite3
c = self.conn.cursor()
try: c.execute('DROP TABLE channels')
except: pass
try:
c.execute('DROP TABLE programs')
print "DROP PROGRAM TABlE"
except: pass
try: c.execute('DROP TABLE updates')
except: pass
try: c.execute('DROP TABLE playing')
except: pass
try: c.execute('DROP TABLE showinfos')
except: pass
self.conn.commit()
try:
c.execute('CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, weight INTEGER, favourite BOOLEAN, PRIMARY KEY (id) )')
c.execute('CREATE TABLE programs(showID TEXT, title TEXT, channel TEXT, start_date TIMESTAMP, end_date TIMESTAMP, restart BOOLEAN, series BOOLEAN, description TEXT, description_long TEXT, year TEXT, country TEXT, genre TEXT, category TEXT, image_small TEXT, image_large TEXT, updates_id INTEGER, FOREIGN KEY(channel) REFERENCES channels(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
c.execute('CREATE TABLE updates(id INTEGER, date TIMESTAMP, type TEXT, PRIMARY KEY (id) )')
#c.execute('CREATE TABLE playing(channel TEXT, start_date TIMESTAMP, action_time TIMESTAMP, current_stream INTEGER, streams TEXT, PRIMARY KEY (channel))')
c.execute('CREATE TABLE showinfos(showID INTEGER, info TEXT, PRIMARY KEY (showID))')
c.execute('CREATE TABLE playing(channel TEXT, current_stream INTEGER, streams TEXT, PRIMARY KEY (channel))')
c.execute('CREATE TABLE version(version TEXT, PRIMARY KEY (version))')
c.execute('CREATE INDEX program_list_idx ON programs(channel, start_date, end_date)')
c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
self.conn.commit()
c.close()
except sqlite3.OperationalError, ex:
pass
def updateChannels(self, rebuild=False):
c = self.conn.cursor()
if rebuild == False:
date = datetime.date.today().strftime('%Y-%m-%d')
c.execute('SELECT * FROM updates WHERE date=? AND type=? ', [date, 'channels'])
if len(c.fetchall())>0:
c.close()
return
# always clear db on update
c.execute('DELETE FROM channels')
print "account "+ self.zapi.AccountData['account']['power_guide_hash']
api = '/zapi/v2/cached/channels/' + self.zapi.AccountData['account']['power_guide_hash'] + '?details=False'
channelsData = self.zapi.exec_zapiCall(api, None)
api = '/zapi/channels/favorites'
favoritesData = self.zapi.exec_zapiCall(api, None)
nr = 0
for group in channelsData['channel_groups']:
for channel in group['channels']:
logo = 'http://logos.zattic.com' + channel['qualities'][0]['logo_black_84'].replace('/images/channels', '')
try:
favouritePos = favoritesData['favorites'].index(channel['id'])
weight = favouritePos
favourite = True
except:
weight = 1000 + nr
favourite = False
c.execute('INSERT OR IGNORE INTO channels(id, title, logo, weight, favourite) VALUES(?, ?, ?, ?, ?)',
[channel['id'], channel['title'], logo, weight, favourite])
if not c.rowcount:
c.execute('UPDATE channels SET title=?, logo=?, weight=?, favourite=? WHERE id=?',
[channel['title'], logo, weight, favourite, channel['id']])
nr += 1
if nr>0: c.execute('INSERT INTO updates(date, type) VALUES(?, ?)', [datetime.date.today(), 'channels'])
self.conn.commit()
c.close()
return
def updateProgram(self, date=None, rebuild=False):
if date is None: date = datetime.date.today()
else: date = date.date()
c = self.conn.cursor()
if rebuild:
c.execute('DELETE FROM programs')
self.conn.commit()
# get whole day
fromTime = int(time.mktime(date.timetuple())) # UTC time for zattoo
toTime = fromTime + 86400 # is 24h maximum zattoo is sending?
#get program from DB and return if it's not empty
# if self._isDBupToDate(date, 'programs'):return
c.execute('SELECT * FROM programs WHERE start_date > ? AND end_date < ?', (fromTime+18000, fromTime+25200,)) #get shows between 05:00 and 07:00
count=c.fetchall()
if len(count)>0:
c.close()
|
muk-it/muk_dms
|
muk_dms_access/models/access_groups.py
|
Python
|
lgpl-3.0
| 1,927
| 0.008303
|
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Access
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with
|
this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import models, fields, api
class AccessGroups(models.Model):
_inherit = 'muk_security.access_groups'
#----------------------------------------------------------
# Database
#----------------------------------------------------------
directories = fields.Many2many(
comodel_name='muk_dms.directory',
relation='muk_dms_directory_groups_rel',
string="Directories",
column1='gid',
column2='aid',
readonly=True)
count_directories = fields.Integer(
compute='_compute_count_directories',
string="Count Directories")
#----------------------------------------------------------
# Read, View
#----------------------------------------------------------
@api.depends('directories')
def _compute_count_directories(self):
for record in self:
record.count_directories = len(record.directories)
|
jorgecasals/VoiceTrainingTool
|
Repositories/Cache.py
|
Python
|
gpl-3.0
| 420
| 0.004762
|
class RepositoryCache:
def __init__(self):
self.data_dict = {}
def add_data(self, keys, data):
        self.data_dict[keys] = data
def clear(self):
self.data_dict = {}
def is_data_cac
|
hed(self, keys):
result = self.data_dict.has_key(keys)
return result
def get_data(self, keys):
result = self.data_dict[keys]
return result
Cache
|
= RepositoryCache()
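# Minimal usage sketch (illustrative only; assumes the corrected add_data above
# and a Python 2 runtime, since is_data_cached relies on dict.has_key):
#   Cache.add_data(("speaker", "session1"), [0.1, 0.2])
#   if Cache.is_data_cached(("speaker", "session1")):
#       samples = Cache.get_data(("speaker", "session1"))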
|
jeffhsu3/genda
|
tests/exon_utils_tests.py
|
Python
|
bsd-3-clause
| 1,802
| 0.021643
|
"""Testing for overlap intervals
"""
import unittest
from genda.transcripts.exon_utils import calcOverlap, collideIntervals, \
collapseIntervals
class TestOverlapFunctions(unittest.TestCase):
def setUp(self):
# Simple Overlap
self.simple = [(1,10), (6,15)]
# One interval enclosed in another
self.enclosed = [(100,200), (110,150)]
# Partial overlap
self.partial = [(150,300), (160,300), (170,330)]
# No overlap
self.no = [(150,300), (10,30)]
# Equal
self.equal = [(1,15), (1,5)]
#Complex interval list
self.full = [(7,20), (1,5), (8,11), (18,50), (100,150)]
def test_bpOve
|
rlap(self):
# Make sure overlaps are calculated correctly
self.assertEqual(calcOverlap(self.simple), 4)
self.assertEqual(calcOverlap(self.enclosed), 40)
self.assertEqual(calcOverlap(self.partial),400)
def test_collideIntervals(self):
self.assertEqual(collideIntervals(self.simpl
|
e[0], self.simple[1]),
[(1,15)])
self.assertEqual(collideIntervals(self.enclosed[0], self.enclosed[1]),
[(100,200)])
self.assertEqual(collideIntervals(self.no[0], self.no[1]),self.no)
def test_collapseIntervals(self):
self.assertEqual(collapseIntervals(self.simple), [(1,15)])
print(self.partial)
self.assertEqual(collapseIntervals(self.partial), [(150,330)])
print(self.full)
self.assertEqual(collapseIntervals(self.full), [(1,5),(7,50),(100,150)])
def test_unique_bp(self):
self.assertEqual(sum(map(lambda x \
:x[1]-x[0],collapseIntervals(self.partial))) -
calcOverlap(self.partial),330-150)
if __name__ == '__main__':
unittest.main()
|
WeirdCoder/LilyPadOS
|
04Dan/RandomStuff/listener.py
|
Python
|
mit
| 403
| 0.012407
|
import lcm
from lilylcm import 03Citrus
def my_handler(channel, data):
msg = 03Citrus.decode(data)
print("Received message on channel /"%s/"" % channel)
print(" value = %s" % str(msg.value))
print("")
lc = lcm.LCM()
subscriptio
|
n = lc.subscribe("03Citrus", my_handler)
try:
while True:
lc.handle()
except Key
|
boardInterrupt:
pass
lc.unsubscribe(subscription)
|
markuz/scripts
|
copyemail.py
|
Python
|
gpl-2.0
| 4,573
| 0.014433
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#
# This file is part of my scripts project
#
# Copyright (c) 2013 Marco Antonio Islas Cruz
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @author Marco Antonio Islas Cruz <markuz@islascruz.org>
# @copyright 2011 Marco Antonio Islas Cruz
# @license http://www.gnu.org/licenses/gpl.txt
import sys
import imaplib
import email
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-e", "--email", dest="email", action="store",
type="string",
help=("Username for the IMAP login. "
"This will be used on both servers if --new-email is "
"not defined"
))
parser.add_option("-n", "--new-email", dest="newemail", action="store",
type="string",
help="Username to connect to the new host")
parser.add_option("","--old-host", dest="oldhost",action="store",
type="string",
help="Old host. must be HOST:PORT")
parser.add_option("","--new-host", dest="newhost",action="store",
type="string",
help="New host, must be HOST:PORT")
parser.add_option("","--old-password", dest="oldpassword",action="store",
type="string",
help="old password")
parser.add_option("","--new-password", dest="newpassword",action="store",
type="string",
help="New password")
parser.add_option("","--prefix", dest="prefix",action="store",
type="string",
help="mailbox prefix")
options, args = parser.parse_args()
if not options.newemail:
options.newemail = options.email
OLDHOST= options.oldhost.split(":")[0]
OLDPORT= int(options.oldhost.split(":")[1])
NEWHOST=options.newhost.split(":")[0]
NEWPORT=int(options.newhost.split(":")[1])
def move_folder_messages(d, oldhost, newhost):
print "Entrando al directorio ", d
typ, dat = oldhost.select(d)
if typ != 'OK':
print "Cannot select %r"%d
    #Select the directory on the new host.
typ, dat = newhost.select(d)
a,b = newhost.list()
print typ, dat, d,a,b
if typ != "OK":
print "Can't select folder: '%r'"%d
raise ValueError
typ, data = oldhost.search(None, "ALL")
for c, num in enumerate(data[0].split()):
typ, data = oldhost.fetch(num, "(RFC822)")
text = data[0][1]
msg = email.message_from_string(text)
subject = msg["Subject"]
message_id = msg["Message-ID"]
########try:
######## searchpattern = '(HEADER Message-ID "%s")'%message_id
######## result, data = newhost.uid('search',None,
######## searchpattern)
########except Exception, e:
######## print "No data: %s"%e
######## data = None
########print result, data
########if data and data[0]:
######## print ("Omitiendo el mensaje %s, ya se encuentra en el mailbox"
######## " destino (%r)" )%(subject, d)
######## continue
print "moviendo
|
el mensaje %s/%s"%(d,subject)
newhost.append(d, None, None, msg.as_string())
#Conectar al
|
host anterior
print "Connecting to %s:%d"%(OLDHOST, OLDPORT)
oldhost = imaplib.IMAP4(OLDHOST, OLDPORT)
print "Auth: %s,%s"%(options.email, options.oldpassword)
oldhost.login(options.email, options.oldpassword)
#Connect to the new host
newhost = imaplib.IMAP4(NEWHOST, NEWPORT)
newhost.login(options.newemail, options.newpassword)
#Get the list of directories
result, dirs = oldhost.list()
print "Directorios encontrados"
dirs = map(lambda x: x.rsplit('"."', 1)[1].strip(), dirs)
newhdirs = map(lambda x: x.rsplit('"."', 1)[1].strip(), newhost.list()[1])
for directorio in dirs:
if directorio not in newhdirs:
print newhost.create(directorio)
for directorio in dirs:
move_folder_messages(directorio, oldhost, newhost)
####try:
#### move_folder_messages(directorio, oldhost, newhost)
####except Exception, e:
#### print "Error, mailbox: %s, error %r"%(directorio, e)
|
OCA/purchase-workflow
|
purchase_propagate_qty/tests/__init__.py
|
Python
|
agpl-3.0
| 107
| 0
|
# License AGPL-3
|
.0 or later (http://www.gnu.org/licenses/agpl.html).
|
from . import test_purchase_delivery
|
jojanper/draalcore
|
draalcore/models/admin.py
|
Python
|
mit
| 302
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Register models to admin view."""
# System
|
imports
from django.contrib import admin
from django.contrib.admin.models import LogEntry
# Project imports
from draalcore.models.admin_log import LogEnt
|
ryAdmin
admin.site.register(LogEntry, LogEntryAdmin)
|
ParalelniPolis/polis-heroku
|
paralelnipolis/settings.py
|
Python
|
lgpl-3.0
| 3,240
| 0.000309
|
"""
Django settings for paralelnipolis project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG =
|
True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'south',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttype
|
s',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paralelnipolis.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paralelnipolis.wsgi.application'
if os.environ.get('PRODUCTION'):
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY')
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {}
DATABASES['default'] = dj_database_url.config()
# email settings
EMAIL_HOST = 'smtp.mandrillapp.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = '???@gmail.com'
EMAIL_HOST_PASSWORD = '???'
else:
DEBUG = True
SECRET_KEY = 'asdkfjh2i57yaw34gc6R*&@#*&Uaweyvfhaghjuy'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'paralelnipolis',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
TEMPLATE_DEBUG = DEBUG
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
luisza/academica
|
matricula/contrib/bills/signals.py
|
Python
|
gpl-3.0
| 2,438
| 0.002871
|
# -*- coding: UTF-8 -*-
from django.db.models.signals import post_save
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.dispatch import receiver
from matricula.models import Enroll
from .models import Bill
from django.utils.translation import ugettext_lazy as _
from paypal.standard.ipn.signals import valid_ipn_received
from paypal.standard.models import ST_PP_COMPLETED
from datetime import datetime
from djan
|
go.utils.encoding import smart_text
from django.conf import settings
@receiver(post_save, sender=
|
Enroll)
def create_bill(sender, **kwargs):
instance = kwargs['instance']
if not instance.bill_created and instance.enroll_finished\
and instance.group.cost > 0:
instance.bill_created = True
Bill.objects.create(short_description=_("Enroll in %s") % (instance.group),
description=render_to_string('invoice.html',
{ 'student': instance.student,
'enroll': smart_text(instance.group),
'date': instance.enroll_date.strftime("%Y-%m-%d %H:%M"),
'group': instance.group,
}
),
amount=instance.group.cost,
student=instance.student,
currency=instance.group.currency,
)
instance.save()
def paypal_bill_paid(sender, **kwargs):
ipn_obj = sender
if ipn_obj.payment_status == ST_PP_COMPLETED:
try:
bill = Bill.objects.get(pk=ipn_obj.invoice)
bill.is_paid = True
bill.paid_date = datetime.now()
bill.transaction_id = ipn_obj.txn_id
bill.save()
ok = True
except Exception as e:
ok = False
# FIXME do something here
if ok:
invoice = render_to_string('email_invoice.html', {'bill': bill})
send_mail(_("Academica Invoice paid"),
_("Go to Academica"),
settings.DEFAULT_FROM_EMAIL,
[bill.student.email],
html_message=invoice,
fail_silently=False
)
valid_ipn_received.connect(paypal_bill_paid)
|
kalyan02/dayone
|
do/private_config-sample.py
|
Python
|
gpl-2.0
| 296
| 0.023649
|
# rename this file to private_config.py
# dropbox api creden
|
tials
DROPBOX_APP_ID=''
DROPBOX_API_SECRET=''
# django app secret for salting and
|
hashing cookies
SECRET_KEY = ''
# automatic admin configuration
AUTO_ADMINS = (
('admin_username','admin_password','admin_password@yoursite.com'),
)
|
hamonikr-root/system-config-printer-gnome
|
cupshelpers/cupshelpers.py
|
Python
|
gpl-2.0
| 29,800
| 0.007282
|
## system-config-printer
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
## Authors:
## Florian Festi <ffesti@redhat.com>
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups, pprint, os, tempfile, re, string
import locale
from . import _debugprint
from . import config
class Printer:
_flags_blacklist = ["options", "local"]
def __init__(self, name, connection, **kw):
"""
@param name: printer name
@type name: string
@param connection: CUPS connection
@type connection: CUPS.Connection object
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.name = name
self.connection = connection
self.class_members = []
have_kw = len (kw) > 0
fetch_attrs = True
if have_kw:
self.update (**kw)
if self.is_class:
fetch_attrs = True
else:
fetch_attrs = False
if fetch_attrs:
self.getAttributes ()
self._ppd = None # load on demand
def __del__ (self):
if self._ppd != None:
os.unlink(self._ppd)
def __repr__ (self):
return "<cupshelpers.Printer \"%s\">" % self.name
def _expand_flags(self):
def _ascii_lower(str):
return str.translate(string.maketrans(string.ascii_uppercase,
string.ascii_lowercase));
prefix = "CUPS_PRINTER_"
prefix_length = len(prefix)
# loop over cups constants
for name in cups.__dict__:
if name.startswith(prefix):
attr_name = \
_ascii_lower(name[prefix_length:])
if attr_name in self._flags_blacklist: continue
if attr_name == "class": attr_name = "is_class"
# set as attribute
setattr(self, attr_name,
bool(self.type & getattr(cups, name)))
def update(self, **kw):
"""
Update object from printer attributes.
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.state = kw.get('printer-state', 0)
self.enabled = self.state != cups.IPP_PRINTER_STOPPED
self.device_uri = kw.get('device-uri', "")
self.info = kw.get('printer-info', "")
self.is_shared = kw.get('printer-is-shared', None)
self.location = kw.get('printer-location', "")
self.make_and_model = kw.get('printer-make-and-model', "")
self.type = kw.get('printer-type', 0)
self.uri_supported = kw.get('printer-uri-supported', "")
if type (self.uri_supported) != list:
self.uri_supported = [self.uri_supported]
self._expand_flags()
if self.is_shared is None:
self.is_shared = not self.not_shared
del self.not_shared
self.class_members = kw.get('member-names', [])
if type (self.class_members) != list:
self.class_members = [self.class_members]
self.class_members.sort ()
self.other_attributes = kw
def getAttributes(self):
"""
Fetch further attributes for the printer.
Normally only a small set of attributes is fetched. This
method is for fetching more.
"""
attrs = self.connection.getPrinterAttributes(self.name)
self.attributes = {}
self.other_attributes = {}
self.possible_attributes = {
'landscape' : ('False', ['True', 'False']),
'page-border' : ('none', ['none', 'single', 'single-thick',
'double', 'double-thick']),
}
for key, value in attrs.iteritems():
if key.endswith("-default"):
name = key[:-len("-default")]
if name in ["job-sheets", "printer-error-policy",
"printer-op-policy", # handled below
"notify-events", # cannot be set
"document-format", # cannot be set
"notify-lease-duration"]: # cannot be set
continue
supported = attrs.get(name + "-supported", None) or \
self.possible_attributes.get(name, None) or \
""
# Convert a list into a comma-separated string, since
# it can only really have been misinterpreted as a list
# by CUPS.
if isinstance (value, list):
value = reduce (lambda x, y: x+','+y, value)
self.attributes[name] = value
if attrs.has_key(name+"-supported"):
supported = attrs[name+"-supported"]
self.possible_attributes[name] = (value, supported)
elif (not key.endswith ("-supported") and
key != 'job-sheets-default' and
key != 'printer-error-policy' and
key != 'printer-op-policy' and
no
|
t key.startswith ('requesting-user-name-')):
self.other_attributes[key] = value
self.job_sheet_start, self.job_sheet_end = attrs.get(
'job-sheets-default', ('none', 'none'))
self.job_sheets_supported = attrs.get('job-sheets-supported', ['none'])
self.error_policy = attrs.get('printer-error-policy', 'no
|
ne')
self.error_policy_supported = attrs.get(
'printer-error-policy-supported', ['none'])
self.op_policy = attrs.get('printer-op-policy', "") or "default"
self.op_policy_supported = attrs.get(
'printer-op-policy-supported', ["default"])
self.default_allow = True
self.except_users = []
if attrs.has_key('requesting-user-name-allowed'):
self.except_users = attrs['requesting-user-name-allowed']
self.default_allow = False
elif attrs.has_key('requesting-user-name-denied'):
self.except_users = attrs['requesting-user-name-denied']
self.except_users_string = ', '.join(self.except_users)
self.update (**attrs)
def getServer(self):
"""
Find out which server defines this printer.
@returns: server URI or None
"""
if not self.uri_supported[0].startswith('ipp://'):
return None
uri = self.uri_supported[0][6:]
uri = uri.split('/')[0]
uri = uri.split(':')[0]
if uri == "localhost.localdomain":
uri = "localhost"
return uri
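    # Illustrative example (not in the original source): for a printer whose
    # first supported URI is "ipp://print.example.com:631/printers/foo",
    # getServer() drops the scheme, port and path and returns
    # "print.example.com".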
def getPPD(self):
"""
Obtain the printer's PPD.
@returns: cups.PPD object, or False for raw queues
@raise cups.IPPError: IPP error
"""
result = None
if self._ppd is None:
try:
self._ppd = self.connection.getPPD(self.name)
result = cups.PPD (self._ppd)
except cups.IPPError, (e, m):
if e == cups.IPP_NOT_FOUND:
result = False
else:
raise
if result == None and self._ppd != None:
result = cups.PPD (self._ppd)
return result
def setOption(self, name, value):
"""
Set a printer's option.
@param name: option name
@type name: string
|
bearops/ebzl
|
ebzl/lib/config.py
|
Python
|
bsd-3-clause
| 3,082
| 0
|
"""Helper module for parsing AWS ini config files."""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
AWS_CLI_CREDENTIALS_PATH = "~/.aws/credentials"
AWS_CLI_CONFIG_PATH = "~/.aws/config"
DEFAULT_PROFILE_NAME = os.getenv("AWS_DEFAULT_PROFILE", "default")
class NoConfigFoundException(Exception):
"""Config file not present."""
pass
def _get_config_parser(path):
"""Open and parse given config.
:type path: basestring
:rtype: ConfigParser.ConfigParser
"""
config_parser = configparser.ConfigParser()
try:
with open(os.path.expanduser(path), "rb") as f:
config_parser.readfp(f)
except IOError:
raise NoConfigFoundException("Can't find the config file: %s" % path)
else:
return config_parser
def _get_credentials_from_environment():
key = os.environ.get("AWS_ACCESS_KEY_ID")
secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
return key, secret
def get_credentials(pro
|
file=None):
"""Returns AWS credentials.
Reads ~/.aws/credentials if the profile name is given or tries
to get them from environment otherwise. Returns a (key, secret)
tuple.
:type profile: basestring
:rtype: tuple
"""
if profile is None:
key, secret = _get_credentials_from_environment()
if
|
key is not None and secret is not None:
return key, secret
raise NoConfigFoundException("AWS credentials not found.")
config = _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH)
key = config.get(profile, "aws_access_key_id")
secret = config.get(profile, "aws_secret_access_key")
return key, secret
def get_credentials_dict(profile):
"""Returns credentials as a dict (for use as kwargs).
:type profile: basestring
:rtype: dict
"""
key, secret = get_credentials(profile)
return {"aws_access_key_id": key,
"aws_secret_access_key": secret}
def get_profile_names():
"""Get available profile names.
:rtype: list
:returns: list of profile names (strings)
"""
try:
return _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH).sections()
except NoConfigFoundException:
return []
def has_default_profile():
"""Is default profile present?
:rtype: bool
"""
return DEFAULT_PROFILE_NAME in get_profile_names()
def get_default_region(profile):
"""Get the default region for given profile from AWS CLI tool's config.
:type profile: basestring
:rtype: basestring
    :returns: name of default region if defined in config, None otherwise
"""
try:
config = _get_config_parser(path=AWS_CLI_CONFIG_PATH)
except NoConfigFoundException:
return None
try:
return config.get("profile %s" % profile, "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
try:
return config.get("default", "region")
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return None
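# Usage sketch (illustrative, not part of the module; region fallback value is
# only an example):
#   key, secret = get_credentials(profile="default")
#   region = get_default_region("default") or "us-east-1"
# Called without a profile, get_credentials() falls back to the
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables and raises
# NoConfigFoundException when neither is set.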
|
andredias/nikola
|
nikola/data/themes/base/messages/messages_bg.py
|
Python
|
mit
| 2,386
| 0
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "%d минути до прочитане",
"(active)": "(активно)",
"Also available in:": "Достъпно също на:",
"Archive": "Архив",
"Authors": "Автори",
"Categories": "Категории",
"Comments": "Коментари",
"LANGUAGE": "Български",
"Languages:": "Езици:",
"More posts about %s": "Още публикации относно %s",
"Newer posts": "Нови публикации",
"Next post": "Следваща публикация",
"Next": "",
"No posts found.": "Не са намерени публикации.",
"Nothing found.": "Нищо не е намерено.",
"Older posts": "Стари публикации",
"Original site": "Оригиналния сайт",
"Posted:": "Публикyвано:",
"Posts about %s": "Публикации относно %s",
"Posts by %s": "Публикации от %s",
"Posts for year %s": "Публикации за %s година",
"Posts for {month} {day}, {
|
year
|
}": "Публикации от {day} {month} {year}",
"Posts for {month} {year}": "Публикации за {month} {year}",
"Previous post": "Предишна публикация",
"Previous": "",
"Publication date": "Дата на публикуване",
"RSS feed": "RSS поток",
"Read in English": "Прочетете на български",
"Read more": "Чети нататък",
"Skip to main content": "Прескочи до основното съдържание",
"Source": "Изходен код",
"Subcategories:": "Подкатегории:",
"Tags and Categories": "Тагове и Категории",
"Tags": "Тагове",
"Toggle navigation": "",
"Uncategorized": "Без категория",
"Up": "",
"Updates": "Обновления",
"Write your page here.": "Напиши тук текста на твоята страница.",
"Write your post here.": "Напиши тук текста на твоята публикация.",
"old posts, page %d": "стари публикации, страница %d",
"page %d": "страница %d",
"{month} {day}, {year}": "",
"{month} {year}": "",
}
|
mensler/ansible
|
lib/ansible/module_utils/facts.py
|
Python
|
gpl-3.0
| 171,136
| 0.004494
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import shlex
import errno
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.six import PY3, iteritems
from ansible.module_utils.six.moves import configparser, StringIO, reduce
from ansible.module_utils._text import to_native, to_text
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
# Check if we have SSLContext support
from ssl import create_default_context, SSLContext
del create_default_context
del SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT=None
DEFAULT_GATHER_TIMEOUT = 10
class TimeoutError(Exception):
pass
def timeout(seconds=None, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
local_seconds = seconds # Make local var as we modify this every time it's invoked
if local_seconds is None:
local_seconds = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(local_seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
# If we were called as @timeout, then the first parameter will be the
# function we are to wrap instead of the number of seconds. Detect this
# and correct it by setting seconds to our sentinel value and return the
# inner decorator function manually wrapped around the function
if callable(seconds):
func = seconds
seconds = None
return decorator(func)
# If we were called as @timeout([...]) then python itself will take
# care of wrapping the inner decorator around the function
return decorator
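# Illustrative usage (not part of the original module): both call styles
# handled above are supported, e.g.
#   @timeout
#   def gather_quick_facts(): ...
#   @timeout(30, "fact gathering timed out")
#   def gather_slow_facts(): ...
# The bare form falls back to GATHER_TIMEOUT or DEFAULT_GATHER_TIMEOUT seconds.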
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/tools/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg5' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'pa
|
th' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/bin/swupd', 'name' : 'swupd' },
{ 'path' : '/usr/sbin/sorcery', 'name' : 'sorcery' },
]
def __init__(self, module, load_on_init=True, cached_facts=None):
self.module = mo
|
dule
if not cached_facts:
self.facts = {}
else:
self.facts = cached_facts
### TODO: Eventually, these should all get moved to populate(). But
# some of the values are currently being used by other subclasses (for
# instance, os_family and distribution). Have to sort out what to do
# about those first.
if load_on_init:
self.get_platform_facts()
self.facts.update(Distribution(module).populate())
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_apparmor_facts()
self.get_caps_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
self.get_python_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['us
|
praekelt/vumi-go
|
go/router/templatetags/router_tags.py
|
Python
|
bsd-3-clause
| 319
| 0
|
from django import template
from go.base.utils import get_router_view_definition
register = template.Library()
@register.simple_tag
def router_screen(router, view_name='show'):
view_def = get_router_view_definition(router.rout
|
er_type, router)
return view_def.get_view_url(view_name, router_
|
key=router.key)
|
robinwyss/raspberry-scripts
|
main.py
|
Python
|
mit
| 390
| 0
|
#
|
!/usr/bin/env python
import camera
import resize
import ftpupload
import time
# wait 10s to not interfere with the timelaps script
time.sleep(10)
print("taking a picture")
imagePath = camera.capture()
print("captured %s" % imagePath)
smallImagePath = resize.resizeImg(imagePath)
print("resized image")
print("uploading....")
ftpupload.uploadFile(smallImagePath)
print("upload completed")
| |
acm-uiuc/DJ-Roomba
|
dj_roomba/adrive.py
|
Python
|
gpl-2.0
| 906
| 0.005519
|
"""audio driver subsystem"""
from os.path import exists
from os import environ
from subprocess import check_call
from functools import partial
from .drive import Driver
import click
DRIVE_QUEUE = 'a.drive'
CTL_PATH
|
= '{}/.config/pianobar/ctl'.format(environ['HOME'])
COMMANDS = {'p', 'n', '^', '(', ')'}
def callback(ctl:'file_t', cmd:str) -> "IO ()":
"""writes command to ctl pipe"""
if cmd not in COMMANDS:
return
ctl.write(cmd)
ctl.flush()
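# Illustrative call (assumption, not in the original file): callback(ctl, 'n')
# writes "n" to the pianobar ctl FIFO, while a command outside COMMANDS
# (e.g. 'x') is silently ignored.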
@click.command()
@click.option('--ctl_path', default=CTL_PATH)
@click.opti
|
on('--queue', default=DRIVE_QUEUE)
def main(ctl_path:str, queue:str) -> "IO ()":
"""daemon for a.drive queue consumption"""
if not exists(ctl_path):
with open('/dev/null', 'w') as null:
check_call(['pianoctl'], stdout=null)
with open(ctl_path, 'w') as ctl:
Driver(callback=partial(callback, ctl), queue=queue).drive()
|
jandecaluwe/myhdl-examples
|
crusty_UK101/UK101AddressDecode/bench.py
|
Python
|
mit
| 691
| 0.013025
|
from myhdl import *
from UK101AddressDecode import UK101AddressDecode
def bench():
AL = Signal(intbv(0)[16:])
MonitorRom = Signal(bool(0))
ACIA = Signal(bool(0))
KeyBoardPort = Signal(bool(0))
VideoMem = Signal(bool(0))
BasicRom = Signal(bool(0))
Ram = Signal(bool(0))
dut = UK101AddressDecode(
AL,
MonitorRom,
ACIA,
KeyBoardPort,
Vi
|
deoMem,
BasicRom,
Ram)
@instance
def stimulus():
for i in range(0, 2**16):
AL.next = i
yield delay(10)
raise StopSimulation()
return dut, st
|
imulus
sim = Simulation(traceSignals(bench))
sim.run()
|
glad-web-developer/zab_sno
|
src/sno_galleries/models.py
|
Python
|
apache-2.0
| 1,164
| 0.00184
|
from django.db import models
from django.utils.html import format_html
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.fields import ImageField
from sno.models import Sno
class SnoGalleries(models.Model):
class Meta:
verbose_name = 'Фотография в галереи СНО'
verbose_name_plural = 'Фотографии в галереи СНО'
name = models.CharField
|
('Название фото', max_length=255, blank=True, null=True)
photo = ImageField(verbose_name='Фото', max_length=255)
description = models.TextField('Описание', blank=True, null=True)
sno = models.ForeignKey(Sno, verbose_name='СНО', on_delete=models.CASCADE)
date_created = models.DateField('Дата', auto_now_add=True)
def photo_preview(self):
img = get_thumbnail(self.photo, '75x75', crop='center')
return format_html('<a href="{}" target="_blank"><img style="width:75px; height:75px;" src="{}"></a>',
self.photo.url, img.url)
photo_preview.short_description = 'Фото'
def __str__(self):
return '%s (%s)' % (self.name, self.sno.short_name)
|
|
Inspq/ansible
|
lib/ansible/modules/system/debconf.py
|
Python
|
gpl-3.0
| 5,880
| 0.006633
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
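        # debconf-show prefixes already-seen questions with '*', so strip it from the key.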
selections[ key.strip('*').strip() ] = value.strip()
return selections
def set_selection(module, pkg, question, vtype, value, unseen):
s
|
etsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
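    # debconf expects lowercase 'true'/'false' for boolean answers.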
if vtype == 'boolean':
if value == 'True':
value = 'true'
elif value == 'False':
value = 'false'
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
def main():
module = AnsibleModule(
argument_spec = dict(
|
name = dict(required=True, aliases=['pkg'], type='str'),
question = dict(required=False, aliases=['setting', 'selection'], type='str'),
vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
value = dict(required=False, type='str', aliases=['answer']),
unseen = dict(required=False, type='bool'),
),
required_together = ( ['question','vtype', 'value'],),
supports_check_mode=True,
)
#TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
if not question in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = { question: value }
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
if module._diff:
after = prev.copy()
after.update(curr)
diff_dict = {'before': prev, 'after': after}
else:
diff_dict = {}
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
oemof/examples
|
oemof_examples/tespy/solar_collector/solar_collector.py
|
Python
|
gpl-3.0
| 2,858
| 0.0007
|
# -*- coding: utf-8 -*-
from tespy.networks import network
from tespy.components import sink, source, solar_collector
from tespy.connections import connection
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
# %% network
fluid_list = ['H2O']
nw = network(fluids=fluid_list, p_unit='bar', T_unit='C')
# %% components
# sinks & sources
back = source('to collector')
feed = sink('from collector')
# collector
coll = solar_collector(label='solar thermal collector')
# %% connections
b_c = connection(back, 'out1', coll, 'in1')
c_f = connection(coll, 'out1', feed, 'in1')
nw.add_conns(b_c, c_f)
# %% component parameters
# set pressure ratio and heat flow, as well as dimensional parameters of
# the collector. E is missing, thus energy balance for radiation is not
# performed at this point
coll.set_attr(pr=0.99, Q=8e3)
# %% connection parameters
b_c.set_attr(p=5, T=35, fluid={'H2O': 1})
c_f.set_attr(p0=2, T=120)
# %% solving
# going through several parametrisation possibilities
print('###############')
print('simulation 1')
mode = 'design'
nw.solve(mode=mode)
nw.print_results()
# set absorption instead of outlet temperature
coll.set_attr(E=9e2, eta_opt=0.9, lkf_lin=1, lkf_quad=0.005, A=10, Tamb=10)
c_f.set_attr(T=np.nan)
print('###############')
print('simulation 2')
nw.solve(mode=mode)
nw.print_results()
# set outlet temperature and mass flow instead of heat flow and radiation
coll.set_attr(Q=np.nan, E=np.nan)
c_f.set_attr(T=100, m=1e-1)
print('###############')
print('design simulation')
nw.solve(mode=mode)
nw.print_results()
nw.save('design')
# looping over different ambient temperatures and levels of absorption
# (of the inclined surface) assuming constant mass flow
# set print_level to none
mode = 'offdesign'
nw.set_attr(iterinfo=False)
c_f.set_attr(
|
T=np.nan)
gridnum = 10
T_amb = np.linspace(-10, 30, gridnum, dtype=float)
E_glob = np.linspace(100,
|
1000, gridnum, dtype=float)
df = pd.DataFrame(columns=T_amb)
for E in E_glob:
eta = []
coll.set_attr(E=E)
for T in T_amb:
coll.set_attr(Tamb=T)
nw.solve(mode=mode, design_path='design')
eta += [coll.Q.val / (coll.E.val * coll.A.val)]
# cut out efficiencies smaller than zero
if eta[-1] < 0:
eta[-1] = np.nan
df.loc[E] = eta
print('###############')
print('offdesign performance map')
E, T = np.meshgrid(T_amb, E_glob)
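# np.meshgrid(T_amb, E_glob) returns the ambient-temperature grid first, so here E holds temperatures and T holds irradiation values.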
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(E, T, df.values)
# temperature difference -> mean collector temperature to ambient temperature
ax.set_xlabel('ambient temperature t_a in °C')
# absorption on the inclined surface
ax.set_ylabel('absorption E in $\mathrm{\\frac{W}{m^2}}$')
# thermal efficiency (no optical losses)
ax.set_zlabel('efficiency $\eta$')
plt.show()
|
brennie/reviewboard
|
reviewboard/webapi/tests/test_user.py
|
Python
|
mit
| 14,393
| 0
|
from __future__ import unicode_literals
from django.contrib.auth.models import Permission, User
from django.utils import six
from djblets.avatars.services.gravatar import GravatarService
from djblets.testing.decorators import add_fixtures
from djblets.webapi.testing.decorators import webapi_test_template
from kgb import SpyAgency
from reviewboard.accounts.backends import (AuthBackend,
get_enabled_auth_backends)
from reviewboard.accounts.models import Profile
from reviewboard.avatars import avatar_services
from reviewboard.avatars.testcase import AvatarServicesTestMixin
from reviewboard.site.models import LocalSite
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (user_item_mimetype,
user_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_user_item_url,
get_user_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, BaseWebAPITestCase):
"""Testing the UserResource list API tests."""
fixtures = ['test_users']
sample_api_url = 'users/'
resource = resources.user
test_http_methods = ('GET',)
def setup_http_not_allowed_list_test(self, user):
return get_user_list_url()
def compare_item(self, item_rsp, obj):
self.assertEqual(item_rsp['id'], obj.pk)
self.assertEqual(item_rsp['username'], obj.username)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
if not populate_items:
items = []
elif with_local_site:
local_site = self.get_local_site(name=local_site_name)
items = list(local_site.users.all())
else:
items = list(User.objects.all())
return (get_user_list_url(local_site_name),
user_list_mimetype,
items)
@webapi_test_template
def test_get_filter_inactive(self):
"""Testing the GET <URL> API filters out inactive users by default"""
dopey = User.objects.get(username='dopey')
dopey.is_active = False
dopey.save()
rsp = self.api_get(get_user_list_url(),
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
user_pks = [user['id'] for user in rsp['users']]
returned_users = set(User.objects.filter(pk__in=user_pks))
expected_users = set(User.objects.filter(is_active=True))
self.assertEqual(returned_users, expected_users)
@webapi_test_template
def test_get_include_inactive(self):
"""Testing the GET <URL>/?include-inactive=1 API includes inactive
users
"""
dopey = User.objects.get(username='dopey')
dopey.is_active = False
dopey.save()
rsp = self.api_get(get_user_list_url(), {'include-inactive': '1'},
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
user_pks = [user['id'] for user in rsp['users']]
self.assertEqual(set(User.objects.filter(pk__in=user_pks)),
set(User.objects.all()))
@webapi_test_template
def test_get_include_inactive_true(self):
"""Testing th
|
e GET <URL>/?include-inactive=true API includes inactive
users
"""
dopey = User.objects.get(username='dopey')
dopey.is_active = False
dopey.save()
rsp = self.api_get(get_user_list_url(), {'include-inactive': 'true'},
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
user_pks = [user['id'] for user in rsp['users']]
|
self.assertEqual(set(User.objects.filter(pk__in=user_pks)),
set(User.objects.all()))
def test_get_with_q(self):
"""Testing the GET users/?q= API"""
rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(len(rsp['users']), 1) # grumpy
def test_query_users_auth_backend(self):
"""Testing the GET users/?q= API
with AuthBackend.query_users failure
"""
class SandboxAuthBackend(AuthBackend):
backend_id = 'test-id'
name = 'test'
def query_users(self, query, request):
raise Exception
backend = SandboxAuthBackend()
self.spy_on(get_enabled_auth_backends, call_fake=lambda: [backend])
self.spy_on(backend.query_users)
rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertTrue(backend.query_users.called)
def test_search_users_auth_backend(self):
"""Testing the GET users/?q= API
with AuthBackend.search_users failure
"""
class SandboxAuthBackend(AuthBackend):
backend_id = 'test-id'
name = 'test'
def search_users(self, query, request):
raise Exception
backend = SandboxAuthBackend()
self.spy_on(get_enabled_auth_backends, call_fake=lambda: [backend])
self.spy_on(backend.search_users)
rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
expected_mimetype=user_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertTrue(backend.search_users.called)
#
# HTTP POST tests
#
@webapi_test_template
def test_post_anonymous(self):
"""Testing the POST <URL> API as an anonymous user"""
self.client.logout()
rsp = self.api_post(
get_user_list_url(),
{
'username': 'username',
'password': 'password',
'email': 'email@example.com',
},
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('err', rsp)
self.assertIn('code', rsp['err'])
self.assertEqual(rsp['err']['code'], 103)
@webapi_test_template
def test_post(self):
"""Testing the POST <URL> API as a regular user"""
rsp = self.api_post(
get_user_list_url(),
{
'username': 'username',
'password': 'password',
'email': 'email@example.com'
},
expected_status=403)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
self.assertIn('err', rsp)
self.assertIn('code', rsp['err'])
self.assertEqual(rsp['err']['code'], 101)
@webapi_test_template
def test_post_superuser(self):
"""Testing the POST <URL> API as a superuser"""
self.client.login(username='admin', password='admin')
rsp = self.api_post(
get_user_list_url(),
{
'username': 'username',
'password': 'password',
'email': 'email@example.com',
},
expected_mimetype=user_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.compare_item(rsp['user'], User.objects.get(username='username'))
@webapi_test_template
def test_post_auth_add_user_perm(self):
"""Testing the POST <URL> API as a user with the auth.add_user
permission
"""
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='auth',
codename='add_user'))
rsp = self.api_post(
get_user_list_url(),
{
'username': 'username',
'password': 'password',
'email': 'email@example
|
CodeForAfrica/grano
|
grano/logic/entities.py
|
Python
|
mit
| 6,574
| 0
|
import logging
import colander
from grano.core import db, celery
from grano.model import Entity
from grano.logic import properties as properties_logic
from grano.logic.references import ProjectRef, AccountRef
from grano.logic.references import SchemaRef, EntityRef
from grano.plugins import notify_plugins
log = logging.getLogger(__name__)
class EntityBaseValidator(colander.MappingSchema):
author = colander.SchemaNode(AccountRef())
project = colander.SchemaNode(ProjectRef())
class MergeValidator(colander.MappingSchema):
orig = colander.SchemaNode(EntityRef())
dest = colander.SchemaNode(EntityRef())
def validate(data, entity):
""" Due to some fairly weird interdependencies between the different
elements of the model, validation of entities has to happen in three
steps. """
validator = EntityBaseValidator()
sane = validator.deserialize(data)
project = sane.get('project')
schema_validator = colander.SchemaNode(colander.Mapping())
schema_validator.add(colander.SchemaNode(SchemaRef(project),
name='schema'))
sane.update(schema_validator.deserialize(data))
sane['properties'] = properties_logic.validate('entity', entity,
project, sane.get('schema'),
data.get('properties', []))
return sane
@celery.task
def _entity_changed(entity_id, operation):
""" Notify plugins about changes to an entity. """
def _handle(obj):
obj.entity_changed(entity_id, operation)
notify_plugins('grano.entity.change', _handle)
def save(data, files=None, entity=None):
""" Save or update an entity. """
data = validate(data, entity)
operation = 'create' if entity is None else 'update'
if entity is None:
entity = Entity()
entity.project = data.get('project')
entity.author = data.get('author')
db.session.add(entity)
entity.schema = data.get('schema')
prop_names = set()
for name, prop in data.get('properties').items():
prop_names.add(name)
prop['project'] = entity.project
prop['name'] = name
prop['author'] = data.get('author')
properties_logic.save(entity, prop, files=files)
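    # Deactivate previously stored properties that were not part of this update.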
for prop in entity.properties:
if prop.name not in prop_names:
prop.active = False
db.session.flush()
_entity_changed.delay(entity.id, operation)
return entity
def delete(entity):
""" Delete the entity and its properties, as well as any associated
relations. """
db.session.delete(entity)
_entity_changed.delay(entity.id, 'delete')
def merge(source, dest):
""" Copy all properties and relations from one entity onto another, then
    mark the source entity as an ID alias for the destination entity. """
if source.id == dest.id:
return source
if dest.same_as == source.id:
return source
if source.same_as == dest.id:
return dest
if dest.same_as is not None:
# potential infinite recursion here.
canonical = Entity.by_id(dest.same_as)
if canonical is not None:
return merge(source, canonical)
if dest.schema.is_parent(source.schema):
dest.schema = source.schema
dest_valid = [a.name for a in dest.schema.attributes]
dest_active = [p.name for p in dest.active_properties]
for prop in source.properties:
prop.entity = dest
if prop.name in dest_active:
prop.active = False
if prop.name not in dest_valid:
properties_logic.delete(prop)
for rel in source.inbound:
rel.target = dest
db.session.add(rel)
for rel in source.outbound:
rel.source = dest
db.session.add(rel)
source.same_as = dest.id
db.session.flush()
_entity_changed.delay(dest.id, 'update')
_entity_changed.delay(source.id, 'delete')
return dest
def apply_alias(project, author, canonical_name, alias_name, source_url=None):
""" Given two names, find out if there are existing entities for one or
both of them. If so, merge them into a single entity - or, if only the
entity associated with the alias exists - re-name the entity. """
# Don't import meaningless aliases.
if not len(canonical_name) or not len(alias_name):
return log.info("Not an alias: %s", canonical_name)
canonical = None
# de-duplicate existing entities with the same name.
known_names = set()
for existing in Entity.by_name_many(project, canonical_name):
for prop in existing.properties:
if prop.name != 'name':
continue
known_names.add(prop.value)
# make sure the canonical name is actually active
if prop.value == canonical_name:
prop.active = True
else:
prop.active = False
if canonical is not None and canonical.id != existing.id:
canonical = merge(existing, canonical)
else:
canonical = existing
# Find aliases, i.e. entities with the alias name which are not
# the canonical entity.
q = Entity.by_name_many(project, alias_name)
if canonical is not None:
q = q.filter(Entity.id != canonical.id)
aliases = q.all()
# If there are no existing aliases with that name, add the alias
# name to the canonical entity.
if not len(alias
|
es) and canonical is not None:
if alias_name not in known_names:
data = {
'value': alias_name,
'active': False,
'name': 'name',
'source_url': source_url
}
properties_logic.save(canonical, data)
_entity_changed.delay(canonical.id, 'update')
log.info("Alias: %s -> %s", alias_name
|
, canonical_name)
for alias in aliases:
if canonical is None:
# Rename an alias to its new, canonical name.
data = {
'value': canonical_name,
'active': True,
'name': 'name',
'source_url': source_url
}
properties_logic.save(alias, data)
_entity_changed.delay(alias.id, 'update')
log.info("Renamed: %s -> %s", alias_name, canonical_name)
else:
# Merge two existing entities, declare one as "same_as"
merge(alias, canonical)
log.info("Mapped: %s -> %s", alias.id, canonical.id)
db.session.commit()
|
Ecotrust/forestplanner
|
lot/landmapper/settings.py
|
Python
|
bsd-3-clause
| 33,100
| 0.004683
|
import os
from datetime import date
TODAY_DATE = date.today().strftime("%D")
LANDMAPPER_DIR = os.path.dirname(os.path.abspath(__file__))
###########################################
## Keys ###
###########################################
MAPBOX_TOKEN = 'set_in_landmapper_local_settings'
###########################################
## Map Scales ###
###########################################
# Closest: 'fit' -- fits the property as close as possible
# Moderate: 'medium' -- approximately zoom level 12 unless the property is too big
# Regional Context: 'context' -- appx zoom 14 unless the property is larger
PROPERTY_OVERVIEW_SCALE = 'fit'
STREET_SCALE = 'context'
TOPO_SCALE = 'medium'
CONTOUR_SCALE = TOPO_SCALE
AERIAL_SCALE = PROPERTY_OVERVIEW_SCALE
TAXLOTS_SCALE = AERIAL_SCALE
SOIL_SCALE = AERIAL_SCALE
FOREST_TYPES_SCALE = AERIAL_SCALE
STREAM_SCALE = AERIAL_SCALE
STUDY_REGION = {
'north': 46.292035,
'south': 41.991794,
'east': -116.463504,
'west': -124.566244,
'context': [
', OR',
', Oregon USA',
# ', WA',
]
}
###########################################
## Basemaps ###
###########################################
BASEMAPS = {
'USGS_Aerial': {
'URL': 'https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTI
|
ON': {'source': 'USGS', 'attribution': 'USGS The National Map: Orthoimagery. Data refreshed October, 2020.'}
# Can get updated attribution at https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer?f=pjson ['copyrightText']
},
'ESRI_Satellite': {
|
'URL': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': {'source': 'ESRI', 'attribution': 'Source: Esri, DigitalGlobe, GeoEye, Earthstar Geographics, CNES/Airbus DS, USDA, USGS, AeroGRID, IGN, and the GIS User Community'}
},
'ESRI_Topo': {
'URL': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': {'source': 'ESRI', 'attribution': 'Sources: Esri, HERE, Garmin, Intermap, increment P Corp., GEBCO, USGS, FAO, NPS, NRCAN, GeoBase, IGN, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), (c) OpenStreetMap contributors, and the GIS User Community'}
},
'ESRI_Street': {
'URL': 'https://server.arcgisonline.com/arcgis/rest/services/World_Street_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'Sources: Esri, HERE, Garmin, USGS, Intermap, INCREMENT P, NRCan, Esri Japan, METI, Esri China (Hong Kong), Esri Korea, Esri (Thailand), NGCC, (c) OpenStreetMap contributors, and the GIS User Community'
},
'ESRI_NatGeo': {
'URL': 'https://server.arcgisonline.com/arcgis/rest/services/NatGeo_World_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'Tiles © Esri — National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC'
},
'TNM_Aerial': {
'URL': 'https://services.nationalmap.gov/arcgis/rest/services/USGSNAIPPlus/MapServer/export',
'LAYERS': '8',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'USGS The National Map: Orthoimagery'
},
'TNM_NAIP': {
'URL': 'https://services.nationalmap.gov/arcgis/rest/services/USGSNAIPImagery/ImageServer/exportImage',
'TECHNOLOGY': 'arcgis_imageserver',
'ATTRIBUTION': {'source': 'USGS', 'attribution': 'USGS The National Map: Imagery'}
},
'Custom_Topo': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/cke0j10sj1gta19o9agb1w8pq/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
'ATTRIBUTION': 'Sources: MapBox',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'cke0j10sj1gta19o9agb1w8pq',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512,
'ZOOM_2X': False
},
'OSM': {
# 'URL': 'https://tile.openstreetmap.org/{zoom}/{lon}/{lat}.png',
'URL': 'https://maps.geoapify.com/v1/staticmap',
'TECHNOLOGY': 'static',
# 'ATTRIBUTION': 'Sources: MapBox',
'ATTRIBUTION': 'Powered by <a href="https://www.geoapify.com/">Geoapify</a>; © OpenStreetMap contributors',
'PARAMS': {},
'QS': [
# 'style=osm-bright-smooth',
'style=osm-carto',
'width={width}',
'height={height}',
'center=lonlat%3A{lon}%2C{lat}',
'zoom={zoom}', # float
'apiKey={apiKey}',
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'OSM_Mapnik': {
'URL': 'https://a.tile.openstreetmap.org/{zoom}/{lon}/{lat}.png',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': True
},
# https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png
'Hike_Bike': {
'URL': 'https://tiles.wmflabs.org/hikebike/{zoom}/{lon}/{lat}.png',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'USGS_TopoImage': {
'URL': 'https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryTopo/MapServer/tile/{zoom}/{lat}/{lon}',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': 'Tiles courtesy of the <a href="https://usgs.gov/">U.S. Geological Survey</a>',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'MAPBOX_Outdoor': {
'URL': 'https://api.mapbox.com/styles/v1/mapbox/outdoors-v11/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
'ATTRIBUTION': 'Sources: MapBox',
'PARAMS': {
# 'userid':'',
# 'layerid': '',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512,
'ZOOM_2X': False
},
'MAPBOX_Streets': {
'URL': 'https://api.mapbox.com/styles/v1/mapbox/streets-v11/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
|
KaranToor/MA450
|
google-cloud-sdk/lib/surface/functions/call.py
|
Python
|
apache-2.0
| 2,127
| 0.002351
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, V
|
ersion 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of
|
the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions call' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Call(base.Command):
"""Call function synchronously for testing."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'name', help='Name of the function to be called.',
type=util.ValidateFunctionNameOrRaise)
parser.add_argument(
'--data', default='',
help='Data passed to the function (JSON string)')
@util.CatchHTTPErrorRaiseHTTPException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Function call results (error or result with execution id)
"""
project = properties.VALUES.core.project.Get(required=True)
registry = self.context['registry']
client = self.context['functions_client']
messages = self.context['functions_messages']
function_ref = registry.Parse(
args.name, params={'projectsId': project, 'locationsId': args.region},
collection='cloudfunctions.projects.locations.functions')
return client.projects_locations_functions.Call(
messages.CloudfunctionsProjectsLocationsFunctionsCallRequest(
name=function_ref.RelativeName(),
callFunctionRequest=messages.CallFunctionRequest(data=args.data)))
|
DemocracyClub/yournextrepresentative
|
ynr/apps/elections/uk/tests/test_custom_merge.py
|
Python
|
agpl-3.0
| 5,408
| 0
|
from django_webtest import WebTest
from candidates.views.people import MERGE_FORM_ID, SUGGESTION_FORM_ID
import people.tests.factories
from candidates.tests import factories
from candidates.tests.auth import TestUserMixin
from candidates.tests.uk_examples import UK2015ExamplesMixin
from people.models import Person
from uk_results.models import CandidateResult, ResultSet
class TestUKResultsPreserved(TestUserMixin, UK2015ExamplesMixin, WebTest):
def setUp(self):
super().setUp()
self.primary_person = people.tests.factories.PersonFactory.create(
id="3885", name="Harriet Harman"
)
self.secondary_person = people.tests.factories.PersonFactory.create(
id="10000", name="Harriet Ruth Harman"
)
def test_uk_results_for_secondary_preserved(self):
self.assertTrue(Person.objects.filter(pk=10000).exists())
factories.MembershipFactory.create(
person=self.primary_person,
post=self.camberwell_post,
party=self.labour_party,
ballot=self.camberwell_post_ballot_earlier,
)
factories.MembershipFactory.create(
person=self.secondary_person,
post=self.local_post,
party=self.labour_party,
ballot=self.local_election.ballot_set.get(post=self.local_post),
)
secondary_membership = factories.MembershipFactory.create(
person=self.secondary_person,
post=self.camberwell_post,
party=self.labour_party,
ballot=self.camberwell_post_ballot,
elected=True,
)
# Now attach a vote count to the secondary person's candidacy:
result_set = ResultSet.objects.create(
ballot=self.camberwell_post_ballot,
num_turnout_reported=51561,
num_spoilt_ballots=42,
ip_address="127.0.0.1",
)
CandidateResult.objects.create(
result_set=result_set,
membership=secondary_membership,
num_ballots=32614,
)
# Now try the merge:
response = self.app.get("/person/3885/", user=self.user_who_can_merge)
# first submit the suggestion form
suggestion_form = response.forms[SUGGESTION_FORM_ID]
suggestion_form["other_person"] = "10000"
response = suggestion_form.submit()
# as user has permission to merge directly, submit merge form
merge_form = response.forms[MERGE_FORM_ID]
response = merge_form.submit()
self.assertEqual(CandidateResult.objects.count(), 1)
# Now reget the original person and her candidacy - check it
# has a result attached.
after_merging = Person.objects.get(pk=3885)
membership = after_merging.memberships.get(
ballot__election=self.election
)
candidate_result = membership.result
self.assertEqual(candidate_result.num_ballots, 32614)
self.assertFalse(Person.objects.filter(pk=10000).exists())
self.assertTrue(membership.elected)
def test_uk_results_for_primary_preserved(self
|
):
self.assertTrue(Person.objects.filter(pk=10000).exists())
primary_membership = factories.MembershipFactory.create(
person=self.primary_person,
post=self.camberwell_post,
party=self.labour_party,
ballot=self.camberwell_post_ballot_earlier,
elected=True,
)
factories.MembershipFactory.create(
person=self.secondary_person,
|
post=self.local_post,
party=self.labour_party,
ballot=self.local_election.ballot_set.get(post=self.local_post),
)
factories.MembershipFactory.create(
person=self.secondary_person,
post=self.camberwell_post,
party=self.labour_party,
ballot=self.camberwell_post_ballot,
)
# Now attach a vote count to the primary person's candidacy:
result_set = ResultSet.objects.create(
ballot=self.camberwell_post_ballot_earlier,
num_turnout_reported=46659,
num_spoilt_ballots=42,
ip_address="127.0.0.1",
)
CandidateResult.objects.create(
result_set=result_set,
membership=primary_membership,
num_ballots=27619,
)
# Now try the merge:
response = self.app.get("/person/3885/", user=self.user_who_can_merge)
# first submit the suggestion form
suggestion_form = response.forms[SUGGESTION_FORM_ID]
suggestion_form["other_person"] = "10000"
response = suggestion_form.submit()
# as user has permission to merge directly, submit merge form
merge_form = response.forms[MERGE_FORM_ID]
response = merge_form.submit()
self.assertEqual(CandidateResult.objects.count(), 1)
# Now reget the original person and her candidacy - check it
# has a result attached.
after_merging = Person.objects.get(pk=3885)
membership = after_merging.memberships.get(
ballot__election=self.earlier_election
)
candidate_result = membership.result
self.assertEqual(candidate_result.num_ballots, 27619)
self.assertFalse(Person.objects.filter(pk=10000).exists())
self.assertTrue(membership.elected)
|
fukun07/neural-image-captioning
|
codes/pycoco/rouge/rouge.py
|
Python
|
mit
| 3,659
| 0.008746
|
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <vrama91@vt.edu>
import numpy as np
import pdb
def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (list of int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if(len(string)< len(sub)):
sub, string = string, sub
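    # Dynamic-programming LCS table: lengths[i][j] is the LCS length of string[:i] and sub[:j].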
lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
for j in range(1,len(sub)+1):
for i in range(1,len(string)+1):
if(string[i-1] == sub[j-1]):
lengths[i][j] = lengths[i-1][j-1] + 1
else:
lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
return lengths[len(string)][len(sub)]
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
        # vrama91: updated the value below based on discussion with Hovy
self.beta = 1.2
|
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences
|
for the particular image to be evaluated
:returns score: int (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
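        # Weighted F-measure of the best precision/recall over all references; beta > 1 favours recall.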
if(prec_max!=0 and rec_max !=0):
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(sorted(gts.keys()) == sorted(res.keys()))
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score)
def method(self):
return "Rouge"
|
baigk/compass-core
|
compass/db/api/installer.py
|
Python
|
apache-2.0
| 1,791
| 0
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permis
|
sions and
# limitations under the License.
"""Adapter database operations."""
im
|
port logging
import os
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_installers(session, model, configs, exception_when_existing=True):
installers = []
for config in configs:
installers.append(utils.add_db_object(
session, model,
exception_when_existing, config['INSTANCE_NAME'],
name=config['NAME'],
settings=config.get('SETTINGS', {})
))
return installers
def add_os_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_INSTALLER_DIR)
return _add_installers(
session, models.OSInstaller, configs,
exception_when_existing=exception_when_existing
)
def add_package_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
return _add_installers(
session, models.PackageInstaller, configs,
exception_when_existing=exception_when_existing
)
|