| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| string, 5-100 chars | string, 4-231 chars | string, 1 class (Python) | string, 15 classes | int64, 6-947k | float64, 0-0.34 | string, 0-8.16k chars | string, 3-512 chars | string, 0-8.17k chars |
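
The prefix, middle, and suffix columns indicate a fill-in-the-middle style code corpus: each row holds one Python source file split into three contiguous pieces, alongside its repository, path, license, size, and score. As a minimal sketch of how a row could be reassembled (the dataset identifier, the split name, and the use of the Hugging Face `datasets` library are assumptions for illustration, not something stated by this table):

```python
# Minimal sketch: reassemble one row of a prefix/middle/suffix code corpus.
# Assumptions: the table is available through the Hugging Face `datasets`
# library; "your-org/your-fim-corpus" and split="train" are placeholders.
from datasets import load_dataset

ds = load_dataset("your-org/your-fim-corpus", split="train")

row = ds[0]
# The three text columns are contiguous slices of the original file.
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
print(source[:200])
```

Concatenating the three columns in that order reconstructs the original file text, which is how the rows below are presented.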

| krafczyk/spack | var/spack/repos/builtin/packages/r-seqinr/package.py | Python | lgpl-2.1 | 1,941 | 0.000515 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSeqinr(RPackage):
"""Exploratory data analysis and data visualization for biological
sequence (DNA and protein) data. Includes also utilities for sequence
data management under the ACNUC system."""
homepage = "http://seqinr.r-forge.r-project.org"
url = "https://cran.r-project.org/src/contrib/seqinr_3.3-6.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/seqinr"
version('3.4-5', 'd550525dcea754bbd5b83cb46b4124cc')
version('3.3-6', '73023d627e72021b723245665e1ad055')
depends_on('r@2.10:')
depends_on('r-ade4', type=('build', 'run'))
depends_on('r-segmented', type=('build', 'run'))
depends_on('zlib')

| kushsharma/GotAPI | manage.py | Python | apache-2.0 | 804 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gotapi.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHON
|
PATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)

| idan/oauthlib | tests/openid/connect/core/endpoints/test_userinfo_endpoint.py | Python | bsd-3-clause | 2,522 | 0 |
# -*- coding: utf-8 -*-
import json
from unittest import mock
from oauthlib.oauth2.rfc6749 import errors
from oauthlib.openid import RequestValidator, UserInfoEndpoint
from tests.unittest import TestCase
def set_scopes_valid(token, scopes, request):
request.scopes = ["openid", "bar"]
return True
class UserInfoEndpointTest(TestCase):
def setUp(self):
self.claims = {
"sub": "john",
"fruit": "banana"
}
# Can't use MagicMock/wraps below.
# Triggers error when endpoint copies to self.bearer.request_validator
self.validator = RequestValidator()
self.validator.validate_bearer_token = mock.Mock()
self.validator.validate_bearer_token.side_effect = set_scopes_valid
self.validator.get_userinfo_claims = mock.Mock()
self.validator.get_userinfo_claims.return_value = self.claims
self.endpoint = UserInfoEndpoint(self.validator)
self.uri = 'should_not_matter'
self.headers = {
'Authorization': 'Bearer eyJxx'
}
def test_userinfo_no_auth(self):
self.endpoint.create_userinfo_response(self.uri)
def test_userinfo_wrong_auth(self):
self.headers['Authorization'] = 'Basic foifoifoi'
self.endpoint.create_userinfo_response(self.uri, headers=self.headers)
def test_userinfo_token_expired(self):
self.validator.validate_bearer_token.return_value = False
self.endpoint.create_userinfo_response(self.uri, headers=self.headers)
def test_userinfo_token_no_openid_scope(self):
def set_scopes_invalid(token, scopes, request):
request.scopes = ["foo", "bar"]
return True
self.validator.validate_bearer_token.side_effect = set_scopes_invalid
with self.assertRaises(errors.InsufficientScopeError) as context:
self.endpoint.create_userinfo_response(self.uri)
def test_userinfo_json_response(self):
h, b, s = self.endpoint.create_userinfo_response(self.uri)
self.assertEqual(s, 200)
body_json = json.loads(b)
self.assertEqual(self.claims, body_json)
self.assertEqual("application/json", h['Content-Type'])
def test_userinfo_jwt_response(self):
self.validator.get_userinfo_claims.return_value = "eyJzzzzz"
h, b, s = self.endpoint.create_userinfo_response(self.uri)
self.assertEqual(s, 200)
self.assertEqual(b, "eyJzzzzz")
self.assertEqual("application/jwt", h['Content-Type'])

| Selfnet/dashboard | cgi/latest.json.py | Python | bsd-2-clause | 232 | 0 |
#!/usr/bin/python
import memcache
import json
print "Content-Type: application/json"
print
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
data = mc.get_multi(["meta", "latest"])
print(json.dumps(data, separators=(",", ":")))

| HuayraLinux/cuaderno | cuaderno/team/admin.py | Python | gpl-2.0 | 260 | 0 |
from django.contrib import admin
from team.models import Member
# Register your models here.
class MemberAdmin(admin.ModelAdmin):
fields = ['last_name', 'first_name']
ordering = ['last_name', 'first_name']
admin.site.register(Member, MemberAdmin)

| megcunningham/django-diesel | tests/engine.py | Python | bsd-3-clause | 5,088 | 0.000786 |
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("megs_project", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("megs_project", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()

| uclouvain/osis_louvain | base/forms/learning_unit_specifications.py | Python | agpl-3.0 | 3,409 | 0.002054 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from base.forms.common import set_trans_txt
from cms.models import translated_text
from cms.enums import entity_name
from ckeditor.widgets import CKEditorWidget
class LearningUnitSpecificationsForm(forms.Form):
learning_unit_year = language = None
def __init__(self, learning_unit_year, language, *args, **kwargs):
self.learning_unit_year = learning_unit_year
self.language = language
self.refresh_data()
super(LearningUnitSpecificationsForm, self).__init__(*args, **kwargs)
def refresh_data(self):
language_iso = self.language[0]
texts_list = translated_text.search(entity=entity_name.LEARNING_UNIT_YEAR,
reference=self.learning_unit_year.id,
language=language_iso) \
.exclude(text__isnull=True)
set_trans_txt(self, texts_list)
class LearningUnitSpecificationsEditForm(forms.Form):
trans_text = forms.CharField(widget=CKEditorWidget(config_name='minimal'), required=False)
cms_id = forms.IntegerField(widget=forms.HiddenInput, required=True)
def __init__(self, *args, **kwargs):
self.learning_unit_year = kwargs.pop('learning_unit_year', None)
self.language_iso = kwargs.pop('language', None)
self.text_label = kwargs.pop('text_label', None)
super(LearningUnitSpecificationsEditForm, self).__init__(*args, **kwargs)
def load_initial(self):
value = translated_text.get_or_create(entity=entity_name.LEARNING_UNIT_YEAR,
reference=self.learning_unit_year.id,
language=self.language_iso,
text_label=self.text_label)
self.fields['cms_id'].initial = value.id
self.fields['trans_text'].initial = value.text
def save(self):
cleaned_data = self.cleaned_data
trans_text = translated_text.find_by_id(cleaned_data['cms_id'])
trans_text.text = cleaned_data.get('trans_text')
trans_text.save()

| lizardsystem/lizard-efcis | lizard_efcis/migrations/0091_auto_20160315_0924.py | Python | gpl-3.0 | 1,198 | 0.000835 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lizard_efcis', '0090_auto_20160222_1439'),
]
operations = [
migrations.AlterField(
model_name='mappingfield',
name='db_datatype',
field=models.CharField(blank=True, max_length=255, null=True, help_text='DataType of Foreign-Tabelnaam b.v. float, Locatie', choices=[('CharField', 'CharField'), ('TextField', 'TextField'), ('float', 'float'), ('date', 'date'), ('time', 'time'), ('boolean', 'boolean'), ('Activiteit', 'Activiteit'), ('BioStatus', 'BioStatus'), ('Compartiment', 'Compartiment'), ('Detectiegrens', 'Detectiegrens'), ('Eenheid', 'Eenheid'), ('FCStatus', 'FCStatus'), ('Hoedanigheid', 'Hoedanigheid'), ('Locatie', 'Locatie'), ('Meetnet', 'Meetnet'), ('MeetStatus', 'MeetStatus'), ('Parameter', 'Parameter'), ('ParameterGroep', 'ParameterGroep'), ('Status', 'Status'), ('StatusKRW', 'StatusKRW'), ('WNS', 'WNS'), ('WNSStatus', 'WNSStatus'), ('Waterlichaam', 'Waterlichaam'), ('Watertype', 'Watertype')]),
preserve_default=True,
),
]

| isandlaTech/cohorte-demos | temper/src/aggregator/aggregator.py | Python | apache-2.0 | 6,713 | 0.00283 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
An aggregator of sensors values
Created on 10 juil. 2012
:author: Thomas Calmant
"""
# ------------------------------------------------------------------------------
from pelix.ipopo import constants
from pelix.ipopo.decorators import ComponentFactory, Provides, \
Validate, Invalidate, Property, Requires, Bind, Unbind, BindField, UnbindField
# ------------------------------------------------------------------------------
import logging
import threading
import time
# ------------------------------------------------------------------------------
# Name of the HistoryEntry class in Java, for Jabsorb
HISTORY_ENTRY_CLASS = "temper.aggregator.HistoryEntry"
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory("aggregator-factory")
@Property("_name", constants.IPOPO_INSTANCE_NAME)
@Property("_history_size", "history.size", 10)
@Property("_poll_delta", "poll.delta", 10)
@Requires("_sensors", "java:/temper.sensors.TemperatureService",
aggregate=True, optional=True)
@Provides("java:/temper.aggregator.AggregatorService")
class Aggregator(object):
"""
Temperature sensor
"""
def __init__(self):
"""
Constructor
"""
self._history_size = 0
self._name = ""
self._poll_delta = 0
self._sensors = []
# The values history (sensor -> list of dictionaries)
self._history = {}
self._lock = threading.Lock()
self._thread_stop = threading.Event()
self._thread = None
def get_history(self):
"""
Retrieves the whole known history as a dictionary.
Result is a dictionary, with sensor name as entry and a list
of HistoryEntry (Python map/Java bean) as value
:return: The whole history
"""
with self._lock:
return self._history
def get_sensor_history(self, sensor):
"""
Retrieves the known history for the given sensor
:param sensor: The name of a sensor
:return: The history of the sensor. Can be None
"""
with self._lock:
return self._history.get(sensor, None)
def get_sensor_lastentry(self, sensor):
"""
Retrieves the last known history entry for the given sensor
:param sensor: The name of the sensor
:return: The last history entry of the sensor. Can be None.
"""
with self._lock:
sensor_history = self._history.setdefault(sensor, [])
if sensor_history:
return sensor_history[0]
def get_sensors(self):
"""
Retrieves the list of sensors visible in the history
:return: The list of known sensors
"""
with self._lock:
return tuple(self._history.keys())
def get_active_sensors(self):
"""
Retrieves the active sensors
:return: The list of active sensors
"""
sensors = []
if self._sensors is not None:
for sensor in self._sensors:
try:
name = sensor.getName()
sensors.append(name)
except Exception as ex:
_logger.error("Error retrieving sensor data: %s", ex)
return sensors
def _poll(self):
"""
Polls the value of all known sensors
"""
while not self._thread_stop.is_set():
if self._sensors is not None:
for sensor in self._sensors:
try:
name = sensor.getName()
value = sensor.getValue()
unit = sensor.getUnit()
self._store(name, value, unit)
except Exception as ex:
_logger.error("Error retrieving sensor data: %s", ex)
# Wait for the poll delta, or for the order to stop
try:
wait = float(self._poll_delta)
except:
wait = 30
self._thread_stop.wait(wait)
def _store(self, sensor, value, unit):
"""
Stores a value in the history
"""
# Get the history list for this sensor
with self._lock:
sensor_history = self._history.setdefault(sensor, [])
# Remove the oldest entry if needed
if len(sensor_history) >= self._history_size:
del sensor_history[-1]
# Insert the new entry in front
sensor_history.insert(0, {"sensor": sensor,
"time": int(time.time() * 1000),
"value": value,
"unit": unit,
"javaClass": HISTORY_ENTRY_CLASS})
@Bind
def bind(self, svc, ref):
"""
Called by iPOPO when a service is bound
"""
props = ref.get_properties()
if props.get("service.imported", False):
import_str = "from %s" % props.get("service.imported.from")
else:
import_str = "local"
# if service is TemperatorSensor then informe listeners
_logger.debug("%s> Bound to %s (%s)", self._name, ref, import_str)
@Unbind
def unbind(self, svc, ref):
"""
Called by iPOPO when a service is gone
"""
props = ref.get_properties()
if props.get("service.imported", False):
import_str = "from %s" % props.get("service.imported.from")
else:
import_str = "local"
_logger.debug("%s> UnBound of %s (%s)", self._name, ref, import_str)
@Validate
def validate(self, context):
"""
Component validation
"""
# Clear the stop event
self._thread_stop.clear()
# Start the polling thread
self._thread = threading.Thread(target=self._poll)
self._thread.start()
_logger.info("Component %s validated", self._name)
@Invalidate
def invalidate(self, context):
"""
Component invalidation
"""
# Set the stop event
self._thread_stop.set()
# Wait a little for the thread
self._thread.join(2)
self._thread = None
_logger.info("Component %s invalidated", self._name)
# Java API compliance
getHistory = get_history
getSensorHistory = get_sensor_history
getSensorLastEntry = get_sensor_lastentry
getSensors = get_sensors
getActiveSensors = get_active_sensors

| 1ukash/horizon | horizon/dashboards/project/images_and_snapshots/views.py | Python | apache-2.0 | 3,222 | 0.001241 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Images and Snapshots.
"""
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import tables
from horizon import tabs
from .images.tables import ImagesTable
from .snapshots.tables import SnapshotsTable
from .volume_snapshots.tables import VolumeSnapshotsTable
from .volume_snapshots.tabs import SnapshotDetailTabs
LOG = logging.getLogger(__name__)
class IndexView(tables.MultiTableView):
table_classes = (ImagesTable, SnapshotsTable, VolumeSnapshotsTable)
template_name = 'project/images_and_snapshots/index.html'
def has_more_data(self, table):
return getattr(self, "_more_%s" % table.name, False)
def get_images_data(self):
marker = self.request.GET.get(ImagesTable._meta.pagination_param, None)
try:
# FIXME(gabriel): The paging is going to be strange here due to
# our filtering after the fact.
(all_images,
self._more_images) = api.image_list_detailed(self.request,
marker=marker)
images = [im for im in all_images
if im.container_format not in ['aki', 'ari'] and
im.properties.get("image_type", '') != "snapshot"]
except:
images = []
exceptions.handle(self.request, _("Unable to retrieve images."))
return images
def get_snapshots_data(self):
req = self.request
marker = req.GET.get(SnapshotsTable._meta.pagination_param, None)
try:
snaps, self._more_snapshots = api.snapshot_list_detailed(req,
marker=marker)
except:
snaps = []
exceptions.handle(req, _("Unab
|
le to retrieve snapshots."))
return snaps
def get_volume_snapshots_data(self):
try:
snapshots = api.volume_snapshot_list(self.request)
except:
snapshots = []
exceptions.handle(self.request, _("Unable to retrieve "
"volume snapshots."))
return snapshots
class DetailView(tabs.TabView):
tab_group_class = SnapshotDetailTabs
template_name = 'project/images_and_snapshots/snapshots/detail.html'

| garr741/mr_meeseeks | rtmbot/plugins/hmm.py | Python | mit | 322 | 0.003106 |
hmm = [
"https://media3.giphy.com/media/TPl5N4Ci49ZQY/giphy.gif",
"https://media0.giphy.com/media/l14qxlCgJ0zUk/giphy.gif",
"https://media4.giphy.com/media/MsWnkCVSXz73i/giphy.gif",
"https://media1.giphy.com/media/l2
|
JJEIMLgrXPEbDGM/giphy.gif",
"https://media0.gi
|
phy.com/media/dgK22exekwOLm/giphy.gif"
]

| arturocastro/portable-performance | previous_work/PURE-C/timings/plot.py | Python | mit | 107 | 0 |
#!/usr/bin/env python
import fileinput
from pylab import *
for line in fileinput.input():
print line

| pombredanne/Rusthon | regtests/go/maps.py | Python | bsd-3-clause | 359 | 0.116992 |
""
|
"map types"""
def main():
a = map[string]int{
'x': 1,
'y': 2,
'z': 3,
}
print( a['x'] )
assert a['x']==1
b = map[int]string{ 0:'a', 1:'b' }
print( b[0] )
print( b[1] )
assert b[0]=='a'
assert b[1]=='b'
## infers type of key and value ##
c = {'x':100, 'y':200}
print( c['x'] )
print( c['y'] )
assert c['x']==100
assert c['y']==200

| NetApp/manila | manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py | Python | apache-2.0 | 6,186 | 0 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_log import log
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ddt.ddt
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
super(NetAppBaseClientTestCase, self).setUp()
# Mock loggers as themselves to allow logger arg validation
mock_logger = log.getLogger('mock_logger')
self.mock_object(client_base.LOG,
'error',
mock.Mock(side_effect=mock_logger.error))
self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
major, minor = self.client.get_ontapi_version(cached=False)
self.assertEqual('1', major)
self.assertEqual('19', minor)
def test_get_ontapi_version_cached(self):
self.connection.get_api_version.return_value = (1, 20)
major, minor = self.client.get_ontapi_version()
self.assertEqual(1, self.connection.get_api_version.call_count)
self.assertEqual(1, major)
self.assertEqual(20, minor)
def test_get_system_version(self):
version_response = netapp_api.NaElement(
fake.SYSTEM_GET_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
result = self.client.get_system_version()
self.assertEqual(fake.VERSION, result['version'])
self.assertEqual(('8', '2', '1'), result['version-tuple'])
def test_init_features(self):
self.client._init_features()
self.assertSetEqual(set(), self.client.features.defined_features)
@ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name')
def test_strip_xml_namespace(self, element):
result = self.client._strip_xml_namespace(element)
self.assertEqual('tag_name', result)
def test_send_request(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api')
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_no_tunneling(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api', enable_tunneling=False)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertFalse(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_with_args(self):
element = netapp_api.NaElement('fake-api')
api_args = {'arg1': 'data1', 'arg2': 'data2'}
element.translate_struct(api_args)
self.client.send_request('fake-api', api_args=api_args)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_get_licenses(self):
api_response = netapp_api.NaElement(fake.LICENSE_V2_LIST_INFO_RESPONSE)
self.mock_object(
self.client, 'send_request', mock.Mock(return_value=api_response))
response = self.client.get_licenses()
self.assertSequenceEqual(fake.LICENSES, response)
def test_get_licenses_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=netapp_api.NaApiError))
self.assertRaises(netapp_api.NaApiError, self.client.get_licenses)
self.assertEqual(1, client_base.LOG.error.call_count)
def test_send_ems_log_message(self):
self.assertRaises(NotImplementedError,
self.client.send_ems_log_message,
{})
@ddt.ddt
class FeaturesTestCase(test.TestCase):
def setUp(self):
super(FeaturesTestCase, self).setUp()
self.features = client_base.Features()
def test_init(self):
self.assertSetEqual(set(), self.features.defined_features)
def test_add_feature_default(self):
self.features.add_feature('FEATURE_1')
self.assertTrue(self.features.FEATURE_1)
self.assertIn('FEATURE_1', self.features.defined_features)
@ddt.data(True, False)
def test_add_feature(self, value):
self.features.add_feature('FEATURE_2', value)
self.assertEqual(value, self.features.FEATURE_2)
self.assertIn('FEATURE_2', self.features.defined_features)
@ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,))
def test_add_feature_type_error(self, value):
self.assertRaises(TypeError,
self.features.add_feature,
'FEATURE_3',
value)
self.assertNotIn('FEATURE_3', self.features.defined_features)
def test_get_attr_missing(self):
self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4')

| sndrtj/afplot | afplot/variation.py | Python | mit | 2,720 | 0 |
"""
afplot.variation
~~~~~~~~~~~~~~~~
:copyright: (c) 2017 Sander Bollen
:copyright: (c) 2017 Leiden University Medical Center
:license: MIT
"""
def get_all_allele_freqs(record, sample_name):
fmt = record.genotype(sample_name)
if not hasattr(fmt.data, 'AD'):
return []
ad = fmt.data.AD
if not ad:
return []
if len(ad) == 0:
return []
if sum(ad) == 0:
return [0.0 for _ in ad]
return [float(x)/sum(ad) for x in ad]
def get_variant_type(record, sample_name):
fmt = record.genotype(sample_name)
if not fmt.called:
return "no_call"
elif hasattr(fmt.data, "GQ") and fmt.data.GQ == 0:
return "no_call"
elif not fmt.is_variant:
return "hom_ref"
elif fmt.is_het:
return "het"
else:
return "hom_alt"
def get_distance_to_exp(record, sample_name):
"""
Get distance to expected theoretical allele frequencies
This assumes the AD field to conform to the GATK spec,
i.e. the number of AD values EXACTLY matches the number
of alleles (INCLUDING ref allele)
:param record: VCF record
:param sample_name: sample name
:return: list of distances
"""
freqs = get_all_allele_freqs(record, sample_name)
if len(freqs) == 0:
return []
rtype = get_variant_type(record, sample_name)
fmt = record.genotype(sample_name)
assert len(freqs) == len(record.alleles)
if rtype == "no_call":
return [0 for _ in freqs]
elif rtype == "hom_ref":
# freq of ref allele should be 1.0,
# freq of all other alleles should be 0.0
return [1 - freqs[0]] + freqs[1:]
elif rtype == "hom_alt":
# affected allele should be 1.0, all other alleles should be 0.0
if fmt.phased:
idx_affected_allele = int(fmt.data.GT.split("|")[0])
else:
idx_affected_allele = int(fmt.data.GT.split("/")[0])
distances = []
for i, f in enumerate(freqs):
if i == idx_affected_allele:
distances.append(1 - f)
else:
distances.append(f)
return distances
elif rtype == "het":
# freq of affected alleles should be 0.5,
# all other alleles should be 0.0
if fmt.phased:
idx_affected_alleles = [int(x) for x in fmt.data.GT.split("|")]
else:
idx_affected_alleles = [int(x) for x in fmt.data.GT.split("/")]
distances = []
for i, f in enumerate(freqs):
if i in idx_affected_alleles:
distances.append(abs(0.5 - f))
else:
distances.append(f)
return distances
else:
raise NotImplementedError

| LamCiuLoeng/jcp | ordering/controllers/master.py | Python | mit | 23,382 | 0.013942 |
# -*- coding: utf-8 -*-
from datetime import datetime as dt
import copy
import random
import os
import traceback
from tg import redirect, flash, expose, request, override_template
from tg.decorators import paginate
from ordering.controllers.basicMaster import *
from ordering.model import *
from ordering.util.common import *
from ordering.util.excel_helper import *
from ordering.widgets.master import *
__all__=["CountryController", "ContactController", "BillToController", "ShipToController",
"CountryCodeController", "ItemInfoController", "CustomerController", "SpecialValueController",
"RFIDMappingController", "ComboMappingInfoController"]
#class ItemCodeController(BasicMasterController):
# url = "itemcode"
# template = "ordering.templates.masters.index"
# dbObj = JCPItemCodeMaster
# searchWidget = itemCodeSearchFormInstance
# updateWidget = itemCodeUpdateFormInstance
# formFields = ["name","description"]
class CountryController(BasicMasterController):
url="country"
dbObj=JCPCountry
template="ordering.templates.masters.index_country"
searchWidget=countrySearchFormInstance
updateWidget=countryUpdateFormInstance
formFields=["name", "phone"]
class ContactController(BasicMasterController):
url="contact"
dbObj=JCPContact
template="ordering.templates.masters.index_contact"
searchWidget=contactSearchFormInstance
updateWidget=contactUpdateFormInstance
formFields=["name", "email", "countryId"]
class BillToController(BasicMasterController):
url="billto"
dbObj=JCPBillTo
template="ordering.templates.masters.index_billto"
searchWidget=billToSearchFormInstance
updateWidget=billToUpdateFormInstance
formFields=["customer_id", "company", "address", "attn", "tel", "fax", "email"]
def beforeSaveNew(self, kw, params):
params['is_default']=1
return params
class ShipToController(BasicMasterController):
url="shipto"
dbObj=JCPShipTo
template="ordering.templates.masters.index_shipto"
searchWidget=shipToSearchFormInstance
updateWidget=shipToUpdateFormInstance
formFields=["customer_id", "company", "address", "attn", "tel", "fax", "email"]
def beforeSaveNew(self, kw, params):
params['is_default']=1
return params
class CountryCodeController(BasicMasterController):
url="countrycode"
dbObj=JCPCountryCode
template="ordering.templates.masters.index_countrycode"
searchWidget=countryCodeSearchFormInstance
updateWidget=countryCodeUpdateFormInstance
formFields=["countryName", "countryCode"]
class ItemInfoController(BasicMasterController):
url="iteminfo"
dbObj=JCPItemInfo
template="ordering.templates.masters.index_iteminfo"
searchWidget=itemInfoSearchFormInstance
updateWidget=itemInfoUpdateFormInstance
formFields=["item_code",
"item_type",
"packaging_code",
"combo_packaging_code",
"combo_item",
"combo_mapping",
"washing_instruction",
"fiber_content",
"country_of_origin",
"special_value",
"multi_special_value",
"path",
"status",
"hangtang_pkg_code",
"label_pkg_code",
]
@expose('ordering.templates.masters.index_iteminfo')
@paginate("result", items_per_page=20)
@tabFocus(tab_type="master")
def index(self, **kw):
if not kw:
result=DBSession.query(self.dbObj).all()
else:
result=self.searchMaster(kw)
return {
"searchWidget" : self.searchWidget,
"result" : result,
"funcURL" :self.url,
"values" : kw,
}
def beforeSaveNew(self, kw, params):
version=JCPItemInfo.get_max_version(pkg_code=params['packaging_code'])
print version
params['version']=int(version)+1 if version else 1
return params
def beforeSaveUpdate(self, kw, params):
version=JCPItemInfo.get_max_version(pkg_code=params['packaging_code'])
params['version']=int(version) if version else 1
return params
@expose('ordering.templates.masters.item_form')
@tabFocus(tab_type="master")
def add(self, **kw):
return {
"widget" : self.updateWidget,
"values" : {},
"saveURL" : "/%s/saveNew"%self.url,
"funcURL" :self.url
}
@expose()
def upload(self, **kw):
try:
relativePath = os.path.join("ordering/public/images","jcpenney")
fileUpload(kw['item_artwork_files'], relativePath, kw['item_artwork_name'])
except:
logfile = open("log.txt", "w")
traceback.print_exc(None, logfile)
logfile.close()
@expose()
def saveNew(self, **kw):
params = {"issuedBy": request.identity["user"],
"lastModifyBy": request.identity["user"],
"lastModifyTime": dt.now()
}
combo_mapping_params = {"issuedBy": request.identity["user"],
"lastModifyBy": request.identity["user"],
"lastModifyTime": dt.now()
}
combo_mapping_fields = ["hangtang_pkg_code",
"label_pkg_code",
]
combo_mapping_flag = False
for f in self.formFields:
if f in kw.keys() and f not in combo_mapping_fields:
params[f]=kw[f]
if f in kw.keys() and len(kw[f]) > 0 and f in combo_mapping_fields:
combo_mapping_params[f] = kw[f]
combo_mapping_flag = True
if params['combo_item'] == 'False':
params.pop('combo_packaging_code')
item = JCPItemInfo(**params)
if combo_mapping_flag == True:
combo_mapping = JCPComboMappingInfo(**combo_mapping_params)
combo_mapping.main_pkg_code = item.packaging_code
DBSession.add_all([item, combo_mapping])
else:
DBSession.add(item)
flash("Save the new master successfully!")
redirect("/%s/index"%self.url)
@expose('ordering.templates.masters.item_form')
@tabFocus(tab_type="master")
def update(self, **kw):
obj=getOr404(self.dbObj, kw["id"], "/%s/index"%self.url)
combo_mapping_obj = JCPComboMappingInfo.get_by_main_code(obj.packaging_code)
values={}
combo_mapping_fields = ["hangtang_pkg_code",
"label_pkg_code",
]
for f in self.formFields:
if f not in combo_mapping_fields: values[f]=getattr(obj, f)
if f in combo_mapping_fields and len(combo_mapping_obj) > 0:
values[f] = getattr(combo_mapping_obj[0], f)
return {
"widget" : self.updateWidget,
"values" : values,
"saveURL" : "/%s/saveUpdate?id=%d"%(self.url, obj.id),
"funcURL" :self.url
}
@expose()
def saveUpdate(self, **kw):
obj=getOr404(JCPItemInfo, kw["id"], "/%s/index"%self.url)
combo_mapping_obj = JCPComboMappingInfo.get_by_main_code(obj.packaging_code)
params = {"lastModifyBy": request.identity["user"],
"lastModifyTime": dt.now()
}
combo_mapping_params = {"lastModifyBy": request.identity["user"],
"lastModifyTime": dt.now()
}
combo_mapping_fields = ["hangtang_pkg_code",
"label_pkg_code",
]
combo_mapping_flag = False
for f in self.formFields:
if f in kw.keys() and f not in combo_mapping_fields:
params[f]=kw[f]
if f in kw.keys() and len(kw[f]) > 0 and f in combo_mapping_fields:

| dhermes/google-cloud-python | bigquery_datatransfer/google/cloud/bigquery_datatransfer_v1/gapic/enums.py | Python | apache-2.0 | 5,008 | 0.001997 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class NullValue(enum.IntEnum):
"""
``NullValue`` is a singleton enumeration to represent the null value for
the ``Value`` type union.
The JSON representation for ``NullValue`` is JSON ``null``.
Attributes:
NULL_VALUE (int): Null value.
"""
NULL_VALUE = 0
class TransferType(enum.IntEnum):
"""
DEPRECATED. Represents data transfer type.
Attributes:
TRANSFER_TYPE_UNSPECIFIED (int): Invalid or Unknown transfer type placeholder.
BATCH (int): Batch data transfer.
STREAMING (int): Streaming data transfer. Streaming data source currently doesn't
support multiple transfer configs per project.
"""
TRANSFER_TYPE_UNSPECIFIED = 0
BATCH = 1
STREAMING = 2
class TransferState(enum.IntEnum):
"""
Represents data transfer run state.
Attributes:
TRANSFER_STATE_UNSPECIFIED (int): State placeholder.
PENDING (int): Data transfer is scheduled and is waiting to be picked up by
data transfer backend.
RUNNING (int): Data transfer is in progress.
SUCCEEDED (int): Data transfer completed successfully.
FAILED (int): Data transfer failed.
CANCELLED (int): Data transfer is cancelled.
"""
TRANSFER_STATE_UNSPECIFIED = 0
PENDING = 2
RUNNING = 3
SUCCEEDED = 4
FAILED = 5
CANCELLED = 6
class TransferMessage(object):
class MessageSeverity(enum.IntEnum):
"""
Represents data transfer user facing message severity.
Attributes:
MESSAGE_SEVERITY_UNSPECIFIED (int): No severity specified.
INFO (int): Informational message.
WARNING (int): Warning message.
ERROR (int): Error message.
"""
MESSAGE_SEVERITY_UNSPECIFIED = 0
INFO = 1
WARNING = 2
ERROR = 3
class DataSourceParameter(object):
class Type(enum.IntEnum):
"""
Parameter type.
Attributes:
TYPE_UNSPECIFIED (int): Type unspecified.
STRING (int): String parameter.
INTEGER (int): Integer parameter (64-bits).
Will be serialized to json as string.
DOUBLE (int): Double precision floating point parameter.
BOOLEAN (int): Boolean parameter.
RECORD (int): Record parameter.
PLUS_PAGE (int): Page ID for a Google+ Page.
"""
TYPE_UNSPECIFIED = 0
STRING = 1
INTEGER = 2
DOUBLE = 3
BOOLEAN = 4
RECORD = 5
PLUS_PAGE = 6
class DataSource(object):
class AuthorizationType(enum.IntEnum):
"""
The type of authorization needed for this data source.
Attributes:
AUTHORIZATION_TYPE_UNSPECIFIED (int): Type unspecified.
AUTHORIZATION_CODE (int): Use OAuth 2 authorization codes that can be exchanged
for a refresh token on the backend.
GOOGLE_PLUS_AUTHORIZATION_CODE (int): Return an authorization code for a given Google+ page that can then be
exchanged for a refresh token on the backend.
"""
AUTHORIZATION_TYPE_UNSPECIFIED = 0
AUTHORIZATION_CODE = 1
GOOGLE_PLUS_AUTHORIZATION_CODE = 2
class DataRefreshType(enum.IntEnum):
"""
Represents how the data source supports data auto refresh.
Attributes:
DATA_REFRESH_TYPE_UNSPECIFIED (int): The data source won't support data auto refresh, which is default value.
SLIDING_WINDOW (int): The data source supports data auto refresh, and runs will be scheduled
for the past few days. Does not allow custom values to be set for each
transfer config.
CUSTOM_SLIDING_WINDOW (int): The data source supports data auto refresh, and runs will be scheduled
for the past few days. Allows custom values to be set for each transfer
config.
"""
DATA_REFRESH_TYPE_UNSPECIFIED = 0
SLIDING_WINDOW = 1
CUSTOM_SLIDING_WINDOW = 2
class ListTransferRunsRequest(object):
class RunAttempt(enum.IntEnum):
"""
Represents which runs should be pulled.
Attributes:
RUN_ATTEMPT_UNSPECIFIED (int): All runs should be returned.
LATEST (int): Only latest run per day should be returned.
"""
RUN_ATTEMPT_UNSPECIFIED = 0
LATEST = 1

| cvrebert/TritonScraper | src/triton_scraper/cape.py | Python | mit | 11,607 | 0.008529 |
# Copyright (c) 2010 Christopher Rebert <code@rebertia.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module browses, scrapes, and parses `CAPE's website <http://www.cape.ucsd.edu>`_ into useful objects.
:copyright: (c) 2010 by Christopher Rebert.
:license: MIT, see :file:`LICENSE.txt` for more details.
"""
from contextlib import closing as _closing
from time import sleep as _sleep
from decimal import Decimal
from collections import namedtuple as _namedtuple#, OrderedDict
from urlparse import urljoin as _urljoin
from urllib import urlencode as _urlencode
import triton_scraper.config
from triton_scraper.util import RELATIVE_PREFIX, XPath
from triton_scraper.fetchparse import make_tree4url
from lxml import etree
CAPE_SEARCH_URL = "http://www.cape.ucsd.edu/stats.html"
_tree4url = make_tree4url()
def url2tree(url):
tree, _url = _tree4url(url)
return tree
#FIXME: enable
# self_cape = XPath(RELATIVE_PREFIX+"/div[@align='right' and text() = 'SelfCAPE']")
search_forms = XPath(RELATIVE_PREFIX+"/form[@name='searchQuery']")
select_elements = XPath(RELATIVE_PREFIX+"/select")
def _search_form_and_select_tag():
tree = url2tree(CAPE_SEARCH_URL)
form = search_forms(tree)[0]
select = select_elements(form)[0]
return form, select
VALUE = 'value'
option_elements = XPath(RELATIVE_PREFIX+"/option")
Department = _namedtuple('Department', "code name form_value")
def list_departments():
form, select = _search_form_and_select_tag()
for opt in option_elements(select):
val = opt.get(VALUE)
if not val: continue
code, name = opt.text.split(" - ")
yield Department(code.strip(), name.strip(), val)
# HTTP_METHOD = 'method'
# HTTP_GET = 'get'
NAME = 'name'
ACTION = 'action'
section_links = XPath(RELATIVE_PREFIX+"/a[@target='_new']/@href")
def capes_for(department_form_val):
form, select = _search_form_and_select_tag()
field_name = select.get(NAME)
# method = form.get(HTTP_METHOD)
# if method != HTTP_GET:
# raise ValueError("Expected GET form submission method; Got "+repr(method))
action = form.get(ACTION)
dest_url = _urljoin(CAPE_SEARCH_URL, action)
dept_url = "%s?%s" % (dest_url, _urlencode({field_name:department_form_val}))
tree = url2tree(dept_url)
for link in section_links(tree):
cape = parse_detailed_page(link)
if cape is not None:
yield cape
page_is_dud = XPath("/html/body[contains(text(), 'No statistics found')]")
departments = XPath(RELATIVE_PREFIX+"/td[@width='110']/text()")
enrollments = XPath(RELATIVE_PREFIX+"/td[@width='155']/text()")
questionaires_returneds = XPath(RELATIVE_PREFIX+"/td[@width='180']/text()")
term_codes = XPath(RELATIVE_PREFIX+"/td[@width='109']/div/text()")
course_codes = XPath(RELATIVE_PREFIX+"/td[@width='56']/text()")
instructor_names = XPath(RELATIVE_PREFIX+"/td[@colspan='2' and @height='15']/text()")
team_taught = XPath(RELATIVE_PREFIX+"/td[@colspan='9' and text()='Team Taught']")
def parse_detailed_page(url):
tree = url2tree(url)
if page_is_dud(tree):
return None
department_code = departments(tree)[0].strip()
enrollment = int(enrollments(tree)[0].split(": ")[1])
respondents = int(questionaires_returneds(tree)[0].split(": ")[1])
term_code = term_codes(tree)[0]
subject_code, course_number = course_codes(tree)[0].split(" ")
instructor = instructor_names(tree)
if instructor:
instructor = instructor[0].strip() #FIXME: parse into Instructor. multi-instructor courses have semicolons btw names. last, first
else:
instructor = None # Some courses have no listed instructor
nums = [string2num(s) for s in (u.strip() for u in numbers(tree)) if s]
class_levels = parse_class_levels(nums)
reasons_for_taking = parse_reasons_for_taking(nums)
expected_grades = parse_expected_grades(nums)
# _expected_gpa = Decimal(expected_gpas(tree)[0].strip())
agree_disagree_qs = questions_negative_4_thru_16E(tree)[NUM_PRE_AGREEMENT_QUESTIONS-1:-NUM_INSTRUCTOR_QUESTIONS] # skip class level, reason, expected grade, and Instructor Questions
taught_by_team = team_taught(tree)
num_agreement_qs = 6 if taught_by_team else NUM_AGREEMENT_QUESTIONS
sixteen_agree_disagrees = [parse_agree_disagree_row(nums) for i in range(num_agreement_qs)]
question2agreement = zip(agree_disagree_qs[-num_agreement_qs:], sixteen_agree_disagrees)#OrderedDict()
skip_instructor_questions(nums)
hours_studying_per_week = parse_study_hours(nums)
attendance = parse_attendance(nums)
recommend_course = parse_recommendations(nums)
if not taught_by_team:
recommend_prof = parse_recommendations(nums)
else:
recommend_prof = RecommendLevel(0, 0)
cape = CourseAndProfessorEvaluation(-1,#FIXME
department_code, term_code, subject_code, course_number, instructor, enrollment, respondents, class_levels, reasons_for_taking, expected_grades, hours_studying_per_week, attendance, recommend_course, recommend_prof, question2agreement)
if nums:
print cape
print nums
raise ValueError, "Problem when trying to parse %s %s %s (%s)" % (term_code, course_code, instructor, url)
return cape
AgreementLevels = _namedtuple('AgreementLevels', "na strong_disagree disagree neutral agree strong_agree")
# FIXME: account for #responses != total #students
#(None,) + range(-2,3)
NUM_AGREEMENT_LEVELS = len(AgreementLevels._fields)
def parse_agree_disagree_row(l):
responses = slice_off(l, NUM_AGREEMENT_LEVELS)
_num_resp = slice_off(l, 1)
if _num_resp:# CAPE pages don't include stats if no non-N/A responses
_mean, _std_dev = slice_off(l, 2)
_percents = slice_off(l, NUM_AGREEMENT_LEVELS-1) # no percentage for N/A
return AgreementLevels(*responses)
StudyHours = _namedtuple('StudyHours', "zero_one two_three four_five six_seven eight_nine ten_eleven twelve_thirteen fourteen_fifteen sixteen_seventeen eighteen_nineteen twenty_plus")
# setup StudyHours.VALUES
# _values = []
# _n = Decimal('0.5')
# TWO = Decimal('2')
# for _i in range(11):
# _values.append(_n)
# _n += TWO
# StudyHours.VALUES = tuple(_values)
# del _values, _n, _i
NUM_STUDY_HOURS_INTERVALS = 11
def parse_study_hours(l):
hours = StudyHours(*slice_off(l, NUM_STUDY_HOURS_INTERVALS))
_num_resp = slice_off(l, 1)
if _num_resp:# CAPE pages don't include stats if no responses given
_avg_hours = slice_off(l, 1)
_percents = slice_off(l, NUM_STUDY_HOURS_INTERVALS)
return hours
Attendance = _namedtuple('Attendance', "rarely some most")
# 0, 0.5, 1
NUM_ATTENDANCE_TYPES = len(Attendance._fields)
def parse_attendance(l):
attendance = Attendance(*slice_off(l, NUM_ATTENDANCE_TYPES))
_num_resp = slice_off(l, 1)
if _num_resp:
_percents = slice_off(l, NUM_ATTENDANCE_TYPES)
return attendance
RecommendLevel = _namedtuple('RecommendLevel', "no yes")
#False, True
def parse_recommendations(l):
rec_level = RecommendLevel(*slice_off(l, 2))
_num_resp = slice_off(l, 1)
_percents = slice_off(l, 2)
return rec_level
numbers = XPath(RELATIVE_PREF

| javihernandez/accerciser-mirror | src/lib/accerciser/prefs_dialog.py | Python | bsd-3-clause | 6,660 | 0.008859 |
'''
Defines the preferences dialog.
@author: Eitan Isaacson
@organization: Mozilla Foundation
@copyright: Copyright (c) 2006, 2007 Mozilla Foundation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import Atk as atk
from gi.repository.Gio import Settings as GSettings
from i18n import _
import node
from tools import parseColorString
class AccerciserPreferencesDialog(gtk.Dialog):
'''
Class that creates a preferences dialog.
'''
def __init__(self, plugins_view=None, hotkeys_view=None):
'''
Initialize a preferences dialog.
@param plugins_view: Treeview of plugins.
@type plugins_view: L{PluginManager._View}
@param hotkeys_view: Treeview of global hotkeys.
@type hotkeys_view: L{HotkeyTreeView}
'''
gtk.Dialog.__init__(self, _('accerciser Preferences'),
buttons=(gtk.STOCK_CLOSE, gtk.ResponseType.CLOSE))
self.connect('response', self._onResponse)
self.set_default_size(500,250)
notebook = gtk.Notebook()
vbox = self.get_children()[0]
vbox.add(notebook)
for view, section in [(plugins_view, _('Plugins')),
(hotkeys_view, _('Global Hotkeys'))]:
if view is not None:
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.ShadowType.IN)
sw.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC)
sw.set_size_request(500, 150)
sw.add(view)
notebook.append_page(sw, gtk.Label(section))
notebook.append_page(_HighlighterView(), gtk.Label(_('Highlighting')))
def _onResponse(self, dialog, response_id):
'''
Callback for dialog responses, always destroy it.
@param dialog: This dialog.
@type dialog: L{AccerciserPreferencesDialog}
@param response_id: Response ID received.
@type response_id: integer
'''
dialog.destroy()
class _HighlighterView(gtk.Alignment):
'''
A container widget with the settings for the highlighter.
'''
def __init__(self):
gtk.Alignment.__init__(self)
self.set_padding(12, 12, 18, 12)
self.gsettings = GSettings(schema='org.a11y.Accerciser')
self._buildUI()
def _buildUI(self):
'''
Programmatically build the UI.
'''
table = gtk.Table(3, 2)
table.set_col_spacings(6)
self.add(table)
labels = [None, None, None]
controls = [None, None, None]
labels[0] = gtk.Label(_('Highlight duration:'))
controls[0] = gtk.SpinButton()
controls[0].set_range(0.01, 5)
controls[0].set_digits(2)
controls[0].set_value(self.gsettings.get_double('highlight-duration'))
controls[0].set_increments(0.01, 0.1)
controls[0].connect('value-changed', self._onDurationChanged)
labels[1] = gtk.Label(_('Border color:'))
controls[1] = self._ColorButton(node.BORDER_COLOR, node.BORDER_ALPHA)
controls[1].connect('color-set', self._onColorSet, 'highlight-border')
controls[1].set_tooltip_text(_('The border color of the highlight box'))
labels[2] = gtk.Label(_('Fill color:'))
controls[2] = self._ColorButton(node.FILL_COLOR, node.FILL_ALPHA)
controls[2].connect('color-set', self._onColorSet, 'highlight-fill')
controls[2].set_tooltip_text(_('The fill color of the highlight box'))
for label, control, row in zip(labels, controls, range(3)):
label.set_alignment(0, 0.5)
table.attach(label, 0, 1, row, row + 1, gtk.AttachOptions.FILL)
table.attach(control, 1, 2, row, row + 1, gtk.AttachOptions.FILL)
for label, control in zip(map(lambda x: x.get_accessible(),labels),
map(lambda x: x.get_accessible(),controls)):
label.add_relationship(atk.RelationType.LABEL_FOR, control)
control.add_relationship(atk.RelationType.LABELLED_BY, label)
def _onDurationChanged(self, spin_button):
'''
Callback for the duration spin button. Update key and the global variable
in the L{node} module.
@param spin_button: The spin button that emitted the value-changed signal.
@type spin_button: gtk.SpinButton
'''
node.HL_DURATION = int(spin_button.get_value()*1000)
self.gsettings.set_double('highlight-duration',
spin_button.get_value())
def _onColorSet(self, color_button, key):
'''
Callback for a color button. Update gsettings and the global variables
in the L{node} module.
@param color_button: The color button that emitted the color-set signal.
@type color_button: l{_HighlighterView._ColorButton}
@param key: the key name suffix for this color setting.
@type key: string
'''
if 'fill' in key:
node.FILL_COLOR = color_button.get_rgb_string()
node.FILL_ALPHA = color_button.get_alpha_float()
else:
node.BORDER_COLOR = color_button.get_rgb_string()
node.BORDER_ALPHA = color_button.get_alpha_float()
self.gsettings.set_string(key, color_button.get_rgba_string())
class _ColorButton(gtk.ColorButton):
'''
ColorButton derivative with useful methods for us.
'''
def __init__(self, color, alpha):
color = gdk.color_parse(color)
gtk.ColorButton.__init__(self)
self.set_use_alpha(True)
self.set_alpha(int(alpha*0xffff))
self.set_color(color)
def get_rgba_string(self):
'''
Get the current color and alpha in string format.
@return: String in the format of #rrggbbaa.
@rtype: string.
'''
color = self.get_color()
color_val = 0
color_val |= color.red >> 8 << 24
color_val |= color.green >> 8 << 16
color_val |= color.blue >> 8 << 8
color_val |= self.get_alpha() >> 8
return \
'#' + hex(color_val).replace('0x', '').replace('L', '').rjust(8, '0')
def get_rgb_string(self):
'''
Get the current color in string format.
@return: String in the format of #rrggbb.
@rtype: string.
'''
color = self.get_color()
color_val = 0
color_val |= color.red >> 8 << 16
color_val |= color.green >> 8 << 8
color_val |= color.blue >> 8
return \
'#' + hex(color_val).replace('0x', '').replace('L', '').rjust(6, '0')
def get_alpha_float(self):
'''
Get the current alpha as a value from 0.0 to 1.0.
'''
return self.get_alpha()/float(0xffff)

| evernym/plenum | plenum/test/view_change/test_no_future_view_change_while_view_change.py | Python | apache-2.0 | 1,825 | 0.003836 |
import pytest
from plenum.test.delayers import icDelay
from plenum.test.helper import checkViewNoForNodes
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import checkProtocolInstanceSetup
from plenum.test.view_change.helper import ensure_view_change
@pytest.mark.skip(reason="we don't use FutureViewChangeDone messages anymore")
def test_no_propagated_future_view_change_while_view_change(txnPoolNodeSet, looper):
# the last node is a lagging one, which will receive ViewChangeDone messages for future view
viewNo = checkViewNoForNodes(txnPoolNodeSet)
lagged_node = txnPoolNodeSet[-1]
other_nodes = list(set(txnPoolNodeSet) - {lagged_node})
# emulate view change in progress
lagged_node.master_replica._consensus_data.waiting_for_new_view = True
old_view_no = checkViewNoForNodes([lagged_node])
initial_vhdc = \
lagged_node.view_changer.spylog.count(lagged_node.view_changer.process_future_view_vchd_msg.__name__)
# delay INSTANCE CHANGE on lagged nodes, so all nodes except the lagging one finish View Change
with delay_rules(lagged_node.nodeIbStasher, icDelay()):
# make sure that View Change happened on all nodes but the lagging one
ensure_view_change(looper, other_nodes)
checkProtocolInstanceSetup(looper=looper, nodes=other_nodes, instances=range(2))
ensure_all_nodes_have_same_data(looper, nodes=other_nodes)
    # check that lagged node received 3 Future VCD, but didn't start a new view change
assert len(other_nodes) + initial_vhdc == \
lagged_node.view_changer.spylog.count(lagged_node.view_changer.process_future_view_vchd_msg.__name__)
assert old_view_no == checkViewNoForNodes([lagged_node])
|
mozilla/kitsune
|
kitsune/users/migrations/0004_auto_add_contrib_email_flags.py
|
Python
|
bsd-3-clause
| 897
| 0.00223
|
# -*- coding: utf-8 -*-
"""
Add first_answer_email_sent and first_l10n_email_sent fields to Profile.
"""
from __future__ import unicode_literals
from django.db import models, migrations
import kitsune.sumo.models # noqa
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20150430_1304'),
]
operations = [
migrations.AddField(
model_name='profile',
name='first_answer_email_sent',
            field=models.BooleanField(default=False, help_text='Has been sent a first answer contribution email.'),
preserve_default=True,
),
migrations.AddField(
            model_name='profile',
name='first_l10n_email_sent',
field=models.BooleanField(default=False, help_text='Has been sent a first l10n contribution email.'),
preserve_default=True,
),
]
|
iHamsterball/stellaris_tech_tree
|
stellaris_tech_tree/views.py
|
Python
|
gpl-2.0
| 508
| 0
|
from django.core import serializers
from django.http import HttpResponse
from django.template import loader
from django.utils import translation
from django import http
from django.conf import settings
from .versions import versions
def index(request):
template = loader.get_template('index.html')
return HttpResponse(template.render({'version_list': versions}, request))
def about(request):
template = loader.get_template('about.html')
return HttpResponse(template.render({}, request))
|
Kallehz/Python
|
Verkefni 2/Transpose.py
|
Python
|
apache-2.0
| 55
| 0.018182
|
def transpose(a):
    return list(map(list, zip(*a)))
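# Hedged usage sketch (added for illustration, not in the original file):
# transpose swaps rows and columns, so a 2x3 matrix becomes a 3x2 matrix, e.g.
#   transpose([[1, 2, 3], [4, 5, 6]])  ->  [[1, 4], [2, 5], [3, 6]]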
|
google/starthinker
|
cloud_function/main.py
|
Python
|
apache-2.0
| 1,752
| 0.005137
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
""" Typically called by Cloud Scheduler with recipe JSON payload.
Sample JSON POST payload:
{
"setup":{
"id":"", #string - Cloud Project ID for billing.
"auth":{
"service":{}, #dict - Optional Cloud Service JSON credentials when task uses service.
"user":{} #dict -
|
Optional Cloud User JSON credentials when task uses user.
}
},
"tasks":[ # list of recipe tasks to execute, see StarThinker scripts for examples.
    {
      "hello":{
"auth":"user", # not used in demo, for display purposes only.
"say":"Hello World"
}}
]
}
Documentation: https://github.com/google/starthinker/blob/master/tutorials/deploy_cloudfunction.md
"""
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
def run(request):
recipe = request.get_json(force=True)
execute(Configuration(recipe=recipe, verbose=True), recipe.get('tasks', []), force=True)
return 'DONE'
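# Minimal local-invocation sketch (added for illustration; assumes a stand-in
# request object, since in production the Cloud Functions runtime passes a
# Flask request):
#
#   class _FakeRequest(object):
#       def get_json(self, force=True):
#           return {"setup": {"id": "my-project", "auth": {"user": {}}},
#                   "tasks": [{"hello": {"auth": "user", "say": "Hello World"}}]}
#
#   print(run(_FakeRequest()))  # expected to return 'DONE' when the tasks succeed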
|
Null01/detect-polygons-from-image
|
src/generate-polygons/vectorize-img-03.py
|
Python
|
gpl-3.0
| 1,108
| 0.01083
|
'''
This program illustrates the use of findContours and drawContours.
The original image is put up along with the image of drawn contours.
Usage:
contours.py
A trackbar is put up which controls the contour level from -3 to 3
'''
import numpy as np
import cv2
name_file_tile = "0.45-nd.png"
file_tile = "../../wp-admin/img/" + name_file_tile
img = cv2.imread(file_tile)
h, w = img.shape[:2]
imgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
_, contours0, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
#def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = 0
#cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255), 3, , hierarchy, abs(levels) )
#drawContours( vis, contours, -1, (255,255,255), 3)
cv2.drawContours( vis, contours, -1, (255, 255, 255), 1, 1, hierarchy )
cv2.imshow('contours', vis)
#update(3)
#cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
#cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()
|
statlab/permute
|
permute/tests/test_stratified.py
|
Python
|
bsd-2-clause
| 3,861
| 0.003108
|
import numpy as np
import math
from numpy.random import RandomState
import pytest
from cryptorandom.cryptorandom import SHA256
from ..stratified import stratified_permutationtest as spt
from ..stratified import stratified_permutationtest_mean as sptm
from ..stratified import corrcoef, sim_corr, stratified_two_sample
def test_stratified_permutationtest():
group = np.repeat([1, 2, 3], 9)
condition = np.repeat([1, 2, 3] * 3, 3)
response = np.zeros_like(group)
response[[0, 1, 3, 9, 10, 11, 18, 19, 20]] = 1
    res = spt(group, condition, response, reps=1000, seed=42)
    res1 = spt(group, condition, response, alternative='less', reps=1000, seed=42)
assert res[0] < 0.01
assert res[1] == res1[1]
np.testing.assert_almost_equal(res[0], 1-res1[0])
res2 = spt(group, condition, response, alternative='two-sided', reps=1000, seed=42)
assert res2[0] < 0.02
group = np.array([1, 1, 1])
condition = np.array([2, 2, 2])
response = np.zeros_like(group)
res2 = spt(group, condition, response, reps=1000, seed=42)
assert res2 == (1.0, np.nan, None)
def test_stratified_permutationtest_mean():
group = np.array([1, 2, 1, 2])
condition = np.array([1, 1, 2, 2])
response = np.zeros_like(group)
groups = np.unique(group)
conditions = np.unique(condition)
res = sptm(group, condition, response, groups, conditions)
assert res == 0.0
res2 = sptm(group, condition, response) # check defaults work
assert res2 == 0.0
def test_stratified_permutationtest_mean_error():
group = np.array([1, 1, 1])
condition = np.array([2, 2, 2])
response = np.zeros_like(group)
groups = np.unique(group)
conditions = np.unique(condition)
pytest.raises(ValueError, sptm, group, condition, response, groups, conditions)
def test_corrcoef():
prng = RandomState(42)
x = prng.rand(10)
y = x
group = prng.randint(3, size=10)
res1 = corrcoef(x, y, group)
res2 = corrcoef(x, y, group)
assert res1 == res2
def test_sim_corr():
prng = SHA256(42)
x = prng.random(10)
y = x
group = prng.randint(0, 3, size=10)
res1 = sim_corr(x, y, group, seed=prng, reps=100)
res2 = sim_corr(x, y, group, seed=prng, alternative='less', reps=100)
res3 = sim_corr(x, y, group, seed=prng, alternative='two-sided', reps=100)
np.testing.assert_almost_equal(res1[0], 1-res2[0])
assert res1[1] == res2[1]
assert res1[1] == res3[1]
assert 2*res1[0] == res3[0]
def test_strat_tests_equal():
group = np.repeat([1, 2, 3], 10)
condition = np.repeat([1, 2] * 3, 5)
response = np.zeros_like(group)
response[[0, 1, 3, 9, 10, 11, 18, 19, 20]] = 1
res1 = spt(group, condition, response, reps=100, seed=42)
res2 = stratified_two_sample(group, condition, response, reps=100,
stat='mean_within_strata', seed=42)
assert res1[1] == res2[1]
assert math.fabs(res1[0]-res2[0]) < 0.05
def test_stratified_two_sample():
group = np.repeat([1, 2, 3], 10)
condition = np.repeat([1, 2] * 3, 5)
response = np.zeros_like(group)
response[[0, 1, 3, 9, 10, 11, 18, 19, 20]] = 1
res = stratified_two_sample(group, condition, response, reps=1000,
stat='mean', seed=42)
np.testing.assert_almost_equal(res[0], 0.245, 2)
assert res[1] == 0.2
(p, t, dist) = stratified_two_sample(group, condition, response, reps=1000,
stat='mean', seed=42, keep_dist=True)
assert res == (p, t)
stat_fun = lambda u: sptm(group, condition, u, np.unique(group), np.unique(condition))
res = stratified_two_sample(group, condition, response, reps=100,
stat=stat_fun, seed=42)
np.testing.assert_almost_equal(res[0], 0.8712, 3)
assert res[1] == 0.30
|
serbyy/MIDAS
|
midas/modules/example_analyzeplist.py
|
Python
|
mit
| 7,999
| 0.000125
|
#!/usr/bin/env python
"""
This is an example MIDAS module
"""
from os.path import isfile
from os import chmod
from time import time, gmtime, strftime
import logging
from sys import argv
from lib.ty_orm import TyORM
from lib.plist import read_plist, get_plist_key
from lib.config import Config
from lib.data_science import DataScience
from lib.helpers.filesystem import hash_file, list_launch_agents, \
list_launch_daemons, list_app_info_plist, list_plugin_info_plist, \
list_current_host_pref_files
from lib.helpers.utilities import to_ascii, encode, error_running_file
from lib.tables.example import tables
class AnalyzePlist(object):
"""AnalyzePlist analyzes property list files installed on the system"""
def __init__(self):
        self.data = {}
self.pre_changed_files = []
self.post_changed_files = []
self.pre_new_files = []
self.post_new_files = []
self.check_keys = Config.get("plist_check_keys")
self.check_keys_hash = Config.get("plist_check_keys_hash")
self.hashes = self.gather_hashes()
self.files = list_launch_agents() + list_launch_daemons() + \
list_app_info_plist() + list_plugin_info_plist() + \
list_current_host_pref_files()
self.changed_files, self.new_files, \
self.same_files = self.bucket_files(
self.files,
self.hashes,
)
self.plist_name = None
self.plist_file = None
if self.changed_files:
self.analyze_changed_files()
if self.new_files:
self.analyze_new_files()
def gather_hashes(self):
"""
return a dictionary of plist names and their corresponding hashes
"""
hash_data = ORM.select("plist", ["name", "hash"])
hash_dict = {}
if hash_data:
for i in hash_data:
hash_dict[i['name']] = i['hash']
return hash_dict
def bucket_files(self, files, hashes):
"""
takes an array of files and a dictionary in {file: hash} form and
        returns data structures indicative of which files have changed since
the last execution
"""
# changed files and new_files are dicts so that we can store the hash
# when we compute and thus not have to compute it twice
changed_files = {}
new_files = {}
# since the hash of same_files hasn't changed, we don't need to store
# it past the comparison
same_files = []
for fname in files:
file_hash = hash_file(fname)
if fname in hashes:
if hashes[fname] == file_hash:
same_files.append(fname)
else:
changed_files[fname] = file_hash
else:
new_files[fname] = file_hash
return changed_files, new_files, same_files
def check_key(self, key):
"""
Log the values of the launch agent/daemon keys in self.check_keys
"""
value = get_plist_key(self.plist_file, key)
if value:
self.data[key.lower()] = str(to_ascii(value))
else:
self.data[key.lower()] = "KEY DNE"
def check_key_executable(self, key):
"""
Log the values of the launch agent/daemon keys in self.check_keys_hash
"""
key = key.lower()
key_hash = "%s_hash" % (key.lower(), )
value = get_plist_key(self.plist_file, key)
if value:
try:
if isinstance(value, basestring):
# This should only get triggered by the Program key
self.data[key] = str(to_ascii(value))
self.data[key_hash] = hash_file(str(to_ascii(value)))
elif isinstance(value, (list, tuple)):
# This should only get triggered by the
# ProgramArguments key
self.data[key] = encode(" ".join(value))
self.data[key_hash] = hash_file(str(value[0]))
except IOError:
self.data[key_hash] = "File DNE"
else:
self.data[key] = "KEY DNE"
self.data[key_hash] = "KEY DNE"
def analyze_changed_files(self):
"""
analyze plists that have changed since last execution
"""
where_params = self.changed_files.keys()
where_statement = "name=%s" % (" OR name=".join(
['?'] * len(where_params)), )
where_clause = [where_statement, where_params]
self.pre_changed_files = ORM.select("plist", None, where_clause)
for fname, fname_hash in self.changed_files.iteritems():
self.data = {}
self.plist_name = fname
self.plist_file = read_plist(fname)
self.data["name"] = self.plist_name
self.data["date"] = exec_date
self.data["hash"] = fname_hash
for i in self.check_keys_hash:
self.check_key_executable(i)
for i in self.check_keys:
self.check_key(i)
# Aggregate self.data
self.post_changed_files.append(self.data)
def analyze_new_files(self):
"""
analyze new plists that are on the host
"""
where_params = self.new_files.keys()
where_statement = "name=%s" % (" OR name=".join(
['?'] * len(where_params)), )
where_clause = [where_statement, where_params]
self.pre_new_files = ORM.select("plist", None, where_clause)
self.post_new_files = []
for fname, fname_hash in self.new_files.iteritems():
self.data = {}
self.plist_name = fname
self.plist_file = read_plist(fname)
self.data["name"] = self.plist_name
self.data["date"] = exec_date
self.data["hash"] = fname_hash
for i in self.check_keys_hash:
self.check_key_executable(i)
for i in self.check_keys:
self.check_key(i)
# Aggregate self.data
self.post_new_files.append(self.data)
if __name__ == "__main__":
start = time()
# the "exec_date" is used as the "date" field in the datastore
exec_date = strftime("%a, %d %b %Y %H:%M:%S", gmtime())
# the table definitions are stored in a library file. this is instantiating
# the ORM object and initializing the tables
ORM = TyORM(Config.get("database"))
if isfile(Config.get("database")):
chmod(Config.get("database"), 0600)
for k, v in tables.iteritems():
ORM.initialize_table(k, v)
###########################################################################
# Gather data
###########################################################################
try:
a = AnalyzePlist()
if a is not None:
plist_pre_changed_files = a.pre_changed_files
plist_post_changed_files = a.post_changed_files
plist_pre_new_files = a.pre_new_files
plist_post_new_files = a.post_new_files
data_science = DataScience(
ORM,
plist_post_changed_files,
"plist",
"name",
plist_pre_changed_files,
)
data_science.get_changed_entries()
data_science = DataScience(
ORM,
plist_post_new_files,
"plist",
"name",
plist_pre_new_files,
)
data_science.get_new_entries()
except Exception, error:
print error_running_file(__file__, "lad", error)
end = time()
# to see how long this module took to execute, launch the module with
# "--log" as a command line argument
if "--log" in argv[1:]:
logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.info("Execution took %s seconds.", str(end - start))
|
joachimwolff/minHashNearestNeighbors
|
sparse_neighbors_search/neighbors/wtaHashClassifier.py
|
Python
|
mit
| 15,173
| 0.003691
|
# Copyright 2016, 2017, 2018, 2019, 2020 Joachim Wolff
# PhD Thesis
#
# Copyright 2015, 2016 Joachim Wolff
# Master Thesis
# Tutor: Fabrizio Costa
# Winter semester 2015/2016
#
# Chair of Bioinformatics
# Department of Computer Science
# Faculty of Engineering
# Albert-Ludwigs-University Freiburg im Breisgau
__author__ = 'joachimwolff'
from collections import Counter
import numpy as np
from numpy import asarray
from sklearn.utils import check_array
from sklearn.utils import check_X_y
from sklearn.metrics import accuracy_score
import logging
from .wtaHash import WtaHash
class WtaHashClassifier():
"""Classifier implementing the k-nearest neighbors vote on sparse data sets.
Based on a dimension reduction with minimum hash functions.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
fast : {True, False}, optional (default = False)
- True: will only use an inverse index to compute a k_neighbor query.
- False: an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_neighbor query. The
original data is stored in the memory.
number_of_hash_functions : int, optional (default = '400')
Number of hash functions to use for computing the inverse index.
max_bin_size : int, optional (default = 50)
The number of maximal collisions for one hash value of one hash function. If one value of a hash function
has more collisions, this value will be ignored.
minimal_blocks_in_common : int, optional (default = 1)
The minimal number of hash collisions two instances have to be in common to be recognised. Everything less
will be ignored.
shingle_size : int, optional (default = 4)
Reduction factor for the signature size.
E.g. number_of_hash_functions=400 and shingle_size=4 --> Size of the signature will be 100
excess_factor : int, optional (default = 5)
Factor to return more neighbors internally as defined with n_neighbors. Factor is useful to increase the
precision of the :meth:`algorithm=exact` version of the implementation.
E.g.: n_neighbors = 5, excess_factor = 5. Internally n_neighbors*excess_factor = 25 neighbors will be returned.
Now the reduced data set for sklearn.NearestNeighbors is of size 25 and not 5.
number_of_cores : int, optional
Number of cores that should be used for openmp. If your system doesn't support openmp, this value
will have no effect. If it supports openmp and it is not defined, the maximum number of cores is used.
chunk_size : int, optional
Number of elements one cpu core should work on. If it is set to "0" the default behaviour of openmp is used;
e.g. for an 8-core cpu, the chunk_size is set to 8. Every core will get 8 elements, process these and get
another 8 elements until everything is done. If you set chunk_size to "-1" all cores
are getting the same amount of data at once; e.g. 8-core cpu and 128 elements to process, every core will
get 16 elements at once.
Notes
-----
    The documentation is copied from scikit-learn and was only extended in a few places. All examples are available there.
Original documentation is available at: http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier
Sources:
Basic algorithm:
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
Idea behind implementation:
https://en.wikipedia.org/wiki/Locality-sensitive_hashing
Implementation is using scikit learn:
http://scikit-learn.org/dev/index.html
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier
Algorithm based on:
Heyne, S., Costa, F., Rose, D., & Backofen, R. (2012).
GraphClust: alignment-free structural clustering of local RNA secondary structures.
Bioinformatics, 28(12), i224-i232.
http://bioinformatics.oxfordjournals.org/content/28/12/i224.full.pdf+html"""
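    # Hedged usage sketch (added for illustration; values are arbitrary and the
    # class follows the scikit-learn style fit/kneighbors API described above):
    #
    #   clf = WtaHashClassifier(n_neighbors=5, number_of_hash_functions=400)
    #   clf.fit(X_train_sparse, y_train)
    #   distances, indices = clf.kneighbors(X_test_sparse, n_neighbors=5)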
def __init__(self, n_neighbors=5, radius=1.0, fast=False, number_of_hash_functions=400,
max_bin_size=50, minimal_blocks_in_common=1, shingle_size=4, excess_factor=5,
similarity=False, number_of_cores=None, chunk_size=None, prune_inverse_index=-1,
prune_inverse_index_after_instance=-1.0, remove_hash_function_with_less_entries_as=-1,
block_size=5, shingle=0, store_value_with_least_sigificant_bit=0,
rangeK_wta=20, speed_optimized=None, accuracy_optimized=None): # cpu_gpu_load_balancing=0,
cpu_gpu_load_balancing = 0
self._wtaHash = WtaHash(n_neighbors=n_neighbors, radius=radius,
fast=fast, number_of_hash_functions=number_of_hash_functions,
max_bin_size=max_bin_size, minimal_blocks_in_common=minimal_blocks_in_common,
shingle_size=shingle_size, excess_factor=excess_factor,
similarity=similarity, number_of_cores=number_of_cores, chunk_size=chunk_size, prune_inverse_index=prune_inverse_index,
prune_inverse_index_after_instance=prune_inverse_index_after_instance,
remove_hash_function_with_less_entries_as=remove_hash_function_with_less_entries_as,
block_size=block_size, shingle=shingle,
store_value_with_least_sigificant_bit=store_value_with_least_sigificant_bit,
cpu_gpu_load_balancing=cpu_gpu_load_balancing,
                                speed_optimized=speed_optimized, rangeK_wta=rangeK_wta, accuracy_optimized=accuracy_optimized)
def __del__(self):
del self._wtaHash
def fit(self, X, y):
"""Fit the model using
|
X as training data.
Parameters
----------
X : {array-like, sparse matrix}
Training data, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]"""
self._wtaHash.fit(X, y)
def partial_fit(self, X, y):
"""Extend the model by X as additional training data.
Parameters
----------
X : {array-like, sparse matrix}
Training data. Shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]"""
self._wtaHash.partial_fit(X, y)
def kneighbors(self, X=None, n_neighbors=None, return_distance=True, fast=None, pAbsoluteNumbers=None):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
fast : {True, False}, optional (default = False)
- True: will only use an inverse index to compute a k_neighbor query.
- False: an inverse index is used to preselect instances, and these are used to get
the original data from the data set to answer a k_nei
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_virtual_machine_images_operations.py
|
Python
|
mit
| 21,016
| 0.00452
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
location: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
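# Illustrative call (added; not part of the generated SDK source, values are
# placeholders):
#
#   request = build_get_request(
#       location="westus",
#       publisher_name="Canonical",
#       offer="UbuntuServer",
#       skus="18.04-LTS",
#       version="18.04.202109080",
#       subscription_id="00000000-0000-0000-0000-000000000000",
#   )
#
# The returned HttpRequest carries the formatted URL, the api-version query
# parameter and the Accept header constructed above.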
def build_list_request(
location: str,
publisher_name: str,
offer: str,
skus: str,
subscription_id: str,
*,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_offers_request(
location: str,
publisher_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id,
|
'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params"
|
, {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_publishers_request(
location: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_skus_request(
location: str,
publisher_name: str,
offer: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return H
|
derekluo/scrapy-cluster
|
crawler/crawling/redis_retry_middleware.py
|
Python
|
mit
| 1,117
| 0.008953
|
from scrapy.downloadermiddlewares.retry import RetryMiddleware
import logging
logger = logging.getLogger(__name__)
class RedisRetryMiddleware(RetryMiddleware):
def __init__(self, settings):
RetryMiddleware.__init__(self, settings)
def _retry(self, request, reason, spider):
retries = request.meta.get('retry_times', 0) + 1
if retries <= self.max_retry_times:
logger.debug("Retrying {request} " \
"(failed {retries} times): {reason}".format(
spider=spider, request=request,
retries=retries, reason=reason))
retryreq = request.copy()
retryreq.meta['retry_times'] = retries
retryreq.dont_filter = True
# our priority setup is different from super
            retryreq.meta['priority'] = retryreq.meta['priority'] - 10
return retryreq
        else:
            logger.debug("Gave up retrying {request} "\
"(failed {retries} times): {reason}".format(
spider=spider, request=request,
retries=retries, reason=reason))
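# Hedged usage note (added; not part of the original file): a middleware like
# this is typically enabled in the Scrapy project settings in place of the
# stock RetryMiddleware, e.g.:
#
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
#       'crawling.redis_retry_middleware.RedisRetryMiddleware': 510,
#   }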
|
eduNEXT/edunext-ecommerce
|
ecommerce/core/migrations/0042_siteconfiguration_enable_partial_program.py
|
Python
|
agpl-3.0
| 612
| 0.001634
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-04 14:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0041_remove_siteconfiguration__allowed_segment_events'),
]
operations = [
migrations.AddField(
model_name='siteconfiguration',
name='enable_partial_program',
            field=models.BooleanField(default=False, help_text='Enable the application of program offers to remaining unenrolled or unverified courses', verbose_name='Enable Partial Program Offer'),
),
]
|
jnewland/home-assistant
|
tests/components/dyson/test_vacuum.py
|
Python
|
apache-2.0
| 6,611
| 0
|
"""Test the Dyson 360 eye robot vacuum component."""
import unittest
from unittest import mock
from libpurecool.const import Dyson360EyeMode, PowerMode
from libpurecool.dyson_360_eye import Dyson360Eye
from homeassistant.components.dyson import vacuum as dyson
from homeassistant.components.dyson.vacuum import Dyson360EyeDevice
from tests.common import get_test_home_assistant
def _get_non_vacuum_device():
"""Return a non vacuum device."""
device = mock.Mock()
device.name = "Device_Fan"
device.state = None
return device
def _get_vacuum_device_cleaning():
"""Return a vacuum device running."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.FULL_CLEAN_RUNNING
device.state.battery_level = 85
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_charging():
"""Return a vacuum device charging."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.INACTIVE_CHARGING
device.state.battery_level = 40
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_pause():
"""Return a vacuum device in pause."""
device = mock.MagicMock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = Dyson360EyeMode.FULL_CLEAN_PAUSED
device.state.battery_level = 40
device.state.power_mode = PowerMode.QUIET
device.state.position = (0, 0)
return device
def _get_vacuum_device_unknown_state():
"""Return a vacuum device with unknown state."""
device = mock.Mock(spec=Dyson360Eye)
device.name = "Device_Vacuum"
device.state = mock.MagicMock()
device.state.state = "Unknown"
return device
class DysonTest(unittest.TestCase):
"""Dyson 360 eye robot vacuum component test class."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component_with_no_devices(self):
"""Test setup component with no devices."""
self.hass.data[dyson.DYSON_DEVICES] = []
add_entities = mock.MagicMock()
dyson.setup_platform(self.hass, {}, add_entities)
add_entities.assert_called_with([])
def test_setup_component(self):
"""Test setup component with devices."""
def _add_device(devices):
assert len(devices) == 1
assert devices[0].name == "Device_Vacuum"
device_vacuum = _get_vacuum_device_cleaning()
device_non_vacuum = _get_non_vacuum_device()
        self.hass.data[dyson.DYSON_DEVICES] = [device_vacuum,
                                               device_non_vacuum]
dyson.setup_platform(self.hass, {}, _add_device)
def test_on_message(self):
"""Test when message is received."""
device = _get_vacuum_device_cleaning()
component = Dyson360EyeDevice(device)
component.entity_id = "entity_id"
component.schedule_update_ha_state = mock.Mock()
component.on_message(mock.Mock())
assert component.schedule_update_ha_state.called
def test_should_poll(self):
"""Test polling is disable."""
device = _get_vacuum_device_cleaning()
component = Dyson360EyeDevice(device)
assert not component.should_poll
def test_properties(self):
"""Test component properties."""
device1 = _get_vacuum_device_cleaning()
device2 = _get_vacuum_device_unknown_state()
device3 = _get_vacuum_device_charging()
component = Dyson360EyeDevice(device1)
component2 = Dyson360EyeDevice(device2)
component3 = Dyson360EyeDevice(device3)
assert component.name == "Device_Vacuum"
assert component.is_on
assert component.status == "Cleaning"
assert component2.status == "Unknown"
assert component.battery_level == 85
assert component.fan_speed == "Quiet"
assert component.fan_speed_list == ["Quiet", "Max"]
assert component.device_state_attributes['position'] == \
'(0, 0)'
assert component.available
assert component.supported_features == 255
assert component.battery_icon == "mdi:battery-80"
assert component3.battery_icon == "mdi:battery-charging-40"
def test_turn_on(self):
"""Test turn on vacuum."""
device1 = _get_vacuum_device_charging()
component1 = Dyson360EyeDevice(device1)
component1.turn_on()
assert device1.start.called
device2 = _get_vacuum_device_pause()
component2 = Dyson360EyeDevice(device2)
component2.turn_on()
assert device2.resume.called
def test_turn_off(self):
"""Test turn off vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.turn_off()
assert device1.pause.called
def test_stop(self):
"""Test stop vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.stop()
assert device1.pause.called
def test_set_fan_speed(self):
"""Test set fan speed vacuum."""
device1 = _get_vacuum_device_cleaning()
component1 = Dyson360EyeDevice(device1)
component1.set_fan_speed("Max")
device1.set_power_mode.assert_called_with(PowerMode.MAX)
def test_start_pause(self):
"""Test start/pause."""
device1 = _get_vacuum_device_charging()
component1 = Dyson360EyeDevice(device1)
component1.start_pause()
assert device1.start.called
device2 = _get_vacuum_device_pause()
component2 = Dyson360EyeDevice(device2)
component2.start_pause()
assert device2.resume.called
device3 = _get_vacuum_device_cleaning()
component3 = Dyson360EyeDevice(device3)
component3.start_pause()
assert device3.pause.called
def test_return_to_base(self):
"""Test return to base."""
device = _get_vacuum_device_pause()
component = Dyson360EyeDevice(device)
component.return_to_base()
assert device.abort.called
|
PyBossa/pybossa
|
test/test_importers/test_s3_importer.py
|
Python
|
agpl-3.0
| 6,099
| 0.003771
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2016 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import string
from pybossa.importers.s3 import BulkTaskS3Import
from default import with_context
class TestBulkTaskS3Import(object):
form_data = {
'files': ['myfile.png'],
'bucket': 'mybucket'
}
@with_context
def test_count_tasks_returns_0_if_no_files_to_import(self):
form_data = {
'files': [],
'bucket': 'mybucket'
}
number_of_tasks = BulkTaskS3Import(**form_data).count_tasks()
assert number_of_tasks == 0, number_of_tasks
@with_context
def test_count_tasks_returns_1_if_1_file_to_import(self):
form_data = {
'files': ['myfile.png'],
'bucket': 'mybucket'
}
number_of_tasks = BulkTaskS3Import(**form_data).count_tasks()
assert number_of_tasks == 1, number_of_tasks
@with_context
def test_tasks_return_emtpy_list_if_no_files_to_import(self):
form_data = {
'files': [],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks == [], tasks
@with_context
def test_tasks_returns_list_with_1_file_data_if_1_file_to_import(self):
form_data = {
'files': ['myfile.png'],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert len(tasks) == 1, tasks
@with_context
def test_tasks_returns_tasks_with_fields_for_generic_files(self):
#For generic file extensions: link, filename, url
form_data = {
'files': ['myfile.png'],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks[0]['info']['filename'] == "myfile.png"
assert tasks[0]['info']['link'] == "https://mybucket.s3.amazonaws.com/myfile.png"
assert tasks[0]['info']['url'] == "https://mybucket.s3.amazonaws.com/myfile.png"
@with_context
def test_tasks_attributes_for_image_files(self):
#For image file extensions: link, filename, url, url_m, url_b, title
image_ext = ['png', 'jpg', 'jpeg', 'gif']
file_data = 'myfile.extension'
for ext in image_ext:
data = string.replace(file_data,'extension', ext)
form_data = {
'files': [data],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks[0]['info']['filename'] == "myfile.%s" % ext
assert tasks[0]['info']['link'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['url'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['url_m'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['url_b'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['title'] == "myfile.%s" % ext
    @with_context
def test_tasks_attributes_for_pdf_files(self):
#For pdf file extension: link, filename, url, pdf_url
pdf_file_data = 'mypdf.pdf'
form_data = {
'files': [pdf_file_data],
'bucket': 'mybucket'
}
        tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks[0]['info']['filename'] == "mypdf.pdf"
assert tasks[0]['info']['link'] == "https://mybucket.s3.amazonaws.com/mypdf.pdf"
assert tasks[0]['info']['url'] == "https://mybucket.s3.amazonaws.com/mypdf.pdf"
assert tasks[0]['info']['pdf_url'] == "https://mybucket.s3.amazonaws.com/mypdf.pdf"
@with_context
def test_tasks_attributes_for_video_files(self):
#For video file extension: link, filename, url, video_url
video_ext = ['mp4', 'm4v', 'ogg', 'ogv', 'webm', 'avi']
file_data = 'myfile.extension'
for ext in video_ext:
data = string.replace(file_data,'extension', ext)
form_data = {
'files': [data],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks[0]['info']['filename'] == "myfile.%s" % ext
assert tasks[0]['info']['link'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['url'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['video_url'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
@with_context
def test_tasks_attributes_for_audio_files(self):
#For audio file extension: link, filename, url, audio_url
audio_ext = ['mp4', 'm4a', 'mp3', 'ogg', 'oga', 'webm', 'wav']
file_data = 'myfile.extension'
for ext in audio_ext:
data = string.replace(file_data,'extension', ext)
form_data = {
'files': [data],
'bucket': 'mybucket'
}
tasks = BulkTaskS3Import(**form_data).tasks()
assert tasks[0]['info']['filename'] == "myfile.%s" % ext
assert tasks[0]['info']['link'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['url'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
assert tasks[0]['info']['audio_url'] == "https://mybucket.s3.amazonaws.com/myfile.%s" % ext
|
matthagy/Jamenson
|
jamenson/compiler/bind.py
|
Python
|
apache-2.0
| 9,851
| 0.004061
|
from __future__ import absolute_import
if __name__ == '__main__':
import jamenson.compiler.bind
jamenson.compiler.bind.test()
exit()
from ..runtime.ports import (Port, PortList, connect, disconnect, disconnect_all, replace_connection,
count_connections, get_cell, get_cells, DanglingPort,
AttrPortList, AttrPortMapping)
from ..runtime.ctxsingleton import CtxSingleton
from ..runtime.multimethod import defmethod
from ..runtime.copy import copy_obj, copy, get_copy, set_copy
from ..runtime.as_string import as_string, StringingMixin
#Binding Use Scoping
BND_GLOBAL, BND_LOCAL, BND_CELL, BND_FREE = BND_USE_SCOPINGS = range(4)
BND_CONCRETE = BND_LOCAL, BND_CELL, BND_FREE
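# Illustration (added, not in the original source): the scopings are small
# integers (BND_GLOBAL == 0, BND_LOCAL == 1, BND_CELL == 2, BND_FREE == 3);
# BND_CONCRETE groups the three scopings that presumably map onto concrete
# storage (locals, cells and free variables), excluding globals.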
class ScopableBase(StringingMixin):
def __init__(self):
self.scope_port = Port(self)
@property
def scope(self):
try:
return get_cell(self.scope_port)
except DanglingPort:
return None
class BindingUseMixin(object):
def __init__(self):
self.user_port = Port(self)
class BindingUse(ScopableBase, BindingUseMixin):
def __init__(self, binding):
ScopableBase.__init__(self)
BindingUseMixin.__init__(self)
self.binding_port = Port(self)
self.binding = binding
def binding():
def get(self):
try:
return get_cell(self.binding_port)
except DanglingPort:
return None
def set(self, binding):
del_(self)
if binding is not None:
if not isinstance(binding, Binding):
raise TypeError("can't assign type %s to binding"
% (type(binding).__name__,))
binding.uses.append(self)
def del_(self):
if self.binding is not None:
self.binding.uses.remove(self)
return property(get, set, del_)
binding = binding()
@property
def user(self):
try:
return get_cell(self.user_port)
except DanglingPort:
return None
@property
def symbol(self):
if not self.binding:
return None
return self.binding.symbol
@defmethod(as_string, [BindingUse])
def meth(bu):
return '<%s %s %s>' % (bu.__class__.__name__,
bu.binding,
{BND_GLOBAL : 'global',
BND_LOCAL : 'local',
BND_CELL : 'cell',
BND_FREE : 'free'}[get_binding_use_type(bu)])
@defmethod(copy_obj, [BindingUse])
def meth(bu):
return BindingUse(copy(bu.binding))
class ConcreteBindingUse(BindingUseMixin):
def __init__(self, name, use_type):
assert isinstance(name, str)
assert use_type in BND_CONCRETE
BindingUseMixin.__init__(self)
self.name = name
self.use_type = use_type
class BindableBase(ScopableBase):
def __init__(self, symbol):
ScopableBase.__init__(self)
self.symbol = symbol
class Binding(BindableBase):
def __init__(self, symbol):
BindableBase.__init__(self, symbol)
self.uses_port = AttrPortList('binding_port', self)
def uses():
def get(self):
return self.uses_port
def set(self, seq):
del_(self)
self.uses_port.extend(seq)
def del_(self):
del self.uses_port[::]
return property(get, set, del_)
uses = uses()
@defmethod(as_string, [Binding])
def meth(bn):
return '<%s %s 0x%X>' % (bn.__class__.__name__, bn.symbol, id(bn))
@defmethod(copy_obj, [Binding])
def meth(b):
return Binding(b.symbol)
class Macrolet(BindableBase):
def __init__(self, symbol, macro):
BindableBase.__init__(self, symbol)
self.macro = macro
class SymbolMacrolet(BindableBase):
def __init__(self, symbol, form):
BindableBase.__init__(self, symbol)
self.form = form
macro_types = Macrolet, SymbolMacrolet
class Scope(StringingMixin):
def __init__(self, parent=None, manage_locals=None):
self.child_scopes_port = AttrPortList('parent_port', self)
self.parent_port = Port(self)
if manage_locals is None:
manage_locals = parent is None
        assert parent or manage_locals, 'no local management for scope'
self.parent = parent
self.manage_locals = manage_locals
self.bindings_port = AttrPortMapping('scope_port', self)
self.binding_uses_port = AttrPortList('scope_port', self)
def parent():
def get(self):
try:
return get_cell(self.parent_port)
except DanglingPort:
return None
def set(self, parent):
del_(self)
if parent is not None:
parent.child_scopes_port.append(self)
def del_(self):
disconnect_all(self.parent_port)
return property(get, set, del_)
parent = parent()
@property
def top(self):
if not self.parent:
return self
return self.parent.top
@property
def depth(self):
depth = 0
scope = self
while scope.parent:
depth += 1
scope = scope.parent
return depth
def child_scopes():
def get(self):
return self.child_scopes_port
def set(self, seq):
_del(self)
self.child_scopes_port.extend(seq)
def del_(self):
del self.child_scopes_port[::]
return property(get, set, del_)
child_scopes = child_scopes()
def bindings():
def get(self):
return self.bindings_port
def set(self, mapping):
del_(self)
self.bindings_port.update(mapping)
def del_(self):
self.bindings_port.clear()
return property(get, set, del_)
bindings = bindings()
def binding_uses():
def get(self):
return self.binding_uses_port
def set(self, seq):
del_(self)
self.binding_uses_port.extend(seq)
def del_(self):
del self.binding_uses_port[::]
return property(get, set, del_)
binding_uses = binding_uses()
#api
def create_child(self, new_locals=False):
return self.__class__(parent=self, manage_locals=new_locals)
def register_local(self, sym):
return self.register(sym, Binding(sym))
def register_macrolet(self, sym, macro):
return self.register(sym,
|
Macrolet(sym, macro))
def register_symbolmcrole
|
t(self, sym, form):
return self.register(sym, SymbolMacrolet(sym, form))
def use_symbol(self, sym):
scope, binding_or_macro = self.find_binding_or_macro(sym)
if binding_or_macro is None:
#global, non-scoped
return BindingUse(Binding(sym))
if isinstance(binding_or_macro, macro_types):
return binding_or_macro
return self.use_binding(binding_or_macro)
def register_and_use_local(self, sym):
return self.use_binding(self.register_local(sym))
def unregister_binding(self, binding):
assert len(binding.uses)==0
assert binding is self.bindings[binding.symbol]
del self.bindings[binding.symbol]
#internals
def register(self, sym, binding_or_macro):
assert sym not in self.bindings
self.bindings[sym] = binding_or_macro
return binding_or_macro
def find_binding_or_macro(self, sym):
try:
return self, self.bindings[sym]
except KeyError:
if self.parent:
return self.parent.find_binding_or_macro(sym)
return None, None
def use_binding(self, binding):
bu = BindingUse(binding)
self.binding_uses_port.append(bu)
return bu
def get_locals_scope(self):
if self.manage_locals:
return self
return self.parent.get_locals_scope()
@defmethod(as_string, [Scope])
def meth(sc):
return '<Scope with %d bindings>' % (len(sc.binding
|
rogerhil/flaviabernardes
|
flaviabernardes/flaviabernardes/newsletter/migrations/0006_auto_20151110_2139.py
|
Python
|
apache-2.0
| 1,147
| 0.001744
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0008_auto_20151028_1931'),
('newsletter', '0005_auto_20150927_1517'),
]
operations = [
migrations.AddField(
model_name='list',
            name='confirmation_page',
field=models.ForeignKey(null=True, blank=True, to='cms.Page'),
preserve_default=True,
),
migrations.AddField(
model_name='list',
            name='email_message',
field=models.TextField(default="<p>Hello %(name)s,</p>\n<p>Please follow the link below.</p>\n<p>If you can't click it, please copy the entire link and paste it into your\nbrowser.</p>\n<p>%(link)s</p>\n<p>Thank you,</p>\n<p>Flavia Bernardes</p>\n"),
preserve_default=True,
),
migrations.AddField(
model_name='list',
name='email_subject',
field=models.CharField(max_length=255, default='Confirm your subscription'),
preserve_default=True,
),
]
|
BeeeOn/server
|
tools/zmqdump.py
|
Python
|
bsd-3-clause
| 1,577
| 0.039315
|
#! /usr/bin/env python3
import json
import logging
import sys
import zmq
def msg_loop(sock):
while True:
try:
message = sock.recv_json()
except Exception as e:
logging.error(e)
continue
logging.info(str(message))
def start_sub(addr):
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
logging.info("connecting to %s" % addr)
sock.connect(addr)
logging.info("subscribe to any topic")
sock.setsockopt(zmq.SUBSCRIBE, b"")
msg_loop(sock)
def start_pull(addr):
ctx = zmq.Context()
sock = ctx.socket(zmq.PULL)
logging.info("binding to %s" % addr)
sock.bind(addr)
msg_loop(sock)
def start_pair(addr):
ctx = zmq.Context()
sock = ctx.socket(zmq.PAIR)
logging.info("binding to %s" % addr)
sock.bind(addr)
msg_loop(sock)
def start_router(addr):
ctx = zmq.Context()
sock = ctx.socket(zmq.ROUTER)
logging.info("connecting to %s" % addr)
sock.connect(addr)
while True:
try:
            header, payload = sock.recv_multipart()
message = json.loads(payload)
except Exception as e:
logging.error(e)
continue
logging.info("%s - %s" % (str(header), str(message)))
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG)
logging.info("libzmq %s" % zmq.zmq_version())
type = "pull"
addr = "ipc://publish.zeromq"
if len(sys.argv) > 1:
type = sys.argv[1]
if len(sys.argv) > 2:
        addr = sys.argv[2]
if type == "sub":
start_sub(addr)
elif type == "pull":
start_pull(addr)
elif type == "pair":
start_pair(addr)
elif type == "router":
start_router(addr)
else:
raise Exception("unrecognized type: " + type)
|
ellisonbg/altair
|
altair/vegalite/v2/examples/scatter_with_labels.py
|
Python
|
bsd-3-clause
| 517
| 0
|
"""
Simple Scatter Plot with Labels
===============================
This example shows a basic scatter plot with labels created with Altair.
"""
# category: scatter plots
import altair as alt
import pandas as pd
data = pd.DataFrame({
'x': [1, 3, 5, 7, 9],
'y': [1, 3, 5, 7, 9],
    'label': ['A', 'B', 'C', 'D', 'E']
})
bars = alt.Chart(data).mark_point().encode(
x='x:Q',
y='y:Q'
)
text = bars.mark_text(
align='left',
baseline='middle',
dx=7
).encode(
text='label'
)
bars + text
|
Bysmyyr/chromium-crosswalk
|
tools/telemetry/catapult_base/dependency_manager/dependency_info_unittest.py
|
Python
|
bsd-3-clause
| 19,819
| 0.001716
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from catapult_base.dependency_manager import dependency_info
class DependencyInfoTest(unittest.TestCase):
def testInitRequiredInfo(self):
# Must have a dependency, platform and file_path.
self.assertRaises(ValueError, dependency_info.DependencyInfo,
None, None, None)
self.assertRaises(ValueError, dependency_info.DependencyInfo,
'dep', None, None)
self.assertRaises(ValueError, dependency_info.DependencyInfo,
None, 'plat', None)
self.assertRaises(ValueError, dependency_info.DependencyInfo,
None, None, 'config_file')
# Empty DependencyInfo.
empty_di = dependency_info.DependencyInfo('dep', 'plat', 'config_file')
self.assertFalse(empty_di.cs_bucket)
self.assertFalse(empty_di.cs_hash)
self.assertFalse(empty_di.download_path)
self.assertFalse(empty_di.cs_remote_path)
self.assertFalse(empty_di.local_paths)
self.assertEqual('dep', empty_di.dependency)
self.assertEqual('plat', empty_di.platform)
self.assertEqual(['config_file'], empty_di.config_files)
def testInitLocalPaths(self):
dep_info = dependency_info.DependencyInfo(
'dep', 'platform', 'config_file', local_paths=['path0', 'path1'])
self.assertEqual('dep', dep_info.dependency)
self.assertEqual('platform', dep_info.platform)
self.assertEqual(['config_file'], dep_info.config_files)
self.assertEqual(['path0', 'path1'], dep_info.local_paths)
self.assertFalse(dep_info.version_in_cs)
self.assertFalse(dep_info.cs_hash)
self.assertFalse(dep_info.cs_bucket)
self.assertFalse(dep_info.cs_remote_path)
self.assertFalse(dep_info.download_path)
self.assertFalse(dep_info.unzip_location)
self.assertFalse(dep_info.path_within_archive)
def testInitMinimumCloudStorageInfo(self):
# Must specify cloud storage information atomically.
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_b')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_hash='cs_hash')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_remote_path='cs_remote_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', download_path='download_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
cs_remote_path='cs_remote_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
cs_remote_path='cs_remote_path', local_paths=['path'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
download_path='download_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
download_path='download_path', local_paths=['path'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket',
cs_remote_path='cs_remote_path',
download_path='download_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_hash='cs_hash',
cs_remote_path='cs_remote_path',
download_path='download_path')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
download_path='download_path', local_paths=['path'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket',
cs_remote_path='cs_remote_path',
download_path='download_path', local_paths=['path'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_hash='cs_hash',
cs_remote_path='cs_remote_path',
download_path='download_path', local_paths=['path'])
def testInitWithVersion(self):
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', version_in_cs='version_in_cs')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', version_in_cs='version_in_cs',
local_paths=['path2'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
cs_remote_path='cs_remote_path',
version_in_cs='version_in_cs', local_paths=['path2'])
dep_info = dependency_info.DependencyInfo(
'dep', 'platform', 'config_file', cs_bucket='cs_bucket',
cs_hash='cs_hash', download_path='download_path',
cs_remote_path='cs_remote_path', version_in_cs='version_in_cs')
self.assertEqual('dep', dep_info.dependency)
self.assertEqual('platform', dep_info.platform)
self.assertEqual(['config_file'], dep_info.config_files)
self.assertEqual('cs_hash', dep_info.cs_hash)
self.assertEqual('cs_bucket', dep_info.cs_bucket)
self.assertEqual('cs_remote_path', dep_info.cs_remote_path)
self.assertEqual('download_path', dep_info.download_path)
self.assertEqual('version_in_cs', dep_info.version_in_cs)
self.assertFalse(dep_info.local_paths)
dep_info = dependency_info.DependencyInfo(
'dep', 'platform', 'config_file', cs_bucket='cs_bucket',
cs_hash='cs_hash', download_path='download_path',
cs_remote_path='cs_remote_path', version_in_cs='version_in_cs',
local_paths=['path'])
self.assertEqual('dep', dep_info.dependency)
self.assertEqual('platform', dep_info.platform)
self.assertEqual(['config_file'], dep_info.config_files)
self.assertEqual('cs_hash', dep_info.cs_hash)
self.assertEqual('cs_bucket', dep_info.cs_bucket)
self.assertEqual('cs_remote_path', dep_info.cs_remote_path)
self.assertEqual('download_path', dep_info.download_path)
self.assertEqual('version_in_cs', dep_info.version_in_cs)
self.assertEqual(['path'], dep_info.local_paths)
def testInitWithArchivePath(self):
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', path_within_archive='path_within_archive')
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', path_within_archive='path_within_archive',
local_paths=['path2'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
cs_remote_path='cs_remote_path',
path_within_archive='path_within_archive',
local_paths=['path2'])
self.assertRaises(ValueError, dependency_info.DependencyInfo, 'dep', 'plat',
'config_file', cs_bucket='cs_bucket', cs_hash='cs_hash',
cs_remote_path='cs_remote_path', version_in_cs='version',
path_within_archive='path_within_archive',
local_paths=['path2'])
dep_info = dependency_info.DependencyInfo(
'dep', 'platform', 'config_file',
|
mattjhayes/nmeta2
|
nmeta2/api.py
|
Python
|
apache-2.0
| 36,008
| 0.004832
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*** nmeta2 - Network Metadata - REST API Class and Methods
"""
This module is part of the nmeta suite running on top of Ryu SDN
controller to provide network identity and flow metadata.
.
It provides methods for RESTful API connectivity.
"""
import logging
import logging.handlers
import socket
import time
import sys
#*** Ryu Imports:
from ryu.exception import RyuException
from ryu.app.wsgi import ControllerBase, WSGIApplication
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
#*** Web API REST imports:
from webob import Response
import json
from json import JSONEncoder
#*** Universal Unique Identifier:
import uuid
from uuid import UUID
#*** Constants for REST API:
REST_RESULT = 'result'
REST_NG = 'failure'
REST_DETAILS = 'details'
NMETA_INSTANCE = 'nmeta_api_app'
LOGGER = 'logger_api_app'
# REST command template
def rest_command(func):
"""
REST API command template
"""
def _rest_command(*args, **kwargs):
"""
Run a REST command and return
appropriate response.
Keys/Values returned to this wrapper in a dictionary.
Valid Keys are:
            'msg': the data to return in the message body
'location': a new location for the resource
'status': HTTP status code to return
"""
result = dict()
try:
result = func(*args, **kwargs)
except SyntaxError as e:
status = 400
details = e.msg
            print "ERROR: SyntaxError in _rest_command, status ", status, \
                "msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except (ValueError, NameError) as e:
status = 400
details = e.message
print "ERROR: ValueError or NameError in _rest_command, status ", \
status, "msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except NotFoundError as msg:
status = 404
details = str(msg)
print "ERROR: NotFoundError in _rest_command, status ", status, \
"msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
except:
#*** General exception handling...
exc_type, exc_value, exc_traceback = sys.exc_info()
status = 500
details = "exc_type=" + str(exc_type) + " exc_value=" + \
str(exc_value) + " exc_traceback=" + \
str(exc_traceback)
            print "ERROR: Unhandled exception in _rest_command, status ", status, \
"msg ", details
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
if 'location' in result:
#*** Return an HTTP 201 with location for new resource:
msg = result['msg']
res_link = result['location']
status = 201
return Response(status=status, content_type='application/json',
location=res_link, body=json.dumps(msg))
else:
#*** No location to return:
msg = result['msg']
if 'status' in result:
status = result['status']
else:
status = 200
return Response(status=status, content_type='application/json',
body=json.dumps(msg))
#*** Return the inner function:
return _rest_command
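#*** Illustrative sketch only (not from the original module): a handler wrapped
#*** with @rest_command simply returns a dict, for example
#***     return {'msg': {'result': 'ok'}, 'status': 200}
#*** and the wrapper above serialises 'msg' to JSON for the HTTP response body,
#*** using 'status' (default 200) and an optional 'location' for 201 responses.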
class NotFoundError(RyuException):
message = 'Error occurred talking to function <TBD>'
class RESTAPIController(ControllerBase):
"""
This class is used to control REST API access to the
nmeta data and control functions
"""
def __init__(self, req, link, data, **config):
super(RESTAPIController, self).__init__(req, link, data, **config)
self.nmeta_parent_self = data[NMETA_INSTANCE]
#*** Get the parent logger and log against that:
self.logger = data[LOGGER]
#*** Performance Note: this init gets run for every API call...
#*** Update JSON to support UUID encoding:
JSONEncoder_olddefault = JSONEncoder.default
def JSONEncoder_newdefault(self, o):
if isinstance(o, UUID):
return str(o)
return JSONEncoder_olddefault(self, o)
JSONEncoder.default = JSONEncoder_newdefault
@rest_command
def rest_dpae_create(self, req, **kwargs):
"""
REST API function that creates a DPAE resource (Phase 1)
(HTTP POST method)
"""
nmeta = self.nmeta_parent_self
#*** Decode request body as JSON:
dpae_req_body = JSON_Body(req.body)
if dpae_req_body.error:
return ({'status': 400, 'msg': dpae_req_body.error})
self.logger.info("Phase 1 DPAE initiate request body=%s",
dpae_req_body.json)
#*** Validate required keys are present in JSON:
if not dpae_req_body.validate(['hostname_dpae', 'if_name',
'uuid_dpae']):
self.logger.error("Validation error %s", dpae_req_body.error)
return ({'status': 400, 'msg': dpae_req_body.error})
hostname_dpae = dpae_req_body['hostname_dpae']
uuid_dpae = dpae_req_body['uuid_dpae']
if_name = dpae_req_body['if_name']
#*** Create a unique ID:
hostname = socket.getfqdn()
our_uuid = uuid.uuid1()
#*** Record in database with controller UUID as key:
db_data = {'_id': str(our_uuid), 'time_created': time.time(),
'hostname_dpae': hostname_dpae, 'uuid_dpae': uuid_dpae,
'if_name': if_name}
db_result = nmeta.dbdpae.insert_one(db_data)
self.logger.info("Phase 1 created new db record id=%s",
db_result.inserted_id)
#*** Get the MAC addresses and ethertype for the DPAE to send to
#*** in Phase2:
dpae2ctrl_mac = str(nmeta.dpae2ctrl_mac)
ctrl2dpae_mac = str(nmeta.ctrl2dpae_mac)
dpae_ethertype = int(nmeta.dpae_ethertype)
#*** Create JSON response body:
json_create_response = json.dumps({'hostname_controller': hostname,
'uuid_dpae': uuid_dpae,
'uuid_controller': our_uuid,
'dpae2ctrl_mac': dpae2ctrl_mac,
'ctrl2dpae_mac': ctrl2dpae_mac,
'dpae_ethertype': dpae_ethertype}
)
self.logger.info("Phase 1 DPAE join response body=%s",
json_create_response)
#*** Return response body for sending to DPAE
#*** Include the location, which is branch where resource is created:
result = {'msg': json_create_response, 'location': str(our_uuid)}
return result
@rest_command
def rest_dpae_read(self, req, **kwargs):
"""
REST API function that returns DPAE resource names
(HTTP GET method)
"""
#*** We don't support this, so return a 403 Forbidden:
return ({'status': 403,
'msg': '{\"Error\":
|
mozilla/spark-eol
|
apps/eol/urls.py
|
Python
|
bsd-3-clause
| 644
| 0.003106
|
from django.conf.urls.defaults import patterns, url
from commons.views import redirect_to
from . import views
urlpatterns = patterns('',
url(r'^$', redirect_to, {'url': 'eol.home'}),
url(r'^home$', views.home, name='eol.home'),
    url(r'^newsletter', views.newsletter, name='eol.newsletter'),
url(r'^m/$', redirect_to, {'url': 'eol.home_mobile'}),
url(r'^m/home$', views.home_mobile, name='eol.home_mobile'),
    url(r'^m/sharing-the-spark$', views.spark_sharing, name='eol.sharing'),
url(r'^m/around-the-globe$', views.spark_around, name='eol.around'),
url(r'^m/hall-of-fame$', views.spark_hall, name='eol.hall'),
)
|
jayme-github/CouchPotatoServer
|
couchpotato/core/providers/nzb/nzbclub/main.py
|
Python
|
gpl-3.0
| 2,892
| 0.00657
|
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import time
log = CPLog(__name__)
class NZBClub(NZBProvider, RSS):
urls = {
'search': 'http://www.nzbclub.com/nzbfeed.aspx?%s',
}
http_time_between_calls = 4 #seconds
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
params = tryUrlencode({
'q': q,
'ig': '1',
'rpp': 200,
'st': 1,
'sp': 1,
'ns': 1,
})
nzbs = self.getRSSData(self.urls['search'] % params)
for nzb in nzbs:
nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
def extra_check(item):
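                # Reject releases whose full description suggests passworded or
                # corrupted archives (checked against the phrases below).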
full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
if ignored in full_description:
log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
return False
return True
results.append({
'id': nzbclub_id,
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'].replace(' ', '_'),
'detail_url': self.getTextElement(nzb, "link"),
'get_more_info': self.getMoreInfo,
'extra_check': extra_check
})
def getMoreInfo(self, item):
full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('pre', attrs = {'class':'nfo'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
def extraCheck(self, item):
full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
if 'ARCHIVE inside ARCHIVE' in full_description:
log.info('Wrong: Seems to be passworded files: %s', item['name'])
return False
        return True
|
tobybreckon/python-examples-ml
|
decisiontrees/dtree1.py
|
Python
|
lgpl-3.0
| 4,168
| 0.016315
|
#####################################################################
# Example : decision tree learning
# basic illustrative python script
# For use with test / training datasets : car.{train | test}
# Author : Toby Breckon, toby.breckon@durham.ac.uk
# Version : 0.4 (OpenCV 3 / Python 3 fixes)
# Copyright (c) 2014 /2016 School of Engineering & Computing Sciences,
# Durham University, UK
# License : LGPL - http://www.gnu.org/licenses/lgpl.html
#####################################################################
import csv
import cv2
import numpy as np
########### Define classes
classes = {'unacc': 0, 'acc': 1, 'good': 2, 'vgood': 3}
inv_classes = {v: k for k, v in classes.items()}
########### Define attributes (map to numerical)
attributes = {'vhigh' : 0, 'high' : 1, 'med' : 2,
'low': 2, '2' : 3, '3': 4, '4': 5,
'5more': 5, 'more': 6, 'small': 7, 'big': 8}
########### Load Training and Testing Data Sets
# load training data set
reader=csv.reader(open("car.train","rt", encoding='ascii'),delimiter=',')
attribute_list = []
label_list = []
for row in reader:
# attributes in columns 0-5, class label in column 6
attribute_list.append(list(attributes[row[i]] for i in (range(0,6))))
label_list.append(classes[row[6]])
training_attributes=np.array(attribute_list).astype(np.float32)
training_class_labels=np.array(label_list).astype(np.float32)
# load testing data set
reader=csv.reader(open("car.test","rt", encoding='ascii'),delimiter=',')
attribute_list = []
label_list = []
for row in reader:
    # attributes in columns 0-5, class label in column 6
attribute_list.append(list(attributes[row[i]] for i in (range(0,6))))
label_list.append(classes[row[6]])
testing_attributes=np.array(attribute_list).astype(np.float32)
testing_class_labels=np.array(label_list).astype(np.float32)
############ Perform Training -- Decision Tree
# define decision tree object
dtree = cv2.ml.DTrees_create();
# set parameters (changing may or may not change results)
dtree.setCVFolds(1); # the number of cross-validation folds/iterations - fix at 1
dtree.setMaxCategories(25); # max number of categories (use sub-optimal algorithm for larger numbers)
dtree.setMaxDepth(8); # max tree depth
dtree.setMinSampleCount(25); # min sample count
dtree.setPriors(np.float32([1,1,1,1])); # the array of priors, the bigger weight, the more attention to the assoc. class
                               # (i.e. a case will be judged to be malignant with bigger chance)
dtree.setRegressionAccuracy(0); # regression accuracy: N/A here
dtree.setTruncatePrunedTree(True); # throw away the pruned tree branches
dtree.setUse1SERule(True); # use 1SE rule => smaller tree
dtree.setUseSurrogates(False); # compute surrogate split, no missing data
# specify that the types of our attributes is ordered with a categorical class output
# and we have 7 of them (6 attributes + 1 class label)
var_types = np.array([cv2.ml.VAR_NUMERICAL] * 6 + [cv2.ml.VAR_CATEGORICAL], np.uint8)
# train decision tree object
dtree.train(cv2.ml.TrainData_create(training_attributes, cv2.ml.ROW_SAMPLE, training_class_labels.astype(int), varType = var_types));
############ Perform Testing -- Decision Tree
correct = 0
wrong = 0
# for each testing example
for i in range(0, len(testing_attributes[:,0])) :
# perform decision tree prediction (i.e. classification)
_, result = dtree.predict(testing_attributes[i,:], cv2.ml.ROW_SAMPLE);
# and for undocumented reasons take the first element of the resulting array
# as the result
print("Test data example : {} : result = {}".format((i+1), inv_classes[int(result[0])]));
# record results as tp/tn/fp/fn
if (result[0] == testing_class_labels[i]) : correct+=1
elif (result[0] != testing_class_labels[i]) : wrong+=1
# output summary statistics
total = correct + wrong
print();
print("Testing Data Set Performance Summary");
print("Total Correct : {}%".format(round((correct / float(total)) * 100, 2)));
#####################################################################
|
scavarda/mysql-dbcompare
|
mysql-utilities-1.6.0/mysql/fabric/failure_detector.py
|
Python
|
apache-2.0
| 8,960
| 0.000446
|
#
# Copyright (c) 2013,2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""This modules contains a simple failure detector which is used by Fabric
to monitor the availability of servers within groups.
If a master cannot be accessed through the
:meth:`~mysql.fabric.server.MySQLServer.is_alive` method after `n` consecutive
attempts, the failure detector considers that it has failed and proceeds with
the election of a new master. The failure detector does not choose any new
master but only triggers the :const:`~mysql.fabric.events.REPORT_FAILURE` event
which responsible for doing so.
If a slave cannot be accessed either the same event is triggered but in this
case the server is only marked as faulty.
See :meth:`~mysql.fabric.server.MySQLServer.is_alive`.
See :class:`~mysql.fabric.services.highavailability.PromoteMaster`.
See :class:`~mysql.fabric.services.servers.ReportFailure`.
"""
import threading
import time
import logging
from mysql.fabric import (
errors as _errors,
persistence as _persistence,
config as _config,
executor as _executor,
)
from mysql.fabric.events import (
trigger,
)
from mysql.fabric.utils import (
get_time,
)
_LOGGER = logging.getLogger(__name__)
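# Illustrative configuration sketch (not part of the original module): the
# configure() function below reads its options from a [failure_tracking]
# section of the Fabric configuration, for example:
#
#     [failure_tracking]
#     detection_interval = 5
#     detections = 2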
class FailureDetector(object):
"""Responsible for periodically checking if a set of servers within a
group is alive.
It does so by connecting to these servers and executing a query (i.e.
:meth:`~mysql.fabric.server.MySQLServer.is_alive`.
"""
LOCK = threading.Condition()
GROUPS = {}
_MIN_DETECTION_INTERVAL = 2.0
_DETECTION_INTERVAL = _DEFAULT_DETECTION_INTERVAL = 5.0
_MIN_DETECTIONS = 1
_DETECTIONS = _DEFAULT_DETECTIONS = 2
_MIN_DETECTION_TIMEOUT = 1
_DETECTION_TIMEOUT = _DEFAULT_DETECTION_TIMEOUT = 1
@staticmethod
def register_groups():
"""Upon startup initializes a failure detector for each group.
"""
from mysql.fabric.server import Group
_LOGGER.info("Starting failure detector.")
for row in Group.groups_by_status(Group.ACTIVE):
FailureDetector.register_group(row[0])
@staticmethod
def register_group(group_id):
"""Start a failure detector for a group.
        :param group_id: Group's id.
"""
_LOGGER.info("Monitoring group (%s).", group_id)
with FailureDetector.LOCK:
            if group_id not in FailureDetector.GROUPS:
detector = FailureDetector(group_id)
detector.start()
FailureDetector.GROUPS[group_id] = detector
@staticmethod
def unregister_group(group_id):
"""Stop a failure detector for a group.
:param group_id: Group's id.
"""
_LOGGER.info("Stop monitoring group (%s).", group_id)
with FailureDetector.LOCK:
if group_id in FailureDetector.GROUPS:
detector = FailureDetector.GROUPS[group_id]
detector.shutdown()
del FailureDetector.GROUPS[group_id]
@staticmethod
def unregister_groups():
"""Upon shutdown stop all failure detectors that are running.
"""
_LOGGER.info("Stopping failure detector.")
with FailureDetector.LOCK:
for detector in FailureDetector.GROUPS.values():
detector.shutdown()
FailureDetector.GROUPS = {}
def __init__(self, group_id):
"""Constructor for FailureDetector.
"""
self.__group_id = group_id
self.__thread = None
self.__check = False
def start(self):
"""Start the failure detector.
"""
self.__check = True
self.__thread = threading.Thread(target=self._run,
name="FailureDetector(" + self.__group_id + ")")
self.__thread.daemon = True
self.__thread.start()
def shutdown(self):
"""Stop the failure detector.
"""
self.__check = False
def _run(self):
"""Function that verifies servers' availabilities.
"""
from mysql.fabric.server import (
Group,
MySQLServer,
)
ignored_status = [MySQLServer.FAULTY]
quarantine = {}
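        # quarantine maps a server UUID to the number of consecutive failed
        # liveness checks observed for that server.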
interval = FailureDetector._DETECTION_INTERVAL
detections = FailureDetector._DETECTIONS
detection_timeout = FailureDetector._DETECTION_TIMEOUT
_persistence.init_thread()
while self.__check:
try:
unreachable = set()
group = Group.fetch(self.__group_id)
if group is not None:
for server in group.servers():
if server.status in ignored_status or \
server.is_alive(detection_timeout):
continue
unreachable.add(server.uuid)
_LOGGER.warning(
"Server (%s) in group (%s) is unreachable.",
server.uuid, self.__group_id
)
unstable = False
failed_attempts = 0
if server.uuid not in quarantine:
quarantine[server.uuid] = failed_attempts = 1
else:
failed_attempts = quarantine[server.uuid] + 1
quarantine[server.uuid] = failed_attempts
if failed_attempts >= detections:
unstable = True
can_set_faulty = group.can_set_server_faulty(
server, get_time()
)
if unstable and can_set_faulty:
procedures = trigger("REPORT_FAILURE",
set([self.__group_id]), str(server.uuid),
threading.current_thread().name,
MySQLServer.FAULTY, False
)
executor = _executor.Executor()
for procedure in procedures:
executor.wait_for_procedure(procedure)
for uuid in quarantine.keys():
if uuid not in unreachable:
del quarantine[uuid]
except (_errors.ExecutorError, _errors.DatabaseError):
pass
except Exception as error:
_LOGGER.exception(error)
time.sleep(interval / detections)
_persistence.deinit_thread()
def configure(config):
"""Set configuration values.
"""
try:
detection_interval = \
float(config.get("failure_tracking", "detection_interval"))
if detection_interval < FailureDetector._MIN_DETECTION_INTERVAL:
_LOGGER.warning(
"Detection interval cannot be lower than %s.",
FailureDetector._MIN_DETECTION_INTERVAL
)
detection_interval = FailureDetector._MIN_DETECTION_INTERVAL
FailureDetector._DETECTION_INTERVAL = float(detection_interval)
except (_config.NoOptionError, _config.NoSectionError, ValueError):
pass
try:
detections = int(config.get("failure_tracking", "detections"))
if detections < FailureDetector._MIN_DETECTIONS:
_LOGGER.warning(
"Detections cannot be lower than %s.",
FailureDetector._MIN_DETECT
|
dnguyen0304/mfit_service
|
mfit/mfit/resources/__init__.py
|
Python
|
mit
| 1,041
| 0.000961
|
# -*- coding: utf-8 -*-
from .base import _Base, Base
from .base_collection import BaseCollection
from .habit_groups import HabitGroups
from .habit_groups_collection import HabitGroupsCollection
from .habits import Habits
from .habits_collection import HabitsCollection
from .users import Users
from .users_collection import UsersCollection
from .attempts import Attempts
from .attempts_collection import AttemptsCollection
from .routines import Routines
from .routines_collection import RoutinesCollection
from .attempts_logs import AttemptsLogs
from .attempts_logs_collection import AttemptsLogsCollection
from .root import Root
__all__ = ['Attempts',
'AttemptsCollection',
'AttemptsLogs',
'AttemptsLogsCollection',
'Base',
'BaseCollection',
'HabitGroups',
'HabitGroupsCollection',
'Habits',
'HabitsCollection',
'Root',
'Routines',
'RoutinesCollection',
'Users',
'UsersCollection']
|
s20121035/rk3288_android5.1_repo
|
external/chromium_org/chrome/common/extensions/docs/server2/redirector_test.py
|
Python
|
gpl-3.0
| 7,651
| 0.00183
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from compiled_file_system import CompiledFileSystem
from object_store_creator import ObjectStoreCreator
from redirector import Redirector
from test_file_system import TestFileSystem
from third_party.json_schema_compiler.json_parse import Parse
HOST = 'http://localhost/'
file_system = TestFileSystem({
'redirects.json': json.dumps({
'foo/...': 'apps/...',
'': '/index.html',
'home': 'index.html',
'index.html': 'http://something.absolute.com/'
}),
'apps': {
'redirects.json': json.dumps({
'': '../index.html',
      'index.html': 'about_apps.html',
'foo.html': '/bar.html',
})
},
'extensions': {
'redirects.json': json.dumps({
'manifest': 'manifest.html',
'tabs': 'tabs.html',
'dev/...': '...',
'a/very/long/dir/chain/...': 'short/...',
'_short/...': 'another/long/chain/...',
'r1/...': 'r2/r1/...',
'r2/r1/...': 'r3/...',
'r3/...': 'r4/...',
'r5/...': 'r6/...',
'nofile1/...': 'nofile2/...',
'noredirects1/...': 'noredirects2/...'
}),
'manifest': {
'redirects.json': json.dumps({
'': '../manifest.html',
'more-info': 'http://lmgtfy.com'
}),
},
'stable': {
'redirects.json': json.dumps({
'tabs': 'tabs.html'
}),
'manifest': {
'redirects.json': json.dumps({
'storage': 'storage.html'
})
},
},
'dev': {
'redirects.json': json.dumps({
'tabs': 'tabs.html',
'manifest': 'manifest.html'
}),
'manifest': {
'redirects.json': json.dumps({
'storage': 'storage.html'
})
}
},
'r4': {
'redirects.json': json.dumps({
'manifest': 'manifest.html'
})
},
'r6': {
'redirects.json': json.dumps({
'...': 'directory/...'
}),
'directory': {
'redirects.json': json.dumps({
'manifest': 'manifest.html'
}),
'manifest': 'manifest.html'
}
},
'short': {
'redirects.json': json.dumps({
'index': 'index.html'
})
},
'another': {
'long': {
'chain': {
'redirects.json': json.dumps({
'index': 'index.html'
})
}
}
},
'nofile': {
'redirects.json': json.dumps({
})
}
},
'priority': {
'redirects.json': json.dumps({
'directory/...': 'GOOD/...'
}),
'directory': {
'redirects.json': json.dumps({
'...': '../BAD/...'
}),
}
},
'relative_directory': {
'redirects.json': json.dumps({
'...': '../...'
})
},
'infinite_redirect': {
'redirects.json': json.dumps({
'...': 'loop/...'
}),
'loop': {
'redirects.json': json.dumps({
'...': './...'
})
}
},
'parent_redirect': {
'redirects.json': json.dumps({
'a/...': 'b/...'
})
}
})
class RedirectorTest(unittest.TestCase):
def setUp(self):
self._redirector = Redirector(
CompiledFileSystem.Factory(ObjectStoreCreator.ForTest()),
file_system)
def testExternalRedirection(self):
self.assertEqual(
'http://something.absolute.com/',
self._redirector.Redirect(HOST, 'index.html'))
self.assertEqual(
'http://lmgtfy.com',
self._redirector.Redirect(HOST, 'extensions/manifest/more-info'))
def testAbsoluteRedirection(self):
self.assertEqual(
'/index.html', self._redirector.Redirect(HOST, ''))
self.assertEqual(
'/bar.html', self._redirector.Redirect(HOST, 'apps/foo.html'))
def testRelativeRedirection(self):
self.assertEqual(
'apps/about_apps.html',
self._redirector.Redirect(HOST, 'apps/index.html'))
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/manifest/'))
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/manifest'))
self.assertEqual(
'index.html', self._redirector.Redirect(HOST, 'apps/'))
self.assertEqual(
'index.html', self._redirector.Redirect(HOST, 'home'))
def testNotFound(self):
self.assertEqual(
None, self._redirector.Redirect(HOST, 'not/a/real/path'))
self.assertEqual(
None, self._redirector.Redirect(HOST, 'public/apps/okay.html'))
def testOldHosts(self):
self.assertEqual(
'https://developer.chrome.com/',
self._redirector.Redirect('http://code.google.com', ''))
self.assertEqual(
'https://developer.chrome.com/',
self._redirector.Redirect('https://code.google.com', ''))
def testRefresh(self):
self._redirector.Refresh().Get()
expected_paths = set([
'redirects.json',
'apps/redirects.json',
'extensions/redirects.json',
'extensions/manifest/redirects.json'
])
for path in expected_paths:
self.assertEqual(
Parse(file_system.ReadSingle(path).Get()),
# Access the cache's object store to see what files were hit during
# the cron run. Returns strings parsed as JSON.
# TODO(jshumway): Make a non hack version of this check.
self._redirector._cache._file_object_store.Get(
path).Get().cache_data)
def testDirectoryRedirection(self):
# Simple redirect.
self.assertEqual(
'extensions/manifest.html',
self._redirector.Redirect(HOST, 'extensions/dev/manifest'))
# Multiple hops with one file.
self.assertEqual(
'extensions/r4/manifest.html',
self._redirector.Redirect(HOST, 'extensions/r1/manifest'))
# Multiple hops w/ multiple redirection files.
self.assertEqual(
'extensions/r6/directory/manifest.html',
self._redirector.Redirect(HOST, 'extensions/r5/manifest'))
# Redirection from root directory redirector.
self.assertEqual(
'apps/about_apps.html',
self._redirector.Redirect(HOST, 'foo/index.html'))
# Short to long.
self.assertEqual(
'extensions/short/index.html',
self._redirector.Redirect(HOST, 'extensions/a/very/long/dir/chain/index'))
# Long to short.
self.assertEqual(
'extensions/another/long/chain/index.html',
self._redirector.Redirect(HOST, 'extensions/_short/index'))
# Directory redirection without a redirects.json in final directory.
self.assertEqual(
'extensions/noredirects2/file',
self._redirector.Redirect(HOST, 'extensions/noredirects1/file'))
# Directory redirection with redirects.json without rule for the filename.
self.assertEqual(
'extensions/nofile2/file',
self._redirector.Redirect(HOST, 'extensions/nofile1/file'))
# Relative directory path.
self.assertEqual(
'index.html',
self._redirector.Redirect(HOST, 'relative_directory/home'))
# Shallower directory redirects have priority.
self.assertEqual(
'priority/GOOD/index',
self._redirector.Redirect(HOST, 'priority/directory/index'))
# Don't infinitely redirect.
self.assertEqual('infinite_redirect/loop/index',
self._redirector.Redirect(HOST, 'infinite_redirect/index'))
# If a parent directory is redirected, redirect children properly.
self.assertEqual('parent_redirect/b/c/index',
self._redirector.Redirect(HOST, 'parent_redirect/a/c/index'))
if __name__ == '__main__':
unittest.main()
|
maropu/spark
|
python/pyspark/mllib/feature.py
|
Python
|
apache-2.0
| 28,134
| 0.001102
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for feature in MLlib.
"""
import sys
import warnings
from py4j.protocol import Py4JJavaError
from pyspark import since
from pyspark.rdd import RDD
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Vectors, _convert_to_vector
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel',
'ChiSqSelector', 'ChiSqSelectorModel', 'ElementwiseProduct']
class VectorTransformer(object):
"""
Base class for transformation of a vector or RDD of vector
"""
def transform(self, vector):
"""
Applies transformation on a vector.
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
vector or convertible or RDD to be transformed.
"""
raise NotImplementedError
class Normalizer(VectorTransformer):
r"""
Normalizes samples individually to unit L\ :sup:`p`\ norm
For any 1 <= `p` < float('inf'), normalizes samples using
sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for
normalization.
.. versionadded:: 1.2.0
Parameters
----------
p : float, optional
Normalization in L^p^ space, p = 2 by default.
Examples
--------
>>> from pyspark.mllib.linalg import Vectors
>>> v = Vectors.dense(range(3))
>>> nor = Normalizer(1)
>>> nor.transform(v)
DenseVector([0.0, 0.3333, 0.6667])
>>> rdd = sc.parallelize([v])
>>> nor.transform(rdd).collect()
[DenseVector([0.0, 0.3333, 0.6667])]
>>> nor2 = Normalizer(float("inf"))
>>> nor2.transform(v)
DenseVector([0.0, 0.5, 1.0])
"""
def __init__(self, p=2.0):
        assert p >= 1.0, "p should be greater than or equal to 1.0"
self.p = float(p)
def transform(self, vector):
"""
Applies unit length normalization on a vector.
.. versionadded:: 1.2.0
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
vector or RDD of vector to be normalized.
Returns
-------
        :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
normalized vector(s). If the norm of the input is zero, it
will return the input vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("normalizeVector", self.p, vector)
class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
"""
Wrapper for the model in JVM
"""
def transform(self, vector):
"""
Applies transformation on a vector or an RDD[Vector].
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Input vector(s) to be transformed.
Notes
-----
In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector)
class StandardScalerModel(JavaVectorTransformer):
"""
Represents a StandardScaler model that can transform vectors.
.. versionadded:: 1.2.0
"""
def transform(self, vector):
"""
Applies standardization transformation on a vector.
.. versionadded:: 1.2.0
Parameters
----------
vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Input vector(s) to be standardized.
Returns
-------
:py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD`
Standardized vector(s). If the variance of a column is
zero, it will return default `0.0` for the column with
zero variance.
Notes
-----
In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
"""
return JavaVectorTransformer.transform(self, vector)
@since('1.4.0')
def setWithMean(self, withMean):
"""
Setter of the boolean which decides
whether it uses mean or not
"""
self.call("setWithMean", withMean)
return self
@since('1.4.0')
def setWithStd(self, withStd):
"""
Setter of the boolean which decides
whether it uses std or not
"""
self.call("setWithStd", withStd)
return self
@property
@since('2.0.0')
def withStd(self):
"""
Returns if the model scales the data to unit standard deviation.
"""
return self.call("withStd")
@property
@since('2.0.0')
def withMean(self):
"""
Returns if the model centers the data before scaling.
"""
return self.call("withMean")
@property
@since('2.0.0')
def std(self):
"""
Return the column standard deviation values.
"""
return self.call("std")
@property
@since('2.0.0')
def mean(self):
"""
Return the column mean values.
"""
return self.call("mean")
class StandardScaler(object):
"""
Standardizes features by removing the mean and scaling to unit
variance using column summary statistics on the samples in the
training set.
.. versionadded:: 1.2.0
Parameters
----------
withMean : bool, optional
False by default. Centers the data with mean
before scaling. It will build a dense output, so take
care when applying to sparse input.
withStd : bool, optional
True by default. Scales the data to unit
standard deviation.
Examples
--------
>>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
>>> dataset = sc.parallelize(vs)
>>> standardizer = StandardScaler(True, True)
>>> model = standardizer.fit(dataset)
>>> result = model.transform(dataset)
>>> for r in result.collect(): r
DenseVector([-0.7071, 0.7071, -0.7071])
DenseVector([0.7071, -0.7071, 0.7071])
>>> int(model.std[0])
4
>>> int(model.mean[0]*10)
9
>>> model.withStd
True
>>> model.withMean
True
"""
def __init__(self, withMean=False, withStd=True):
if not (withMean or withStd):
warnings.warn("Both withMean and withStd are false. The model does nothing.")
self.withMean = withMean
self.withStd = withStd
def fit(self, dataset):
"""
Computes the mean and variance and stores as a model to be used
for later scaling.
.. versionadded:: 1.2.0
Parameters
----------
dataset : :py:class:`pyspark.RDD`
The data used to compute the mean and variance
to build the transformation model.
Returns
-------
:py:
|
icaoberg/cellorganizer-galaxy-tools
|
datatypes/dataproviders/chunk.py
|
Python
|
gpl-3.0
| 2,581
| 0.020922
|
"""
Chunk (N number of bytes at M offset to a source's beginning) provider.
Primarily for file sources but usable by any iterator that has both
seek and read( N ).
"""
import os
import base64
import base
import exceptions
import logging
log = logging.getLogger( __name__ )
# -----------------------------------------------------------------------------
class ChunkDataProvider( base.DataProvider ):
"""
Data provider that yields chunks of data from its file.
Note: this version does not account for lines and works with Binary datatypes.
"""
MAX_CHUNK_SIZE = 2 ** 16
DEFAULT_CHUNK_SIZE = MAX_CHUNK_SIZE
settings = {
'chunk_index' : 'int',
'chunk_size' : 'int'
}
# TODO: subclass from LimitedOffsetDataProvider?
# see web/framework/base.iterate_file, util/__init__.file_reader, and datatypes.tabular
def __init__( self, source, chunk_index=0, chunk_size=DEFAULT_CHUNK_SIZE, **kwargs ):
"""
:param chunk_index: if a source can be divided into N number of
`chunk_size` sections, this is the index of which section to
return.
        :param chunk_size: how large are the desired chunks to return
(gen. in bytes).
"""
super( ChunkDataProvider, self ).__init__( source, **kwargs )
self.chunk_size = int( chunk_size )
self.chunk_pos = int( chunk_index ) * self.chunk_size
def validate_source( self, source ):
"""
Does the given source have both the methods `seek` and `read`?
:raises InvalidDataProviderSource: if not.
"""
source = super( ChunkDataProvider, self ).validate_source( source )
if( ( not hasattr( source, 'seek' ) ) or ( not hasattr( source, 'read' ) ) ):
raise exceptions.InvalidDataProviderSource( source )
return source
def __iter__( self ):
# not reeeally an iterator per se
self.__enter__()
self.source.seek( self.chunk_pos, os.SEEK_SET )
chunk = self.encode( self.source.read( self.chunk_size ) )
yield chunk
self.__exit__()
def encode( self, chunk ):
"""
Called on the chunk before returning.
        Override to modify, encode, or decode chunks.
"""
return chunk
class Base64ChunkDataProvider( ChunkDataProvider ):
"""
Data provider that yields chunks of base64 encoded data from its file.
"""
def encode( self, chunk ):
"""
Return chunks encoded in base 64.
"""
return base64.b64encode( chunk )
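# Illustrative usage sketch (not part of the original module): reading the
# third 64KB chunk of a file, assuming a plain file object as the source.
#
#     with open( 'data.bin', 'rb' ) as f:
#         provider = ChunkDataProvider( f, chunk_index=2, chunk_size=65536 )
#         chunk = next( iter( provider ) )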
|
funbaker/astropy
|
astropy/convolution/convolve.py
|
Python
|
bsd-3-clause
| 35,929
| 0.001169
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from functools import partial
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from ..utils.decorators import deprecated_renamed_argument
from .. import units as u
from ..nddata import support_nddata
from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from ..modeling.core import _CompoundModelMeta
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `numpy.ndarray` or `~astropy.nddata.NDData`
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array (default).
* 'fill'
Set values outside the array boundary to ``fill_value``.
* 'wrap'
Periodic boundary that wrap to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one prior to
convolving
nan_treatment : 'interpolate', 'fill'
        ``interpolate`` will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in both the image and the
        kernel.
'fill' will replace the NaN pixels with a fixed numerical value (default
zero, see ``fill_value``) prior to convolution
Note that if the kernel has a sum equal to zero, NaN interpolation
is not possible and will raise an exception
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol: float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
Returns
-------
result : `numpy.ndarray`
        An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
'''
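    # Illustrative usage sketch (not part of the original docstring): with the
    # default nan_treatment='interpolate', a NaN sample is replaced by a
    # kernel-weighted estimate from its neighbours, e.g.
    #     convolve([1., np.nan, 3., 4., 5.], np.array([0.25, 0.5, 0.25]))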
from .boundary_none import (convolve1d_boundary_none,
convolve2d_boundary_none,
convolve3d_boundary_none)
from .boundary_extend import (convolve1d_boundary_extend,
convolve2d_boundary_extend,
convolve3d_boundary_extend)
from .boundary_fill import (convolve1d_boundary_fill,
convolve2d_boundary_fill,
convolve3d_boundary_fill)
from .boundary_wrap import (convolve1d_boundary_wrap,
convolve2d_boundary_wrap,
convolve3d_boundary_wrap)
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# The cython routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Check if kernel is kernel instance
if isinstance(kernel, Kernel):
# Check if array is also kernel instance, if so convolve and
# return new kernel instance
if isinstance(array, Kernel):
if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D):
                new_array = convolve1d_boundary_fill(array.array, kernel.array,
                                                     0, True)
new_kernel = Kernel1D(array=new_array)
            elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D):
new_array = convolve2d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel2D(array=new_array)
else:
raise Exception("Can't convolve 1D and 2D kernel.")
new_kernel._separable = kernel._separable and array._separable
new_kernel._is_bool = False
return new_kernel
kernel = kernel.array
# Check that the arguments are lists or Numpy arrays
if isinstance(array, list):
array_internal = np.array(array, dtype=float)
array_dtype = array_internal.dtype
elif isinstance(array, np.ndarray):
# Note this won't copy if it doesn't have to -- which is okay
# because none of what follows modifies array_internal.
array_dtype = array.dtype
array_internal = array.astype(float, copy=False)
else:
raise TypeError("array should be a list or a Numpy array")
if isinstance(kernel, list):
kernel_internal = np.array(kernel, dtype=float)
elif isinstance(kernel, np.ndarray):
# Note this always makes a copy, since we will be modifying it
kernel_internal = kernel.astype(float)
|
TomMinor/mesh-surface-spawner
|
FreeformAttributes.py
|
Python
|
gpl-2.0
| 15,123
| 0.009588
|
# Terrain Data [ Tools ]
# -> PencilCurves [ ToolList ]
# -> Tool Data [ Radius, Distribution(Uniform(Min/Max), Gaussian(Mean, Falloff)), Scale(Min/max), Rotation(Min/Max), Offset, TypeData ]
# -> Type Data [ Density, (Scale, Rotation, Offset)Absolute, ObjectData ]
# -> Object Data [ instancedObject, instanceList ]
# -> instanceList
# - instance0.... instanceN
# -> RadialBound [ ToolList ]
# -> Tool Data [ Radius, Distribution(Uniform(Min/Max), Gaussian(Mean, Falloff)), Scale(Min/max), Rotation(Min/Max), Offset, TypeData ]
# -> Type Data [ Density, (Scale, Rotation, Offset)Absolute, ObjectData ]
# -> Object Data [ instancedObject, instanceList ]
# -> instanceList
# - instance0.... instanceN
# -> SquareBound [ ToolList ]
# -> Tool Data [ Size(x,y), Distribution(Uniform(Min/Max), Gaussian(Mean, Falloff)), Scale(Min/max), Rotation(Min/Max), Offset, TypeData ]
# -> Type Data [ Density, (Scale, Rotation, Offset)Absolute, ObjectData ]
# -> Object Data [ instancedObject, instanceList ]
# -> instanceList
# - instance0.... instanceN
# Store placement data in dynamic attributes, these are saved with the appropriate object
# and are stored in the scene file, so they persist between saving/loading
import maya.cmds as cmds
# Storing attributes
#removeSettings('curve1')
#storeToolSettings( 'curve1', 'PencilCurve1_pCube0_instances')
#a = getSettings('curve1')
#TODO : Add more error checking to getSettings
attributes = [ 'radius', 'scaleMax', 'scaleMin', 'rotationMax', 'rotationMin',
'instanceSets', 'distMin', 'distMax', 'distMean', 'distFalloff' ]
def removeSettings(name):
"""
Removes freeform tool setting specific attributes if they exist
name : The object name to attempt to remove the attributes from
"""
for attribute in attributes:
if cmds.attributeQuery(attribute, node=name, ex=True):
cmds.deleteAttr('%s.%s'%(name,attribute))
def getSettings(name):
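    # Collect whichever freeform tool attributes exist on the node into a dict.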
settings = {}
for attribute in attributes:
if cmds.attributeQuery(attribute, node=name, ex=True):
settings[attribute] = cmds.getAttr('%s.%s'%(name, attribute))
# If for some reason the object contains data for both uniform & gaussian,
# give gaussian priority
    if 'distMean' in settings and 'distFalloff' in settings:
del settings['distMin']
del settings['distMax']
    # Return an empty list if the proper distribution type isn't clear
    if ('distMean' in settings and 'distMin' in settings) or \
       ('distFalloff' in settings and 'distMax' in settings):
print "Ambiguous distribution types"
return []
return settings
class storeToolSettings:
def __init__(self, name, instanceSets, radius=1.0, distribution = ('gauss', 0.0, 1.0), scale=[(0.0,0.0,0.0), (1.0,1.0,1.0)], rotation=[(0.0,0.0,0.0), (360.0,360.0,360.0)]):
"""
Initialise dynamic attributes that are stored in the tool curve (name)
These will persist in the scene until deleted, even if the scene is saved and reloaded
name : The name of the curve object in the scene to store these attributes in
instanceSets : The names of the sets referencing the various object instances
radius : The radius of the tool, used for spawning objects along the freeform curve
distribution : Random distribution type when spawning objects, currently supports "uniform" and "gauss" types
Possible types:
("uniform", min, max)
("gauss", mean, falloff)
scale : The minimum and maximum scale vectors, in the form [(minx,miny,minz), (maxx,maxy,maxz)]
rotation : The minimum and maximum rotation vectors, in the form [(minx,miny,minz), (maxx,maxy,maxz)]
"""
self.name = name
# Currently 2 supported types of random placement
if(distribution[0] == 'gauss'):
meanVal, falloffVal = distribution[1], distribution[2]
self.setGaussian(meanVal, falloffVal)
elif(distribution[0] == 'uniform'):
minVal, maxVal = distribution[1], distribution[2]
self.setUniform(minVal, maxVal)
else:
raise ValueError('Unexpected distribution type %s'%(distribution[0]))
# Tool settings
self.setRadius(radius)
self.setScaleMin(scale[0])
self.setScaleMax(scale[1])
self.setRotationMin(rotation[0])
self.setRotationMax(rotation[1])
# Name of the set whose members are the instances made using this freeform tool
self.setInstanceSet(instanceSets)
def setRadius(self, radius):
if cmds.attributeQuery('radius', node=self.name, ex=True):
cmds.setAttr('%s.radius'%(self.name), radius)
else:
cmds.addAttr(self.name, ln ='radius', at='double', dv = radius, hidden=True)
def setScaleMax(self, maximum):
if not cmds.attributeQuery('scaleMax', node=self.name, ex=True):
cmds.addAttr(self.name, ln ='scaleMax', dt='double3', hidden=True)
cmds.setAttr('%s.scaleMax'%(self.name), *maximum, type='double3')
def setScaleMin(self, minimum):
if not (cmds.attributeQuery('scaleMin', node=self.name, ex=True)):
cmds.addAttr(self.name, ln ='scaleMin', dt='double3', hidden=True)
cmds.setAttr('%s.scaleMin'%(self.name), *minimum, type='double3')
    def setRotationMax(self, maximum):
        if not cmds.attributeQuery('rotationMax', node=self.name, ex=True):
            cmds.addAttr(self.name, ln ='rotationMax', dt='double3', hidden=True)
        cmds.setAttr('%s.rotationMax'%(self.name), *maximum, type='double3')
    def setRotationMin(self, minimum):
        if not cmds.attributeQuery('rotationMin', node=self.name, ex=True):
            cmds.addAttr(self.name, ln ='rotationMin', dt='double3', hidden=True)
        cmds.setAttr('%s.rotationMin'%(self.name), *minimum, type='double3')
def setInstanceSet(self, instances):
if not cmds.attributeQuery('instanceSets', node=self.name, ex=True):
cmds.addAttr(self.name, ln ='instanceSets', dt='stringArray', hidden=True)
cmds.setAttr('%s.instanceSets'%(self.name), len(instances), type='stringArray', *instances)
def setGaussian(self, mean, falloff):
# Remove uniform data
if cmds.attributeQuery('distMin', node=self.name, ex=True): cmds.deleteAttr('%s.distMin'%self.name)
if cmds.attributeQuery('distMax', node=self.name, ex=True): cmds.deleteAttr('%s.distMax'%self.name)
if cmds.attributeQuery('distMean', node=self.name, ex=True):
cmds.setAttr('%s.distMean'%(self.name), mean)
else:
cmds.addAttr(self.name, ln ='distMean', dv = mean, hidden=True)
if cmds.attributeQuery('distFalloff', node=self.name, ex=True):
cmds.setAttr('%s.distFalloff'%(self.name), falloff)
else:
cmds.addAttr(self.name, ln ='distFalloff', dv = falloff, hidden=True)
def setUniform(self, minimum, maximum):
# Remove gauss data
if cmds.attributeQuery('distMean', node=self.name, ex=True): cmds.deleteAttr('%s.distMean'%self.name)
if cmds.attributeQuery('distFalloff', node=self.name, ex=True): cmds.deleteAttr('%s.distFalloff'%self.name)
if cmds.attributeQuery('distMin', node=self.name, ex=True):
cmds.setAttr('%s.distMin'%(self.name), minimum)
else:
cmds.addAttr(self.name, ln ='distMin', dv = minimum, hidden=True)
if cmds.attributeQuery('distMax', node=self.name, ex=True):
cmds.setAttr('%s.distMax'%(self.name), maximum)
else:
cmds.addAttr(self.name, ln ='distMax', dv = maximum, hidden=True)
"""
# Instance specific
|
pymber/algorithms
|
algorithms/sorting/insertion_sort.py
|
Python
|
mit
| 336
| 0.008929
|
#!/usr/bin/env python
from lib.swap import *
def insertion_sort(L=[]):
''' Unstable implementation of insertion sort.
:param L: list of sortable elements.
'''
if len(L) < 2: return L
for i in range(len(L)):
j = i
while j and L[j] < L[j-1]:
swap(L, j, j-1)
            j -= 1
return L
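# Illustrative usage sketch (not part of the original module):
#     >>> insertion_sort([3, 1, 2])
#     [1, 2, 3]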
|
|
kennedyshead/home-assistant
|
tests/components/climacell/test_weather.py
|
Python
|
apache-2.0
| 15,479
| 0
|
"""Tests for Climacell weather entity."""
from __future__ import annotations
from datetime import datetime
import logging
from typing import Any
from unittest.mock import patch
import pytest
from homeassistant.components.climacell.config_flow import (
_get_config_schema,
_get_unique_id,
)
from homeassistant.components.climacell.const import (
ATTR_CLOUD_COVER,
ATTR_PRECIPITATION_TYPE,
ATTR_WIND_GUST,
ATTRIBUTION,
DOMAIN,
)
from homeassistant.components.weather import (
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SUNNY,
ATTR_FORECAST,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_OZONE,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_FRIENDLY_NAME
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers.entity_registry import async_get
from homeassistant.util import dt as dt_util
from .const import API_V3_ENTRY_DATA, API_V4_ENTRY_DATA
from tests.common import MockConfigEntry
_LOGGER = logging.getLogger(__name__)
@callback
def _enable_entity(hass: HomeAssistant, entity_name: str) -> None:
"""Enable disabled entity."""
ent_reg = async_get(hass)
entry = ent_reg.async_get(entity_name)
updated_entry = ent_reg.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
async def _setup(hass: HomeAssistant, config: dict[str, Any]) -> State:
"""Set up entry and return entity state."""
with patch(
"homeassistant.util.dt.utcnow",
return_value=datetime(2021, 3, 6, 23, 59, 59, tzinfo=dt_util.UTC),
):
data = _get_config_schema(hass)(config)
config_entry = MockConfigEntry(
domain=DOMAIN,
data=data,
            unique_id=_get_unique_id(hass, data),
version=1,
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
        for entity_name in ("hourly", "nowcast"):
_enable_entity(hass, f"weather.climacell_{entity_name}")
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 3
return hass.states.get("weather.climacell_daily")
async def test_v3_weather(
hass: HomeAssistant,
climacell_config_entry_update: pytest.fixture,
) -> None:
"""Test v3 weather data."""
weather_state = await _setup(hass, API_V3_ENTRY_DATA)
assert weather_state.state == ATTR_CONDITION_SUNNY
assert weather_state.attributes[ATTR_ATTRIBUTION] == ATTRIBUTION
assert weather_state.attributes[ATTR_FORECAST] == [
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_SUNNY,
ATTR_FORECAST_TIME: "2021-03-07T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
ATTR_FORECAST_TEMP: 7,
ATTR_FORECAST_TEMP_LOW: -5,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-08T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
ATTR_FORECAST_TEMP: 10,
ATTR_FORECAST_TEMP_LOW: -4,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-09T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
ATTR_FORECAST_TEMP: 19,
ATTR_FORECAST_TEMP_LOW: 0,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-10T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
ATTR_FORECAST_TEMP: 18,
ATTR_FORECAST_TEMP_LOW: 3,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-11T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
ATTR_FORECAST_TEMP: 20,
ATTR_FORECAST_TEMP_LOW: 9,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-12T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0.0457,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
ATTR_FORECAST_TEMP: 20,
ATTR_FORECAST_TEMP_LOW: 12,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-13T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
ATTR_FORECAST_TEMP: 16,
ATTR_FORECAST_TEMP_LOW: 7,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_RAINY,
ATTR_FORECAST_TIME: "2021-03-14T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 1.0744,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 75,
ATTR_FORECAST_TEMP: 6,
ATTR_FORECAST_TEMP_LOW: 3,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_SNOWY,
ATTR_FORECAST_TIME: "2021-03-15T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 7.3050,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 95,
ATTR_FORECAST_TEMP: 1,
ATTR_FORECAST_TEMP_LOW: 0,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-16T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0.0051,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
ATTR_FORECAST_TEMP: 6,
ATTR_FORECAST_TEMP_LOW: -2,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-17T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
ATTR_FORECAST_TEMP: 11,
ATTR_FORECAST_TEMP_LOW: 1,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-18T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
ATTR_FORECAST_TEMP: 12,
ATTR_FORECAST_TEMP_LOW: 6,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-19T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0.1778,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 45,
ATTR_FORECAST_TEMP: 9,
ATTR_FORECAST_TEMP_LOW: 5,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_RAINY,
ATTR_FORECAST_TIME: "2021-03-20T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 1.2319,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 55,
ATTR_FORECAST_TEMP: 5,
ATTR_FORECAST_TEMP_LOW: 3,
},
{
ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
ATTR_FORECAST_TIME: "2021-03-21T00:00:00+00:00",
ATTR_FORECAST_PRECIPITATION: 0.0432,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: 20,
ATTR_FORECAST_TEMP: 7,
ATTR_FORECAST_TEMP_LOW: 1,
},
]
assert weather_state.attributes[ATTR_FRIENDLY_NAME] == "ClimaCell - Daily"
assert weather_state.attributes[ATTR_WEATHER_HUMIDITY] == 24
assert weather_state.attributes[ATTR_WEATHER_OZONE] == 52.625
assert weather
|
ANR-COMPASS/shesha
|
guardians/__init__.py
|
Python
|
gpl-3.0
| 138
| 0
|
"""
|
GuARDIANs (packaGe for Ao eRror breakDown estImation And exploitatioN)
"""
__all__ = ["groot", "gamora",
|
"roket", "drax", "starlord"]
|
marcuskd/pyram
|
pyram/Tests/TestPyRAMmp.py
|
Python
|
bsd-3-clause
| 4,888
| 0.001432
|
'''
TestPyRAMmp unit test class.
Uses configuration file TestPyRAMmp_Config.xml.
Computational range and depth steps and number of repetitions are configurable.
Number of PyRAM runs = number of frequencies * number of repetitions.
Tests should always pass but speedup will depend upon computing environment.
'''
import unittest
import xml.etree.ElementTree as et
from time import time
from copy import deepcopy
import numpy
from pyram.PyRAMmp import PyRAMmp
from pyram.PyRAM import PyRAM
class TestPyRAMmp(unittest.TestCase):
'''
Test PyRAMmp using the test case supplied with RAM and different frequencies.
'''
def setUp(self):
config_file = 'TestPyRAMmp_Config.xml'
root = et.parse(config_file).getroot()
for child in root:
if child.tag == 'RangeStep':
dr = float(child.text)
if child.tag == 'DepthStep':
dz = float(child.text)
if child.tag == 'NumberOfRepetitions':
self.nrep = int(child.text)
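        # Illustrative sketch only (not part of the original repository): the
        # loop above expects TestPyRAMmp_Config.xml to contain child tags of
        # roughly this shape; the root tag name and the values shown here are
        # made up:
        #   <Config>
        #       <RangeStep>50</RangeStep>
        #       <DepthStep>2</DepthStep>
        #       <NumberOfRepetitions>4</NumberOfRepetitions>
        #   </Config>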
|
self.pyram_args = dict(zs=50.,
zr=50.,
z_ss=numpy.array([0., 100, 400]),
rp_ss=numpy.array([0., 25000]),
                               cw=numpy.array([[1480., 1530],
[1520, 1530],
[1530, 1530]]),
z_sb=numpy.array([0.]),
rp_sb=numpy.array([0.]),
cb=numpy.array([[1700.]]),
rhob=numpy.array([[1.5]]),
attn=numpy.array([[0.5]]),
rbzb=numpy.array([[0., 200],
[40000, 400]]))
self.pyram_kwargs = dict(rmax=50000.,
dr=dr,
dz=dz,
zmplt=500.,
c0=1600.)
self.freqs = [30., 40, 50, 60, 70]
self.ref_r = []
self.ref_z = []
self.ref_tl = []
for fn in range(len(self.freqs)):
pyram_args = deepcopy(self.pyram_args)
pyram_kwargs = deepcopy(self.pyram_kwargs)
pyram = PyRAM(self.freqs[fn], pyram_args['zs'],
pyram_args['zr'], pyram_args['z_ss'],
pyram_args['rp_ss'], pyram_args['cw'],
pyram_args['z_sb'], pyram_args['rp_sb'],
pyram_args['cb'], pyram_args['rhob'],
pyram_args['attn'], pyram_args['rbzb'],
**pyram_kwargs)
results = pyram.run()
self.ref_r.append(results['Ranges'])
self.ref_z.append(results['Depths'])
self.ref_tl.append(results['TL Grid'])
def tearDown(self):
pass
def test_PyRAMmp(self):
'''
Test that the results from PyRAMmp are the same as from PyRAM. Also measure the speedup.
'''
freqs_rep = numpy.tile(self.freqs, self.nrep)
num_runs = len(freqs_rep)
print(num_runs, 'PyRAM runs set up, running...', )
runs = []
for n in range(num_runs):
pyram_args = deepcopy(self.pyram_args)
pyram_args['freq'] = freqs_rep[n]
pyram_kwargs = deepcopy(self.pyram_kwargs)
pyram_kwargs['id'] = n
runs.append((pyram_args, pyram_kwargs))
pyram_mp = PyRAMmp()
nproc = pyram_mp.pool._processes
t0 = time()
pyram_mp.submit_runs(runs[:int(num_runs / 2)]) # Submit in 2 batches
pyram_mp.submit_runs(runs[int(num_runs / 2):])
self.elap_time = time() - t0 # Approximate value as process_time can't be used
results = [None] * num_runs
self.proc_time = 0
for result in pyram_mp.results:
rid = result['ID']
results[rid] = result
self.proc_time += result['Proc Time']
pyram_mp.close()
for n in range(num_runs):
freq = runs[n][0]['freq']
ind = self.freqs.index(freq)
self.assertTrue(numpy.array_equal(self.ref_r[ind], results[n]['Ranges']),
'Ranges are not equal')
self.assertTrue(numpy.array_equal(self.ref_z[ind], results[n]['Depths']),
'Depths are not equal')
self.assertTrue(numpy.array_equal(self.ref_tl[ind], results[n]['TL Grid']),
'Transmission Loss values are not equal')
print('Finished.\n')
speed_fact = 100 * (self.proc_time / nproc) / self.elap_time
print('{0:.1f} % of expected speed up achieved'.format(speed_fact))
if __name__ == "__main__":
unittest.main()
|
colab/colab
|
colab/utils/tests/test_conf.py
|
Python
|
gpl-2.0
| 4,536
| 0
|
import sys
import os
from django.test import TestCase, override_settings, Client
from django.conf import settings
from ..conf import (DatabaseUndefined, validate_database,
InaccessibleSettings, _load_py_file, load_py_settings,
load_colab_apps, load_widgets_settings)
from mock import patch
test_files_dir = "./colab/utils/tests"
class TestConf(TestCase):
@override_settings(DEBUG=False, DATABASES={
'default': {
'NAME': settings.DEFAULT_DATABASE,
},
})
def test_database_undefined(self):
with self.assertRaises(DatabaseUndefined):
validate_database(settings.DATABASES, settings.DEFAULT_DATABASE,
settings.DEBUG)
def test_load_py_file_with_io_error(self):
self.assertRaises(InaccessibleSettings,
_load_py_file, 'settings_test', '/etc/colab/')
def test_load_py_file_with_syntax_error(self):
with file('/tmp/settings_with_syntax_error.py', 'w') as temp_settings:
temp_settings.write('(')
self.assertRaises(InaccessibleSettings,
_load_py_file, 'settings_with_syntax_error', '/tmp')
def test_load_py_file(self):
py_settings = _load_py_file('colab_settings', test_files_dir)
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
@patch('os.getenv', return_value='/path/fake/settings.py')
def test_load_py_settings_with_inaccessible_settings(self, mock):
self.assertRaises(InaccessibleSettings, load_py_settings)
def test_load_py_settings_without_settings_d(self):
COLAB_SETTINGS_DIR = ''
if 'COLAB_SETTINGS_DIR' in os.environ:
COLAB_SETTINGS_DIR = os.environ['COLAB_SETTINGS_DIR']
del os.environ['COLAB_SETTINGS_DIR']
py_settings = load_py_settings('/path/fake/settings.d/test.py')
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
if COLAB_SETTINGS_DIR:
os.environ['COLAB_SETTINGS_DIR'] = COLAB_SETTINGS_DIR
@patch('os.listdir', return_value=[test_files_dir + '/settings.d/test.py',
'non_python_file'])
@patch('colab.utils.conf._load_py_file',
side_effect=[{'SOCIAL_NETWORK_ENABLED': True, 'EMAIL_PORT': 25},
{'TEST': 'test'}])
def test_load_py_settings_with_settings_d(self, mock_py, mock_listdir):
py_settings = load_py_settings(test_files_dir + '/settings.d/')
self.assertIn('SOCIAL_NETWORK_ENABLED', py_settings)
self.assertTrue(py_settings['SOCIAL_NETWORK_ENABLED'])
self.assertIn('EMAIL_PORT', py_settings)
self.assertEquals(py_settings['EMAIL_PORT'], 25)
self.assertIn('TEST', py_settings)
self.assertEquals(py_settings['TEST'], 'test')
@patch('os.getenv', return_value='/path/fake/plugins.d/')
def test_load_colab_apps_without_plugins_d_directory(self, mock):
colab_apps = load_colab_apps()
self.assertIn('COLAB_APPS', colab_apps)
self.assertEquals(colab_apps['COLAB_APPS'], {})
@patch('os.getenv', return_value=test_files_dir + '/plugins.d/')
def test_load_colab_apps_with_plugins_d_directory(self, os_getenv):
sys.path.insert(0, os_getenv.return_value)
colab_apps = load_colab_apps()
self.assertIn('gitlab', colab_apps['COLAB_APPS'])
self.assertIn('noosfero', colab_apps['COLAB_APPS'])
sys.path.remove(os_getenv.return_value)
self.assertNotIn(os_getenv.return_value, sys.path)
@patch('os.getenv', return_value='/path/fake/widgets_settings.py')
def test_load_widgets_settings_without_settings(self, mock):
self.assertIsNone(load_widgets_settings())
@patch('os.getenv', side_effect=[test_files_dir + '/colab_settings.py',
'/path/fake/widgets_settings.py'])
def test_load_widgets_settings_without_settings_d(self, mock):
self.assertIsNone(load_widgets_settings())
|
    def test_blacklist(self):
client = Client()
response = client.get('/test_blacklist')
self.assertEquals(403, response.status_code)
|
molmod/yaff
|
yaff/analysis/test/test_blav.py
|
Python
|
gpl-3.0
| 1,509
| 0.001325
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import division
import tempfile, shutil, os, numpy as np
from yaff import *
from molmod.test.common import tmpdir
def test_blav():
# generate a time-correlated random signal
n = 50000
eps0 = 30.0/n
eps1 = 1.0
y = np.sin(np.random.normal(0, eps0, n).cumsum() + np.random.normal(0, eps1, n))
# create a temporary directory to write the plot to
with tmpdir(__name__, 'test_blav') as dn:
fn_png = '%s/blav.png' % dn
        error, sinef = blav(y, 100, fn_png)
assert os.path.isfile(fn_png)
|
apsun/AniConvert
|
aniconvert.py
|
Python
|
mit
| 34,380
| 0.001716
|
#!/usr/bin/env python
###############################################################
# AniConvert: Batch convert directories of videos using
# HandBrake. Intended to be used on anime and TV series,
# where files downloaded as a batch tend to have the same
# track layout. Can also automatically select a single audio
# and subtitle track based on language preference.
#
# Copyright (c) 2015 Andrew Sun (@crossbowffs)
# Distributed under the MIT license
###############################################################
from __future__ import print_function
import argparse
import collections
import errno
import logging
import os
import re
import subprocess
import sys
###############################################################
# Configuration values, no corresponding command-line args
###############################################################
# Name of the HandBrake CLI binary. Set this to the full path
# of the binary if the script cannot find it automatically.
HANDBRAKE_EXE = "HandBrakeCLI"
# The format string for logging messages
LOGGING_FORMAT = "[%(levelname)s] %(message)s"
# If no output directory is explicitly specified, the output
# files will be placed in a directory with this value appended
# to the name of the input directory.
DEFAULT_OUTPUT_SUFFIX = "-converted"
# Define the arguments to pass to HandBrake.
# Do not define any of the following:
# -i <input>
# -o <output>
# -a <audio track>
# -s <subtitle track>
# -w <width>
# -l <height>
# Obviously, do not define anything that would cause HandBrake
# to not convert the video file either.
HANDBRAKE_ARGS = """
-E ffaac
-B 160
-6 dpl2
-R Auto
-e x264
-q 20.0
--vfr
--audio-copy-mask aac,ac3,dtshd,dts,mp3
--audio-fallback ffaac
--loose-anamorphic
--modulus 2
--x264-preset medium
--h264-profile high
--h264-level 3.1
--subtitle-burned
"""
###############################################################
# Default values and explanations for command-line args
###############################################################
# List of video formats to process. Other file formats in the
# input directory will be ignored. On the command line, specify
# as "-i mkv,mp4"
INPUT_VIDEO_FORMATS = ["mkv", "mp4"]
# The format to convert the videos to. Only "mp4", "mkv", and
# "m4v" are accepted, because those are the only formats that
# HandBrake can write. On the command line, specify as "-j mp4"
OUTPUT_VIDEO_FORMAT = "mp4"
# A list of preferred audio languages, ordered from most
# to least preferable. If there is only one audio track in the
# most preferable language, it will be automatically selected.
# If more than one track is in the most preferable language,
# you will be prompted to select one. If no tracks are
# in the most preferable language, the program will check
# the second most preferable language, and so on. This value
# should use the iso639-2 (3 letter) language code format.
# You may also specify "none" as one of the items in this list.
# If it is reached, the track will be discarded. For example,
# "-a eng,none" will use English audio if it is available, or
# remove the audio track otherwise. On the command line,
# specify as "-a jpn,eng"
AUDIO_LANGUAGES = ["jpn", "eng"]
# This is the same as the preferred audio languages, but
# for subtitles. On the command line, specify as "-s eng"
SUBTITLE_LANGUAGES = ["eng"]
# What to do when the destination file already exists. Can be
# one of:
# "prompt": Ask the user what to do
# "skip": Skip the file and proceed to the next one
# "overwrite": Overwrite the destination file
# On the command line, specify as "-w skip"
DUPLICATE_ACTION = "skip"
# The width and height of the output video, in the format
# "1280x720". "1080p" and "720p" are common values and
# translate to 1920x1080 and 1280x720, respectively.
# A value of "auto" is also accepted, and will preserve
# the input video dimensions. On the command line, specify
# as "-d 1280x720", "-d 720p", or "-d auto"
OUTPUT_DIMENSIONS = "auto"
# The minimum severity for an event to be logged. Levels
# from least severe to most severe are "debug", "info",
# "warning", "error", and "critical". On the command line,
# specify as "-l info"
LOGGING_LEVEL = "info"
# By default, if there is only a single track, and it has
# language code "und" (undefined), it will automatically be
# selected. If you do not want this behavior, set this flag
# to true. On the command line, specify as "-u"
MANUAL_UND = False
# Set this to true to search sub-directories within the input
# directory. Files will be output in the correspondingly named
# folder in the destination directory. On the command line,
# specify as "-r"
RECURSIVE_SEARCH = False
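# Putting the documented flags together, the option part of a command line for
# this script might look like the following (illustrative only; the positional
# arguments for the input/output directories are defined further down in the
# file and are not shown here):
#   -i mkv,mp4 -j mp4 -a jpn,eng -s eng -w skip -d 720p -l info -u -r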
###############################################################
# End of configuration values, code begins here
###############################################################
try:
input = raw_input
except NameError:
pass
class TrackInfo(object):
def __init__(self, audio_track, subtitle_track):
self.audio_track = audio_track
self.subtitle_track = subtitle_track
class BatchInfo(object):
def __init__(self, dir_path, track_map):
self.dir_path = dir_path
self.track_map = track_map
class FFmpegStreamInfo(object):
    def __init__(self, stream_index, codec_type, codec_name, language_code, metadata):
self.stream_index = stream_index
self.codec_type = codec_type
self.codec_name = codec_name
        self.language_code = language_code
self.metadata = metadata
class HandBrakeAudioInfo(object):
pattern1 = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\)")
pattern2 = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\), (\d+)Hz, (\d+)bps")
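    # Illustrative examples (not from the original file) of strings these
    # regexes accept:
    #   pattern1: "1, Japanese (AAC) (iso639-2: jpn)"
    #   pattern2: "1, Japanese (AAC) (iso639-2: jpn), 48000Hz, 160000bps"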
def __init__(self, info_str):
match = self.pattern1.match(info_str)
if not match:
raise ValueError("Unknown audio track info format: " + repr(info_str))
self.index = int(match.group(1))
self.description = match.group(2)
self.language_code = match.group(3)
match = self.pattern2.match(info_str)
if match:
self.sample_rate = int(match.group(4))
self.bit_rate = int(match.group(5))
else:
self.sample_rate = None
self.bit_rate = None
self.title = None
def __str__(self):
format_str = (
"Description: {description}\n"
"Language code: {language_code}"
)
if self.sample_rate:
format_str += "\nSample rate: {sample_rate}Hz"
if self.bit_rate:
format_str += "\nBit rate: {bit_rate}bps"
return format_str.format(**self.__dict__)
def __hash__(self):
return hash((
self.index,
self.description,
self.language_code,
self.sample_rate,
self.language_code,
self.title
))
def __eq__(self, other):
if not isinstance(other, HandBrakeAudioInfo):
return False
return (
self.index == other.index and
self.description == other.description and
self.language_code == other.language_code and
self.sample_rate == other.sample_rate and
self.language_code == other.language_code and
self.title == other.title
)
class HandBrakeSubtitleInfo(object):
pattern = re.compile(r"(\d+), (.+) \(iso639-2: ([a-z]{3})\) \((\S+)\)\((\S+)\)")
def __init__(self, info_str):
match = self.pattern.match(info_str)
if not match:
raise ValueError("Unknown subtitle track info format: " + repr(info_str))
self.index = int(match.group(1))
self.language = match.group(2)
self.language_code = match.group(3)
self.format = match.group(4)
self.source = match.group(5)
self.title = None
def __str__(self):
format_str = (
"Language: {language}\n"
"Language code: {language_code}\n"
"Format: {format}\n"
"Source: {source}"
)
return format_str.format(**self.__dict__)
def __hash__(self):
return hash((
|
dmordom/nipype
|
nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py
|
Python
|
bsd-3-clause
| 3,157
| 0.029142
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.mrtrix.tracking import DiffusionTensorStreamlineTrack
def test_DiffusionTensorStreamlineTrack_inputs():
input_map = dict(args=dict(argstr='%s',
),
cutoff_value=dict(argstr='-cutoff %s',
units='NA',
),
desired_number_of_tracks=dict(argstr='-number %d',
),
do_not_precompute=dict(argstr='-noprecomputed',
),
environ=dict(nohash=True,
usedefault=True,
),
exclude_file=dict(argstr='-exclude %s',
xor=['exclude_file', 'exclude_spec'],
),
exclude_spec=dict(argstr='-exclude %s',
position=2,
sep=',',
units='mm',
    xor=['exclude_file', 'exclude_spec'],
),
gradient_encoding_file=dict(argstr='-grad %s',
mandatory=True,
position=-2,
    ),
    ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
include_file=dict(argstr='-include %s',
xor=['include_file', 'include_spec'],
),
include_spec=dict(argstr='-include %s',
position=2,
sep=',',
units='mm',
xor=['include_file', 'include_spec'],
),
initial_cutoff_value=dict(argstr='-initcutoff %s',
units='NA',
),
initial_direction=dict(argstr='-initdirection %s',
units='voxels',
),
inputmodel=dict(argstr='%s',
position=-3,
usedefault=True,
),
mask_file=dict(argstr='-mask %s',
xor=['mask_file', 'mask_spec'],
),
mask_spec=dict(argstr='-mask %s',
position=2,
sep=',',
units='mm',
xor=['mask_file', 'mask_spec'],
),
maximum_number_of_tracks=dict(argstr='-maxnum %d',
),
maximum_tract_length=dict(argstr='-length %s',
units='mm',
),
minimum_radius_of_curvature=dict(argstr='-curvature %s',
units='mm',
),
minimum_tract_length=dict(argstr='-minlength %s',
units='mm',
),
no_mask_interpolation=dict(argstr='-nomaskinterp',
),
out_file=dict(argstr='%s',
name_source=['in_file'],
name_template='%s_tracked.tck',
output_name='tracked',
position=-1,
),
seed_file=dict(argstr='-seed %s',
xor=['seed_file', 'seed_spec'],
),
seed_spec=dict(argstr='-seed %s',
position=2,
sep=',',
units='mm',
xor=['seed_file', 'seed_spec'],
),
step_size=dict(argstr='-step %s',
units='mm',
),
stop=dict(argstr='-stop',
),
terminal_output=dict(mandatory=True,
nohash=True,
),
unidirectional=dict(argstr='-unidirectional',
),
)
inputs = DiffusionTensorStreamlineTrack.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DiffusionTensorStreamlineTrack_outputs():
output_map = dict(tracked=dict(),
)
outputs = DiffusionTensorStreamlineTrack.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
jmacmahon/invenio
|
modules/bibformat/lib/elements/bfe_year.py
|
Python
|
gpl-2.0
| 1,262
| 0.003962
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints the publication year
"""
__revision__ = "$Id$"
import re
def format_element(bfo):
"""
Prints the publication year.
@see: pagination.py, publisher.py, reprints.py, imprint.py, place.py
"""
for date_field in ['773__y', '260__c', '269__c', '909C4y', '925__a']:
date = bfo.field(date_field)
        match_obj = re.search(r'\d\d\d\d', date)
if match_obj is not None:
return match_obj.group()
|
angelapper/edx-platform
|
common/lib/capa/capa/safe_exec/tests/test_lazymod.py
|
Python
|
agpl-3.0
| 2,041
| 0.00049
|
"""Test lazymod.py"""
import sys
import unittest
from capa.safe_exec.lazymod import LazyModule
class ModuleIsolation(object):
"""
Manage changes to sys.modules so that we can roll back imported modules.
Create this object, it will snapshot the currently imported modules. When
you call `clean_up()`, it will delete any module imported since its creation.
"""
def __init__(self):
# Save all the names of all the imported modules.
self.mods = set(sys.modules)
def clean_up(self):
# Get a list of modules that didn't exist when we were created
        new_mods = [m for m in sys.modules if m not in self.mods]
# and delete them all so another import will run code for real again.
for m in new_mods:
del sys.modules[m]
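# A minimal usage sketch of the helper above (illustrative, not part of the
# original file):
#
#   isolation = ModuleIsolation()   # snapshot sys.modules
#   import colorsys                 # any module not imported yet
#   isolation.clean_up()            # 'colorsys' is removed from sys.modules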
class TestLazyMod(unittest.TestCase):
def setUp(self):
super(TestLazyMod, self).setUp()
# Each test will remove modules that it imported.
        self.addCleanup(ModuleIsolation().clean_up)
def test_simple(self):
# Import some stdlib module that has not been imported before
module_name = 'colorsys'
if module_name in sys.modules:
# May have been imported during test discovery, remove it again
del sys.modules[module_name]
assert module_name not in sys.modules
colorsys = LazyModule(module_name)
hsv = colorsys.rgb_to_hsv(.3, .4, .2)
self.assertEqual(hsv[0], 0.25)
def test_dotted(self):
# wsgiref is a module with submodules that is not already imported.
# Any similar module would do. This test demonstrates that the module
# is not already imported
module_name = 'wsgiref.util'
if module_name in sys.modules:
# May have been imported during test discovery, remove it again
del sys.modules[module_name]
assert module_name not in sys.modules
wsgiref_util = LazyModule(module_name)
self.assertEqual(wsgiref_util.guess_scheme({}), "http")
|
hunter-87/binocular-dense-stereo
|
cpp_pcl_visualization/pcl_visualization_pcd/pcl_test.py
|
Python
|
gpl-2.0
| 190
| 0.015789
|
import pcl
p = pcl.PointCloud()
p.from_file("test_pcd.pcd")
fil = p.make_statistical_outlier_filter()
fil.set_mean_k(50)
fil.set_std_dev_mul_thresh(1.0)
fil.filter().to_file("inliers.pcd")
|
peterayeni/django-smsgateway
|
smsgateway/backends/mobileweb.py
|
Python
|
bsd-3-clause
| 2,600
| 0.002692
|
import datetime
import re
from django.http import HttpResponse
from django.utils.http import urlencode
import smsgateway
from smsgateway.models import SMS
from smsgateway.backends.base import SMSBackend
from smsgateway.utils import check_cell_phone_number
class MobileWebBackend(SMSBackend):
def get_send_url(self, sms_request, account_dict):
# Encode message
msg = sms_request.msg
try:
msg = msg.encode('latin-1')
        except Exception:
pass
querystring = urlencode({
'login': account_dict['username'],
'pass': account_dict['password'],
'gsmnr': sms_request.to[0][1:],
'sid': account_dict['sid'],
'msgcontent': msg,
})
return u'http://gateway.mobileweb.be/smsin.asp?%s' % querystring
def validate_send_result(self, result):
return 'accepted' in result
    def handle_incoming(self, request, reply_using=None):
request_dict = request.POST if request.method == 'POST' else request.GET
# Check whether we've gotten a SendDateTime
if not 'SendDateTime' in request_dict:
return HttpResponse('')
# Check whether we've already received this message
        if SMS.objects.filter(gateway_ref=request_dict['MessageID']).exists():
return HttpResponse('OK')
# Parse and process message
year, month, day, hour, minute, second, ms = map(int, re.findall(r'(\d+)', request_dict['SendDateTime']))
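        # The unpacking above only assumes seven numeric groups in the value,
        # e.g. a timestamp shaped like "2015-06-01 12:30:45.123" (illustrative
        # example, not taken from the gateway documentation).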
sms_dict = {
'sent': datetime.datetime(year, month, day, hour, minute, second),
'content': request_dict['MsgeContent'],
'sender': check_cell_phone_number(request_dict['SenderGSMNR']),
'to': request_dict['ShortCode'],
'operator': int(request_dict['Operator']),
'gateway_ref': request_dict['MessageID'],
'backend': self.get_slug(),
}
sms = SMS(**sms_dict)
response = self.process_incoming(request, sms)
# If necessary, send response SMS
if response is not None:
signature = smsgateway.get_account(reply_using)['reply_signature']
success = smsgateway.send([sms.sender], response, signature, using=reply_using)
# Sending failed, queue SMS
if not success:
smsgateway.send_queued(sms.sender, response, signature, reply_using)
return HttpResponse(response)
return HttpResponse('OK')
def get_slug(self):
return 'mobileweb'
def get_url_capacity(self):
return 1
|
FinnStutzenstein/OpenSlides
|
server/openslides/users/management/commands/createinitialuser.py
|
Python
|
mit
| 1,391
| 0.002157
|
from django.core.management.base import BaseCommand
from django.db import connection
from .createopenslidesuser import Command as CreateOpenslidesUser
class Command(BaseCommand):
"""
Command to create an OpenSlides user.
"""
help = "Creates an OpenSlides user with id=2 if no other user than the administrator were created before."
def add_arguments(self, parser):
parser.add_argument("first_name", help="The first name of the new user.")
|
parser.add_argument("last_name", help="The last name of the new user.")
parser.add_argument("username", help="The username of the new user.")
parser.add_argument("password", help="The password of the new user.")
parser.add_argument("groups_id", help="The group id of the new user.")
parser.add_argument("--email", help="The email address of the new user.")
def handle(self, *args, **options):
        options["userid"] = 2
with connection.cursor() as cursor:
cursor.execute("SELECT last_value FROM users_user_id_seq;")
last_id = cursor.fetchone()[0]
if last_id > 1:
self.stdout.write(
self.style.NOTICE(
"There have users been created before. Do nothing."
)
)
return
CreateOpenslidesUser().handle(**options)
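# Example invocation (illustrative; assumes the usual Django manage.py entry
# point, which is not part of this file):
#   python manage.py createinitialuser Max Mustermann max secret_password 2 \
#       --email max@example.com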
|
keedio/sahara
|
sahara/utils/openstack/heat.py
|
Python
|
apache-2.0
| 10,597
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from heatclient import client as heat_client
from oslo.config import cfg
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.openstack.common import log as logging
from sahara.utils import files as f
from sahara.utils import general as g
from sahara.utils.openstack import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SSH_PORT = 22
def client():
ctx = context.current()
heat_url = base.url_for(ctx.service_catalog, 'orchestration')
return heat_client.Client('1', heat_url, token=ctx.token)
def get_stack(stack_name):
heat = client()
for stack in heat.stacks.list(filters={'stack_name': stack_name}):
return stack
raise ex.NotFoundException(_('Failed to find stack %(stack)s')
% {'stack': stack_name})
def wait_stack_completion(stack):
    # NOTE: an empty status is also expected because the stack status
    # may not be set in the heat database yet
while stack.status in ['IN_PROGRESS', '']:
context.sleep(1)
stack.get()
if stack.status != 'COMPLETE':
raise ex.HeatStackException(stack.stack_status)
def _get_inst_name(cluster_name, ng_name, index):
return g.generate_instance_name(cluster_name, ng_name, index + 1)
def _get_aa_group_name(cluster_name):
return g.generate_aa_group_name(cluster_name)
def _get_port_name(inst_name):
return '%s-port' % inst_name
def _get_floating_name(inst_name):
return '%s-floating' % inst_name
def _get_floating_assoc_name(inst_name):
return '%s-floating-assoc' % inst_name
def _get_volume_name(inst_name, volume_idx):
return '%s-volume-%i' % (inst_name, volume_idx)
def _get_volume_attach_name(inst_name, volume_idx):
return '%s-volume-attachment-%i' % (inst_name, volume_idx)
def _load_template(template_name, fields):
template_file = f.get_file_text('resources/%s' % template_name)
return template_file.rstrip() % fields
def _prepare_userdata(userdata):
"""Converts userdata as a text into format consumable by heat template."""
userdata = userdata.replace('"', '\\"')
lines = userdata.splitlines()
return '"' + '",\n"'.join(lines) + '"'
class ClusterTemplate(object):
def __init__(self, cluster):
self.cluster = cluster
self.node_groups_extra = {}
def add_node_group_extra(self, node_group_id, node_count,
gen_userdata_func):
self.node_groups_extra[node_group_id] = {
'node_count': node_count,
'gen_userdata_func': gen_userdata_func
}
# Consider using a single Jinja template for all this
def instantiate(self, update_existing, disable_rollback=True):
main_tmpl = _load_template('main.heat',
{'resources': self._serialize_resources()})
heat = client()
kwargs = {
'stack_name': self.cluster.name,
'timeout_mins': 180,
'disable_rollback': disable_rollback,
'parameters': {},
'template': json.loads(main_tmpl)}
if not update_existing:
heat.stacks.create(**kwargs)
else:
for stack in heat.stacks.list():
if stack.stack_name == self.cluster.name:
stack.update(**kwargs)
break
return ClusterStack(self, get_stack(self.cluster.name))
def _need_aa_server_group(self, node_group):
for node_process in node_group.node_processes:
if node_process in self.cluster.anti_affinity:
return True
return False
def _get_anti_affinity_scheduler_hints(self, node_group):
if not self._need_aa_server_group(node_group):
return ''
return ('"scheduler_hints" : %s,' %
json.dumps({"group": {"Ref": _get_aa_group_name(
self.cluster.name)}}))
def _serialize_resources(self):
resources = []
if self.cluster.anti_affinity:
resources.extend(self._serialize_aa_server_group())
for ng in self.cluster.node_groups:
if ng.auto_security_group:
resources.extend(self._serialize_auto_security_group(ng))
for idx in range(0, self.node_groups_extra[ng.id]['node_count']):
resources.extend(self._serialize_instance(ng, idx))
return ',\n'.join(resources)
def _serialize_auto_security_group(self, ng):
fields = {
'security_group_name': g.generate_auto_security_group_name(ng),
'security_group_description':
"Auto security group created by Sahara for Node Group "
"'%s' of cluster '%s'." % (ng.name, ng.cluster.name),
'rules': self._serialize_auto_security_group_rules(ng)}
yield _load_template('security_group.heat', fields)
def _serialize_auto_security_group_rules(self, ng):
rules = []
for port in ng.open_ports:
rules.append({"remote_ip_prefix": "0.0.0.0/0", "protocol": "tcp",
"port_range_min": port, "port_range_max": port})
rules.append({"remote_ip_prefix": "0.0.0.0/0", "protocol": "tcp",
"port_range_min": SSH_PORT, "port_range_max": SSH_PORT})
return json.dumps(rules)
def _serialize_instance(self, ng, idx):
inst_name = _get_inst_name(self.cluster.name, ng.name, idx)
nets = ''
security_groups = ''
if CONF.use_neutron:
port_name = _get_port_name(inst_name)
yield self._serialize_port(port_name,
self.cluster.neutron_management_network,
self._get_security_groups(ng))
nets = '"networks" : [{ "port" : { "Ref" : "%s" }}],' % port_name
if ng.floating_ip_pool:
yield self._serialize_neutron_floating(inst_name, port_name,
ng.floating_ip_pool)
else:
if ng.floating_ip_pool:
yield self._serialize_nova_floating(inst_name,
ng.floating_ip_pool)
if ng.security_groups:
security_groups = (
'"security_groups": %s,' % json.dumps(
self._get_security_groups(ng)))
# Check if cluster contains user key-pair and include it to template.
key_name = ''
if self.cluster.user_keypair_id:
key_name = '"key_name" : "%s",' % self.cluster.user_keypair_id
gen_userdata_func = self.node_groups_extra[ng.id]['gen_userdata_func']
userdata = gen_userdata_func(ng, inst_name)
fields = {'instance_name': inst_name,
'flavor_id': ng.flavor_id,
'image_id': ng.get_image_id(),
'image_username': ng.image_username,
'network_interfaces': nets,
'key_name': key_name,
'userdata': _prepare_userdata(userdata),
'scheduler_hints':
                      self._get_anti_affinity_scheduler_hints(ng),
'security_groups': security_groups}
yield _load_template('instance.heat', fields)
for idx in range(0, ng.volumes_per_node):
yield self._serialize_volume(inst_name, idx, ng.volumes_size)
    def _serialize_port(self, port_name, fixed_net_id, security_groups):
fields = {'port_name': port_name,
'fixed_net
|
remybaranx/qtaste
|
tools/jython/lib/Lib/encodings/cp437.py
|
Python
|
gpl-3.0
| 7,146
| 0.039603
|
""" Python Character Mapping Codec generated from 'CP437.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
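# Illustrative usage sketch (not part of the original module), under the
# Python 2 string model this Jython library targets:
#   Codec().decode('\x82')   # -> (u'\xe9', 1), LATIN SMALL LETTER E WITH ACUTE
#   Codec().encode(u'\xe9')  # -> ('\x82', 1)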
|
mozilla/captain
|
vendor/lib/python/commonware/request/tests.py
|
Python
|
mpl-2.0
| 2,104
| 0
|
from django.conf import settings
import mock
from nose.tools import eq_
from test_utils import RequestFactory
from commonware.request.middleware import (SetRemoteAddrFromForwardedFor,
is_valid)
mw = SetRemoteAddrFromForwardedFor()
def get_req():
req = RequestFactory().get('/')
req.META['HTTP_X_FORWARDED_FOR'] = '1.2.3.4, 2.3.4.5'
req.META['REMOTE_ADDR'] = '127.0.0.1'
return req
def test_xff():
req = get_req()
mw.process_request(req)
eq_('127.0.0.1', req.META['REMOTE_ADDR'])
@mock.patch.object(settings._wrapped, 'KNOWN_PROXIES', ['127.0.0.1'])
def test_xff_known():
req = get_req()
mw.process_request(req)
eq_('2.3.4.5', req.META['REMOTE_ADDR'])
req = get_req()
del req.META['HTTP_X_FORWARDED_FOR']
mw.process_request(req)
eq_('127.0.0.1', req.META['REMOTE_ADDR'])
@mock.patch.object(settings._wrapped, 'KNOWN_PROXIES',
['127.0.0.1', '2.3.4.5'])
def test_xff_multiknown():
req = get_req()
mw.process_request(req)
eq_('1.2.3.4', req.META['REMOTE_ADDR'])
@mock.patch.object(settings._wrapped, 'KNOWN_PROXIES', ['127.0.0.1'])
def test_xff_bad_address():
req = get_req()
req.META['HTTP_X_FORWARDED_FOR'] += ',foobar'
mw.process_request(req)
eq_('2.3.4.5', req.META['REMOTE_ADDR'])
@mock.patch.object(settings._wrapped, 'KNOWN_PROXIES',
['127.0.0.1', '2.3.4.5'])
def test_xff_all_known():
"""If all the remotes are known, use the last one.""
|
"
req = get_req()
req.META['HTTP_X_FORWARDED_FOR'] = '2.3.4.5'
mw.process_request(req)
eq_('2.3.4.5', req.META['REMOTE_ADDR'])
def test_is_valid():
"""IPv4 and IPv6 addresses are OK."""
tests = (
('1.2.3.4', True),
('2.3.4.5', True),
('foobar', False),
('4.256.4.12', False),
        ('fe80::a00:27ff:fed5:56e0', True),
('fe80::a00:277ff:fed5:56e0', False),
('fe80::a00:27ff:ged5:56e0', False),
)
def _check(i, v):
eq_(v, is_valid(i))
for i, v in tests:
yield _check, i, v
|
jeh/mopidy-gmusic
|
mopidy_gmusic/library.py
|
Python
|
apache-2.0
| 11,350
| 0.00141
|
from __future__ import unicode_literals
import logging
import hashlib
from mopidy.backends import base
from mopidy.models import Artist, Album, Track, SearchResult
logger = logging.getLogger('mopidy.backends.gmusic')
class GMusicLibraryProvider(base.BaseLibraryProvider):
def find_exact(self, query=None, uris=None):
        if query is None:
query = {}
self._validate_query(query)
result_tracks = self.tracks.values()
for (field, values) in query.iteritems():
if not hasattr(values, '__iter__'):
values = [values]
# FIXME this is bound to be slow for large libraries
for value in values:
if field == 'track_no':
                    q = self._convert_to_int(value)
else:
q = value.strip()
uri_filter = lambda t: q == t.uri
track_name_filter = lambda t: q == t.name
album_filter = lambda t: q == getattr(t, 'album', Album()).name
artist_filter = lambda t: filter(
lambda a: q == a.name, t.artists) or filter(
lambda a: q == a.name, getattr(t, 'album',
Album()).artists)
albumartist_filter = lambda t: any([
q == a.name
for a in getattr(t.album, 'artists', [])])
track_no_filter = lambda t: q == t.track_no
date_filter = lambda t: q == t.date
any_filter = lambda t: (
uri_filter(t) or
track_name_filter(t) or
album_filter(t) or
artist_filter(t) or
albumartist_filter(t) or
date_filter(t))
if field == 'uri':
result_tracks = filter(uri_filter, result_tracks)
elif field == 'track_name':
result_tracks = filter(track_name_filter, result_tracks)
elif field == 'album':
result_tracks = filter(album_filter, result_tracks)
elif field == 'artist':
result_tracks = filter(artist_filter, result_tracks)
elif field == 'albumartist':
result_tracks = filter(albumartist_filter, result_tracks)
elif field == 'track_no':
result_tracks = filter(track_no_filter, result_tracks)
elif field == 'date':
result_tracks = filter(date_filter, result_tracks)
elif field == 'any':
result_tracks = filter(any_filter, result_tracks)
else:
raise LookupError('Invalid lookup field: %s' % field)
return SearchResult(uri='gmusic:search', tracks=result_tracks)
def lookup(self, uri):
if uri.startswith('gmusic:track:'):
return self._lookup_track(uri)
elif uri.startswith('gmusic:album:'):
return self._lookup_album(uri)
elif uri.startswith('gmusic:artist:'):
return self._lookup_artist(uri)
else:
return []
def _lookup_track(self, uri):
if uri.startswith('gmusic:track:T'):
song = self.backend.session.get_track_info(uri.split(':')[2])
if song is None:
return []
return [self._aa_to_mopidy_track(song)]
try:
return [self.tracks[uri]]
except KeyError:
logger.debug('Failed to lookup %r', uri)
return []
def _lookup_album(self, uri):
try:
album = self.albums[uri]
except KeyError:
logger.debug('Failed to lookup %r', uri)
return []
tracks = self.find_exact(
dict(album=album.name,
artist=[artist.name for artist in album.artists],
date=album.date)).tracks
return sorted(tracks, key=lambda t: (t.disc_no,
t.track_no))
def _lookup_artist(self, uri):
try:
artist = self.artists[uri]
except KeyError:
logger.debug('Failed to lookup %r', uri)
return []
tracks = self.find_exact(
dict(artist=artist.name)).tracks
return sorted(tracks, key=lambda t: (t.album.date,
t.album.name,
t.disc_no,
t.track_no))
def refresh(self, uri=None):
self.tracks = {}
self.albums = {}
self.artists = {}
for song in self.backend.session.get_all_songs():
self._to_mopidy_track(song)
def search(self, query=None, uris=None):
if query is None:
query = {}
self._validate_query(query)
result_tracks = self.tracks.values()
for (field, values) in query.iteritems():
if not hasattr(values, '__iter__'):
values = [values]
# FIXME this is bound to be slow for large libraries
for value in values:
if field == 'track_no':
q = self._convert_to_int(value)
else:
q = value.strip().lower()
uri_filter = lambda t: q in t.uri.lower()
track_name_filter = lambda t: q in t.name.lower()
album_filter = lambda t: q in getattr(
t, 'album', Album()).name.lower()
artist_filter = lambda t: filter(
lambda a: q in a.name.lower(), t.artists) or filter(
lambda a: q in a.name, getattr(t, 'album',
Album()).artists)
albumartist_filter = lambda t: any([
q in a.name.lower()
for a in getattr(t.album, 'artists', [])])
track_no_filter = lambda t: q == t.track_no
date_filter = lambda t: t.date and t.date.startswith(q)
any_filter = lambda t: (
uri_filter(t) or
track_name_filter(t) or
album_filter(t) or
artist_filter(t) or
albumartist_filter(t) or
date_filter(t))
if field == 'uri':
result_tracks = filter(uri_filter, result_tracks)
elif field == 'track_name':
result_tracks = filter(track_name_filter, result_tracks)
elif field == 'album':
result_tracks = filter(album_filter, result_tracks)
elif field == 'artist':
result_tracks = filter(artist_filter, result_tracks)
elif field == 'albumartist':
result_tracks = filter(albumartist_filter, result_tracks)
elif field == 'track_no':
result_tracks = filter(track_no_filter, result_tracks)
elif field == 'date':
result_tracks = filter(date_filter, result_tracks)
elif field == 'any':
result_tracks = filter(any_filter, result_tracks)
else:
raise LookupError('Invalid lookup field: %s' % field)
result_artists = set()
result_albums = set()
for track in result_tracks:
result_artists |= track.artists
result_albums.add(track.album)
return SearchResult(uri='gmusic:search',
tracks=result_tracks,
artists=result_artists,
albums=result_albums)
def _validate_query(self, query):
for (_, values) in query.iteritems():
if not values:
raise LookupError('Missing query')
for value in values:
if not value:
raise LookupError('Missing query')
def _to_mopidy_track(self, song):
uri = 'gmusic:t
|
nkgilley/home-assistant
|
homeassistant/components/simulated/sensor.py
|
Python
|
apache-2.0
| 4,609
| 0.000434
|
"""Adds a simulated sensor."""
from datetime import datetime
import logging
import math
from random import Random
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_AMP = "amplitude"
CONF_FWHM = "spread"
CONF_MEAN = "mean"
CONF_PERIOD = "period"
CONF_PHASE = "phase"
CONF_SEED = "seed"
CONF_UNIT = "unit"
CONF_RELATIVE_TO_EPOCH = "relative_to_epoch"
DEFAULT_AMP = 1
DEFAULT_FWHM = 0
DEFAULT_MEAN = 0
DEFAULT_NAME = "simulated"
DEFAULT_PERIOD = 60
DEFAULT_PHASE = 0
DEFAULT_SEED = 999
DEFAULT_UNIT = "value"
DEFAULT_RELATIVE_TO_EPOCH = True
ICON = "mdi:chart-line"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AMP, default=DEFAULT_AMP): vol.Coerce(float),
vol.Optional(CONF_FWHM, default=DEFAULT_FWHM): vol.Coerce(float),
vol.Optional(CONF_MEAN, default=DEFAULT_MEAN): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.positive_int,
vol.Optional(CONF_PHASE, default=DEFAULT_PHASE): vol.Coerce(float),
vol.Optional(CONF_SEED, default=DEFAULT_SEED): cv.positive_int,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): cv.string,
vol.Optional(
CONF_RELATIVE_TO_EPOCH, default=DEFAULT_RELATIVE_TO_EPOCH
): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the simulated sensor."""
name = config.get(CONF_NAME)
unit = config.get(CONF_UNIT)
amp = config.get(CONF_AMP)
mean = config.get(CONF_MEAN)
period = config.get(CONF_PERIOD)
phase = config.get(CONF_PHASE)
fwhm = config.get(CONF_FWHM)
seed = config.get(CONF_SEED)
relative_to_epoch = config.get(CONF_RELATIVE_TO_EPOCH)
sensor = SimulatedSensor(
name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
)
add_entities([sensor], True)
class SimulatedSensor(Entity):
"""Class for simulated sensor."""
def __init__(
self, name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
):
"""Init the class."""
self._name = name
self._unit = unit
self._amp = amp
self._mean = mean
self._period = period
self._phase = phase # phase in degrees
self._fwhm = fwhm
self._seed = seed
self._random = Random(seed) # A local seeded Random
|
self._start_time = (
datetime(1970, 1, 1, tzinfo=dt_util.UTC)
if relative_to_epoch
else dt_util.utcnow()
)
self._relative_to_epoch = relative_to_epoch
self._state = None
def time_delta(self):
"""Return the time delta."""
dt0 = self._start_time
dt1 = dt_util.utcnow()
return dt1 - dt0
def signal_calc(self):
"""Calculate the signal."""
mean = self._mean
amp = self._amp
        time_delta = self.time_delta().total_seconds() * 1e6  # to microseconds
        period = self._period * 1e6  # to microseconds
fwhm = self._fwhm / 2
phase = math.radians(self._phase)
if period == 0:
periodic = 0
else:
periodic = amp * (math.sin((2 * math.pi * time_delta / period) + phase))
noise = self._random.gauss(mu=0, sigma=fwhm)
return round(mean + periodic + noise, 3)
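    # Sketch of the formula implemented above (names follow the configuration
    # options of this platform; the phase is used in radians here):
    #   state = mean + amplitude * sin(2*pi*t/period + phase) + N(0, spread/2)
    # with t the elapsed time since the configured start, and the periodic
    # term dropped when the period is zero.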
async def async_update(self):
"""Update the sensor."""
self._state = self.signal_calc()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attr = {
"amplitude": self._amp,
"mean": self._mean,
"period": self._period,
"phase": self._phase,
"spread": self._fwhm,
"seed": self._seed,
"relative_to_epoch": self._relative_to_epoch,
}
return attr
|
molmod/zeobuilder
|
share/plugins/basic/point.py
|
Python
|
gpl-3.0
| 7,413
| 0.002698
|
# -*- coding: utf-8 -*-
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from zeobuilder import context
from zeobuilder.actions.composed import Immediate
from zeobuilder.actions.abstract import AddBase
from zeobuilder.actions.collections.menu import MenuInfo
from zeobuilder.nodes.meta import Property
from zeobuilder.nodes.elementary import GLGeometricBase
from zeobuilder.nodes.model_object import ModelObjectInfo
from zeobuilder.nodes.color_mixin import ColorMixin
from zeobuilder.nodes.glmixin import GLTransformationMixin
from zeobuilder.gui.fields_dialogs import DialogFieldInfo
import zeobuilder.gui.fields as fields
import zeobuilder.actions.primitive as primitive
import zeobuilder.authors as authors
from molmod import Translation
import numpy
class Point(GLGeometricBase, ColorMixin):
info = ModelObjectInfo("plugins/basic/point.svg")
authors = [authors.toon_verstraelen]
def initnonstate(self):
GLGeometricBase.initnonstate(self, Translation)
#
# Properties
#
def set_spike_length(self, spike_length, init=False):
self.spike_length = spike_length
if not init:
self.invalidate_draw_list()
self.invalidate_boundingbox_list()
def set_spike_thickness(self, spike_thickness, init=False):
self.spike_thickness = spike_thickness
if not init:
self.invalidate_draw_list()
self.invalidate_boundingbox_list()
properties = [
Property("spike_length", 0.3, lambda self: self.spike_length, set_spike_length),
Property("spike_thickness", 0.1, lambda self: self.spike_thickness, set_spike_thickness),
]
#
# Dialog fields (see action EditProperties)
#
dialog_fields = set([
DialogFieldInfo("Geometry", (2, 7), fields.faulty.Length(
label_text="Spike length",
attribute_name="spike_length",
low=0.0,
low_inclusive=False
)),
DialogFieldInfo("Geometry", (2, 8), fields.faulty.Length(
label_text="Spike thickness",
attribute_name="spike_thickness",
low=0.0,
low_inclusive=False
))
])
#
# Draw
#
def draw_spike(self):
ColorMixin.draw(self)
vb = context.application.vis_backend
vb.draw_quad_strip((
numpy.array([0.5773502692, -0.5773502692, -0.5773502692]),
[numpy.array([self.spike_length, self.spike_length, self.spike_length])]
), (
numpy.array([1, 0, 0]),
[numpy.array([self.spike_thickness, 0, 0])]
), (
numpy.array([-0.5773502692, 0.5773502692, -0.5773502692]),
[numpy.array([self.spike_length, self.spike_length, self.spike_length])]
), (
numpy.array([0, 1, 0]),
[numpy.array([0, self.spike_thickness, 0])]
), (
numpy.array([-0.5773502692, -0.5773502692, 0.5773502692]),
[numpy.array([self.spike_length, self.spike_length, self.spike_length])]
), (
numpy.array([0, 0, 1]),
[numpy.array([0, 0, self.spike_thickness])]
), (
numpy.array([0.5773502692, -0.5773502692, -0.5773502692]),
[numpy.array([self.spike_length, self.spike_length, self.spike_length])]
), (
numpy.array([1, 0, 0]),
[numpy.array([self.spike_thickness, 0, 0])]
))
def draw(self):
GLGeometricBase.draw(self)
vb = context.application.vis_backend
vb.push_matrix()
for i in range(2):
for i in range(4):
self.draw_spike()
vb.rotate(90, 1.0, 0.0, 0.0)
vb.rotate(180, 0.0, 1.0, 0.0)
vb.pop_matrix()
#
# Revalidation
#
def revalidate_bounding_box(self):
GLGeometricBase.revalidate_bounding_box(self)
self.bounding_box.extend_with_corners(numpy.array([
[-self.spike_length, -self.spike_length, -self.spike_length],
[ self.spike_length, self.spike_length, self.spike_length]
]))
class AddPoint(AddBase):
description = "Add point"
menu_info = MenuInfo("default/_Object:tools/_Add:3d", "_Point", image_name="plugins/basic/point.svg", order=(0, 4, 1, 0, 0, 2))
authors = [authors.toon_verstraelen]
@staticmethod
def analyze_selection():
return AddBase.analyze_selection(Point)
def do(self):
AddBase.do(self, Point)
class CalculateAverage(Immediate):
description = "Add point at average"
menu_info = MenuInfo("default/_Object:tools/_Add:special", "_Point at average", order=(0, 4, 1, 0, 2, 0))
authors = [authors.toon_verstraelen]
@staticmethod
def analyze_selection():
# A) calling ancestor
if not Immediate.analyze_selection(): return False
# B) validating and initialising
cache = context.application.cache
if len(cache.nodes) == 0: return False
if len(cache.translations) == 0: return False
parent = cache.common_parent
if parent == None: return False
while not parent.check_add(Point):
parent = parent.par
|
ent
if parent == None: return False
# C) passed all tests:
return True
def do(self):
cache = context.application.cache
parent = cache.common_parent
while not parent.check_add(Point):
parent = parent.parent
vector_sum = numpy.zeros(3, float)
num_vectors = 0
for node in cache.nodes:
if isinstance(node, GLTransformationMixin) and \
isinstance(node.transformation, Translation):
|
vector_sum += node.get_frame_relative_to(parent).t
num_vectors += 1
point = Point(name="Average", transformation=Translation(vector_sum / num_vectors))
primitive.Add(point, parent)
nodes = {
"Point": Point
}
actions = {
"AddPoint": AddPoint,
"CalculateAverage": CalculateAverage,
}
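# --- Editor's note: illustrative sketch, not part of the original plugin. ---
# CalculateAverage.do above boils down to averaging the translation vectors of
# the selected nodes. A minimal standalone version of that step, reusing the
# numpy import at the top of this file (the sample vectors are made up):
def _average_translation_example(vectors):
    """Return the mean of a list of 3-component translation vectors."""
    vector_sum = numpy.zeros(3, float)
    for v in vectors:
        vector_sum += v
    return vector_sum / len(vectors)
# _average_translation_example([numpy.array([0.0, 0.0, 0.0]),
#                               numpy.array([2.0, 0.0, 0.0])])
# -> array([ 1.,  0.,  0.])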
|
semk/iDiscover
|
idiscover/discover.py
|
Python
|
mit
| 1,840
| 0.00163
|
# -*- coding: utf-8 -*-
#
# Discover the target host types in the subnet
#
# @author: Sreejith Kesavan <sreejithemk@gmail.com>
import arp
import oui
import ipcalc
import sys
class Discovery(object):
""" Find out the host types in the Ip range (CIDR)
NOTE: This finds mac ad
|
dresses only within the subnet.
It doesn't fetch mac addresses for routed network ip's.
"""
def __init__(self):
self.__arp = arp.ARP()
self.__oui = oui.OUI()
def discover(self, address):
"""
Traverse the IP subnets and return manufacturer info.
"""
network = ipcalc.Network(address)
|
for ip in network:
ip = str(ip)
# Ignore broadcast IP Addresses
if '/' in address and ip == str(network.broadcast()):
print 'Ignoring broadcast ip: {broadcast}'.format(broadcast=str(network.broadcast()))
continue
mac = self.__arp.find_mac(ip)
if mac:
if len(mac.split(':')[0]) == 1:
mac = '0' + mac
manuf_str = mac.replace(':', '')[:6].upper()
manuf = self.__oui.find_manuf(manuf_str)
if manuf:
yield (ip, manuf)
def run():
if len(sys.argv) < 2:
print
print 'Usage:\t\tidiscover <ip-address/cidr>'
print 'Examples:'
print '\t\tidiscover 10.73.19.0'
print '\t\tidiscover 10.74.215/24'
print
else:
addrs = sys.argv[1:]
d = Discovery()
try:
for addr in addrs:
for ip, manuf in d.discover(addr):
print 'IP Address: {ip} Manufacturer: {manuf}'.format(ip=ip, manuf=manuf)
except KeyboardInterrupt:
print 'Exiting...'
if __name__ == '__main__':
run()
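# --- Editor's note: illustrative usage, not part of the original script. ---
# Discovery can also be driven programmatically, mirroring what run() does for a
# single argument; the CIDR below is only an example.
# d = Discovery()
# for ip, manuf in d.discover('10.74.215.0/24'):
#     print 'IP Address: {ip} Manufacturer: {manuf}'.format(ip=ip, manuf=manuf)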
|
CounterpartyXCP/counterblock
|
counterblock/lib/modules/dex/__init__.py
|
Python
|
mit
| 32,660
| 0.004195
|
"""
Implements counterwallet asset-related support as a counterblock plugin
DEPENDENCIES: This module requires the assets module to be loaded before it.
Python 2.x, as counterblock is still python 2.x
"""
import os
import sys
import time
import datetime
import logging
import decimal
import urllib.request
import urllib.parse
import urllib.error
import json
import operator
import base64
import configparser
import calendar
import pymongo
from bson.son import SON
import dateutil.parser
from counterblock.lib import config, util, blockfeed, blockchain
from counterblock.lib.modules import DEX_PRIORITY_PARSE_TRADEBOOK
from counterblock.lib.processor import MessageProcessor, MempoolMessageProcessor, BlockProcessor, StartUpProcessor, CaughtUpProcessor, RollbackProcessor, API, start_task
from . import assets_trading, dex
D = decimal.Decimal
EIGHT_PLACES = decimal.Decimal(10) ** -8
COMPILE_MARKET_PAIR_INFO_PERIOD = 10 * 60 # in seconds (this is every 10 minutes currently)
COMPILE_ASSET_MARKET_INFO_PERIOD = 30 * 60 # in seconds (this is every 30 minutes currently)
logger = logging.getLogger(__name__)
@API.add_method
def get_market_price_summary(asset1, asset2, with_last_trades=0):
# DEPRECATED 1.5
result = assets_trading.get_market_price_summary(asset1, asset2, with_last_trades)
return result if result is not None else False
#^ due to current bug in our jsonrpc stack, just return False if None is returned
@API.add_method
def get_market_cap_history(start_ts=None, end_ts=None):
now_ts = calendar.timegm(time.gmtime())
if not end_ts: # default to current datetim
|
e
end_ts = now_ts
if not start_ts: # default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
data = {}
results = {}
#^ format is result[market_cap_as][asset] = [[block_time, market_cap], [block_time2, market_cap2], ...]
for market_cap_as in (config.XCP, config.BTC):
caps = config.mongo_db.asset_marketcap_history.aggregate([
{"$match"
|
: {
"market_cap_as": market_cap_as,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"asset": 1,
"market_cap": 1,
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"asset": "$asset", "year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"market_cap": {"$avg": "$market_cap"}, # use the average marketcap during the interval
}},
])
data[market_cap_as] = {}
for e in caps:
interval_time = int(calendar.timegm(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000)
data[market_cap_as].setdefault(e['_id']['asset'], [])
data[market_cap_as][e['_id']['asset']].append([interval_time, e['market_cap']])
results[market_cap_as] = []
for asset in data[market_cap_as]:
#for z in data[market_cap_as][asset]: assert z[0] and z[0] > 0 and z[1] and z[1] >= 0
results[market_cap_as].append(
{'name': asset, 'data': sorted(data[market_cap_as][asset], key=operator.itemgetter(0))})
return results
@API.add_method
def get_market_info(assets):
assets_market_info = list(config.mongo_db.asset_market_info.find({'asset': {'$in': assets}}, {'_id': 0}))
extended_asset_info = config.mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): # skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for a in assets_market_info:
if a['asset'] in extended_asset_info_dict and extended_asset_info_dict[a['asset']].get('processed', False):
extended_info = extended_asset_info_dict[a['asset']]
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
a['extended_pgpsig'] = extended_info.get('pgpsig', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = a['extended_pgpsig'] = ''
return assets_market_info
@API.add_method
def get_market_info_leaderboard(limit=100):
"""returns market leaderboard data for both the XCP and BTC markets"""
# do two queries because we limit by our sorted results, and we might miss an asset with a high BTC trading value
# but with little or no XCP trading activity, for instance if we just did one query
assets_market_info_xcp = list(config.mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.XCP.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info_btc = list(config.mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.BTC.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info = {
config.XCP.lower(): [a for a in assets_market_info_xcp if a['price_in_{}'.format(config.XCP.lower())]],
config.BTC.lower(): [a for a in assets_market_info_btc if a['price_in_{}'.format(config.BTC.lower())]]
}
# throw on extended info, if it exists for a given asset
assets = list(set([a['asset'] for a in assets_market_info[config.XCP.lower()]] + [a['asset'] for a in assets_market_info[config.BTC.lower()]]))
extended_asset_info = config.mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): # skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for r in (assets_market_info[config.XCP.lower()], assets_market_info[config.BTC.lower()]):
for a in r:
if a['asset'] in extended_asset_info_dict:
extended_info = extended_asset_info_dict[a['asset']]
if 'extended_image' not in a or 'extended_description' not in a or 'extended_website' not in a:
continue # asset has been recognized as having a JSON file description, but has not been successfully processed yet
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = ''
return assets_market_info
@API.add_method
def get_market_price_history(asset1, asset2, start_ts=None, end_ts=None, as_dict=False):
"""Return block-by-block aggregated market history data for the specified asset pair, within the specified date range.
@returns List of lists (or list of dicts, if as_dict is specified).
* If as_dict is False, each embedded list has 8 elements [block time (epoch in MS), open, high, low, close, volume, # trades in block, block index]
* If as_dict is True, each dict in the list has the keys: block_time (epoch in MS), block_index, open, high, low, close, vol, count
    Aggregate on an hourly basis
"""
now_ts = calendar.timegm(time.gmtime())
if not end_ts: # default to current datetime
end_ts = now_ts
if not start_ts: # default to 180 days before the end date
start_ts = end_ts - (180 * 24 * 60 * 60)
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
# get ticks -- open, high, low, close, volume
result = c
|
sirmarcel/floq
|
benchmark/museum_of_evolution/uncompiled_floq/parametric_system.py
|
Python
|
mit
| 2,852
| 0.002454
|
import floq.core.fixed_system as fs
import floq.evolution as ev
import floq.errors as er
import floq.helpers.index as h
class ParametricSystemBase(object):
"""
Base class to specify a physical system that still has open parameters,
such as the control amplitudes, the control duration, or other arbitrary
parameters in the Hamiltonian.
This needs to be sub-classed, and a subclass should provide:
- get_system(controls)
"""
def get_system(self, controls, t):
raise NotImplementedError("get_system not implemented.")
def is_nz_ok(self, controls, t):
system = self.get_system(controls, t)
try:
u = ev.evolve_system(system)
except er.EigenvalueNumberError:
return False
return h.is_unitary(u)
def set_nz(self, controls, t):
if self.is_nz_ok(controls, t):
self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/5))
self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/10))
self.decrease_nz_until_not_ok(controls, t, step=2)
self.increase_nz_until_ok(controls, t, step=2)
else:
self.increase_nz_until_ok(controls, t, step=max(10, self.nz/5))
self.decrease_nz_until_not_ok(controls, t, step=2)
self.increase_nz_until_ok(controls, t, step=2)
def increase_nz_until_ok(self, controls, t, step=2):
while self.is_nz_ok(controls, t) is False:
self.nz += h.make_even(step)
def decrease_nz_until_not_ok(self, controls, t, step=2):
while self.is_nz_ok(controls
|
, t) and self.nz-step > 3:
self.nz -= h.make_even(step)
class ParametricSystemWithFunctions(ParametricSystemBase):
"""
A system with parametric hf and dhf, which are passed as callables to the constructor.
    hf has to have the form hf(controls, parameters, omega)
"""
def __init__(self, hf, dhf, nz, omega, parameters):
"""
hf: callable hf(controls,parameters,omega)
dhf: callable dhf(controls,parameters,omega)
omega: 2 pi/T, the period of the Hamil
|
tonian
nz: number of Fourier modes to be considered during evolution
parameters: a data structure that holds parameters for hf and dhf
(dictionary is probably the best idea)
"""
self.hf = hf
self.dhf = dhf
self.omega = omega
self.nz = nz
self.parameters = parameters
def calculate_hf(self, controls):
return self.hf(controls, self.parameters, self.omega)
def calculate_dhf(self, controls):
return self.dhf(controls, self.parameters, self.omega)
def get_system(self, controls, t):
hf = self.calculate_hf(controls)
dhf = self.calculate_dhf(controls)
return fs.FixedSystem(hf, dhf, self.nz, self.omega, t)
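# --- Editor's note: hedged usage sketch, not part of the original module. ---
# ParametricSystemWithFunctions expects hf and dhf with the documented signature
# hf(controls, parameters, omega); the callables and numbers below are placeholders.
#
# def my_hf(controls, parameters, omega):
#     ...  # assemble the Fourier components of the Hamiltonian
#
# def my_dhf(controls, parameters, omega):
#     ...  # assemble the derivatives with respect to the controls
#
# system = ParametricSystemWithFunctions(my_hf, my_dhf, nz=11, omega=2.0,
#                                        parameters={'detuning': 0.1})
# fixed = system.get_system(controls, t)  # wraps everything in a fs.FixedSystem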
|
Swabboy/ImageDateSort
|
ImageDateSort.py
|
Python
|
gpl-3.0
| 7,213
| 0.014973
|
# Copyright 2015 Ian Lynn
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PIL import Image
from PIL.ExifTags import TAGS
from os.path import expanduser
import sys
import os
import shutil
import getopt
import fnmatch
#**********Show Error Message**********
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
#**********Show Error Message**********
#**********Extract EXIF Data**********
def get_exif(fn):
ret = {}
i = Image.open(fn)
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
#**********Extract EXIF Data**********
#**********Parse EXIF Date Stamp**********
def parseDateStamp(tag):
tagDate = tag[:tag.index(" ")]
tagYear = tagDate[:tagDate.index(":")]
tagMonth = tagDate[tagDate.index(":")+1:tagDate.rindex(":")]
tagDay = tagDate[tagDate.rindex(":")+1:]
return (tagYear,tagMonth,tagDay)
#**********Parse EXIF Date Stamp**********
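#***Editor's note: illustrative example only, the value below is made up***
#***parseDateStamp expects the EXIF "DateTime" format "YYYY:MM:DD HH:MM:SS":***
#***parseDateStamp("2014:12:24 18:30:05") returns ("2014", "12", "24")***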
#**********Get list of images**********
def locateImages(pattern, rootDir, recur=False):
if recur:
for path, dirs, files in os.walk(rootDir):
for fileName in fnmatch.filter(files, "*." + pattern):
x = os.path.join(path, fileName)
yield x
else:
fileList = os.listdir(rootDir)
for fileName in fileList:
if os.path.splitext(fileName)[1] == "." + pattern:
x = os.path.join(rootDir, fileName)
yield x
#**********Get list of images**********
#**********Copy Image to Output Directory**********
def SortImage(outputDir, dateTag, modelTag, imageFile, deleteOrg=False):
#***Create directory path(Ex: ..../2014/14.12.24/CameraX)
dateDir = os.path.join(outputDir,dateTag[0],dateTag[0][2:] + "." + dateTag[1] + "." + dateTag[2],modelTag)
#***Create output directory if it does not exist
try:
if not os.path.isdir(dateDir):
os.makedirs(dateDir)
except Exception, err:
print "Unable to create output directory"
print err
return 0
#***Copy the files and remove
try:
shutil.copy2(imageFile, dateDir)
try:
if deleteOrg:
os.remove(imageFile)
except shutil.Error, err:
print err
except shutil.Error, err:
print err
#**********Check if Output Directory Exists**********
#**********Process File**********
def ProcessFile(imageFile, modelDir=False):
try:
#print imageFile
#***Get exif data
exifData = get_exif(imageFile)
except Exception, err:
print "EXIF Tag does not exist for " + os.path.basename(imageFile)
return 0
#***Get date tag
dateStamp = exifData["DateTime"]
#***Parse the date tag
parsedDateStamp = parseDateStamp(dateStamp)
#***Get the camera model tag
if modelDir:
try:
modelStamp = exifData["Model"]
if modelStamp == "" or modelStamp == None:
modelStamp = "Unknown"
except:
modelStamp = "Unknown"
else:
modelStamp=""
return (parsedDateStamp, modelStamp)
#**********Process File**********
#**********Show Help Text**********
def showHelp():
f = open("HelpText.txt","r")
try:
helpFile = f.read()
print helpFile
except:
print "Can't open help text"
finally:
f.close()
#**********Show Help Text**********
#**********Main Function**********
def main(argv=None):
inputDir = os.curdir
fileName = None
delOriginal = False
imageExt = "jpg"
searchRecur = False
outputDir = expanduser("~") + "/Pictures"
numImagesProcessed = 0
modelDir = False
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "vhf:Ex:s:d:Rm", ["help"])
#***Check for help parameter
for opt, arg in opts:
if opt == "-h" or opt == "--help":
showHelp()
return 1
#***Process parameters
for opt, arg in opts:
if opt == "-f":
fileName = str(arg)
continue
if opt == "-E":
delOriginal = True
continue
if opt == "-m":
modelDir = True
continue
if opt == "-x":
imageExt = arg
continue
if opt == "-s":
inputDir = os.path.abspath(arg)
continue
if opt == "-d":
outputDir = os.path.abspath(arg)
try:
                        if not os.path.isdir(outputDir):
                            os.makedirs(outputDir)
except:
print "Unable to create output directory: " + outputDir
return 2
continue
if opt == "-R":
searchRecur = True
continue
#***Get list of images
if fileName == None:
print "Finding Images"
imageList = locateImages(imageExt, os.path.abspath(inputDir), searchRecur)
print "Processing Images"
for imageFile in imageList:
exifData = ProcessFile(imageFile,modelDir)
if exifData == 0:
continue
#***Sort the images into their respective directories
SortImage(outputDir,exifData[0],exifData[1],imageFile,delOriginal)
|
numImagesProcessed += 1
#***Complete***
print "Finished"
print "Number of images processed: " + str(numImagesProcessed)
else:
print "Processing Image"
exifData = ProcessFile(fileName,modelDir)
|
if exifData == 0:
print "No EXIF data found to sort image"
else:
SortImage(outputDir,exifData[0],exifData[1],fileName,delOriginal)
print "Finished"
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use -h or --help"
return 2
if __name__ == "__main__":
sys.exit(main())
#**********Main Function**********
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/mission/quest_item/shared_surlin_rolei_q1_recruitment_disk.py
|
Python
|
mit
| 511
| 0.043053
|
#### NOTICE: THIS FILE IS AUTOGENERATED
|
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_surlin_rolei_q1_recruitment_disk.iff"
result.attribute_template_id = -1
result.stfName("quest_naboo_plc_quest_n","surlin_rolei_q1_recruitment_disk")
#### BEGIN MODIFICATIONS ####
#### END MODIFIC
|
ATIONS ####
return result
|
tiagofrepereira2012/tensorflow
|
tensorflow/python/util/nest.py
|
Python
|
apache-2.0
| 21,628
| 0.00504
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python sequence, tuple (including `namedtuple`), or dict that can contain
further sequences, tuples, and dicts.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python.platform import tf_logging as _tf_logging
from tensorflow.python.util.all_util import remove_undocumented
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or
`collections.NamedDict`.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# For dictionaries with their values extracted, we always order the values
# by sorting the keys first (see note below). This code allows recreating
# e.g., `OrderedDict`s with their original key ordering.
result = dict(zip(sorted(_six.iterkeys(instance)), args))
return type(instance)((key, result[key]) for key in _six.iterkeys(instance))
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
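# Editor's note (illustrative, not part of the original module): worked examples
# for _sequence_like, following the docstring above.
#   _sequence_like((3, 4), [9, 10])           -> (9, 10)
#   _sequence_like({'b': 2, 'a': 1}, [9, 10]) -> {'a': 9, 'b': 10}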
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order. Note: we
# intentionally ignore the order in an `OrderedDict` because of the
# potential to introduce bugs if the user mixes ordered and plain dicts with
# the same keys. (This is based on experience.)
for key in sorted(_six.iterkeys(iterable)):
yield iterable[key]
else:
for value in iterable:
yield value
def _yield_flat_nest(nest):
for n in _yield_value(nest):
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
# Used by `_warn_once` to remember which warning messages have been given.
_ALREADY_WARNED = {}
def _warn_once(message):
"""Logs a warning message, once per unique string."""
if message not in _ALREADY_WARNED:
_ALREADY_WARNED[message] = True
_tf_logging.warning(message)
def is_sequence(seq):
"""Returns a true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the sequence is a not a string and is a collections.Sequence or a
dict.
"""
if isinstance(seq, dict):
return True
if isinstance(seq, set):
_warn_once("Sets are not currently considered sequences, but this may "
"change in the future, so consider avoiding using them.")
return (isinstance(seq, _collections.Sequence)
and not isinstance(seq, _six.string_types))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, tuple, or dict, then returns a single-element
list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object. Note, numpy
arrays are considered scalars.
Returns:
A Python list, the flattened versio
|
n of the input.
"""
if is_sequence(nest):
return list(_yield_flat_nest(nest))
else:
return [nest]
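# Editor's note (illustrative, not part of the original module): using the
# example structure from the module docstring above,
#   flatten(((3, 4), 5, (6, 7, (9, 10), 8))) -> [3, 4, 5, 6, 7, 9, 10, 8]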
def _recursive_assert_same_structure(nest1, nest2, check_types):
"""Helper function for `assert_same_structure`."""
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested str
|
ucture.\n\n"
"First structure: %s\n\nSecond structure: %s." % (nest1, nest2))
if not is_sequence_nest1:
return # finished checking
if check_types:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
if isinstance(nest1, dict):
keys1 = set(_six.iterkeys(nest1))
keys2 = set(_six.iterkeys(nest2))
if keys1 != keys2:
raise ValueError(
"The two dictionaries don't have the same set of keys. First "
"structure has keys {}, while second structure has keys {}."
.format(keys1, keys2))
nest1_as_sequence = [n for n in _yield_value(nest1)]
nest2_as_sequence = [n for n in _yield_value(nest2)]
for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence):
_recursive_assert_same_structure(n1, n2, check_types)
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as
well, including the keys of dictionaries. If set to `False`, for example
a list and a tuple of objects will look the same if they have the same
size.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements.\n\nFirst structure (%i elements): %s\n\n"
"Second structure (%i elements): %s"
% (len_nest1, nest1, len_nest2, nest2))
_recursive_assert_same_structure(nest1, nest2, check_types)
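# Editor's note (illustrative, not part of the original module):
#   assert_same_structure((1, (2, 3)), ('a', ('b', 'c')))     -> returns silently
#   assert_same_structure((1, (2, 3)), ('a', 'b'))            -> raises ValueError
#   assert_same_structure([1, 2], (1, 2), check_types=False)  -> returns silently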
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
    ValueError: If any key and value do not have the same structure, or if keys are
not unique.
"""
if not isinstance(dictionary, dict):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dic
|
nebril/fuel-web
|
nailgun/nailgun/test/integration/test_node_allocation_stats_handler.py
|
Python
|
apache-2.0
| 1,552
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file e
|
xcept in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required b
|
y applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
class TestHandlers(BaseIntegrationTest):
def _get_allocation_stats(self):
resp = self.app.get(
reverse('NodesAllocationStatsHandler'))
return resp.json_body
def test_allocation_stats_unallocated(self):
self.env.create_node(api=False)
stats = self._get_allocation_stats()
self.assertEqual(stats['total'], 1)
self.assertEqual(stats['unallocated'], 1)
def test_allocation_stats_total(self):
self.env.create_node(api=False)
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{
"pending_addition": True,
}
]
)
stats = self._get_allocation_stats()
self.assertEqual(stats['total'], 2)
self.assertEqual(stats['unallocated'], 1)
|
pycket/pycket
|
pycket/prims/struct_structinfo.py
|
Python
|
mit
| 11,050
| 0.006063
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from pycket import impersonators as imp
from pycket import values
from pycket import values_parameter, values_struct
from pycket.arity import Arity
from pycket.error import SchemeException
from pycket.prims.expose import unsafe, default, expose, expose_val
expose_val("current-inspector", values_struct.current_inspector_param)
expose_val("current-code-inspector", values_struct.current_inspector_param)
@expose("make-inspector", [default(values_struct.W_StructInspector, None)], simple=False)
def do_make_instpector(w_inspector, env, cont):
from pycket.interpreter import return_value
if w_inspector is None:
w_inspector = values_struct.current_inspector_param.get(cont)
new_inspector = values_struct.W_StructInspector.make(w_inspector)
return return_value(new_inspector, env, cont)
@expose("make-sibling-inspector", [default(values_struct.W_StructInspector, None)], simple=False)
def do_make_sibling_instpector(w_inspector, env, cont):
from pycket.interpreter import return_value
if w_inspector is None:
w_inspector = values_struct.current_inspector_param.get(cont)
new_inspector = values_struct.W_StructInspector.make(w_inspector, issibling=True)
return return_value(new_inspector, env, cont)
@expose("inspector-superior?", [values_struct.W_StructInspector, values_struct.W_StructInspector])
def inspector_superior_huh(w_inspector, maybe_subinspector):
if w_inspector is maybe_subinspector:
return values.w_false
s = maybe_subinspector.w_super
while(s is not None):
if w_inspector is s:
return values.w_true
s = s.w_super
return values.w_false
@expose("struct?", [values.W_Object], simple=False)
def do_is_struct(v, env, cont):
from pycket.interpreter import return_value
current_inspector = values_struct.current_inspector_param.get(cont)
if isinstance(v, values_struct.W_RootStruct):
if current_inspector.has_control(v.struct_type()):
return return_value(values.w_true, env, cont)
return return_value(values.w_false, env, cont)
@expose("struct-info", [values.W_Object], simple=False)
def do_struct_info(v, env, cont):
from pycket.interpreter import return_multi_vals
current_inspector = values_struct.current_inspector_param.get(cont)
if (isinstance(v, values_struct.W_RootStruct) and
current_inspector.has_control(v.struct_type())):
return v.get_struct_info(env, cont)
return return_multi_vals(
values.Values.make([values.w_false, values.w_true]), env, cont)
struct_info = do_struct_info.w_prim
@expose("struct-type-info", [values_struct.W_StructType], simple=False)
def do_struct_type_info(struct_type, env, cont):
from pycket.interpreter import return_value
return return_value(values.Values.make(struct_type.struct_type_info(cont)), env, cont)
@expose("struct-type-make-constructor", [values_struct.W_StructType], simple=False)
def do_struct_type_make_constructor(struct_type, env, cont):
from pycket.interpreter import return_value
current_inspector = values_struct.current_inspector_param.get(cont)
if not current_inspector.has_control(struct_type):
# TODO: we should raise exn:fail:contract
raise SchemeException("fail_contract")
return return_value(struct_type.constructor, env, cont)
@expose("struct-type-make-predicate", [values_struct.W_StructType], simple=False)
def do_struct_type_make_predicate(struct_type, env, cont):
from pycket.interpreter import return_value
current_inspector = values_struct.current_inspector_param.get(cont)
if not current_inspector.has_control(struct_type):
# TODO: we should raise exn:fail:contract
raise SchemeException("fail_contract")
return return_value(struct_type.predicate, env, cont)
@expose("make-struct-type",
[values.W_Symbol, values.W_Object, values.W_Fixnum, values.W_Fixnum,
default(values.W_Object, values.w_false),
default(values.W_Object, values.w_null),
default(values.W_Object, None),
default(values.W_Object, values.w_false),
default(values.W_List, values.w_null),
default(values.W_Object, values.w_false),
default(values.W_Object, values.w_false)], simple=False)
def do_make_struct_type(w_name, w_super_type, w_init_field_count,
w_auto_field_count, w_auto_value, w_properties, w_inspector,
w_proc_spec, w_immutables, w_guard, w_constructor_name,
env, cont):
if w_inspector is None:
w_inspector = values_struct.current_inspector_param.get(cont)
if (w_constructor_name is not values.w_false and
not isinstance(w_constructor_name, values.W_Symbol)):
raise SchemeException("make-struct-type: constructor name mustbe be symbol? or #f")
if not (isinstance(w_super_type, values_struct.W_StructType) or
w_super_type is values.w_false):
raise SchemeException("make-struct-type: expected a struct-type? or #f for the super type , but got %s : %s" % (w_super_type, w_super_type.tostring())
|
)
if (isinstance(w_super_type, values_struct.W_StructType) and
w_super_type.prop_sealed):
raise SchemeException("make-struct-type: cannot make a subtype of a sealed type")
init_field_count = w_init_field_count.value
auto_field_count = w_auto_field_count.value
immutables = []
for i in values.from_list_iter(w_immutables):
|
if not isinstance(i, values.W_Fixnum) or i.value < 0:
raise SchemeException("make-struct-type: expected list of positive integers for immutable fields")
immutables.append(i.value)
return values_struct.W_StructType.make(w_name=w_name,
w_super_type=w_super_type, init_field_count=init_field_count,
auto_field_count=auto_field_count, w_auto_value=w_auto_value,
w_properties=w_properties, w_inspector=w_inspector,
w_proc_spec=w_proc_spec, immutables=immutables,
w_guard=w_guard, w_constructor_name=w_constructor_name,
env=env, cont=cont)
@expose("struct-accessor-procedure?", [values.W_Object])
def do_is_struct_accessor_procedure(v):
return values.W_Bool.make(isinstance(v, values_struct.W_StructAccessor) or
isinstance(v, values_struct.W_StructFieldAccessor))
@expose("make-struct-field-accessor", [values_struct.W_StructAccessor,
values.W_Fixnum, default(values.W_Object, values.w_false)])
def do_make_struct_field_accessor(accessor, field, field_name):
if field_name is values.w_false:
return values_struct.W_StructFieldAccessor(accessor, field.value, None)
if not isinstance(field_name, values.W_Symbol):
raise SchemeException("make-struct-field-accessor: expected symbol or #f as argument 2")
return values_struct.W_StructFieldAccessor(accessor, field.value, field_name)
@expose("struct-mutator-procedure?", [values.W_Object])
def do_is_struct_mutator_procedure(v):
return values.W_Bool.make(isinstance(v, values_struct.W_StructMutator) or
isinstance(v, values_struct.W_StructFieldMutator))
@expose("make-struct-field-mutator", [values_struct.W_StructMutator,
values.W_Fixnum, default(values.W_Object, values.w_false)])
def do_make_struct_field_mutator(mutator, field, field_name):
if field_name is values.w_false:
return values_struct.W_StructFieldMutator(mutator, field.value, None)
if not isinstance(field_name, values.W_Symbol):
raise SchemeException("make-struct-field-mutator: expected symbol or #f as argument 2")
return values_struct.W_StructFieldMutator(mutator, field.value, field_name)
@expose("struct->vector", [values_struct.W_RootStruct])
def expose_struct2vector(struct):
return values_struct.struct2vector(struct)
@expose("prefab-struct-key", [values.W_Object])
def do_prefab_struct_key(v):
if not (isinstance(v, values_struct.W_Struct) and v.struct_type().isprefab):
return values.w_false
prefab_key = values_struct.W_PrefabKey.from_struct_type(v.struc
|
dragonrider23/rabbithole
|
modules/cmd_wrappers.py
|
Python
|
bsd-3-clause
| 673
| 0.001486
|
# -*- coding: utf-8 -*-
"""
This module allows running system
|
commands directly
from Rabbithole. This can be good for simple commands
such as ping. There is currently no way to filter arguments
given to wrapped commands. For that kind of functionali
|
ty,
a separate module should be created,
"""
import rh.common as common
def _create_wrappers(config):
cmds = config.get('core', 'wrappedCommands').split(',')
for cmd in cmds:
func = lambda _, args, cmd=cmd: common.start_process(
"{} {}".format(cmd, args))
common.register_cmd(
cmd, func, "Wrapper around the {} command".format(cmd))
common.register_init(_create_wrappers)
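# --- Editor's note: hedged configuration example, not part of the original module. ---
# The section/option names below match the config.get('core', 'wrappedCommands')
# call above; the concrete command list is only an illustration.
#
#   [core]
#   wrappedCommands = ping,traceroute
#
# With that config, typing "ping example.org" in Rabbithole would hand
# "ping example.org" to common.start_process via the generated wrapper.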
|
cieplak/morf
|
morf/morphism.py
|
Python
|
mit
| 2,478
| 0
|
from .path import Path
class Morphism(object):
def __init__(self, arrows):
self.arrows = arrows
def __call__(self, domain):
codomain = {}
for arrow in self.arrows:
codomain = arrow.apply(domain, codomain)
return codomain
@classmethod
def compile(cls, *expressions, **kwargs):
ctx = kwargs.pop('ctx', {})
arrows = [Arrow.parse(expr, ctx=ctx) for expr in expressions]
return cls(arrows)
class Arrow(object):
symbol = '->'
def __init__(self, source_paths, function, destination_path):
self.source_paths = source_paths
self.function = function
self.destination_path = destination_path
@classmethod
def parse(cls, representation, ctx=None):
tokens = map(str.strip, representation.split(cls.symbol))
destination_path = Path.parse(tokens.pop())
source_paths = []
callable_tokens = []
|
for token in tokens:
if token[0] == '/':
source_paths.append(Path.parse(token))
else:
callable_tokens.append(token)
callables = []
for token in callable_tokens:
if token in ctx:
callables.append(ctx[token])
elif token.startswith('py::'):
try:
python_function = eval(to
|
ken[4:])
if not callable(python_function):
raise Exception(
'Token %s is not a callable in expression %s' %
(token, representation)
)
callables.append(python_function)
except Exception as e:
raise Exception(
'Failed to parse token %s in expression %s' %
(token, representation)
)
function = cls.compose(*callables)
return cls(source_paths, function, destination_path)
def apply(self, domain, codomain):
inputs = [path.resolve(domain) for path in self.source_paths]
input_ = inputs[0] if len(inputs) == 1 else tuple(inputs)
destination_value = self.function(input_)
return self.destination_path.set(codomain, destination_value)
@classmethod
def compose(cls, *functions):
def inner(arg):
for f in functions:
arg = f(arg)
return arg
return inner
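# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The expression grammar is inferred from Arrow.parse above: tokens are separated
# by '->', tokens starting with '/' are source paths, bare tokens are looked up
# in ctx, 'py::' tokens are eval'd, and the final token is the destination path.
# The paths, ctx entries, and expected output below are assumptions for
# illustration (the exact behaviour depends on Path.parse/resolve/set).
#
# morphism = Morphism.compile(
#     '/user/name -> upper -> /profile/display_name',
#     ctx={'upper': str.upper},
# )
# codomain = morphism({'user': {'name': 'ada'}})
# # expected: {'profile': {'display_name': 'ADA'}}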
|
msteinhoff/foption-bot
|
src/python/tools/cleandb.py
|
Python
|
mit
| 1,658
| 0.001809
|
# -*- coding: UTF-8 -*-
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sub
|
license, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS",
|
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Aug 15, 2011
@author Mario Steinhoff
"""
__version__ = '$Rev$'
import logging
import os
from core import runlevel
from core.bot import Bot
from core.persistence import SqlAlchemyPersistence
# Load all table definitions
import objects
if __name__ == '__main__':
config_root = os.environ['FPTBOT_CONFIG']
bot = Bot(root=config_root, level=logging.INFO)
bot.init(runlevel.LOCAL_SERVICE)
persistence = bot.get_subsystem('local-persistence')
base = SqlAlchemyPersistence.Base
base.metadata.drop_all(persistence.engine)
|
kYc0o/RIOT
|
dist/tools/dhcpv6-pd_ia/pkg/apt.py
|
Python
|
lgpl-2.1
| 703
| 0
|
# -*- coding: utf-8 -*-
# v
|
im:fenc=utf-8
#
# Copyright (C) 2020 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import subprocess
from .base import Installer
__author__ = "Martine S. Lenders"
__copyright__ = "Copyright (C) 2020 Freie Universität Be
|
rlin"
__credits__ = ["Martine S. Lenders"]
__license__ = "LGPLv2.1"
__maintainer__ = "Martine S. Lenders"
__email__ = "m.lenders@fu-berlin.de"
class Apt(Installer):
def _install(self, package):
subprocess.run(["apt-get", "-y", "install",
package[self.os]["name"]])
|
poldrack/openfmri
|
pipeline/fs_setup.py
|
Python
|
bsd-2-clause
| 2,291
| 0.030554
|
#!/usr/bin/env python
""" fs_setup.py - set up directories for freesurfer analysis
"""
## Copyright 2011, Russell Poldrack. All rights reserved.
## Redistribution and use in source and binary forms, with or without modification, are
## permitted provided that the following conditions are met:
## 1. Redistributions of source code must retain the above copyright notice, this list of
## conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice, this list
## of conditions and the following disclaimer in the documentation and/or other materials
## provided with the distribution.
## THIS SOFTWARE IS PROVIDED BY RUSSELL POLDRACK ``AS IS'' AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
## FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RUSSELL POLDRACK OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os,sys
dataset=sys.argv[1]
if len(sys.argv)>2:
basedir=sys.argv[2]
else:
basedir=os.path.abspath(os.curdir)
if len(sys.argv)>3:
subdir=sys.argv[3]
else:
subdir='/corral-r
|
epl/utexas/poldracklab/openfmri/subdir'
outfile=open('fs_setup.sh','w')
#subdir=basedir+'subdir'
if not basedir[-1]=='/':
basedir=basedir+'/'
if not os.path.exists(basedir+dataset):
print '%s/%s does not exist'%(basedir
|
,dataset)
sys.exit(2)
#for d in os.listdir(basedir+ds):
# if d[0:3]=='sub':
for root,dirs,files in os.walk(basedir+dataset):
if root.find(dataset)>-1:
for f in files:
if f.rfind('highres001.nii.gz')>-1:
f_split=root.split('/')
outfile.write('recon-all -i %s/%s -subjid %s_%s -sd %s\n'%(root,f,f_split[-3],f_split[-2],subdir))
outfile.close()
print 'now launch using: sh fs_setup.sh'
|
KamranMackey/readthedocs.org
|
readthedocs/projects/migrations/0006_add_imported_file.py
|
Python
|
mit
| 8,964
| 0.008701
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ImportedFile'
db.create_table('projects_importedfile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='imported_files', to=orm['projects.Project'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('projects', ['ImportedFile'])
def backwards(self, orm):
# Deleting model 'ImportedFile'
db.delete_table('projects_importedfile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.file': {
'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
'content': ('django.db.models.fields.TextField', [], {}),
'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
'projects.filerevision': {
'Meta': {'ordering': "('-revision_number',)", 'object_name': 'File
|
Revision'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_date': ('dj
|
ango.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'projects.project': {
'Meta': {'ordering': "('-modified_date', 'name')", 'object_name': 'Project'},
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug'
|
BeardedPlatypus/capita-selecta-ctf
|
ctf/players/admin.py
|
Python
|
mit
| 126
| 0.007937
|
from django.contrib import
|
admin
from .models
|
import Question
# Register your models here.
admin.site.register(Question)
|
Collisionc/sickbeard_mp4_automator
|
postCouchPotato.py
|
Python
|
mit
| 2,496
| 0.002804
|
#!/usr/bin/env python
import sys
import os
import logging
from extensions import valid_tagging_extensions
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from tmdb_mp4 import tmdb_mp4
from autoprocess import plex
from post_processor import PostProcessor
from logging.config import fileConfig
logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
except:
logpath = os.path.dirname(sys.argv[0])
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("CouchPotatoPostConversion")
log.info('MP4 Automator - Post processing script initialized')
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
converter = MkvtoMp4(settings)
imdbid = sys.argv[1]
inputfile = sys.argv[2]
original = sys.argv[3]
log.debug("IMDBID: %s" % imdbid)
log.debug("Input file path: %s" % inputfile)
log.debug("Original file name: %s" % original)
try:
log.info('Processing file: %s', inputfile)
if MkvtoMp4(settings).validSource(inputfile):
log.info(
|
'File is valid')
output = converter.process(inputfile, original=original)
if output:
# Tag with metadata
if settings.tagfile and output['output_extension'] in
|
valid_tagging_extensions:
log.info('Tagging file with IMDB ID %s', imdbid)
try:
tagmp4 = tmdb_mp4(imdbid, original=original, language=settings.taglanguage)
tagmp4.setHD(output['x'], output['y'])
tagmp4.writeTags(output['output'], settings.artwork)
except:
log.error("Unable to tag file")
# Copy to additional locations
output_files = converter.replicate(output['output'])
# Run any post process scripts
if settings.postprocess:
post_processor = PostProcessor(output_files, log)
post_processor.setMovie(imdbid)
post_processor.run_scripts()
plex.refreshPlex(settings, 'movie', log)
else:
log.info('File %s is invalid, ignoring' % inputfile)
except:
log.exception('File processing failed: %s' % inputfile)
|
supert-is-taken/film-sorter
|
film_sorter.py
|
Python
|
gpl-3.0
| 3,764
| 0.005579
|
#!/usr/bin/env python
'''
Files and renames your films according to genre
using IMDb API
supert-is-taken 2015
'''
from imdb import IMDb
import sys
import re
import os
import errno
import codecs
db=IMDb()
def main(argv):
for filename in sys.argv[1:]:
print filename
movie_list=db.search_movie( cleanup_movie_string(filename) )
while len(movie_list) == 0:
search_string = raw_input('No results, alternative search string: ')
movie_list = db.search_movie( cleanup_movie_string(search_string) )
movie=movie_list[0]
title = movie['long imdb title'].replace('"', "")
''' matching is not very reliable so seek confirmation '''
proceed = raw_input(title + " (Y/n) ")
        if proceed == 'n':
ix = choose_movie(movie_list)
while ix == -1:
search_string = raw_input('Alternative search string: ')
movie_list = db.search_movie( cleanup_movie_string(search_string) )
ix = choose_movie(movie_list)
if ix == -2:
continue
movie=movie_list[ix]
title = movie['long imdb title'].replace('"', "")
''' get genres, summary and other extended items '''
db.update(movie)
''' summary to file '''
with codecs.open(title + ".summary", 'w', "utf-8") as summary:
summary.write(movie.summary())
summary.close()
''' rename the files and file them '''
ext = re.search(r'\.[a-zA-Z]*$', filename).group(0)
for genre in movie['genres']:
mkdir_p(genre)
__link(filename, genre + '/' + title + ext)
__link(filename.replace(ext, ".srt"), genre + '/' + title + ".srt")
__link(filename.replace(ext, ".sub"), genre + '/' + title + ".srt")
__link(title + ".summary", genre + '/' + title + ".summary")
__link(filename.replace(ext, ".idx"), genre + '/' + title + ".idx")
''' delete old files '''
__unlink(filename)
__unlink(title + ".summary")
__unlink(filename.replace(ext, ".srt"))
__unlink(filename.replace(ext, ".sub"))
__unlink(filename.replace(ext, ".idx"))
return 1
def choose_movie(movie_list):
print("-2: enter new search string")
print("-1: enter new search string")
i=0
for movie in movie_list:
prompt = '%d: ' + movie['long imdb title']
print(prompt % i)
i+=1
return int(raw_input("choice: "))
def cleanup_movie_string(s):
s = s.replace("WEB-DL", "")
s = re.sub(r'-.*$', "", s)
s = s.replace(".1.", " ")
s = s.replace(".0.", " ")
s =
|
s.replace(".H.", " ")
s = s.replace(".", " ")
s = s.rep
|
lace("mkv", "")
s = s.replace("X264", "")
s = s.replace("x264", "")
s = s.replace("avi", "")
s = s.replace("mp4", "")
s = s.replace("720p", "")
s = s.replace("1080p", "")
s = s.replace("BluRay", "")
s = s.replace("Bluray", "")
s = s.replace("bluray", "")
s = s.replace("DTS", "")
s = s.replace("AAC", "")
s = s.replace("AC3", "")
s = s.replace("HDTV", "")
s = s.replace("DD5", "")
s = s.replace("IMAX", "")
return s
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def __link(old, new):
if os.path.isfile(old):
try:
os.link(old, new)
except:
pass
def __unlink(filename):
if os.path.isfile(filename):
try:
os.unlink(filename)
except:
pass
if __name__=="__main__":
sys.exit(main(sys.argv))
|
rohitranjan1991/home-assistant
|
homeassistant/components/mazda/diagnostics.py
|
Python
|
mit
| 1,836
| 0.000545
|
"""Diagnostics support for the Mazda integration."""
from __future__ import annotations
from typing import Any
from homeassistant.components.diagnostics.util import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import DeviceEntry
from .const import DATA_COORDINATOR, DOMAIN
TO_REDACT_INFO = [CONF_EMAIL, CONF_PASSWORD]
TO_REDACT_DATA = ["vin", "id", "latitude", "longitude"]
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR]
diagnostics_data = {
"info": async_redact_data(config_entry.data, TO_REDACT_INFO),
"data": [
async_redact_data(vehicle, TO_REDACT_DATA) for vehicle in coordinator.data
],
}
return diagnostics_data
async def async_get_device_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry, device: DeviceEntry
) -> dict[str, Any]:
"""Return diagnostics for a device."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR]
vin = next(iter(device.identifiers))[1]
target_vehicle = None
for vehicle in coordinator.data:
if vehicle["vin"] == vin:
target_vehicle = vehicle
break
if target_vehicle is None:
raise HomeAssistantError("Vehicle not found")
diagnostics_data = {
"info": async_redact_data(config_entry.data, TO_REDACT_INFO),
"data": async_redact_data(target_vehicle, TO_REDACT_DATA),
}
return diagnostics_data
|
edu-zamora/inimailbot
|
cron_updater.py
|
Python
|
gpl-3.0
| 2,032
| 0.02313
|
# ###
# Copyright (c) 2010 Konstantinos Spyropoulos <inigo.aldana@gmail.com>
#
# This file is part of inimailbot
#
# inimailbot is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# inimailbot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with inimailbot.
# If not, see http://www.gnu.org/licenses/.
# #####
import os, logging, re
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from receive_ankicrashes import Bug
class ScanIssues(webapp.RequestHandler):
def get(self):
bugs_query = Bug.all()
bugs_query.filter('linked =', False)
bugs = []
bugs = bugs_query.fetch(1000)
for bg in bugs:
issues = bg.findIssue()
if issues:
bg.issueName = issues[0]['id']
logging.info("ScanIssues: Autolinking bug " + str(bg.key().id()
|
) + " to issue " + str(bg.issueName))
bg.put()
class UpdateStatusesPriorities(webapp.RequestHandler):
def get(self):
bugs_query = Bug.all()
#bugs_query.filter('issueName !=', None)
bugs_query.filter('linked =', True)
bugs = []
bugs = bugs_query.fetch(1000)
logging.debug("Cron job updater, found " + str(bugs_query.count(1000000)) + " bugs")
for bg in bugs:
if bg.updateStatusPriority():
logging.debug("Updated status and/or priority for bug: '" + str(bg.key().id()) + "'")
bg.put()
application = webapp.WSGIApplication(
[(r'^/ankidroid_triage/cron_updater/status_priority?.*', UpdateStatusesPriorities),
(r'^/ankidroid_triage/cron_updater/issue_scanner?.*', ScanIssues)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
trustedhousesitters/roster-python
|
setup.py
|
Python
|
mit
| 860
| 0.003488
|
from setuptools import setup, find_packages
setup(
name='roster',
version='1.0',
description='Roster: A library for simple service discovery using Dynamodb for Python',
long_description=open('README.md').read(),
author='Tim Rijavec',
author_email='tim@trustedhousesitters.com',
url='https://github.com/trustedhousesitters/roster-python',
license='MIT',
packages=find_packages(exclude=('example', 'examples', )),
    include_package_data=True,
zip_safe=False,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Service Discovery',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
porimol/django-blog
|
cms/migrations/0004_auto_20160508_2117.py
|
Python
|
mit
| 468
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-08 21:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20160508_2115'),
]
operations = [
        migrations.AlterField(
model_name='post',
name='featured_photo',
            field=models.ImageField(upload_to='/featured_photos'),
),
]
|
gjwajda/Computational-Tools-For-Big-Data
|
Exercise2/exercise2_1.py
|
Python
|
mit
| 449
| 0.062361
|
#!/usr/bin/python
#First Method taking in file "matrix.txt" and printing list of lists
#Function1
def readmatrix( file ):
list = open( file, 'r' )
return list.readlines();
#Calling function
print readmatrix( 'matrix.txt' )
#Function2
def reverse( list, matrixfile ):
    file = open( matrixfile, 'w' )
for i in list:
file.write(i)
file.close()
return file;
#Calling function
reverse( readmatrix('matrix.txt'), 'output.txt' )
|
okadate/romspy
|
example/OB500/hview_ob500_grd.py
|
Python
|
mit
| 263
| 0
|
# -*- coding: utf-8 -*-
import romspy
romspy.hview('nc/ob500_grd-v5.nc',
'ob500_grd-v5.png',
vname='h',
cblabel='Depth[m]',
vmax=0, vmin=-120, interval=20, cff=-1,
obsfile='nc/ob500_obs_tsdc.nc')
| |
AxiaCore/django-extlog
|
django_extlog/middleware.py
|
Python
|
mit
| 2,967
| 0
|
from django.db.models import signals
from django.utils.functional import curry
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.contrib.admin.models import LogEntry
from django.contrib.sessions.models import Session
from django_extlog.models import ExtLog
class AuditLoggingMiddleware(object):
ip_address = None
def process_request(self, request):
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
session = request.session.session_key
self.ip_address = request.META.get('REMOTE_ADDR', None)
update_post_save_info = curry(
self._update_post_save_info,
user,
session,
)
update_post_delete_info = curry(
self._update_post_delete_info,
user,
session,
)
signals.post_save.connect(
update_post_save_info,
dispatch_uid=(self.__class__, request,),
weak=False
)
signals.post_delete.connect(
update_post_delete_info,
dispatch_uid=(self.__class__, request,),
weak=False
)
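            # Clarifying note (added): dispatch_uid is keyed on this request so
            # that process_response can disconnect exactly these handlers, and
            # weak=False keeps the curried callables from being garbage-collected
            # before the signals fire.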
def process_response(self, request, response):
signals.post_save.disconnect(dispatch_uid=(self.__class__, request,))
signals.post_delete.disconnect(dispatch_uid=(self.__class__, request,))
return response
def _save_to_log(self, instance, action, user):
content_type = ContentType.objects.get_for_model(instance)
if content_type.app_label != 'django_extlog' and user:
object_id = instance.id if hasattr(instance, 'id') else 0
ExtLog.objects.create(
object_id=object_id,
app_name=content_type.app_label,
model_name=content_type.model,
action=action,
object_instance=serializers.serialize('json', [instance]),
user=user,
ip=self.ip_address,
)
def _update_post_save_info(
self,
user,
session,
sender,
instance,
**kwargs
):
if sender in [LogEntry, Session]:
return
if kwargs['created']:
            self._save_to_log(instance, ExtLog.ACTION_TYPE_CREATE, user)
else:
self._save_to_log(instance, ExtLog.ACTION_TYPE_UPDATE, user)
def _update_post_delete_info(
self,
user,
session,
sender,
        instance,
**kwargs
):
if sender in [LogEntry, Session]:
return
self._save_to_log(instance, ExtLog.ACTION_TYPE_DELETE, user)
|
diogocs1/facebookexplorer
|
teste.py
|
Python
|
apache-2.0
| 144
| 0.020833
|
from appkit.api.v0_2_8 import App
app = App(__name__)
@app.route("/")
def home():
return '<a href="#" target="_blank">Clique</a>'
app.run()
|
soft-matter/mr
|
customized_trackpy/tracking.py
|
Python
|
gpl-3.0
| 24,253
| 0.001938
|
#Copyright 2012 Thomas A Caswell
#tcaswell@uchicago.edu
#http://jfi.uchicago.edu/~tcaswell
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or (at
#your option) any later version.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, see <http://www.gnu.org/licenses>.
from __future__ import division
import numpy as np
from collections import deque, Iterable
class Hash_table(object):
'''
:param dims: the range of the data to be put in the hash table. 0<data[k]<dims[k]
:param box_size: how big each box should be in data units. The same scale is used for all dimensions
Basic hash table to fast look up of particles in the region of a given particle
'''
class Out_of_hash_excpt(Exception):
"""
:py:exc:`Exception` for indicating that a particle is outside of the
valid range for this hash table."""
pass
def __init__(self, dims, box_size):
'''
Sets up the hash table
'''
self.dims = dims # the dimensions of the data
self.box_size = box_size # the size of boxes to use in the units of the data
self.hash_dims = np.ceil(np.array(dims) / box_size)
self.hash_table = [[] for j in range(int(np.prod(self.hash_dims)))]
self.spat_dims = len(dims) # how many spatial dimensions
self.cached_shifts = None
self.cached_rrange = None
self.strides = np.cumprod(np.concatenate(([1], self.hash_dims[1:])))[::-1]
def get_region(self, point, rrange):
'''
:param point: point to find the features around
:param rrange: the size of the ball to search in
Returns all the particles with in the region of maximum radius
rrange in data units
can raise :py:exc:`Out_of_hash_excpt`
'''
hash_size = self.hash_dims
center = np.floor(point.pos / self.box_size)
if any(center >= hash_size) or any(center < 0):
raise Hash_table.Out_of_hash_excpt("cord out of range")
rrange = int(np.ceil(rrange / self.box_size))
# check if we have already computed the shifts
if rrange == self.cached_rrange and self.cached_shifts is not None:
shifts = self.cached_shifts # if we have, use them
# Other wise, generate them
else:
if self.spat_dims == 2:
shifts = [np.array([j, k])
for j in range(-rrange, rrange + 1)
for k in range(-rrange, rrange + 1)]
elif self.spat_dims == 3:
shifts = [np.array([j, k, m])
for j in range(-rrange, rrange + 1)
for k in range(-rrange, rrange + 1)
for m in range(-rrange, rrange + 1)]
else:
raise NotImplementedError('only 2 and 3 dimensions implemented')
self.cached_rrange = rrange # and save them
self.cached_shifts = shifts
region = []
for s in shifts:
cord = center + s
if any(cord >= hash_size) or any(cord < 0):
continue
indx = int(sum(cord * self.strides))
region.extend(self.hash_table[indx])
return region
def add_point(self, point):
"""
:param point: object representing the feature to add to the hash table
Adds the `point` to the hash table. Assumes that :py:attr:`point.pos` exists and
is the array-like.
can raise :py:exc:`~Hash_table.Out_of_hash_excpt`
"""
cord = np.floor(np.asarray(point.pos) / self.box_size)
hash_size = self.hash_dims
if any(cord >= hash_size) or any(cord < 0):
raise Hash_table.Out_of_hash_excpt("cord out of range")
indx = int(sum(cord * self.strides))
self.hash_table[indx].append(point)
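# Minimal usage sketch for Hash_table (not part of the original module; it
# assumes the PointND class defined further below):
#
#     ht = Hash_table(dims=(100, 100), box_size=5)
#     for pos in ([10, 10], [12, 11], [80, 80]):
#         ht.add_point(PointND(0, np.array(pos)))
#     neighbours = ht.get_region(PointND(0, np.array([11, 10])), rrange=5)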
class Track(object):
'''
:param point: The first feature in the track if not `None`.
:type point: :py:class:`~trackpy.tracking.Point`
Base class for objects to represent linked tracks. Includes logic
for adding, removing features to the track. This can be sub-classed
to provide additional track level computation as needed.
'''
count = 0
def __init__(self, point=None):
self.points = []
# will take initiator point
if not point is None:
self.add_point(point)
self.indx = Track.count # unique id
Track.count += 1
def __iter__(self):
return self.points.__iter__()
def __len__(self):
return len(self.points)
def __eq__(self, other):
return self.index == other.index
def __neq__(self, other):
return not self.__eq__(other)
__hash__ = None
def add_point(self, point):
'''
:param point: point to add
:type point: :py:class:`~trackpy.tracking.Point`
Appends the point to this track. '''
self.points.append(point)
point.add_to_track(self)
def remove_point(self, point):
'''
:param point: point to remove from this track
:type point: :py:class:`~trackpy.tracking.Point`
removes a point from this track'''
self.points.remove(point)
point._track = None
def last_point(self):
'''
:rtype: :py:class:`~trackpy.tracking.Point`
Returns the last point on the track'''
return self.points[-1]
@classmethod
def reset_counter(cls, c=0):
cls.count = 0
class Point(object):
'''
Base class for point (features) used in tracking. This class
contains all of the general stuff for interacting with
:py:class:`~trackpy.tracking.Track` objects.
.. note:: To be used for tracking this class must be sub-classed to provide a :py:func:`distance` function. Child classes **MUST** call :py:func:`Point.__init__`. (See :py:class:`~trackpy.tracking.PointND` for example. )
'''
count = 0
def __init__(self):
self._track = None
self.uuid = Point.count # unique id for __hash__
Point.count += 1
## def __eq__(self, other):
## return self.uuid == other.uuid
## def __neq__(self, other):
## return not self.__eq__(other)
def add_to_track(self, track):
'''
:param track: the track to assign to this :py:class:`Point`
Sets the track of a :py:class:`Point` object. Raises
:py:exc:`Exception` if the object is already assigned a track.
'''
if self._track is not None:
raise Exception("trying to add a particle already in a track")
self._track = track
def remove_from_track(self, track):
'''
:param track: the track to disassociate from this :py:class:`Point`
Removes this point from the given track. Raises :py:exc:`Exception` if
particle not associated with the given track.
'''
if self._track != track:
raise Exception("Point not associated with given track")
track.remove_point(self)
def in_track(self):
'''
:rtype: bool
        Returns if a point is associated with a track '''
return self._track is not None
@property
def track(self):
"""Returns the track that this :class:`Point` is in. May be `None` """
return self._track
class PointND(Point):
'''
:param t: a time-like variable.
:param pos: position of feature
:type pos: iterable of length d
Version of :py:class:`Point` for tracking in flat space with
non-periodic boundary conditions.
'''
def __init__(self, t, pos):
Point.__in
|
dtaht/ns-3-dev
|
src/flow-monitor/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 411,996
| 0.015158
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.flow_monitor', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper [class]
module.add_class('FlowMonitorHelper')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## histogram.h (module 'flow-monitor'): ns3::Histogram [class]
module.add_class('Histogram')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class]
module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration]
module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration]
module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parame
|
tommeagher/pythonGIJC15
|
scripts/completed/quakes_complete.py
|
Python
|
mit
| 980
| 0.006122
|
import requests
import unicodecsv
from io import StringIO
#what country has the most serious earthquakes lately?
url = "http://python-gijc15.s3.eu-central-1.amazonaws.com/all_month.csv"
r = requests.get(url)
text = StringIO(r.text)
reader = unicodecsv.DictReader(text, dialect='excel')
new_collection = []
for row in reader:
if row['type'] == "earthquake":
if float(row['mag'])>=6.0:
new_collection.append(row)
else:
pass
else:
pass
for item in new_collection:
    item[u'nearest']=item['place'].split(',')[-1].strip()
#write the results to a new csv
filename = "serious_quakes.csv"
print new_collection[0].keys()
fieldnames = [u'time', u'latitude', u'longitude', u'mag', u'type', u'id', u'place', u'nearest']
with open(filename, "wb") as f:
writer = unicodecsv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
writer.writeheader()
for item in new_collection:
writer.writerow(item)
|
echevemaster/fedora-college
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 8,753
| 0.004798
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
#
# This file is based upon the file generated by sphinx-quickstart. However,
# where sphinx-quickstart hardcodes values in this file that you input, this
# file has been changed to pull from your module's metadata module.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# Import project metadata
from fedora_college import metadata
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# show todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = metadata.project
copyright = metadata.copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = metadata.project_no_spaces + 'doc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', metadata.project_no_spaces + '.tex',
metadata.project + ' Documentation', metadata.authors_string,
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', metadata.package, metadata.project + ' Documentation',
metadata.authors_string, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', metadata.project_no_spaces,
metadata.project + ' Documentation', metadata.authors_string,
metadata.project_no_spaces, metadata.description, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo
|
tmerr/bank_wrangler
|
bank_wrangler/report/__init__.py
|
Python
|
gpl-3.0
| 2,519
| 0.000794
|
import os
from glob import glob
from itertools import chain
from typing import Iterable
import json
import jinja2
import shutil
from bank_wrangler import schema
def _generate_data_json(transactions, accounts):
transactions = [list(map(str, row))
for row in transactions]
return json.dumps({
'columns': schema.Transaction._fields,
'transactions': transactions,
'accounts': accounts
})
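# Illustrative shape of the JSON produced above (column names actually come
# from schema.Transaction._fields; the example values are made up):
#     {"columns": ["date", "amount", ...],
#      "transactions": [["2020-01-01", "12.34", ...]],
#      "accounts": ["checking"]}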
def _generate_pages(html_path, css_names, js_names):
env = jinja2.Environment(
undefined=jinja2.StrictUndefined,
loader = jinja2.FileSystemLoader(html_path),
lstrip_blocks=True,
trim_blocks=True,
)
pages = {
'Bank Wrangler': 'index.html',
'List': 'list.html',
'Balance': 'balance.html',
'Spending': 'spending.html',
}
# used by base.html
env.globals = {
'cssimports': css_names,
'jsimports': js_names,
'pages': [{'name': title, 'url': filename}
                  for title, filename in pages.items()],
}
return {filename: env.get_template(filename).render(selectedpage=filename)
for filename in pages.values()}
def generate(root, transactions, accounts: Iterable[str]):
"""Write the report to <root>/report directory."""
reportdir = os.path.dirname(os.path.abspath(__file__))
html_path = os.path.join(reportdir, 'html')
css_paths = glob(os.path.join(reportdir, 'libs', '*.css'))
js_paths = (glob(os.path.join(reportdir, 'libs', '*.js')) +
glob(os.path.join(reportdir, 'js', '*.js')))
files = {}
for path in css_paths + js_paths:
fname = os.path.basename(path)
with open(path, 'r') as f:
files[fname] = f.read()
files['data.js'] = 'const transactionModel = {};'.format(
_generate_data_json(transactions, list(accounts))
)
css_names = list(map(os.path.basename, css_paths))
js_names = list(map(os.path.basename, js_paths)) + ['data.js']
for filename, text in _generate_pages(html_path,
css_names,
js_names).items():
files[filename] = text
outdir = os.path.join(root, 'report')
try:
shutil.rmtree(outdir)
except FileNotFoundError:
pass
os.mkdir(outdir)
for filename, datastring in files.items():
path = os.path.join(outdir, filename)
with open(path, 'w') as f:
f.write(datastring)
|
rpm-software-management/dnf
|
dnf/subject.py
|
Python
|
gpl-2.0
| 1,150
| 0.00087
|
# subject.py
# Implements Subject.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from hawkey import Subject # :api
|
HybridF5/jacket
|
jacket/tests/compute/unit/virt/disk/test_inject.py
|
Python
|
apache-2.0
| 11,880
| 0.000084
|
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import os
import fixtures
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.virt.disk.vfs import fakeguestfs
from jacket.compute.virt.disk import api as diskapi
from jacket.compute.virt.disk.vfs import guestfs as vfsguestfs
from jacket.compute.virt.image import model as imgmodel
class VirtDiskTest(test.NoDBTestCase):
def setUp(self):
super(VirtDiskTest, self).setUp()
self.useFixture(
            fixtures.MonkeyPatch('compute.virt.disk.vfs.guestfs.guestfs',
fakeguestfs))
self.file = imgmodel.LocalFileImage("/some/file",
imgmodel.FORMAT_QCOW2)
def test_inject_data(self):
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2)))
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
mandatory=('files',)))
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
key="mysshkey",
mandatory=('key',)))
os_name = os.name
os.name = 'nt' # Cause password injection to fail
self.assertRaises(exception.NovaException,
diskapi.inject_data,
imgmodel.LocalFileImage("/some/file",
imgmodel.FORMAT_RAW),
admin_password="p",
mandatory=('admin_password',))
self.assertFalse(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
admin_password="p"))
os.name = os_name
self.assertFalse(diskapi.inject_data(
imgmodel.LocalFileImage("/some/fail/file", imgmodel.FORMAT_RAW),
key="mysshkey"))
def test_inject_data_key(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/root/.ssh", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh"],
{'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
'content': "Hello World\n# The following ssh " +
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
'mode': 0o600})
vfs.teardown()
def test_inject_data_key_with_selinux(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
vfs.make_path("etc/selinux")
vfs.make_path("etc/rc.d")
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
{'isdir': False,
'content': "Hello World#!/bin/sh\n# Added by " +
"Nova to ensure injected ssh keys " +
"have the right context\nrestorecon " +
"-RF root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
'mode': 0o700})
self.assertIn("/root/.ssh", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh"],
{'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
'content': "Hello World\n# The following ssh " +
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
'mode': 0o600})
vfs.teardown()
def test_inject_data_key_with_selinux_append_with_newline(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
vfs.make_path("etc/selinux")
vfs.make_path("etc/rc.d")
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
{'isdir': False,
'content': "#!/bin/sh\necho done\n# Added "
"by Nova to ensure injected ssh keys have "
"the right context\nrestorecon -RF "
"root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
'mode': 0o700})
vfs.teardown()
def test_inject_net(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
diskapi._inject_net_into_fs("mynetconfig", vfs)
self.assertIn("/etc/network/interfaces", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
{'content': 'mynetconfig',
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100})
vfs.teardown()
def test_inject_metadata(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
metadata = {"foo": "bar", "eek": "wizz"}
metadata = OrderedDict(sorted(metadata.items()))
diskapi._inject_metadata_into_fs(metadata, vfs)
self.assertIn("/meta.js", vfs.handle.files)
self.assertEqual({'content': '{"eek": "wizz", ' +
'"foo": "bar"}',
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100},
vfs.handle.files["/meta.js"])
vfs.teardown()
def test_inject_admin_password(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
def fake_salt():
return "1234567890abcdef"
self.stubs.Set(diskapi, '_generate_salt', fake_salt)
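        # Pinning the salt (comment added) presumably keeps the password hash
        # written by _inject_admin_password_into_fs deterministic for the test.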
vfs.handle.write("/etc/shadow",
"root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
"bin:*:14495:0:99999:7:::\n" +
"daemon:*:14495:0:99999:7:::\n")
vfs.handle.write("/etc/passwd",
"root:x:0:0:root:/root:/bin/bash\n" +
"bin:x:1:1:bin:/bin:/sbin/nologin\n" +
"daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
diskapi._inject_admin_password_into_fs("123456", vfs)
self.assertEqual(vfs.handle.files["/etc/passwd"],
{'content': "root:x:0:0:root:/root:/bin/bash\n" +
"bin:x:1:1:bin:/bin:/sbin/nologin\n" +
"daemon:x:2:2:daemon:/sbin:" +
"/sbin/nologin\n",
'gid': 100,
'isdir': False,
|