| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-26 19:49
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Installation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('lat', models.DecimalField(decimal_places=6, default=Decimal('0.0000'), max_digits=9)),
('lng', models.DecimalField(decimal_places=6, default=Decimal('0.0000'), max_digits=9)),
('logo', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('marker', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('description', models.TextField(blank=True, null=True)),
('url', models.TextField(blank=True, null=True)),
('version', models.CharField(blank=True, max_length=6, null=True)),
],
),
migrations.CreateModel(
name='Institution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('lat', models.DecimalField(decimal_places=6, default=Decimal('0.0000'), max_digits=9)),
('lng', models.DecimalField(decimal_places=6, default=Decimal('0.0000'), max_digits=9)),
('host', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='installations.Installation')),
],
),
]
| IQSS/miniverse | dv_apps/installations/migrations/0001_initial.py | Python | mit | 1,887 |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/logging/v2/logging_metrics.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.logging.v2 MetricsServiceV2 API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.logging.v2 import enums
from google.cloud.proto.logging.v2 import logging_metrics_pb2
_PageDesc = google.gax.PageDescriptor
class MetricsServiceV2Client(object):
"""Service for configuring logs-based metrics."""
SERVICE_ADDRESS = 'logging.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_log_metrics': _PageDesc('page_token', 'next_page_token',
'metrics')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_METRIC_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/metrics/{metric}')
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
@classmethod
def metric_path(cls, project, metric):
"""Returns a fully-qualified metric resource name string."""
return cls._METRIC_PATH_TEMPLATE.render({
'project': project,
'metric': metric,
})
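    # For reference, the templates above render and parse strings like
    # 'projects/my-project' and 'projects/my-project/metrics/my-metric';
    # e.g. metric_path('my-project', 'my-metric')
    #     -> 'projects/my-project/metrics/my-metric' (values illustrative).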
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
@classmethod
def match_project_from_metric_name(cls, metric_name):
"""Parses the project from a metric resource.
Args:
metric_name (string): A fully-qualified path representing a metric
resource.
Returns:
A string representing the project.
"""
return cls._METRIC_PATH_TEMPLATE.match(metric_name).get('project')
@classmethod
def match_metric_from_metric_name(cls, metric_name):
"""Parses the metric from a metric resource.
Args:
metric_name (string): A fully-qualified path representing a metric
resource.
Returns:
A string representing the metric.
"""
return cls._METRIC_PATH_TEMPLATE.match(metric_name).get('metric')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A MetricsServiceV2Client object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-logging-v2', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'metrics_service_v2_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.logging.v2.MetricsServiceV2',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.metrics_service_v2_stub = config.create_stub(
logging_metrics_pb2.MetricsServiceV2Stub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._list_log_metrics = api_callable.create_api_call(
self.metrics_service_v2_stub.ListLogMetrics,
settings=defaults['list_log_metrics'])
self._get_log_metric = api_callable.create_api_call(
self.metrics_service_v2_stub.GetLogMetric,
settings=defaults['get_log_metric'])
self._create_log_metric = api_callable.create_api_call(
self.metrics_service_v2_stub.CreateLogMetric,
settings=defaults['create_log_metric'])
self._update_log_metric = api_callable.create_api_call(
self.metrics_service_v2_stub.UpdateLogMetric,
settings=defaults['update_log_metric'])
self._delete_log_metric = api_callable.create_api_call(
self.metrics_service_v2_stub.DeleteLogMetric,
settings=defaults['delete_log_metric'])
# Service calls
def list_log_metrics(self, parent, page_size=0, options=None):
"""
Lists logs-based metrics.
Example:
>>> from google.cloud.gapic.logging.v2 import metrics_service_v2_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metrics_service_v2_client.MetricsServiceV2Client()
>>> parent = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_log_metrics(parent):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_log_metrics(parent, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
parent (string): Required. The name of the project containing the metrics:
::
\"projects/[PROJECT_ID]\"
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = logging_metrics_pb2.ListLogMetricsRequest(
parent=parent, page_size=page_size)
return self._list_log_metrics(request, options)
def get_log_metric(self, metric_name, options=None):
"""
Gets a logs-based metric.
Example:
>>> from google.cloud.gapic.logging.v2 import metrics_service_v2_client
>>> api = metrics_service_v2_client.MetricsServiceV2Client()
>>> metric_name = api.metric_path('[PROJECT]', '[METRIC]')
>>> response = api.get_log_metric(metric_name)
Args:
metric_name (string): The resource name of the desired metric:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = logging_metrics_pb2.GetLogMetricRequest(
metric_name=metric_name)
return self._get_log_metric(request, options)
def create_log_metric(self, parent, metric, options=None):
"""
Creates a logs-based metric.
Example:
>>> from google.cloud.gapic.logging.v2 import metrics_service_v2_client
>>> from google.cloud.proto.logging.v2 import logging_metrics_pb2
>>> api = metrics_service_v2_client.MetricsServiceV2Client()
>>> parent = api.project_path('[PROJECT]')
>>> metric = logging_metrics_pb2.LogMetric()
>>> response = api.create_log_metric(parent, metric)
Args:
parent (string): The resource name of the project in which to create the metric:
::
\"projects/[PROJECT_ID]\"
The new metric must be provided in the request.
metric (:class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric`): The new logs-based metric, which must not have an identifier that
already exists.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = logging_metrics_pb2.CreateLogMetricRequest(
parent=parent, metric=metric)
return self._create_log_metric(request, options)
def update_log_metric(self, metric_name, metric, options=None):
"""
Creates or updates a logs-based metric.
Example:
>>> from google.cloud.gapic.logging.v2 import metrics_service_v2_client
>>> from google.cloud.proto.logging.v2 import logging_metrics_pb2
>>> api = metrics_service_v2_client.MetricsServiceV2Client()
>>> metric_name = api.metric_path('[PROJECT]', '[METRIC]')
>>> metric = logging_metrics_pb2.LogMetric()
>>> response = api.update_log_metric(metric_name, metric)
Args:
metric_name (string): The resource name of the metric to update:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
The updated metric must be provided in the request and it's
``name`` field must be the same as ``[METRIC_ID]`` If the metric
does not exist in ``[PROJECT_ID]``, then a new metric is created.
metric (:class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric`): The updated metric.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.logging.v2.logging_metrics_pb2.LogMetric` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = logging_metrics_pb2.UpdateLogMetricRequest(
metric_name=metric_name, metric=metric)
return self._update_log_metric(request, options)
def delete_log_metric(self, metric_name, options=None):
"""
Deletes a logs-based metric.
Example:
>>> from google.cloud.gapic.logging.v2 import metrics_service_v2_client
>>> api = metrics_service_v2_client.MetricsServiceV2Client()
>>> metric_name = api.metric_path('[PROJECT]', '[METRIC]')
>>> api.delete_log_metric(metric_name)
Args:
metric_name (string): The resource name of the metric to delete:
::
\"projects/[PROJECT_ID]/metrics/[METRIC_ID]\"
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = logging_metrics_pb2.DeleteLogMetricRequest(
metric_name=metric_name)
self._delete_log_metric(request, options)
| ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/google/cloud/gapic/logging/v2/metrics_service_v2_client.py | Python | mit | 16,784 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.template import Template, Context
from django.utils import six
from powerpages.models import Page
class TemplateTagsTestCase(TestCase):
maxDiff = None
def test_page_url_alias(self):
Page.objects.create(url='/test-page/', alias='test_page')
template = Template(
'{% load powerpages_tags %}{% page_url test_page %}'
)
context = Context()
output = template.render(context)
self.assertEqual(output, '/test-page/')
def test_page_url_django_view(self):
Page.objects.create(url='/test-page/', alias='test_page')
template = Template(
'{% load powerpages_tags %}{% page_url page path="test-page/" %}'
)
context = Context()
output = template.render(context)
self.assertEqual(output, '/test-page/')
def test_current_page_info_edit_mode_enabled(self):
Page.objects.create(
url='/', alias='home', title="Home Page",
template='{% current_page_info %}'
)
session = self.client.session
session['WEBSITE_EDIT_MODE'] = True
session.save()
response = self.client.get('/')
content = response.content
if not isinstance(content, six.text_type):
content = content.decode('utf-8')
self.assertEqual(
content,
'''
<link rel="stylesheet" href="/static/powerpages/css/current_page_info.css">
<div class="current-page-info">
<h5>Page Information:</h5>
<ul>
<li><span>Name: </span><strong>Home Page</strong></li>
<li><span>Alias: </span><strong>home</strong></li>
<li><span>ID: </span><strong>1</strong></li>
<li><a href="/admin/powerpages/page/1/change/">edit in Admin »</a></li>
<li>
<a href="/powerpages-admin/switch-edit-mode/">disable Edit Mode »</a>
</li>
</ul>
</div>
'''.strip().replace('\n', '')
)
def test_current_page_info_edit_mode_disabled(self):
Page.objects.create(
url='/', alias='home', title="Home Page",
template='{% current_page_info %}'
)
# NO EDIT MODE ENABLED
response = self.client.get('/')
content = response.content
if not isinstance(content, six.text_type):
content = content.decode('utf-8')
self.assertEqual(content, '')
| Open-E-WEB/django-powerpages | powerpages/tests/test_template_tags.py | Python | mit | 2,434 |
from helpers import *
import org.bukkit.Bukkit as Bukkit
from java.util.UUID import fromString as juuid
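# Maps a sender's UUID string to the UUID string of the player their chat
# is redirected to while the toggle is active.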
toggle_dict = {}
permission = "utils.pmtoggle"
@hook.command("pmtoggle",
aliases = ["tm", "mt", "tmsg", "msgt", "pmt", "tpm"],
usage = "/<command> [player]",
description = "Toggle automatic sending of messages"
)
def on_toggle_message_command(sender, command, label, args):
if not sender.hasPermission(permission) or not is_player(sender):
noperm(sender)
return True
plugin_header(sender, "Private Message Toggle")
uuid = uid(sender)
if len(args) > 0:
if len(args) > 1:
msg(sender, "&cToo many arguments!")
return True
target = Bukkit.getPlayer(args[0])
if target:
toggle_dict[uuid] = uid(target)
msg(sender, "&2Enabled toggle so that you're now sending only to %s &2by default" % target.getDisplayName())
else:
msg(sender, "&cThat player could not be found")
elif uuid in toggle_dict:
del toggle_dict[uuid]
msg(sender, "&2Disabled toggle successfully")
else:
msg(sender, "&cExpected a player as argument")
return True
@hook.event("player.AsyncPlayerChatEvent", "normal")
def on_chat(event):
if event.isCancelled():
return
player = event.getPlayer()
uuid = uid(player)
if uuid in toggle_dict:
event.setCancelled(True)
target = Bukkit.getPlayer(juuid(toggle_dict[uuid])).getName()
runas(player, "msg %s %s" % (target, event.getMessage()))
@hook.event("player.PlayerQuitEvent", "normal")
def on_quit(event):
uuid = uid(event.getPlayer())
if uuid in toggle_dict:
del toggle_dict[uuid]
for pid in list(toggle_dict):
if toggle_dict[pid] == uuid:
del toggle_dict[pid]
msg(Bukkit.getPlayer(juuid(pid)), "%s &cwent off so your Private Message Toggle has been disabled!" % Bukkit.getPlayer(juuid(uuid)).getDisplayName())
| RedstonerServer/redstoner-utils | pmtoggle.py | Python | mit | 2,037 |
from Animations import alternation, bouncing_lazer, glimmer, glow
from bibliopixel.drivers.LPD8806 import *
from bibliopixel import led
class LEDManager():
def __init__(self):
self._driver = DriverLPD8806(
160, c_order=ChannelOrder.RGB, use_py_spi=True, dev="/dev/spidev0.0", SPISpeed=2)
self._leds = led.LEDStrip(self._driver, threadedUpdate=True)
self.current_anim = None
self.current_color = (0, 0, 0)
self.animations = {
'Glow': glow.glow(led=self._leds),
'Glimmer': glimmer.glimmer(led=self._leds),
'Alternation': alternation.alternation(led=self._leds),
'Bouncing Lazer': bouncing_lazer.bouncing_lazer(led=self._leds)
}
def setAnimation(self, animation):
        if self.current_anim:
self.current_anim.stopThread()
self.clear()
self.current_color = None
self.current_anim = self.animations[animation]
self.current_anim.run(threaded=True, fps=30)
def setColor(self, color):
        if self.current_anim:
self.current_anim.stopThread()
self.clear()
self.current_anim = None
self.current_color = color
self._leds.fill(color)
self._leds.update()
def clear(self):
self._leds.fill((0, 0, 0))
self._leds.update()
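# A minimal usage sketch (illustrative only; assumes the LPD8806 strip and
# the Animations package imported above are wired up):
#
#   manager = LEDManager()
#   manager.setColor((255, 0, 0))   # solid red fill
#   manager.setAnimation('Glow')    # stops the fill, starts a threaded animation
#   manager.clear()                 # blank the strip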
| KyleKaniecki/CarPC | LEDs/LEDManager.py | Python | mit | 1,365 |
from django.db import models
class Way(models.Model):
steps = models.IntegerField(default=0)
duration = models.IntegerField(default=0)
class DailySteps(models.Model):
day = models.CharField(max_length=8)
steps = models.IntegerField(default=0)
| Kesel/django | jsontest/models.py | Python | mit | 263 |
import entity
from symlib.bit import MAGNETIC, THERMAL
from symlib.terminal import ITERM
from symlib.limit import LSW_NO, LSW_NO_LINE_END, LSW_NO_LINE_INTERSECT
import config as cfg
class OL(entity.CodedSymbol):
min_pole = 1
max_pole = 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def generate(self):
return [
ITERM(),
THERMAL().translate(xoff=20, yoff=0),
ITERM().translate(xoff=60, yoff=0)
]
class CB(entity.CodedSymbol):
min_pole = 1
max_pole = 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def generate(self):
return [
ITERM(left=True, right=False),
entity.Arc.from_crse(center=entity.Point(30, -5), radius=25, start=37, end=143),
ITERM(left=False, right=True).translate(xoff=40, yoff=0)
]
def generate_multipole(self, poles=1):
entities = self.generate_multipole_basic(poles=poles)
entities.append(entity.Line(start=entity.Point(30, 20),
end=entity.Point(30, 20 + (cfg.POLE_OFFSET * (poles - 1))),
linetype='PHANTOM'))
return entities
class MDS(entity.CodedSymbol):
min_pole = 3
max_pole = 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.translate(xoff=0, yoff=-60)
self.rotate(90)
def generate(self):
return [
LSW_NO()
]
def generate_multipole(self, poles=1):
entities = self.generate_multipole_basic(poles=poles)
entities.append(entity.PolyLine(points=[
entity.Point(*LSW_NO_LINE_INTERSECT),
entity.Point(LSW_NO_LINE_INTERSECT[0], LSW_NO_LINE_INTERSECT[1] + cfg.POLE_OFFSET * (poles - 0.5)),
entity.Point(LSW_NO_LINE_END[0], LSW_NO_LINE_END[1] + cfg.POLE_OFFSET * (poles - 0.5)),
], closed=False, linetype='PHANTOM'))
return entities
# TODO : Check on if the CB is 180 degrees off because it feels backwards
class CBMDS(entity.CodedSymbol):
min_pole = 3
max_pole = 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.translate(xoff=0, yoff=-60)
self.rotate(90)
def generate(self):
return [
CB()
]
def generate_multipole(self, poles=1):
entities = self.generate_multipole_basic(poles=poles)
entities.append(entity.PolyLine(points=[
entity.Point(30, 20),
entity.Point(LSW_NO_LINE_INTERSECT[0], LSW_NO_LINE_INTERSECT[1] + cfg.POLE_OFFSET * (poles - 0.5)),
entity.Point(LSW_NO_LINE_END[0], LSW_NO_LINE_END[1] + cfg.POLE_OFFSET * (poles - 0.5)),
], closed=False, linetype='PHANTOM'))
return entities
| kozbot/kecb | symlib/protect.py | Python | mit | 2,865 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
setup_requirements = [
'pytest-runner',
# TODO(00krishna): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='data_gather',
version='0.1.0',
description="pull satellite imagery and other data from the web for use in the datakind mining project.",
long_description=readme + '\n\n' + history,
author="krishna bhogaonker",
author_email='cyclotomiq@gmail.com',
url='https://github.com/00krishna/mining_data_acquisition',
packages=find_packages(include=['data_gather']),
entry_points={
'console_scripts': [
'data_gather=data_gather.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='data_gather',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| krishnab-datakind/mining-data-acquisition | setup.py | Python | mit | 1,667 |
## pygame - Python Game Library
## Copyright (C) 2007 Marcus von Appen
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Marcus von Appen
## mva@sysfault.org
"""pygame module for accessing surface pixel data using numpy
Functions to convert pixel data between pygame Surfaces and Numpy
arrays. This module will only be available when pygame can use the
external Numpy package.
Note that numpyarray is an optional module. It requires Numpy to be
installed; if Numpy is missing, an exception is raised when the module
is used, e.g. ImportError: no module named numpy.
Every pixel is stored as a single integer value to represent the red,
green, and blue colors. The 8bit images use a value that looks into a
colormap. Pixels with higher depth use a bit packing process to place
three or four values into a single number.
The Numpy arrays are indexed by the X axis first, followed by the Y
axis. Arrays that treat the pixels as a single integer are referred to
as 2D arrays. This module can also separate the red, green, and blue
color values into separate indices. These types of arrays are referred
to as 3D arrays, and the last index is 0 for red, 1 for green, and 2 for
blue.
In contrast to Numeric, Numpy uses unsigned 16-bit integers; images
with 16-bit data will be treated as unsigned integers.
"""
import pygame
from pygame.compat import bytes_
from pygame.surfarray import blit_array
import numpy
def array2d (surface):
"""pygame.numpyarray.array2d (Surface): return array
copy pixels into a 2d array
Copy the pixels from a Surface into a 2D array. The bit depth of the
surface will control the size of the integer values, and will work
for any type of pixel format.
This function will temporarily lock the Surface as pixels are copied
(see the Surface.lock - lock the Surface memory for pixel access
method).
"""
bpp = surface.get_bytesize ()
if bpp <= 0 or bpp > 4:
raise ValueError("unsupported bit depth for 2D array")
size = surface.get_size ()
width, height = size
# Taken from Alex Holkner's pygame-ctypes package. Thanks a lot.
data = numpy.frombuffer (surface.get_buffer (), numpy.uint8)
pitch = surface.get_pitch ()
row_size = width * bpp
if pitch != row_size:
data.shape = (height, pitch)
data = data[:, 0:row_size]
dtype = (None, numpy.uint8, numpy.uint16, numpy.int32, numpy.int32)[bpp]
array = numpy.zeros (size, dtype, 'F')
array_data = numpy.frombuffer (array, numpy.uint8)
if bpp == 3:
data.shape = (height, width, 3)
array_data.shape = (height, width, 4)
array_data[:,:,:3] = data[...]
else:
data.shape = (height, row_size)
array_data.shape = (height, row_size)
array_data[...] = data[...]
return array
def pixels2d (surface):
"""pygame.numpyarray.pixels2d (Surface): return array
reference pixels into a 2d array
Create a new 2D array that directly references the pixel values in a
Surface. Any changes to the array will affect the pixels in the
Surface. This is a fast operation since no data is copied.
Pixels from a 24-bit Surface cannot be referenced, but all other
Surface bit depths can.
The Surface this references will remain locked for the lifetime of
the array (see the Surface.lock - lock the Surface memory for pixel
access method).
"""
bpp = surface.get_bytesize ()
if bpp == 3 or bpp < 1 or bpp > 4:
raise ValueError("unsupported bit depth for 2D reference array")
typecode = (numpy.uint8, numpy.uint16, None, numpy.int32)[bpp - 1]
array = numpy.frombuffer (surface.get_buffer (), typecode)
array.shape = surface.get_height (), surface.get_pitch () / bpp
# Padding correction for certain depth due to padding bytes.
array = array[:,:surface.get_width ()]
array = numpy.transpose (array)
return array
def array3d (surface):
"""pygame.numpyarray.array3d (Surface): return array
copy pixels into a 3d array
Copy the pixels from a Surface into a 3D array. The bit depth of the
surface will control the size of the integer values, and will work
for any type of pixel format.
This function will temporarily lock the Surface as pixels are copied
(see the Surface.lock - lock the Surface memory for pixel access
method).
"""
bpp = surface.get_bytesize ()
array = array2d (surface)
    # Taken from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
if bpp == 1:
palette = surface.get_palette ()
# Resolve the correct values using the color palette
pal_r = numpy.array ([c[0] for c in palette])
pal_g = numpy.array ([c[1] for c in palette])
pal_b = numpy.array ([c[2] for c in palette])
planes = [numpy.choose (array, pal_r),
numpy.choose (array, pal_g),
numpy.choose (array, pal_b)]
array = numpy.array (planes, numpy.uint8)
array = numpy.transpose (array, (1, 2, 0))
return array
elif bpp == 2:
# Taken from SDL_GetRGBA.
masks = surface.get_masks ()
shifts = surface.get_shifts ()
losses = surface.get_losses ()
vr = (array & masks[0]) >> shifts[0]
vg = (array & masks[1]) >> shifts[1]
vb = (array & masks[2]) >> shifts[2]
planes = [(vr << losses[0]) + (vr >> (8 - (losses[0] << 1))),
(vg << losses[1]) + (vg >> (8 - (losses[1] << 1))),
(vb << losses[2]) + (vb >> (8 - (losses[2] << 1)))]
array = numpy.array (planes, numpy.uint8)
return numpy.transpose (array, (1, 2, 0))
else:
masks = surface.get_masks ()
shifts = surface.get_shifts ()
losses = surface.get_losses ()
planes = [((array & masks[0]) >> shifts[0]), # << losses[0], Assume 0
((array & masks[1]) >> shifts[1]), # << losses[1],
((array & masks[2]) >> shifts[2])] # << losses[2]]
array = numpy.array (planes, numpy.uint8)
return numpy.transpose (array, (1, 2, 0))
def pixels3d (surface):
"""pygame.numpyarray.pixels3d (Surface): return array
reference pixels into a 3d array
Create a new 3D array that directly references the pixel values in a
Surface. Any changes to the array will affect the pixels in the
Surface. This is a fast operation since no data is copied.
This will only work on Surfaces that have 24-bit or 32-bit
formats. Lower pixel formats cannot be referenced.
The Surface this references will remain locked for the lifetime of
the array (see the Surface.lock - lock the Surface memory for pixel
access method).
"""
bpp = surface.get_bytesize ()
if bpp < 3 or bpp > 4:
raise ValueError("unsupported bit depth for 3D reference array")
lilendian = pygame.get_sdl_byteorder () == pygame.LIL_ENDIAN
start = 0
step = 0
# Check for RGB or BGR surface.
shifts = surface.get_shifts ()
if shifts[0] == 16 and shifts[1] == 8 and shifts[2] == 0:
# RGB
if lilendian:
start = 2
step = -1
else:
start = 0
step = 1
elif shifts[2] == 16 and shifts[1] == 8 and shifts[0] == 0:
# BGR
if lilendian:
start = 0
step = 1
else:
start = 2
step = -1
else:
raise ValueError("unsupported colormasks for 3D reference array")
if bpp == 4 and not lilendian:
start += 1
array = numpy.ndarray \
(shape=(surface.get_width (), surface.get_height (), 3),
dtype=numpy.uint8, buffer=surface.get_buffer (),
offset=start, strides=(bpp, surface.get_pitch (),step))
return array
def array_alpha (surface):
"""pygame.numpyarray.array_alpha (Surface): return array
copy pixel alphas into a 2d array
Copy the pixel alpha values (degree of transparency) from a Surface
into a 2D array. This will work for any type of Surface
format. Surfaces without a pixel alpha will return an array with all
opaque values.
This function will temporarily lock the Surface as pixels are copied
(see the Surface.lock - lock the Surface memory for pixel access
method).
"""
if (surface.get_bytesize () == 1 or
surface.get_alpha () is None or
surface.get_masks ()[3] == 0):
# 1 bpp surfaces and surfaces without per-pixel alpha are always
# fully opaque.
array = numpy.empty (surface.get_width () * surface.get_height (),
numpy.uint8)
array.fill (0xff)
array.shape = surface.get_width (), surface.get_height ()
return array
array = array2d (surface)
if surface.get_bytesize () == 2:
# Taken from SDL_GetRGBA.
va = (array & surface.get_masks ()[3]) >> surface.get_shifts ()[3]
array = ((va << surface.get_losses ()[3]) +
(va >> (8 - (surface.get_losses ()[3] << 1))))
else:
# Taken from _numericsurfarray.c.
array = array >> surface.get_shifts ()[3] << surface.get_losses ()[3]
array = array.astype (numpy.uint8)
return array
def pixels_alpha (surface):
"""pygame.numpyarray.pixels_alpha (Surface): return array
reference pixel alphas into a 2d array
Create a new 2D array that directly references the alpha values
(degree of transparency) in a Surface. Any changes to the array will
affect the pixels in the Surface. This is a fast operation since no
data is copied.
This can only work on 32-bit Surfaces with a per-pixel alpha value.
The Surface this array references will remain locked for the
lifetime of the array.
"""
if surface.get_bytesize () != 4:
raise ValueError("unsupported bit depth for alpha reference array")
lilendian = pygame.get_sdl_byteorder () == pygame.LIL_ENDIAN
# ARGB surface.
start = 0
if surface.get_shifts ()[3] == 24 and lilendian:
# RGBA surface.
start = 3
elif surface.get_shifts ()[3] == 0 and not lilendian:
start = 3
else:
raise ValueError("unsupported colormasks for alpha reference array")
array = numpy.ndarray \
(shape=(surface.get_width (), surface.get_height ()),
dtype=numpy.uint8, buffer=surface.get_buffer (),
offset=start, strides=(4, surface.get_pitch ()))
return array
def array_colorkey (surface):
"""pygame.numpyarray.array_colorkey (Surface): return array
copy the colorkey values into a 2d array
Create a new array with the colorkey transparency value from each
pixel. If the pixel matches the colorkey it will be fully
transparent; otherwise it will be fully opaque.
This will work on any type of Surface format. If the image has no
colorkey a solid opaque array will be returned.
This function will temporarily lock the Surface as pixels are
copied.
"""
colorkey = surface.get_colorkey ()
    if colorkey is None:
# No colorkey, return a solid opaque array.
array = numpy.empty (surface.get_width () * surface.get_height (),
numpy.uint8)
array.fill (0xff)
array.shape = surface.get_width (), surface.get_height ()
return array
    # Taken from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
array = array2d (surface)
# Check each pixel value for the colorkey and mark it as opaque or
# transparent as needed.
val = surface.map_rgb (colorkey)
array = numpy.choose (numpy.equal (array, val),
(numpy.uint8 (0xff), numpy.uint8 (0)))
array.shape = surface.get_width (), surface.get_height ()
return array
def make_surface (array):
"""pygame.numpyarray.make_surface (array): return Surface
copy an array to a new surface
Create a new Surface that best resembles the data and format on the
array. The array can be 2D or 3D with any sized integer values.
"""
    # Taken from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
bpp = 0
r = g = b = 0
shape = array.shape
if len (shape) == 2:
# 2D array
bpp = 8
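        # 3-3-2 RGB mask layout for an 8-bit surface: r=0xE0, g=0x1C, b=0x03.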
r = 0xFF >> 6 << 5
g = 0xFF >> 5 << 2
b = 0xFF >> 6
elif len (shape) == 3 and shape[2] == 3:
bpp = 32
r = 0xff << 16
g = 0xff << 8
b = 0xff
else:
raise ValueError("must be a valid 2d or 3d array")
surface = pygame.Surface ((shape[0], shape[1]), 0, bpp, (r, g, b, 0))
blit_array (surface, array)
return surface
def map_array (surface, array):
"""pygame.numpyarray.map_array (Surface, array3d): return array2d
map a 3d array into a 2d array
Convert a 3D array into a 2D array. This will use the given Surface
format to control the conversion. Palette surface formats are not
supported.
Note: arrays do not need to be 3D, as long as the minor axis has
three elements giving the component colours, any array shape can be
used (for example, a single colour can be mapped, or an array of
colours).
"""
    # Taken from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
bpp = surface.get_bytesize ()
if bpp <= 1 or bpp > 4:
raise ValueError("unsupported bit depth for surface array")
shape = array.shape
if shape[-1] != 3:
raise ValueError("array must be a 3d array of 3-value color data")
shifts = surface.get_shifts ()
losses = surface.get_losses ()
if array.dtype != numpy.int32:
array = array.astype(numpy.int32)
out = array[...,0] >> losses[0] << shifts[0]
out[...] |= array[...,1] >> losses[1] << shifts[1]
out[...] |= array[...,2] >> losses[2] << shifts[2]
if surface.get_flags() & pygame.SRCALPHA:
out[...] |= numpy.int32(255) >> losses[3] << shifts[3]
return out
| dev-coop/plithos | src/plithos/numpy_surfarray.py | Python | mit | 14,740 |
# Copyright (c) 2006-2021 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, unicode_literals
import pickle
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from io import BytesIO, TextIOWrapper
import pytest
from pybtex.database import parse_bytes, parse_string, BibliographyData, Entry
from pybtex.plugin import find_plugin
from .data import reference_data
class DatabaseIO(object):
__metaclass__ = ABCMeta
def __init__(self):
self.reference_data = deepcopy(reference_data)
assert reference_data.entries
assert reference_data.preamble
@abstractmethod
def serialize(self, bib_data):
pass
@abstractmethod
def deserialize(self, bib_data):
pass
class PybtexDatabaseIO(DatabaseIO):
def __init__(self, bib_format):
super(PybtexDatabaseIO, self).__init__()
self.bib_format = bib_format
self.writer = find_plugin('pybtex.database.output', bib_format)(encoding='UTF-8')
self.parser = find_plugin('pybtex.database.input', bib_format)(encoding='UTF-8')
if bib_format == 'bibtexml':
# BibTeXML does not support TeX preambles
self.reference_data._preamble = []
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.bib_format)
class PybtexStreamIO(PybtexDatabaseIO):
def serialize(self, bib_data):
stream = BytesIO()
unicode_stream = TextIOWrapper(stream, 'UTF-8')
self.writer.write_stream(bib_data, unicode_stream if self.writer.unicode_io else stream)
unicode_stream.flush()
stream.seek(0)
return unicode_stream
def deserialize(self, stream):
parser_stream = stream if self.parser.unicode_io else stream.buffer
return self.parser.parse_stream(parser_stream)
class PybtexStringIO(PybtexDatabaseIO):
def serialize(self, bib_data):
result = bib_data.to_string(self.bib_format)
assert isinstance(result, str)
return result
def deserialize(self, string):
# wrapper for parse_string
return BibliographyData.from_string(string, self.bib_format)
class PybtexEntryStringIO(PybtexDatabaseIO):
# the first entry in reference_data
def __init__(self, bib_format):
super(PybtexEntryStringIO, self).__init__(bib_format)
        # use the first entry in reference_data as the test payload
        self.key = list(reference_data.entries.keys())[0]
        # a single Entry (not a whole BibliographyData) is round-tripped here
        self.reference_data = reference_data.entries[self.key]
assert reference_data.entries
assert reference_data.preamble
def serialize(self, bib_data): # Entry.to_string
result = bib_data.to_string(self.bib_format)
assert isinstance(result, str)
return result
def deserialize(self, string): # Entry.from_string
return Entry.from_string(string, self.bib_format)
class PybtexBytesIO(PybtexDatabaseIO):
def serialize(self, bib_data):
result = bib_data.to_bytes(self.bib_format)
assert isinstance(result, bytes)
return result
def deserialize(self, string):
return parse_bytes(string, self.bib_format)
class PickleIO(DatabaseIO):
def __init__(self, protocol):
super(PickleIO, self).__init__()
self.protocol = protocol
def __repr__(self):
return '{}(protocol={!r})'.format(type(self).__name__, self.protocol)
def serialize(self, bib_data):
return pickle.dumps(bib_data, protocol=self.protocol)
def deserialize(self, pickled_data):
return pickle.loads(pickled_data)
class ReprEvalIO(DatabaseIO):
def __repr__(self):
return '{}()'.format(type(self).__name__)
def serialize(self, bib_data):
return repr(bib_data)
def deserialize(self, repr_value):
from pybtex.utils import OrderedCaseInsensitiveDict
from pybtex.database import BibliographyData, Entry, Person
return eval(repr_value, {
'OrderedCaseInsensitiveDict': OrderedCaseInsensitiveDict,
'BibliographyData': BibliographyData,
'Entry': Entry,
'Person': Person,
})
def check_database_io(io_obj):
serialized_data = io_obj.serialize(io_obj.reference_data)
deserialized_data = io_obj.deserialize(serialized_data)
assert deserialized_data == io_obj.reference_data
@pytest.mark.parametrize(["io_cls"], [(PybtexStreamIO,), (PybtexStringIO,), (PybtexEntryStringIO,), (PybtexBytesIO,)])
@pytest.mark.parametrize(["bib_format"], [("bibtex",), ("bibtexml",), ("yaml",)])
def test_database_io(io_cls, bib_format):
check_database_io(io_cls(bib_format))
@pytest.mark.parametrize(
["protocol"],
[(protocol,) for protocol in range(0, pickle.HIGHEST_PROTOCOL + 1)]
)
def test_database_pickling(protocol):
check_database_io(PickleIO(protocol))
def test_database_repr():
check_database_io(ReprEvalIO())
| live-clones/pybtex | tests/database_test/database_test.py | Python | mit | 5,978 |
#!/usr/bin/env python2.7
# By Dan Russell, November 2016
"""
Requires python2.7+ for the argparse module.
Arguments:
fastq # Required. The file of reads in fastq format.
genome_name # Optional. Specify the name of this genome. Default will be the input fastq file before any special characters.
num_reads # Optional. Number of reads to try assembling. Default: 80000.
adapter_list # Optional. Specify a file of adapters to use. Default: Adapters.txt
"""
PATH_TO_NEWBLER = "~/454/bin/"
PATH_TO_ACEUTIL = "~/phageAssembler/AceUtil"
DEFAULT_NUM_READS = 80000
DEFAULT_READ_LENGTH = 140
DEFAULT_ADAPTER_LIST = "~/phageAssembler/Adapters/Adapters.fasta"
DEFAULT_BLAST_DATABASE = "~/phageAssembler/BLASTdbs/Actino_DB"
#from datetime import datetime
import subprocess
import argparse
import sys
import os
import re
from shutil import copy, move
from Bio import SeqIO
from Bio.SeqUtils import GC
# Make parser for options listed above
parser = argparse.ArgumentParser(description='A script to assemble phage genomes.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fastq', help="A file of reads in fastq format.")
parser.add_argument('-g','--genome_name', help="The name of the genome you are assembling. Default will be name of the fastq before any special characters.")
parser.add_argument('-n','--num_reads', help="The number of reads to try assembling.", type=int, default=DEFAULT_NUM_READS)
parser.add_argument('-l','--avg_read_length', help="Average read length used to calculate coverage.", type=int, default=DEFAULT_READ_LENGTH)
parser.add_argument('-a','--adapter_list', help="A fasta-formatted file of adapter sequences to be trimmed. Default file contains TruSeq Illumina adapters.", default=DEFAULT_ADAPTER_LIST)
parser.add_argument('-c','--reads_percent_cutoff', help="Contigs with more than this percentage of assembled reads will be blasted and AceUtiled.", type=int, default=2)
# Parse command-line options and save values
args = parser.parse_args()
fastq = args.fastq
if not os.path.isfile(fastq):
sys.exit("ERROR: Couldn't find the file %s" % fastq)
if args.genome_name:
genome_name = args.genome_name
else:
    genome_name = re.split(r'\W+|_', os.path.basename(args.fastq))[0]
num_reads = args.num_reads
avg_read_length = args.avg_read_length
reads_percent_cutoff = args.reads_percent_cutoff
if args.adapter_list:
adapter_list = args.adapter_list
else:
adapter_list = DEFAULT_ADAPTER_LIST
#Log file stuff
log_file_name = '%s_phageAssembler.log' % genome_name
log_file = open(log_file_name,'w')
def printlog(message):
print message
log_file.write(message + "\n")
#Input
printlog("\n***INPUT***")
printlog("\tGenome: %s" % genome_name)
printlog("\tWill assemble %s reads from the file %s." % (str(num_reads), fastq))
printlog("\tWill trim reads using adapters found in %s." % adapter_list)
def wc(filename):
from subprocess import check_output
return int(check_output(["wc", "-l", filename]).split()[0])
total_fastq_reads = wc(fastq)
def subsample_fastq_file(filename, number_of_reads, head_tail="head", new_file_if_all_reads=False):
total_reads = wc(filename)/4
if total_reads < number_of_reads:
printlog("\tFewer reads in file than total requested.")
if new_file_if_all_reads:
new_filename = '%s_All_%sk_Reads.fastq' % (genome_name, str(total_reads/1000))
copy(filename,new_filename)
printlog("\tCreated new file, %s, with all reads for assembly." % new_filename)
return new_filename
else:
printlog("\tWill use entire original file, %s, for assembly." % filename)
return filename
new_filename = '%s_%skReads.fastq' % (genome_name, str(num_reads/1000))
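    # A fastq record is 4 lines (header, sequence, '+', qualities), so take
    # num_reads*4 lines from the top of the file.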
subprocess.call('head -n %s %s > %s' % (str(num_reads*4),filename,new_filename), shell=True)
printlog("\tCreated new file %s with %s reads." % (new_filename, str(num_reads)))
return new_filename
#Downsample Fastq
printlog("\n***CREATING FASTQ FOR ASSEMBLY***")
assembly_fastq = subsample_fastq_file(fastq, num_reads)
printlog("\tReads file for Newbler: %s" % assembly_fastq)
def run_newbler(projectname,adaptersfile,fastqfile):
newbler_command = "%srunAssembly -o %s -vt %s -consed %s >> %s" % (PATH_TO_NEWBLER, projectname, adaptersfile, fastqfile, log_file_name)
# printlog("\tWill run: %s" % newbler_command)
subprocess.call(newbler_command, shell=True)
printlog("\n***ASSEMBLING WITH NEWBLER***")
log_file.close()
run_newbler(genome_name,adapter_list,assembly_fastq)
log_file = open(log_file_name,'a')
printlog("\tNewbler assembly complete.")
#Tidy Up Directories
printlog("\n***CLEANING UP DIRS***")
cwd = os.getcwd()
project_dir = cwd + "/%s" % genome_name
consed_dir = project_dir + "/consed"
subprocess.call(["rm","%s/sff_dir" % consed_dir])
printlog("\tRemoved fake sff_dir.")
subprocess.call(["mkdir", "%s/solexa_dir" % consed_dir])
printlog("\tCreated new solexa_dir in consed folder to hold reads file.")
move(assembly_fastq,"%s/solexa_dir/" % consed_dir)
printlog("\tMoved %s to new solexa_dir." % assembly_fastq)
os.chdir("./%s/" % genome_name)
def parse_metrics(metricsfile):
f = open(metricsfile,'r')
metrics = {}
large_done = False
for line in f:
if 'numberOfContigs' in line:
if large_done:
metrics['all_contigs'] = int(line.strip().split()[-1].split(';')[-2])
else:
metrics['large_contigs'] = int(line.strip().split()[-1].split(';')[-2])
large_done = True
if 'largestContigSize' in line:
metrics['largest_contig'] = int(line.strip().split()[-1].split(';')[-2])
if 'numberAssembled' in line:
metrics['aligned_reads'] = int(line.split()[2].split(',')[0])
f.close()
return metrics
def parse_contigs(contigsfile,totalreads):
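    # Each returned contig is [name, length, n_reads, % of assembled reads,
    # approx. fold coverage, estimated fold coverage over the entire fastq].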
f = open(contigsfile,'r')
contigs = []
for line in f:
if line[0] == ">":
contig = line[1:].rstrip().split()
contig[1] = int(contig[1].split('=')[1])
contig[2] = int(contig[2].split('=')[1])
contig.append(round(100*float(contig[2])/totalreads,1))
contig.append(round(avg_read_length*float(contig[2])/contig[1],1))
contig.append(int(round(avg_read_length*float(total_fastq_reads/4)*contig[3]/(100*contig[1]))))
contigs.append(contig)
f.close()
return contigs
#Show Assembly Results
printlog("\n***ASSEMBLY RESULTS***")
assembly_metrics = parse_metrics("%s/454NewblerMetrics.txt" % project_dir )
contig_list = parse_contigs("%s/454AllContigs.fna" % project_dir,assembly_metrics['aligned_reads'])
total_contigs = len(contig_list)
printlog("\t%s total contigs." % str(total_contigs))
contigs_to_blast = []
if total_contigs > 20:
printlog("\tThe first 20 are (Name, length in bp, # reads, ~coverage, % reads):")
i=0
while i<20:
out_string = "\t\t%s\t%s\t%s\t%s-fold\t(%s%% of assembled reads)" % (contig_list[i][0], contig_list[i][1], contig_list[i][2], contig_list[i][4], contig_list[i][3])
if contig_list[i][3] > reads_percent_cutoff:
out_string += "*"
contigs_to_blast.append(contig_list[i][0])
printlog(out_string)
i += 1
printlog("\t* These contigs have > %s%% of the assembled reads and will thus be blasted." % reads_percent_cutoff)
else:
printlog("\tThey are (Name, length in bp, # reads, ~coverage, % reads):")
for contig in contig_list:
out_string = "\t\t%s\t%s\t%s\t%s-fold\t(%s%% of assembled reads)" % (contig[0], str(contig[1]), str(contig[2]), str(contig[4]), str(contig[3]))
if contig[3] > reads_percent_cutoff:
out_string += "*"
contigs_to_blast.append(contig[0])
printlog(out_string)
printlog("\t* These contigs have > %s%% of the assembled reads and will thus be blasted." % reads_percent_cutoff)
def blast_contigs(seqfile,database,outfile="blast_results.txt"):
blast_command = "blastn -db %s -query %s -out %s" % (database,seqfile,outfile)
subprocess.call(blast_command,shell=True)
return outfile
def biopy_blast(queryseq,database,outfile="blast_output.xml"):
blast_command = "blastn -db %s -query %s -out %s -outfmt 5" % (database,queryseq,outfile)
subprocess.call(blast_command,shell=True)
result_handle = open(outfile,'r')
from Bio.Blast import NCBIXML
return NCBIXML.read(result_handle)
def display_blast_results(record, reblast=False):
potential_clusters = []
base_one = ()
printlog("\n\n\tQuery: %s" % record.query)
if len(record.alignments) > 10:
printlog("\t\tFirst 10 hits:")
else:
printlog("\t\tAll hits:")
if len(record.alignments) == 0:
printlog("\t\t\tNo hits found.")
else:
for alignment in record.alignments[:10]:
title_split=alignment.title.split()
outline='\t\t\t' + ' '.join([title_split[1],title_split[2],title_split[3]]) + ' (%s bp)' % str(alignment.length)
if title_split[-2] == 'Cluster':
outline += ", Cluster %s" % title_split[-1]
potential_clusters.append(title_split[-1])
printlog(outline)
printlog("\t\tBest hit details:")
try:
best = record.alignments[0]
title_split=best.title.split()
best_title = ' '.join([title_split[1],title_split[2],title_split[3]]) + ' (%s bp)' % str(best.length)
printlog("\t\t\tSubject: %s" % best_title)
i = 1
for hsp in best.hsps[:10]:
printlog("\t\t\t\tHit #%s" % str(i))
printlog("\t\t\t\tLength: %s bp Score: %s E-value: %s" % (hsp.align_length,hsp.score,hsp.expect))
printlog("\t\t\t\tIdentities: %s/%s Gaps: %s/%s." % (hsp.identities,hsp.align_length,hsp.gaps,hsp.align_length))
if hsp.frame[1] == -1:
strand = "Minus"
else:
strand = "Plus"
printlog("\t\t\t\tStrand: Plus/%s" % strand)
printlog("\t\t\t\tQuery: %s" % hsp.query_start)
printlog("\t\t\t\tSbjct: %s" % hsp.sbjct_start)
if hsp.sbjct_start == 1:
base_one = (best_title,hsp.query_start,hsp.sbjct_start,reblast,record.query,)
i += 1
except:
pass
return (potential_clusters, base_one,)
def parse_blast(blastresults):
best_hits = []
with open(blastresults,'r') as in_file:
for line in in_file:
if (len(best_hits) <= 10) and " phage " in line:
data = line.split()
best_hits.append((data[2],data[-2],data[-1],))
return best_hits
def rc_reblast(seq_file):
# rc_out = open('%s_rc.fasta' % blast_result.query.split()[0],'w')
out_file = '%s_rc.fasta' % seq_file.split('.')[0]
rc_out = open(out_file,'w')
seq = SeqIO.read(open(seq_file,'r'),'fasta')
rc_out.write(">%s_reverse_complement\n" % seq.name)
seq = seq.reverse_complement()
rc_out.write(str(seq.seq))
rc_out.close()
return biopy_blast(out_file, DEFAULT_BLAST_DATABASE, outfile='%s_blast.xml' % out_file.split('.')[0])
#BLAST
printlog("\n***BLAST***")
printlog("\tRunning local blast of %s contig(s) against %s database..." % (str(len(contigs_to_blast)),DEFAULT_BLAST_DATABASE))
#all_contigs_file = cwd + "/%s/454AllContigs.fna" % genome_name
#blast_output = blast_contigs(all_contigs_file, DEFAULT_BLAST_DATABASE)
#print "\tBLAST complete."
#print "\tParsing BLAST results..."
#blast_results = parse_blast(blast_output)
#print "\tBest matches (Name, Score, E-value):"
#for result in blast_results:
# print "\t\t%s\t%s\t%s" % (result[0],result[1],result[2])
all_contig_objects=SeqIO.parse(open('%s/454AllContigs.fna' % project_dir,'r'),'fasta')
blasted_contigs = []
for contig in all_contig_objects:
if contig.id in contigs_to_blast:
SeqIO.write(contig, '%s.fasta' % contig.id, 'fasta')
blasted_contigs.append(biopy_blast('%s.fasta' % contig.id, DEFAULT_BLAST_DATABASE, outfile='%s_blast.xml' % contig.id))
reblasted_contigs = []
cluster_guesses = []
base_ones = []
for result in blasted_contigs:
cg = display_blast_results(result)
cluster_guesses.append((result.query.split()[0],cg[0],))
if cg[1]:
base_ones.append(cg[1])
try:
if result.alignments[0].hsps[0].frame[1] == -1:
reblasted_contigs.append(rc_reblast('%s.fasta' % result.query.split()[0]))
except:
pass
if reblasted_contigs:
printlog("\n\tRe-blasting %s contig(s) in reverse orientation." % str(len(reblasted_contigs)))
for result in reblasted_contigs:
cg = display_blast_results(result, reblast=True)
if cg[1]:
base_ones.append(cg[1])
def run_AceUtil(acefile,contig=None):
try:
outfile = acefile.rsplit('.',1)[0] + "." + str(int(acefile.rsplit('.',1)[1])+1)
except:
outfile = acefile + ".aceUtil"
AceUtil_command = "java -jar %s/AceUtil.jar %s %s " % (PATH_TO_ACEUTIL, acefile, outfile)
if contig:
AceUtil_command += contig
AceUtil_command += " >> %s" % (log_file_name)
subprocess.call(AceUtil_command, shell=True)
return outfile
#AceUtil
os.chdir("%s" % cwd)
printlog("\n***ACE UTIL***")
#Temp code until AceUtil fixed
#if len(contig_list) < 2:
# printlog("\tSkipping AceUtil because there's only one contig.\n\tThis will be changed when AceUtil is fixed.")
#else:
# printlog("\tRunning AceUtil...")
# log_file.close()
# ace_out = run_AceUtil('%s/edit_dir/454Contigs.ace.1' % consed_dir)
# log_file = open(log_file_name,'a')
aceutil_infile = "%s/edit_dir/454Contigs.ace.1" % consed_dir
for contig in contigs_to_blast:
printlog("\tRunning AceUtil on %s..." % contig)
log_file.close()
aceutil_outfile = run_AceUtil(aceutil_infile,contig=contig)
aceutil_infile = aceutil_outfile
log_file = open(log_file_name,'a')
printlog("\tAceUtil analysis complete.")
#Report
printlog("\n***REPORT***")
printlog("\tCluster Guess")
if cluster_guesses:
for contig in cluster_guesses:
gs = ', '.join(contig[1][:5])
printlog("\t\t%s\tCluster of top hits: %s" % (contig[0], gs))
if len(set(contig[1][:5])) == 1:
printlog("\t\tProbable cluster: %s" % contig[1][0])
else:
printlog("\t\tUnable to make single cluster guess from blast results.")
else:
printlog("\t\tUnable to determine a likely cluster.")
printlog("\tBase One Guess")
if base_ones:
for base_one in base_ones:
out = "\t\tIn the blast hit to %s, query position %s matches subject position %s." % (base_one[0], str(base_one[1]), str(base_one[2]))
out2 = "\t\tLikely Base 1 position: %s in %s" % (base_one[1], base_one[4])
if base_one[3]:
out += " (After contig was reverse-complemented.)"
printlog(out)
printlog(out2)
else:
printlog("\t\tUnable to find Base 1.")
printlog("\tGC Info")
i=0
all_contig_objects=SeqIO.parse(open('%s/454AllContigs.fna' % project_dir,'r'),'fasta')
for contig in all_contig_objects:
if i==10:
break
printlog("\t\t%s\t%s %%" % (contig.id, round(GC(contig.seq),1)))
i += 1
printlog("\tCoverage Info")
for contig in contig_list:
printlog("\t\t%s\t%s (assembled)\t%s (estimated for entire fastq)" % (contig[0],contig[4],contig[5]))
log_file.close()
|
Danos2000/phageAssembler
|
phageAssembler.py
|
Python
|
mit
| 15,387
|
from settings.settings import Settings
from utils.file_system import FileSystem
class AppSettings(Settings):
_settings = Settings('loki.cfg')
language = Settings(_settings.language_file)
temp_folder = _settings.temp_folder
FileSystem.ensure_dir_exists(temp_folder)
log_folder = _settings.log_folder
FileSystem.ensure_dir_exists(log_folder)
del _settings
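# Note (illustrative addition): the class body above runs at import time, so
# the temp and log directories are created as soon as this module is imported;
# consumers then read AppSettings.temp_folder, AppSettings.log_folder and
# AppSettings.language as plain class attributes.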
|
fpohtmeh/loki
|
settings/app_settings.py
|
Python
|
mit
| 385
|
"""An implementation of a neural network without classes (just a module)
"""
import numpy
import scipy.optimize
import itertools
def create_training_dict(X, y):
"""Take a set of input features and their labels and package them
along with some useful quantities into a dictionary. This could
be a training, validation, or test set.
Args:
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y (numpy.ndarray): labels for each feature vector
Returns:
A dictionary containing ...
        Xnorm (numpy.ndarray): 2-D array of normalized feature vectors
        Xmean, Xstd (``float``): mean and std of X used for normalization
y (numpy.ndarray): labels for each feature vector
m (int): number of feature vectors (i.e. training examples)
n (int): number of features per vector
n_cat (int): number of categories (i.e. unique values in y)
y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
for example if n_cat = 5, the label 3 -> [0, 0, 0, 1, 0]
"""
m, n = X.shape
n_cat = len(numpy.unique(y))
y1hot = numpy.identity(n_cat)[y]
Xmean = X.mean()
Xstd = X.std()
Xnorm = (X - Xmean) / Xstd
return {'Xnorm': Xnorm, 'Xmean': Xmean, 'Xstd': Xstd, 'y': y, 'm': m,
'n': n, 'n_cat': n_cat, 'y1hot': y1hot}
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2,s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def sigmoid(z):
"""Return element-wise sigmoid
Args:
z (numpy.ndarray): argument for sigmoid function
Returns:
g (numpy.ndarray): sigmoid function evaluated element-wise
"""
return 1.0 / (1.0 + numpy.exp(-z))
def sigmoid_gradient(z):
"""Return element-wise sigmoid gradient evaluated at z
Args:
z (numpy.ndarray): argument for sigmoid function
Returns:
        g (numpy.ndarray): sigmoid gradient evaluated element-wise
"""
return sigmoid(z) * (1.0 - sigmoid(z))
def flatten_arrays(arrays):
"""Turn a list of 2-D arrays into a single 1-D array.
Args:
arrays (``list`` of numpy.ndarray): a list of 2-D arrays
Returns:
(numpy.ndarray): a flattened 1-D array
"""
return numpy.concatenate([a.flatten() for a in arrays])
def unflatten_array(flat_array, array_shapes):
"""Turn a single 1-D array into a list of 2-D arrays.
Args:
flat_array (numpy.ndarray): a flattened 1-D array
array_shapes (``list`` of ``tuple``): 2-D array shapes
Returns:
arrays (``list`` of numpy.ndarray): a list of 2-D arrays
"""
i = 0
weight_arrays = []
for shape in array_shapes:
j = i + shape[0] * shape[1]
weight_arrays.append(flat_array[i:j].reshape(shape))
i = j
return weight_arrays
def initialize_random_weights(layer_sizes):
"""Initialize weight arrays to random values. We use the normalized
initialization of Glorot and Bengio (2010).
https://scholar.google.com/scholar?cluster=17889055433985220047&hl=en&as_sdt=0,22
"""
weights = []
for si, sj in pairwise(layer_sizes):
b = numpy.sqrt(6.0 / (si + sj))
weights.append(
numpy.random.uniform(low=-b, high=b, size=(sj, si+1))
)
return weights
def minimize(initial_weights, X, y1hot, lam=0.0, method='TNC', jac=True,
tol=1.0e-3, options={'disp': True, 'maxiter': 2000}):
"""Calculate values of weights that minimize the cost function.
Args:
initial_weights (``list`` of numpy.ndarray): weights between each layer
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y1hot (numpy.ndarray): 2-D array of one-hot vectors (1 per row)
lam (``float``): regularization parameter
method (``str``): minimization method (see scipy.optimize.minimize docs)
jac (``bool`` or ``callable``): gradient provided? (see
scipy.optimize.minimize docs)
tol (``float``): stopping criterion (see scipy.optimize.minimize docs)
options (``dict``): method specific (see scipy.optimize.minimize docs)
Returns:
res (``OptimizeResult``): (see scipy.optimize.minimize docs)
"""
weight_shapes = [w.shape for w in initial_weights]
flat_weights = flatten_arrays(initial_weights)
res = scipy.optimize.minimize(
compute_cost_and_grad,
flat_weights,
args=(X, y1hot, weight_shapes, lam),
method=method,
jac=jac,
tol=tol,
options=options,
)
return res
def compute_cost_and_grad(
weights_flat, X, y1hot, weight_shapes, lam=0.0, cost_only=False):
"""Calculate cost function and its gradient with respect to weights.
Args:
weights_flat (numpy.ndarray): a flattened 1-D weight array
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
weight_shapes (``list`` of ``tuple``): 2-D array shapes
lam (``float``): regularization parameter
cost_only (``boolean``): if True return cost without gradient
Returns:
J (``float``): Cost with current weights
weights_grad_flat (numpy.ndarray): d_J/d_weight
"""
# package flat weights into a list of arrays
m = X.shape[0]
weights = unflatten_array(weights_flat, weight_shapes)
# feed forward
aa, zz = feed_forward(X, weights)
# calculate raw cost
h = aa[-1]
J = -(
numpy.sum(y1hot * numpy.log(h)) +
numpy.sum((1.0 - y1hot) * numpy.log(1.0 - h))
) / m
# add regularization
for weight in weights:
J += lam * numpy.sum(weight[:, 1:] * weight[:, 1:]) * 0.5 / m
if cost_only:
return J
# gradient - back prop
weights_grad_flat = flatten_arrays(
back_propogation(weights, aa, zz, y1hot, lam=lam))
return J, weights_grad_flat
def feed_forward(X, weights):
"""Perform a feed forward step. Note that the z variables will
not have the bias columns included and that all but the final a
variables will have the bias column included.
Args:
X (numpy.ndarray): 2-D array of feature vectors (1 per row)
weights (``list`` of numpy.ndarray): weights between each layer
Returns:
aa (``list`` of numpy.ndarray): activation of nodes for
each layer. The last item in the list is the hypothesis.
zz (``list`` of numpy.ndarray): input into nodes for each layer.
"""
aa = []
zz = []
zz.append(None) # this is z1 (i.e. there is no z1)
ai = X.copy()
ai = numpy.c_[numpy.ones(ai.shape[0]), ai] # a1 is X + bias nodes
aa.append(ai)
for weight in weights:
zi = ai.dot(weight.T)
zz.append(zi)
ai = sigmoid(zi)
ai = numpy.c_[numpy.ones(ai.shape[0]), ai] # add bias column
aa.append(ai)
# remove bias column from last aa layer
aa[-1] = aa[-1][:, 1:]
return aa, zz
def back_propogation(weights, aa, zz, y1hot, lam=0.0):
"""Perform a back propogation step
Args:
weights (``list`` of numpy.ndarray): weights between each layer
aa (``list`` of numpy.ndarray): activation of nodes for
each layer. The last item in the list is the hypothesis.
zz (``list`` of numpy.ndarray): input into nodes for each layer.
y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
lam (``float``): regularization parameter
Returns:
weights_grad (``list`` of numpy.ndarray): d_J/d_weight
"""
weights_grad = []
m = y1hot.shape[0]
n_layers = len(weights) + 1
di_plus_1 = aa[-1] - y1hot
i = n_layers - 2
while i > 0:
ones_col = numpy.ones(zz[i].shape[0])
di = (
di_plus_1.dot(weights[i]) *
sigmoid_gradient(numpy.c_[ones_col, zz[i]])
)
di = di[:, 1:]
weights_grad.append(di_plus_1.T.dot(aa[i]))
i -= 1
di_plus_1 = di.copy()
weights_grad.append(di.T.dot(aa[0]))
# we built it backwards
weights_grad.reverse()
# normalize by m
weights_grad = [wg/m for wg in weights_grad]
# add regularization (skip first columns)
for i in range(n_layers-1):
weights_grad[i][:, 1:] += lam/m * weights[i][:, 1:]
return weights_grad
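# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal end-to-end run on synthetic data; the layer sizes and lam value
# below are arbitrary choices for demonstration, not recommended defaults.
def _demo():
    X = numpy.random.rand(100, 4)                   # 100 examples, 4 features
    y = numpy.random.randint(0, 3, 100)             # 3 categories
    train = create_training_dict(X, y)
    layer_sizes = [train['n'], 10, train['n_cat']]  # one hidden layer of 10
    weights0 = initialize_random_weights(layer_sizes)
    res = minimize(weights0, train['Xnorm'], train['y1hot'], lam=1.0)
    weights = unflatten_array(res.x, [w.shape for w in weights0])
    aa, _ = feed_forward(train['Xnorm'], weights)
    predictions = aa[-1].argmax(axis=1)             # most probable category
    return (predictions == train['y']).mean()       # training-set accuracy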
|
galtay/neural_learner
|
nn.py
|
Python
|
mit
| 8,224
|
from . import __version__
def version(request):
data = {'amy_version': __version__}
return data
|
swcarpentry/amy
|
amy/workshops/context_processors.py
|
Python
|
mit
| 106
|
# -*- coding: utf-8 -*-
"""
Import aedat version 1 or 2.
"""
import numpy as np
def import_aedat_dataversion1or2(info):
"""
Parameters
----------
info :
"""
# The formatVersion dictates whether there are 6 or 8 bytes per event.
if info['formatVersion'] == 1:
num_bytes_per_event = 6
addr_precision = np.dtype([('addr', '>u2'), ('ts', '>u4')])
else:
num_bytes_per_event = 8
addr_precision = np.dtype([('addr', '>u4'), ('ts', '>u4')])
file_handle = info['fileHandle']
# Find the number of events, assuming that the file position is just at the
# end of the headers.
file_handle.seek(0, 2)
num_events_in_file = int(np.floor(
(file_handle.tell() - info['beginningOfDataPointer']) /
num_bytes_per_event))
info['numEventsInFile'] = num_events_in_file
# Check the startEvent and endEvent parameters
if 'startEvent' not in info:
info['startEvent'] = 0
assert info['startEvent'] <= num_events_in_file
if 'endEvent' not in info:
info['endEvent'] = num_events_in_file
if 'startPacket' in info:
print("The startPacket parameter is set, but range by packets is not "
"available for .aedat version < 3 files")
if 'endPacket' in info:
print("The endPacket parameter is set, but range by events is not "
"available for .aedat version < 3 files")
if info['endEvent'] > num_events_in_file:
print("The file contains {}; the endEvent parameter is {}; reducing "
"the endEvent parameter accordingly.".format(num_events_in_file,
info['endEvents']))
info['endEvent'] = num_events_in_file
assert info['startEvent'] < info['endEvent']
num_events_to_read = int(info['endEvent'] - info['startEvent'])
# Read events
file_handle.seek(info['beginningOfDataPointer'] + num_bytes_per_event *
info['startEvent'])
all_events = np.fromfile(file_handle, addr_precision, num_events_to_read)
all_addr = np.array(all_events['addr'])
all_ts = np.array(all_events['ts'])
# Trim events outside time window.
# This is an inefficent implementation, which allows for non-monotonic
# timestamps.
if 'startTime' in info:
temp_index = np.nonzero(all_ts >= info['startTime'] * 1e6)
all_addr = all_addr[temp_index]
all_ts = all_ts[temp_index]
if 'endTime' in info:
temp_index = np.nonzero(all_ts <= info['endTime'] * 1e6)
all_addr = all_addr[temp_index]
all_ts = all_ts[temp_index]
# DAVIS. In the 32-bit address:
# bit 32 (1-based) being 1 indicates an APS sample
# bit 11 (1-based) being 1 indicates a special event
# bits 11 and 32 (1-based) both being zero signals a polarity event
aps_or_imu_mask = int('80000000', 16)
aps_or_imu_logical = np.bitwise_and(all_addr, aps_or_imu_mask)
signal_or_special_mask = int('400', 16)
signal_or_special_logical = np.bitwise_and(all_addr,
signal_or_special_mask)
polarity_logical = \
np.logical_and(np.logical_not(aps_or_imu_logical),
np.logical_not(signal_or_special_logical))
# These masks are used for both frames and polarity events, so are defined
# outside of the following if statement
y_mask = int('7FC00000', 16)
y_shift_bits = 22
x_mask = int('003FF000', 16)
x_shift_bits = 12
output = {'data': {}}
# Polarity(DVS) events
if ('dataTypes' not in info or 'polarity' in info['dataTypes']) \
and any(polarity_logical):
output['data']['polarity'] = {}
output['data']['polarity']['timeStamp'] = all_ts[polarity_logical]
# Y addresses
output['data']['polarity']['y'] = np.array(np.right_shift(
np.bitwise_and(all_addr[polarity_logical], y_mask), y_shift_bits),
'int32')
# X addresses
output['data']['polarity']['x'] = np.array(np.right_shift(
np.bitwise_and(all_addr[polarity_logical], x_mask), x_shift_bits),
'int32')
# Polarity bit
output['data']['polarity']['polarity'] = np.array(np.equal(
np.right_shift(all_addr[polarity_logical], 11) % 2, 1), 'int32')
output['info'] = info
# calculate numEvents fields; also find first and last timeStamps
output['info']['firstTimeStamp'] = np.infty
output['info']['lastTimeStamp'] = 0
if 'polarity' in output['data']:
output['data']['polarity']['numEvents'] = \
len(output['data']['polarity']['timeStamp'])
# noinspection PyTypeChecker
if output['data']['polarity']['timeStamp'][0] < \
output['info']['firstTimeStamp']:
# noinspection PyTypeChecker
output['info']['firstTimeStamp'] = \
output['data']['polarity']['timeStamp'][0]
# noinspection PyTypeChecker
if output['data']['polarity']['timeStamp'][-1] > \
output['info']['lastTimeStamp']:
# noinspection PyTypeChecker
output['info']['lastTimeStamp'] = \
output['data']['polarity']['timeStamp'][-1]
return output
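# --- Usage sketch (illustrative; the values below are assumptions) ---
# The caller is expected to have parsed the file headers already and to pass a
# dict like the following, where 'beginningOfDataPointer' is the byte offset of
# the first event record:
#
#     info = {'fileHandle': open('recording.aedat', 'rb'),
#             'formatVersion': 2,
#             'beginningOfDataPointer': 4096,      # hypothetical header size
#             'startTime': 0.5, 'endTime': 1.5}    # optional, in seconds
#     output = import_aedat_dataversion1or2(info)
#     x = output['data']['polarity']['x']
#     ts = output['data']['polarity']['timeStamp']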
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/datasets/aedat/ImportAedatDataVersion1or2.py
|
Python
|
mit
| 5,296
|
# -*- coding: utf-8 -*-
"""
originally from http://www.djangosnippets.org/snippets/828/ by dnordberg
"""
import logging
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six.moves import input
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
help = "Resets the database for this project."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--noinput', action='store_false',
dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--no-utf8', action='store_true', dest='no_utf8_support',
default=False,
help='Tells Django to not create a UTF-8 charset database')
parser.add_argument(
'-U', '--user', action='store', dest='user', default=None,
help='Use another user for the database then defined in '
'settings.py')
parser.add_argument(
'-O', '--owner', action='store', dest='owner', default=None,
help='Use another owner for creating the database then the '
'user defined in settings or via --user')
parser.add_argument(
'-P', '--password', action='store', dest='password', default=None,
help='Use another password for the database then defined in '
'settings.py')
parser.add_argument(
'-D', '--dbname', action='store', dest='dbname', default=None,
help='Use another database name then defined in settings.py')
parser.add_argument(
'-R', '--router', action='store', dest='router', default='default',
help='Use this router-database other then defined in settings.py')
parser.add_argument(
'-c', '--close-sessions', action='store_true', dest='close_sessions', default=False,
help='Close database connections before dropping database (PostgreSQL only)')
@signalcommand
def handle(self, *args, **options):
"""
Resets the database for this project.
Note: Transaction wrappers are in reverse as a work around for
autocommit, anybody know how to do this the right way?
"""
if args:
raise CommandError("reset_db takes no arguments")
router = options.get('router')
dbinfo = settings.DATABASES.get(router)
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
user = password = database_name = database_host = database_port = ''
if engine == 'mysql':
(user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
user = options.get('user') or dbinfo.get('USER') or user
password = options.get('password') or dbinfo.get('PASSWORD') or password
owner = options.get('owner') or user
database_name = options.get('dbname') or dbinfo.get('NAME') or database_name
if database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST') or database_host
database_port = dbinfo.get('PORT') or database_port
verbosity = int(options.get('verbosity', 1))
if options.get('interactive'):
confirm = input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
else:
confirm = 'yes'
if confirm != 'yes':
print("Reset cancelled.")
return
if engine in ('sqlite3', 'spatialite'):
import os
try:
logging.info("Unlinking %s database" % engine)
os.unlink(database_name)
except OSError:
pass
elif engine in ('mysql',):
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
            utf8_support = '' if options.get('no_utf8_support', False) else 'CHARACTER SET utf8'
create_query = 'CREATE DATABASE `%s` %s' % (database_name, utf8_support)
logging.info('Executing... "' + drop_query + '"')
connection.query(drop_query)
logging.info('Executing... "' + create_query + '"')
connection.query(create_query)
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
if engine == 'postgresql' and django.VERSION < (1, 9):
import psycopg as Database # NOQA
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
import psycopg2 as Database # NOQA
conn_params = {'database': 'template1'}
if user:
conn_params['user'] = user
if password:
conn_params['password'] = password
if database_host:
conn_params['host'] = database_host
if database_port:
conn_params['port'] = database_port
connection = Database.connect(**conn_params)
            connection.set_isolation_level(0)  # 0 = ISOLATION_LEVEL_AUTOCOMMIT in psycopg2
cursor = connection.cursor()
if options.get('close_sessions'):
close_sessions_query = """
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '%s';
""" % database_name
logging.info('Executing... "' + close_sessions_query.strip() + '"')
try:
cursor.execute(close_sessions_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
drop_query = "DROP DATABASE \"%s\";" % database_name
logging.info('Executing... "' + drop_query + '"')
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
create_query = "CREATE DATABASE \"%s\"" % database_name
if owner:
create_query += " WITH OWNER = \"%s\" " % owner
create_query += " ENCODING = 'UTF8'"
if engine == 'postgis' and django.VERSION < (1, 9):
# For PostGIS 1.5, fetch template name if it exists
from django.contrib.gis.db.backends.postgis.base import DatabaseWrapper
postgis_template = DatabaseWrapper(dbinfo).template_postgis
if postgis_template is not None:
create_query += ' TEMPLATE = %s' % postgis_template
if settings.DEFAULT_TABLESPACE:
create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
else:
create_query += ';'
logging.info('Executing... "' + create_query + '"')
cursor.execute(create_query)
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options.get('interactive'):
print("Reset successful.")
|
haakenlid/django-extensions
|
django_extensions/management/commands/reset_db.py
|
Python
|
mit
| 7,936
|
"""
Brume
"""
VERSION = "2.0.1"
|
flou/brume
|
brume/__init__.py
|
Python
|
mit
| 33
|
"""Order 31: simple read and write excel
1. Write simple table data to excel;
2. Read the simple table data from excel;
"""
from faker import Faker
import xlrd
import xlwt
class SimpleReadAndWriteExcel(object):
@staticmethod
def read_excel_data(file_name):
workbook = xlrd.open_workbook(file_name)
print(workbook.sheet_names())
        # Sheet indices start at 0
# use_sheet = workbook.sheet_by_index(0)
use_sheet = workbook.sheet_by_name(workbook.sheet_names()[0])
        # Get whole-row and whole-column values (as lists)
        rows = use_sheet.row_values(3)  # contents of the fourth row
        cols = use_sheet.col_values(2)  # contents of the third column
print(type(rows), rows)
print(type(cols), cols)
for row in range(10):
for column in range(3):
print(use_sheet.cell(row, column).value)
@staticmethod
def write_excel_data(file_name):
workbook = xlwt.Workbook()
sheet = workbook.add_sheet('sheet 1')
start_row_num = 0
need_save_data = SimpleReadAndWriteExcel.gen_data()
for row_id, row in enumerate(need_save_data):
for column_id, unit_data in enumerate(row):
sheet.write(row_id + start_row_num, column_id, unit_data)
workbook.save(file_name)
@staticmethod
def gen_data():
faker_obj = Faker()
data = list()
for i in range(10):
column = [
i,
faker_obj.name_female(),
faker_obj.phone_number(),
faker_obj.address()
]
data.append(column)
return data
@staticmethod
def test():
data = SimpleReadAndWriteExcel.gen_data()
print(data)
SimpleReadAndWriteExcel.write_excel_data('test.xls')
SimpleReadAndWriteExcel.read_excel_data('test.xls')
SimpleReadAndWriteExcel.test()
|
flyingSprite/spinelle
|
task_inventory/order_31_to_60/order_31_simple_read_and_write_excel.py
|
Python
|
mit
| 1,923
|
from grappa import GrappaExperiment, MPIRunGrappaExperiment
tpch_bigdatann_debug = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
'qn': [18,19],
'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v2-debugmode',
'machine': 'bigdata',
'system': 'radish'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_bigdatann_debug.run()
|
bmyerz/log2sqlite
|
projects/radish_paper/run_tpch_radish_debug.py
|
Python
|
mit
| 915
|
"""
This module adds the class needed for creating custom chat commands
:Example:
Here's a simple ChatPlugin which will send a HelloWorld on use::
class ChatHelloWorld(ChatPlugin):
helper = "Display Hello World"
command = "hello"
def __call__(self, serv, message):
serv.send_message("Hello world", to="me")
"""
class ChatPlugin(object):
"""
Inherit from this class to add a command in the chat.
"""
helper = ""
"""
    Text that will be shown when calling the help command
:rtype: str
"""
permission = None
"""
Permission needed for this command (see ability)
:rtype: smserver.ability.Permissions
"""
room = False
"""
    Specify here whether the command needs to be executed in a room
:rtype: bool
"""
command = None
"""
The command to use to call this function
:rtype: str
"""
def __init__(self, server):
self.server = server
def can(self, connection):
"""
        Method called each time someone tries to run this command
:param connection: The connection which perform this command
:type connection: smserver.models.connection.Connection
        :return: True if authorized, False if not
:rtype: bool
"""
if self.room and not connection.room:
return False
if self.permission and not connection.can(self.permission, connection.room_id):
return False
return True
def __call__(self, resource, message):
"""
Action to perform when using the command
:param resource: The chat resource that send this command
:param message: The text after the command. (Eg. /command text)
        :type resource: smserver.resources.chat_resources.ChatResource
:type message: str
        :return: Response of the command
:rtype: list
"""
|
ningirsu/stepmania-server
|
smserver/chatplugin.py
|
Python
|
mit
| 2,029
|
"""
Markdown.py
0. just print whatever is passed in to stdin
0. if filename passed in as a command line parameter,
then print file instead of stdin
1. wrap input in paragraph tags
2. convert single asterisk or underscore pairs to em tags
3. convert double asterisk or underscore pairs to strong tags
"""
import fileinput
import re
class Buffer:
def __init__(self):
self.__buffer = ""
def add(self, line):
self.__buffer += line
def get(self):
return self.__buffer
def clear(self):
        self.__buffer = ""  # reset to an empty string so empty() and add() keep working
def empty(self):
return self.__buffer == ""
def convertStrong(line):
line = re.sub(r'\*\*(.*)\*\*', r'<strong>\1</strong>', line)
line = re.sub(r'__(.*)__', r'<strong>\1</strong>', line)
return line
def convertEm(line):
line = re.sub(r'\*(.*)\*', r'<em>\1</em>', line)
line = re.sub(r'_(.*)_', r'<em>\1</em>', line)
return line
def convertH1(line):
return re.sub(r'#(.*)', r'<h1>\1</h1>', line)
def convertH2(line):
return re.sub(r'##(.*)', r'<h2>\1</h2>', line)
def convertH3(line):
return re.sub(r'###(.*)', r'<h3>\1</h3>', line)
def convertBlockquote(line, line_buffer):
if line.startswith(">"):
line = line[1:]
if line_buffer.empty():
line_buffer.add("<blockquote>%s\n" % (line))
else:
line_buffer.add(line)
return ""
else:
if not line_buffer.empty():
ret = line_buffer.get() + "</blockquote>"
line_buffer.clear()
ret += line
return ret
return line
line_buffer = Buffer()
for line in fileinput.input():
line = line.rstrip()
line = convertBlockquote(line, line_buffer)
line = convertStrong(line)
line = convertEm(line)
line = convertH3(line)
line = convertH2(line)
line = convertH1(line)
if line:
print '<p>' + line + '</p>'
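# Flush a blockquote still open at end of input; without this, input ending
# inside a blockquote would lose its buffered lines.
if not line_buffer.empty():
    print '<p>' + line_buffer.get() + '</blockquote>' + '</p>'
# Example (illustrative): given the stdin line "This is **bold** and *em*",
# the script prints "<p>This is <strong>bold</strong> and <em>em</em></p>".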
|
djpetti/csci2963-DanielPetti
|
code/unittest/markdown.py
|
Python
|
mit
| 1,870
|
# -*- coding: utf-8 -*-
import psutil
import time
import json
def system_monitoring(Socket):
"""Pushes system monitoring data to client"""
while True:
cameraProcessingFPS = []
# for camera in HomeSurveillance.cameras:
# cameraProcessingFPS.append("{0:.2f}".format(camera.processingFPS))
# #print "FPS: " +str(camera.processingFPS) + " " + str(camera.streamingFPS)
# app.logger.info("FPS: " +str(camera.processingFPS) + " " + str(camera.streamingFPS))
systemState = {'cpu': cpu_usage(), 'memory': memory_usage(), 'processingFPS': cameraProcessingFPS}
Socket.emit('system_monitoring', json.dumps(systemState))
time.sleep(3)
def cpu_usage():
    psutil.cpu_percent(interval=1, percpu=False)  # discard the first sample; it primes psutil's counters
time.sleep(0.12)
cpu_load = psutil.cpu_percent(interval=1, percpu=False)
return cpu_load
def memory_usage():
mem_usage = psutil.virtual_memory().percent
return mem_usage
|
golden-tech-native/gd_facerecognize
|
facelib/monitor/osstatus.py
|
Python
|
mit
| 1,005
|
"""Test suite for the pytest-doctest-custom plugin."""
import re, os, sys, platform, pytest, pytest_doctest_custom
pytest_plugins = "pytester" # Enables the testdir fixture
PYPY = platform.python_implementation() == "PyPy"
JYTHON = platform.python_implementation() == "Jython"
PY2 = sys.version_info[0] == 2
SPLIT_DOCTEST = pytest.__version__ >= "2.4"
# Prevent py._path.local.LocalPath.pyimport from raising
# ImportMismatchError when --runpytest=subprocess
JYTHON_FIX = '''
from os.path import basename
__file__ = basename(__file__) # it was __pyclasspath__/test_*.py
'''
@pytest.fixture
def here(request):
"""
Empty fixture to include the current dir to the system path,
required for modules to be imported in testdir tests.
"""
if sys.path[0] != "":
old_sys_path = sys.path
sys.path = [""] + old_sys_path # Adds the test dir to the path
def finalizer():
sys.path = old_sys_path
request.addfinalizer(finalizer)
def join_lines(src, before, after, sep=" "):
"""
Remove the newline and indent between a pair of lines where the first
ends with ``before`` and the second starts with ``after``, replacing
it by the ``sep``.
"""
before_re = "][".join(before).join("[]")
after_re = "][".join(after).join("[]")
regex = "\n\\s*".join([before_re, after_re])
return re.sub(regex, sep.join([before, after]), src)
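# For example (illustrative): join_lines("[1,\n    2]", before=",", after="2")
# returns "[1, 2]"; wrapping each character of ``before``/``after`` in []
# makes regex metacharacters such as "*" match literally.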
class JoinedDescr(object):
"""Descriptor that performs a deferred call to join_lines."""
def __init__(self, attr_name, **kwargs):
self.attr_name = attr_name
self.kwargs = kwargs
def __get__(self, instance, owner):
return join_lines(getattr(instance, self.attr_name), **self.kwargs)
class TestStrLowerAsRepr(object):
src_pass = """
'''
>>> "This IS a TEsT! =D"
this is a test! =d
'''
"""
src_fail = src_pass.replace("=d", "=D")
args = "--doctest-repr", "str.lower", "--verbose", "--doctest-modules"
def test_valid_src(self, testdir):
testdir.makepyfile(self.src_pass)
result = testdir.runpytest(*self.args)
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("test_valid_src.py*PASSED")
def test_invalid_src(self, testdir):
testdir.makepyfile(self.src_fail)
result = testdir.runpytest(*self.args)
result.assert_outcomes(passed=0, skipped=0, failed=1)
result.stdout.fnmatch_lines("test_invalid_src.py*FAILED")
# "Abstract" tests below are for pretty printers with some sort of multiline
# and/or sorted contents. Also tests single line behavior, for which a custom
# representation formatter object should be stored in conftest.py to ensure a
# larger width (150 is enough).
class ATestList(object):
"""
Abstract attributes: args, args_conftest, src_conftest, args_mymodule,
src_mymodule
"""
src_list = '''
def one_to(n):
"""
>>> [[a*b for a in one_to(4)] for b in one_to(9)]
[[1, 2, 3, 4],
[2, 4, 6, 8],
[3, 6, 9, 12],
[4, 8, 12, 16],
[5, 10, 15, 20],
[6, 12, 18, 24],
[7, 14, 21, 28],
[8, 16, 24, 32],
[9, 18, 27, 36]]
"""
return range(1, n + 1)
def test_one_to(): # Meta-test
assert list(one_to(0)) == []
assert list(one_to(1)) == [1]
assert list(one_to(2)) == [1, 2]
assert list(one_to(3)) == [1, 2, 3]
'''
src_list_no_line_break = JoinedDescr("src_list", before=",", after="[")
def test_list_pass(self, testdir):
testdir.makepyfile(self.src_list)
result = testdir.runpytest(*self.args)
result.assert_outcomes(passed=2, skipped=0, failed=0)
dt_name = "test_list_pass.one_to" if SPLIT_DOCTEST else "doctest]"
result.stdout.fnmatch_lines([
"test_list_pass.py*%s PASSED" % dt_name,
"test_list_pass.py*test_one_to PASSED",
])
def test_list_fail(self, testdir):
testdir.makepyfile(self.src_list_no_line_break)
result = testdir.runpytest(*self.args)
result.assert_outcomes(passed=1, skipped=0, failed=1)
dt_name = "test_list_fail.one_to" if SPLIT_DOCTEST else "doctest]"
result.stdout.fnmatch_lines([
"test_list_fail.py*%s FAILED" % dt_name,
"test_list_fail.py*test_one_to PASSED",
])
def test_list_conftest_fix_width(self, testdir):
testdir.makeconftest(self.src_conftest)
testdir.makepyfile(self.src_list_no_line_break)
result = testdir.runpytest(*self.args_conftest)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=2, skipped=0, failed=0)
dt_name = "test_list_conftest_fix_width.one_to"
else:
result.assert_outcomes(passed=3, skipped=0, failed=0)
result.stdout.fnmatch_lines(["conftest.py*doctest] PASSED"])
dt_name = "doctest]"
result.stdout.fnmatch_lines([
"test_list_conftest_fix_width.py*%s PASSED" % dt_name,
"test_list_conftest_fix_width.py*test_one_to PASSED",
])
def test_list_mymodule_fix_width(self, testdir, here):
testdir.makepyfile(mymodule=self.src_mymodule)
testdir.makepyfile(self.src_list_no_line_break)
result = testdir.runpytest(*self.args_mymodule)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=2, skipped=0, failed=0)
dt_name = "test_list_mymodule_fix_width.one_to"
else:
result.assert_outcomes(passed=3, skipped=0, failed=0)
result.stdout.fnmatch_lines(["mymodule.py*doctest] PASSED"])
dt_name = "doctest]"
result.stdout.fnmatch_lines([
"test_list_mymodule_fix_width.py*%s PASSED" % dt_name,
"test_list_mymodule_fix_width.py*test_one_to PASSED",
])
class ATestDict(object):
"""
Abstract attributes: args, args_conftest, src_conftest, args_mymodule,
src_mymodule, set3repr
"""
src_dict = '''
"""
>>> {"hey": upper("Why?"),
... "abcdefgh": set([3]),
... "weird": 2,
... "was": -5}
{'abcdefgh': %s, 'hey': 'WHY?', 'was': -5, 'weird': 2}
"""
def upper(anything):
"""
>>> from string import ascii_lowercase as low
>>> dict(zip(low[::-3], map(upper, low)))
{'b': 'I',
'e': 'H',
'h': 'G',
'k': 'F',
'n': 'E',
'q': 'D',
't': 'C',
'w': 'B',
'z': 'A'}
"""
return anything.upper()
'''
src_dict_no_line_break = JoinedDescr("src_dict", before=",", after="'")
def test_sorted_dict_pass(self, testdir):
testdir.makepyfile(self.src_dict % self.set3repr)
result = testdir.runpytest(*self.args)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_dict_pass PASSED",
"*test_sorted_dict_pass.upper PASSED",
])
else:
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines(["*doctest] PASSED"])
def test_sorted_dict_half_fail(self, testdir):
testdir.makepyfile(self.src_dict_no_line_break % self.set3repr)
result = testdir.runpytest(*self.args)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=1, skipped=0, failed=1)
result.stdout.fnmatch_lines([
"*test_sorted_dict_half_fail PASSED",
"*test_sorted_dict_half_fail.upper FAILED",
])
else:
result.assert_outcomes(passed=0, skipped=0, failed=1)
result.stdout.fnmatch_lines(["*doctest] FAILED"])
def test_sorted_dict_conftest_fix_width(self, testdir):
testdir.makeconftest(self.src_conftest)
testdir.makepyfile(self.src_dict_no_line_break % self.set3repr)
result = testdir.runpytest(*self.args_conftest)
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_dict_conftest_fix_width PASSED",
"*test_sorted_dict_conftest_fix_width.upper PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*doctest] PASSED",
])
def test_sorted_dict_mymodule_fix_width(self, testdir, here):
testdir.makepyfile(mymodule=self.src_mymodule)
testdir.makepyfile(self.src_dict_no_line_break % self.set3repr)
result = testdir.runpytest(*self.args_mymodule)
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_dict_mymodule_fix_width PASSED",
"*test_sorted_dict_mymodule_fix_width.upper PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*doctest] PASSED",
])
class ATestSet(object):
"""
Abstract attributes: args, args_conftest, src_conftest, args_mymodule,
src_mymodule
"""
src_set = '''
"""
>>> import string
>>> set(string.digits)
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
"""
from functools import reduce
def union(*args):
"""
>>> union([5734500, 545312, 50200], range(50198,50208), [50205])
{50198,
50199,
50200,
50201,
50202,
50203,
50204,
50205,
50206,
50207,
545312,
5734500}
"""
return reduce(set.union, map(set, args))
'''
src_set_no_line_break = JoinedDescr("src_set", before=",", after="5")
def test_sorted_set_pass(self, testdir):
testdir.makepyfile(self.src_set)
result = testdir.runpytest(*self.args)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_set_pass PASSED",
"*test_sorted_set_pass.union PASSED",
])
else:
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines(["*doctest] PASSED"])
def test_sorted_set_half_fail(self, testdir):
testdir.makepyfile(self.src_set_no_line_break)
result = testdir.runpytest(*self.args)
if SPLIT_DOCTEST:
result.assert_outcomes(passed=1, skipped=0, failed=1)
result.stdout.fnmatch_lines([
"*test_sorted_set_half_fail PASSED",
"*test_sorted_set_half_fail.union FAILED",
])
else:
result.assert_outcomes(passed=0, skipped=0, failed=1)
result.stdout.fnmatch_lines(["*doctest] FAILED"])
def test_sorted_set_conftest_fix_width(self, testdir):
testdir.makeconftest(self.src_conftest)
testdir.makepyfile(self.src_set_no_line_break)
result = testdir.runpytest(*self.args_conftest)
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_set_conftest_fix_width PASSED",
"*test_sorted_set_conftest_fix_width.union PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*doctest] PASSED",
])
def test_sorted_set_mymodule_fix_width(self, testdir, here):
testdir.makepyfile(mymodule=self.src_mymodule)
testdir.makepyfile(self.src_set_no_line_break)
result = testdir.runpytest(*self.args_mymodule)
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_sorted_set_mymodule_fix_width PASSED",
"*test_sorted_set_mymodule_fix_width.union PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*doctest] PASSED",
])
class ATestPPrint(ATestList, ATestDict):
set3repr = "set([3])" if PY2 else "{3}"
class TestPPrintPFormatAsRepr(ATestPPrint):
args = "--doctest-repr=pprint:pformat", "--verbose", "--doctest-modules"
args_conftest = ("--doctest-repr", "conftest:doctest_pp.pformat",
"--verbose", "--doctest-modules")
args_mymodule = ("--doctest-repr", "mymodule:doctest_pp.pformat",
"--verbose", "--doctest-modules")
src_conftest = src_mymodule = '''
import pprint
doctest_pp = pprint.PrettyPrinter(width=150)
'''
if JYTHON and not SPLIT_DOCTEST:
src_mymodule += JYTHON_FIX
class TestPPrintPPrintAsRepr(ATestPPrint):
args = "--doctest-repr=pprint:pprint", "--verbose", "--doctest-modules"
args_conftest = ("--doctest-repr", "conftest:doctest_pp.pprint",
"--verbose", "--doctest-modules")
args_mymodule = ("--doctest-repr", "mymodule:doctest_pp.pprint",
"--verbose", "--doctest-modules")
src_conftest = '''
import pprint
from pytest_doctest_custom import stdout_proxy
doctest_pp = pprint.PrettyPrinter(width=150, stream=stdout_proxy)
'''
src_mymodule = TestPPrintPFormatAsRepr.src_mymodule
@pytest.mark.skipif(JYTHON, reason="IPython doesn't run on Jython")
class ATestIPython(ATestList, ATestDict, ATestSet):
set3repr = "{3}"
if PYPY: # Fix the undesired IPython replacement of the dict
# representation printer by the dictproxy one in PyPy, using the
# IPython dict pretty printer factory itself for such.
src_dict = ATestDict.src_dict + '''
from IPython.lib.pretty import _type_pprinters, _dict_pprinter_factory
_type_pprinters[dict] = _dict_pprinter_factory("{", "}", dict)
'''
class TestIPythonPrettyAsRepr(ATestIPython):
args = ("--doctest-repr=IPython.lib.pretty:pretty",
"--verbose", "--doctest-modules")
args_conftest = ("--doctest-repr", "conftest:doctest_pretty",
"--verbose", "--doctest-modules")
args_mymodule = ("--doctest-repr", "mymodule:doctest_pretty",
"--verbose", "--doctest-modules")
src_conftest = src_mymodule = '''
from IPython.lib.pretty import pretty
def doctest_pretty(value):
return pretty(value, max_width=150)
'''
class TestIPythonPPrintAsRepr(ATestIPython):
args = ("--doctest-repr=IPython.lib.pretty:pprint",
"--verbose", "--doctest-modules")
args_conftest = ("--doctest-repr", "conftest:doctest_pprint",
"--verbose", "--doctest-modules")
args_mymodule = ("--doctest-repr", "mymodule:doctest_pprint",
"--verbose", "--doctest-modules")
src_conftest = src_mymodule = '''
from IPython.lib.pretty import pprint
def doctest_pprint(value):
return pprint(value, max_width=150)
'''
class TestReprAddress(object):
msg_import = "ERROR: *ImportError* No module named *{module}*"
msg_attr = "ERROR: *AttributeError* *{obj}* has no attribute '{attr}'"
def run_and_assert_stderr_msg_stout_empty(self, td, msg, address, **kws):
args = "--doctest-repr=" + address, "--verbose", "--doctest-modules"
address_split = address.split(":")
attr_raw_split = address_split.pop().split(".")
module = address_split.pop() if address_split else ""
attr = attr_raw_split.pop()
obj = attr_raw_split.pop() if attr_raw_split else "module"
keys = {"module": module, "obj": obj, "attr": attr}
keys.update(kws)
result = td.runpytest(*args)
result.stderr.fnmatch_lines(msg.format(**keys))
assert "\n" not in result.stderr.str().strip() # Only one stderr line
assert result.stdout.str().strip() == ""
def test_import_error_not_nested(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_import,
address = "areallybadnameunavailable_____this_shouldnt_exist:obj")
def test_import_error_nested_first(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_import,
address = "areallybadnameunavailable_____this_isnt.something:yet",
module = "areallybadnameunavailable_____this_isnt")
def test_import_error_nested_middle(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_import,
address = "sys.blablablablablah_blhaah.meeeh:obj",
module = "blablablablablah_blhaah")
def test_import_error_nested_last(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_import,
address = "os.path.heeeeeey:data",
module = "heeeeeey")
def test_attribute_error_builtin_not_nested(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_attr,
address = "some_builtin_objThatDoesntExist_atAll")
def test_attribute_error_builtin_nested(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_attr,
address = "str.fakyjoint")
def test_attribute_error_not_nested(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_attr,
address = "os.path:oh_i_dont_likeIT")
def test_attribute_error_nested(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir, self.msg_attr,
address = "itertools:chain.from_iterable.myself",
obj = "function" if PYPY else "builtin_function_or_method")
def test_empty(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir,
msg = "ERROR: *ValueError* Empty doctest-repr address",
address = "")
def test_multiple_colon(self, testdir):
self.run_and_assert_stderr_msg_stout_empty(testdir,
msg = "ERROR: *ValueError* Multiple colon in doctest-repr address",
address = "os:sys:version")
class TestPluginEnabled(object):
src = '''
"""
>>> getattr(pytest_doctest_custom.printer, "repr", None) is repr
False
>>> sys.displayhook is pytest_doctest_custom.printer
False
"""
import pytest_doctest_custom, sys
def test_displayhook():
assert sys.displayhook is not pytest_doctest_custom.printer
test_displayhook() # Tests for import time AssertionError
'''
def test_disabled(self, testdir):
testdir.makepyfile(self.src)
result = testdir.runpytest("--verbose", "--doctest-modules")
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_disabled PASSED",
"*test_disabled*test_displayhook PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*test_disabled*test_displayhook PASSED",
])
def test_repr(self, testdir):
args = "--verbose", "--doctest-modules", "--doctest-repr=repr"
testdir.makepyfile(self.src.replace("False", "True"))
result = testdir.runpytest(*args)
result.assert_outcomes(passed=2, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_repr PASSED",
"*test_repr*test_displayhook PASSED",
] if SPLIT_DOCTEST else [
"*doctest] PASSED",
"*test_repr*test_displayhook PASSED",
])
def test_print(self, testdir, here):
if PY2:
testdir.makepyfile(p=" def p(value): print(value)" + \
JYTHON_FIX * (JYTHON and not SPLIT_DOCTEST))
args = "--verbose", "--doctest-modules", "--doctest-repr=p:p"
extra = [] if SPLIT_DOCTEST else ["*doctest] PASSED"]
passed = 3 - SPLIT_DOCTEST
else:
args = "--verbose", "--doctest-modules", "--doctest-repr=print"
extra = []
passed = 2
testdir.makepyfile("True".join(self.src.rpartition("False")[::2]))
result = testdir.runpytest(*args)
result.assert_outcomes(passed=passed, skipped=0, failed=0)
result.stdout.fnmatch_lines(extra + [
"*test_print PASSED" if SPLIT_DOCTEST else "*doctest] PASSED",
"*test_print*test_displayhook PASSED",
])
def test_none(self, testdir):
args = "--verbose", "--doctest-modules", "--doctest-repr=None"
exc_msg = "*TypeError*'NoneType' object is not callable*"
testdir.makepyfile(self.src)
result = testdir.runpytest(*args)
result.assert_outcomes(passed=1, skipped=0, failed=1)
result.stdout.fnmatch_lines([
"*test_none FAILED" if SPLIT_DOCTEST else "*doctest] FAILED",
"*test_none*test_displayhook PASSED",
"*FAILURES*",
"*doctest*",
"*" + self.src[:self.src.find("\n")].strip(), # 1st src line
"UNEXPECTED EXCEPTION*" + exc_msg,
exc_msg,
])
assert result.stderr.str().strip() == ""
class TestDoctestOutputsNone(object):
src = '''
"""
>>> None
>>> 2 + 2
4
>>> print("Returns None")
Returns None
"""
from __future__ import print_function
print = print
'''
if JYTHON:
src += JYTHON_FIX
def test_print_as_repr(self, testdir, here):
testdir.makepyfile(self.src)
arg = "test_print_as_repr:print" if PY2 else "print"
result = testdir.runpytest("--verbose", "--doctest-modules",
"--doctest-repr", arg)
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines([
"*test_print_as_repr PASSED"
if SPLIT_DOCTEST else
"*doctest] PASSED",
])
def test_help_message(testdir):
testdir.runpytest("--help").stdout.fnmatch_lines([
pytest_doctest_custom.HELP["plugin"].join("*:"),
pytest_doctest_custom.HELP["repr"][:30].join("**"),
])
@pytest.mark.skipif("TOXENV" not in os.environ, reason="Not running with tox")
def test_tox_python_pytest_versions():
"""Meta-test to ensure Python and py.test versions are correct."""
py_ver, pytest_ver = os.environ["TOXENV"].split("-")[:2]
if PYPY:
assert py_ver == ("pypy" if PY2 else "pypy3")
elif JYTHON:
assert py_ver == "jython"
else:
assert py_ver == "py{0}{1}".format(*sys.version_info)
assert pytest_ver == "pytest" + pytest.__version__.replace(".", "")
|
danilobellini/pytest-doctest-custom
|
test_pytest_doctest_custom.py
|
Python
|
mit
| 22,753
|
from django.db import models
from network.exceptions import MacNotFound
from network.providers import registry
class SwitchManager(models.Manager):
def flip_vlan(self, mac, vlan_number=None):
found_mac = False
enabled_switches = super(SwitchManager, self).get_queryset().filter(enabled=True)
for switch in enabled_switches:
try:
switch.flip_vlan(mac, vlan_number)
            except MacNotFound:
continue
found_mac = True
return found_mac
class UplinkPort(models.Model):
port = models.IntegerField()
switch = models.ForeignKey('network.Switch', related_name='uplink_ports')
class Meta:
unique_together = (
('port', 'switch'),
)
def __unicode__(self):
return '%s' % self.port
class Switch(models.Model):
name = models.CharField(max_length=50)
ip = models.GenericIPAddressField()
port = models.IntegerField(default=22)
username = models.CharField(max_length=50)
password = models.CharField(max_length=50)
switch_vlan_dirty = models.ForeignKey('network.VLAN', verbose_name='Dirty',
                                          blank=True, null=True,
related_name='switch_vlan_dirty',
limit_choices_to={'vlan_type': 'DI'})
switch_vlan_clean = models.ForeignKey('network.VLAN', verbose_name='Clean',
                                          blank=True, null=True,
related_name='switch_vlan_clean',
limit_choices_to={'vlan_type': 'CL'},)
ports = models.IntegerField(default=24, verbose_name='# of Ports')
requires_authentication = models.BooleanField(default=True)
provider = models.CharField(max_length=30, choices=registry.as_choices(), verbose_name='Type')
enabled = models.BooleanField(default=False)
objects = SwitchManager()
_provider_cache = None
class Meta:
verbose_name_plural = 'Switches'
def __unicode__(self):
return '{0} ({1})'.format(self.name, self.ip)
def get_provider(self):
if not self._provider_cache:
return registry.by_id(self.provider)
return self._provider_cache
def connect(self):
provider = self.get_provider()
provider.connect(self)
'''
def get_shell(self):
provider = self.get_provider()
provider.get_interactive_shell()
'''
def get_shell(self):
provider = self.get_provider()
provider.invoke_shell()
# Clean the initial data buffer
provider.receive_data()
def run_cmd(self, cmd):
provider = self.get_provider()
provider.run_command(cmd)
output = provider.receive_data()
return output
def set_vlan(self, vlan_number):
pass
def set_port_vlan(self, vlan_number):
ports = self.uplink_ports.values_list('port', flat=True)
def flip_vlan(self, mac, vlan_number=None):
if not vlan_number:
vlan_number = self.switch_vlan_clean.vlan_num
self.connect()
try:
self.get_shell()
port = self.get_provider().find_mac_address(mac)
self.get_provider().change_vlan(port, vlan_number)
        except MacNotFound:
            # The finally clause below handles the disconnect; re-raise as-is.
            raise
        except Exception:
            pass
finally:
self.disconnect()
'''
def get_channel(self):
self.connect()
provider = self.get_provider()
chan = provider.invoke_shell()
return chan
'''
def disconnect(self):
provider = self.get_provider()
provider.disconnect()
class VLAN(models.Model):
# Types of VLANs
DIRTY = 'DI'
CLEAN = 'CL'
NONE = 'NO'
PUBLIC_VLANS = (
DIRTY,
CLEAN,
)
TYPES_OF_VLANS = (
(DIRTY, 'Dirty'),
(CLEAN, 'Clean'),
(NONE, 'None'),
)
vlan_name = models.CharField(max_length=50, verbose_name='Name')
vlan_num = models.IntegerField(verbose_name='VLAN #')
vlan_type = models.CharField(max_length=2, verbose_name='Type',
choices=TYPES_OF_VLANS, default=NONE)
vlan_desc = models.TextField(verbose_name='Description',
                                 null=True, blank=True)
def __unicode__(self):
return '{0}'.format(self.vlan_name)
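# --- Usage sketch (illustrative; the MAC address below is hypothetical) ---
# found = Switch.objects.flip_vlan('aa:bb:cc:dd:ee:ff')  # defaults to each switch's clean VLAN
# found = Switch.objects.flip_vlan('aa:bb:cc:dd:ee:ff', vlan_number=666)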
|
BCGamer/CheckIn-Server
|
network/models.py
|
Python
|
mit
| 4,543
|
# (c) 2017 Gregor Mitscha-Baude
import numpy as np
from nanopores.tools.polygons import Ball, Polygon, MultiPolygon, MultiPolygonPore
from nanopores.geometries.cylpore import MultiPore
from nanopores import user_params
params = user_params(
R = 100.,
R0 = 60.,
H0 = 70.,
H = 150.,
x0 = [0, 0, 46],
rMolecule = 2.1,
dim = 3,
no_membrane = True,
r0 = 13, # pore radius
angle = 40, # aperture angle in degrees
lcCenter = 0.3,
lcMolecule = 0.1,
h = 10.,
subs = "solid",
reconstruct = False,
poreregion = True,
)
# SiN membrane thickness (in vertical direction)
lsin = 50.
# Au membrane thickness (in vertical direction)
lau = 40.
# Au thickness in radial direction
rlau = 10.
# SAM layer thickness (in vertical direction)
lsam = 3
l0 = lau + lsin + lsam
angle2 = params.angle/2. * np.pi/180.
tan = np.tan(angle2)
sin = np.sin(angle2)
cos = np.cos(angle2)
l = l0/2.
r0 = params.r0
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
R = params.R
sam = [[r0, -l], [r1, l], [R, l], [R, l - lsam],
[rsam - tan*(lsam - l0), l - lsam], [rsam, -l]]
au = [sam[5], sam[4], sam[3], [R, -l + lsin], [rsin + tan*lsin, -l + lsin],
[rsin, -l]]
sin = [au[5], au[4], au[3], [R, -l]]
p = MultiPore(**params)
p.add_polygons(sam=sam, au=au, sin=sin)
receptor = Ball([30.,0.,30.], 7., lc=0.1)
p.add_balls(receptor=receptor)
geo = p.build(params.h, params.subs, params.reconstruct)
P = p.protein
P.plot(".k")
from matplotlib import pyplot as plt
plt.xlim(0, R + 5)
print geo
print geo.params
geo.plot_subdomains()
geo.plot_boundaries(interactive=True)
|
mitschabaude/nanopores
|
scripts/wei/test_geo.py
|
Python
|
mit
| 1,625
|
import numpy as np
import argparse
from pprint import pprint
from sklearn import mixture
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn import decomposition
from LogEntry import LogEntry
from LogEntry import db
from datetime import datetime
from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange
from numpy import arange
args = None
format_string = '%H:%M:%S %m/%d/%Y'
def parse_args():
global args
parser = argparse.ArgumentParser()
args = parser.parse_args()
def main():
g = mixture.GMM(n_components=4)
log_entries = load_light()
light_data = [min(row.light_reading, 120) for row in log_entries]
timestamps = [datetime.strptime(row.timestamp, format_string) for row in log_entries]
g.fit(light_data)
predictions = predict(g, light_data)
light_dict = {}
inside = bin_by_hour(timestamps, predictions, [0,1])
outside = bin_by_hour(timestamps, predictions, [2,3])
pprint(inside)
pprint(outside)
def plot_light_data(timestamps, predictions):
fig, ax = plt.subplots()
ax.plot_date(timestamps, predictions, 'b')
ax.xaxis.set_minor_locator(HourLocator(arange(0,25,6)))
ax.xaxis.set_minor_formatter(DateFormatter('%H'))
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%a'))
ax.fmt_xdata = DateFormatter('%H:%M:%S')
fig.autofmt_xdate()
plt.show()
def bin_by_hour(timestamps, predictions, clusters):
filtered = [timestamps[index] for index, entry in enumerate(timestamps) if predictions[index] in clusters]
buckets = {hour: 0 for hour in range(24)}
for time in filtered:
hour = time.hour
buckets[hour] = buckets.get(hour, 0) + 1
return buckets
def predict(gmm, data):
results = gmm.predict(data)
smoothed = smooth_results(results)
converter = make_converter(gmm, smoothed)
return [converter[value] for value in smoothed]
def load_light():
light_data = LogEntry.select()
return sorted(light_data, key=lambda row: datetime.strptime(row.timestamp, format_string))
def smooth_results(data):
new_data = []
for index in range(len(data)):
new_data.append(get_most_common(data, index))
return new_data
def make_converter(gmm, data):
converter = {}
means = [[index, value[0]] for index, value in enumerate(gmm.means_)]
for index, mean in enumerate(sorted(means, key=lambda means: means[1])):
converter[mean[0]] = index
return converter
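# Example (illustrative): for gmm.means_ == [[5.], [1.], [3.]] the converter is
# {1: 0, 2: 1, 0: 2}, i.e. components are relabelled by increasing mean.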
def get_most_common(data, index):
window_size = 100
start = max(index - window_size, 0)
end = min(index + window_size, len(data))
buckets = {}
for value in data[start:end]:
buckets[value] = buckets.get(value, 0) + 1
return max(buckets.iterkeys(), key=(lambda key: buckets[key]))
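# Example (illustrative): smooth_results([0, 0, 1, 0, 0]) -> [0, 0, 0, 0, 0];
# the lone 1 is outvoted within the +/-100-sample window around each index.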
if __name__ == '__main__':
main()
|
jpinsonault/android_sensor_logger
|
python_scripts/cluster_light.py
|
Python
|
mit
| 2,894
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
LOGGER = logging.getLogger(__name__)
__version__ = '0.2-dev'
def inurl(needles, haystack, position='any'):
"""convenience function to make string.find return bool"""
count = 0
# lowercase everything to do case-insensitive search
haystack2 = haystack.lower()
for needle in needles:
needle2 = needle.lower()
if position == 'any':
if haystack2.find(needle2) > -1:
count += 1
elif position == 'end':
if haystack2.endswith(needle2):
count += 1
elif position == 'begin':
if haystack2.startswith(needle2):
count += 1
# assessment
if count > 0:
return True
return False
def sniff_link(url):
"""performs basic heuristics to detect what the URL is"""
protocol = None
link = url.strip()
# heuristics begin
if inurl(['service=CSW', 'request=GetRecords'], link):
protocol = 'OGC:CSW'
elif inurl(['service=SOS', 'request=GetObservation'], link):
protocol = 'OGC:SOS'
elif inurl(['service=WCS', 'request=GetCoverage'], link):
protocol = 'OGC:WCS'
elif inurl(['service=WFS', 'request=GetFeature'], link):
protocol = 'OGC:WFS'
elif inurl(['service=WMS', 'request=GetMap'], link):
protocol = 'OGC:WMS'
elif inurl(['service=WPS', 'request=Execute'], link):
protocol = 'OGC:WPS'
elif inurl(['arcims'], link):
protocol = 'ESRI:ArcIMS'
elif inurl(['arcgis'], link):
protocol = 'ESRI:ArcGIS'
elif inurl(['mpk'], link, 'end'):
protocol = 'ESRI:MPK'
elif inurl(['opendap'], link):
protocol = 'OPeNDAP:OPeNDAP'
elif inurl(['ncss'], link):
protocol = 'UNIDATA:NCSS'
elif inurl(['cdmremote'], link):
protocol = 'UNIDATA:CDM'
elif inurl(['gml'], link, 'end'):
protocol = 'OGC:GML'
elif inurl(['htm', 'html', 'shtml'], link, 'end'):
protocol = 'WWW:LINK'
# extra tests
elif all([inurl(['census.gov/geo/tiger'], link),
inurl(['zip'], link, 'end')]):
protocol = 'ESRI:SHAPEFILE'
elif inurl(['7z', 'bz2', 'gz', 'rar', 'tar.gz', 'tgz', 'zip'],
link, 'end'):
protocol = 'WWW:DOWNLOAD'
elif inurl(['kml', 'kmz'], link, 'end'):
protocol = 'OGC:KML'
else:
LOGGER.info('No link type detected')
return protocol
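# --- Usage sketch (illustrative) ---
# sniff_link('http://example.org/wms?service=WMS&request=GetMap')  # 'OGC:WMS'
# sniff_link('http://example.org/data/roads.zip')                  # 'WWW:DOWNLOAD'
# sniff_link('http://example.org/about.html')                      # 'WWW:LINK'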
|
geopython/geolinks
|
geolinks/__init__.py
|
Python
|
mit
| 3,712
|
# Sebastian Raschka, 2016
"""
source: http://adventofcode.com/2016/day/2
DESCRIPTION
--- Day 2: Bathroom Security ---
You arrive at Easter Bunny Headquarters under cover of darkness.
However, you left in such a rush that you forgot to use the bathroom!
Fancy office buildings like this one usually have keypad locks on their
bathrooms, so you search the front desk for the code.
"In order to improve security," the document you find says, "bathroom codes
will no longer be written down. Instead, please memorize and follow the
procedure below to access the bathrooms."
The document goes on to explain that each button to be pressed can be found by
starting on the previous button and moving to adjacent buttons on the
keypad: U moves up, D moves down, L moves left, and R moves right.
Each line of instructions corresponds to one button, starting at the
previous button (or, for the first line, the "5" button); press whatever
button you're on at the end of each line. If a move doesn't lead to a button,
ignore it.
You can't hold it much longer, so you decide to figure out the code as
you walk to the bathroom. You picture a keypad like this:
1 2 3
4 5 6
7 8 9
Suppose your instructions are:
ULL
RRDDD
LURDL
UUUUD
You start at "5" and move up (to "2"), left (to "1"), and left
(you can't, and stay on "1"), so the first button is 1.
Starting from the previous button ("1"), you move right twice (to "3")
and then down three times
(stopping at "9" after two moves and ignoring the third), ending up with 9.
Continuing from "9", you move left, up, right, down, and left, ending with 8.
Finally, you move up four times (stopping at "2"),
then down once, ending with 5.
So, in this example, the bathroom code is 1985.
Your puzzle input is the instructions from the document you found at the
front desk. What is the bathroom code?
--- Part Two ---
You finally arrive at the bathroom (it's a several minute walk from the
lobby so visitors can behold the many fancy conference rooms and water
coolers on this floor) and go to punch in the code.
Much to your bladder's dismay, the keypad is not at all like you imagined it.
Instead, you are confronted with the result of hundreds
of man-hours of bathroom-keypad-design meetings:
1
2 3 4
5 6 7 8 9
A B C
D
You still start at "5" and stop when you're at an edge,
but given the same instructions as above, the outcome is very different:
You start at "5" and don't move at all (up and left are both edges),
ending at 5.
Continuing from "5", you move right twice and down three times
(through "6", "7", "B", "D", "D"), ending at D.
Then, from "D", you move five more times (through "D", "B", "C", "C", "B"),
ending at B.
Finally, after five more moves, you end at 3.
So, given the actual keypad layout, the code would be 5DB3.
Using the same instructions in your puzzle input,
what is the correct bathroom code?
"""
num_grid = [["_", "_", "1", "_", "_"],
["_", "2", "3", "4", "_"],
["5", "6", "7", "8", "9"],
["_", "A", "B", "C", "_"],
["_", "_", "D", "_", "_"]]
def move_input_row(curr_pos, move_str):
for char in move_str:
if char == 'U':
if (curr_pos[0] - 1 >= 0 and
num_grid[curr_pos[0] - 1][curr_pos[1]] != '_'):
curr_pos[0] -= 1
elif char == 'D':
if (curr_pos[0] + 1 <= 4 and
num_grid[curr_pos[0] + 1][curr_pos[1]] != '_'):
curr_pos[0] += 1
elif char == 'R':
if (curr_pos[1] + 1 <= 4 and
num_grid[curr_pos[0]][curr_pos[1] + 1] != '_'):
curr_pos[1] += 1
elif char == 'L':
if (curr_pos[1] - 1 >= 0 and
num_grid[curr_pos[0]][curr_pos[1] - 1] != '_'):
curr_pos[1] -= 1
return num_grid[curr_pos[0]][curr_pos[1]]
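# Note: move_input_row mutates curr_pos in place, so successive calls from
# move_input_all continue from wherever the previous line of moves ended.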
def move_input_all(move_str_all):
    digits = ''
    curr_pos = [2, 0]  # start on the "5" key of the padded grid
    move_strs = move_str_all.split('\n')
    for ms in move_strs:
        digits += move_input_row(curr_pos, ms.strip())
    return digits
def test_1():
test_str = """ULL
RRDDD
LURDL
UUUUD"""
result = move_input_all(test_str)
assert result == '5DB3'
def part_2_solution():
test_str = """LURLDDLDULRURDUDLRULRDLLRURDUDRLLRLRURDRULDLRLRRDDULUDULURULLURLURRRLLDURURLLUURDLLDUUDRRDLDLLRUUDURURRULURUURLDLLLUDDUUDRULLRUDURRLRLLDRRUDULLDUUUDLDLRLLRLULDLRLUDLRRULDDDURLUULRDLRULRDURDURUUUDDRRDRRUDULDUUULLLLURRDDUULDRDRLULRRRUUDUURDULDDRLDRDLLDDLRDLDULUDDLULUDRLULRRRRUUUDULULDLUDUUUUDURLUDRDLLDDRULUURDRRRDRLDLLURLULDULRUDRDDUDDLRLRRDUDDRULRULULRDDDDRDLLLRURDDDDRDRUDUDUUDRUDLDULRUULLRRLURRRRUUDRDLDUDDLUDRRURLRDDLUUDUDUUDRLUURURRURDRRRURULUUDUUDURUUURDDDURUDLRLLULRULRDURLLDDULLDULULDDDRUDDDUUDDUDDRRRURRUURRRRURUDRRDLRDUUULLRRRUDD
DLDUDULDLRDLUDDLLRLUUULLDURRUDLLDUDDRDRLRDDUUUURDULDULLRDRURDLULRUURRDLULUDRURDULLDRURUULLDLLUDRLUDRUDRURURUULRDLLDDDLRUDUDLUDURLDDLRRUUURDDDRLUDDDUDDLDUDDUUUUUULLRDRRUDRUDDDLLLDRDUULRLDURLLDURUDDLLURDDLULLDDDRLUDRDDLDLDLRLURRDURRRUDRRDUUDDRLLUDLDRLRDUDLDLRDRUDUUULULUDRRULUDRDRRLLDDRDDDLULURUURULLRRRRRDDRDDRRRDLRDURURRRDDULLUULRULURURDRRUDURDDUURDUURUURUULURUUDULURRDLRRUUDRLLDLDRRRULDRLLRLDUDULRRLDUDDUUURDUDLDDDUDL
RURDRUDUUUUULLLUULDULLLDRUULURLDULULRDDLRLLRURULLLLLLRULLURRDLULLUULRRDURRURLUDLULDLRRULRDLDULLDDRRDLLRURRDULULDRRDDULDURRRUUURUDDURULUUDURUULUDLUURRLDLRDDUUUUURULDRDUDDULULRDRUUURRRDRLURRLUUULRUDRRLUDRDLDUDDRDRRUULLLLDUUUULDULRRRLLRLRLRULDLRURRLRLDLRRDRDRLDRUDDDUUDRLLUUURLRLULURLDRRULRULUDRUUURRUDLDDRRDDURUUULLDDLLDDRUDDDUULUDRDDLULDDDDRULDDDDUUUURRLDUURULRDDRDLLLRRDDURUDRRLDUDULRULDDLDDLDUUUULDLLULUUDDULUUDLRDRUDLURDULUDDRDRDRDDURDLURLULRUURDUDULDDLDDRUULLRDRLRRUURRDDRDUDDLRRLLDRDLUUDRRDDDUUUDLRRLDDDUDRURRDDUULUDLLLRUDDRULRLLLRDLUDUUUUURLRRUDUDDDDLRLLULLUDRDURDDULULRDRDLUDDRLURRLRRULRL
LDUURLLULRUURRDLDRUULRDRDDDRULDLURDDRURULLRUURRLRRLDRURRDRLUDRUUUULLDRLURDRLRUDDRDDDUURRDRRURULLLDRDRDLDUURLDRUULLDRDDRRDRDUUDLURUDDLLUUDDULDDULRDDUUDDDLRLLLULLDLUDRRLDUUDRUUDUDUURULDRRLRRDLRLURDRURURRDURDURRUDLRURURUUDURURUDRURULLLLLUDRUDUDULRLLLRDRLLRLRLRRDULRUUULURLRRLDRRRDRULRUDUURRRRULDDLRULDRRRDLDRLUDLLUDDRURLURURRLRUDLRLLRDLLDRDDLDUDRDLDDRULDDULUDDLLDURDULLDURRURRULLDRLUURURLLUDDRLRRUUDULRRLLRUDRDUURLDDLLURRDLRUURLLDRDLRUULUDURRDULUULDDLUUUDDLRRDRDUDLRUULDDDLDDRUDDD
DRRDRRURURUDDDRULRUDLDLDULRLDURURUUURURLURURDDDDRULUDLDDRDDUDULRUUULRDUDULURLRULRDDLDUDLDLULRULDRRLUDLLLLURUDUDLLDLDRLRUUULRDDLUURDRRDLUDUDRULRRDDRRLDUDLLDLURLRDLRUUDLDULURDDUUDDLRDLUURLDLRLRDLLRUDRDUURDDLDDLURRDDRDRURULURRLRLDURLRRUUUDDUUDRDRULRDLURLDDDRURUDRULDURUUUUDULURUDDDDUURULULDRURRDRDURUUURURLLDRDLDLRDDULDRLLDUDUDDLRLLRLRUUDLUDDULRLDLLRLUUDLLLUUDULRDULDLRRLDDDDUDDRRRDDRDDUDRLLLDLLDLLRDLDRDLUDRRRLDDRLUDLRLDRUURUDURDLRDDULRLDUUUDRLLDRLDLLDLDRRRLLULLUDDDLRUDULDDDLDRRLLRDDLDUULRDLRRLRLLRUUULLRDUDLRURRRUULLULLLRRURLRDULLLRLDUUUDDRLRLUURRLUUUDURLRDURRDUDDUDDRDDRUD"""
result = move_input_all(test_str)
return result
if __name__ == '__main__':
test_1()
print('Part 2 solution:', part_2_solution())
|
rasbt/advent-of-code-2016
|
python_code/aoc_02_02.py
|
Python
|
mit
| 7,033
|
from webscraper import TradeList
from datacleaner import DataCleaner
from analysis import MarketValue
def main():
# Web scraper instance
listings = TradeList('Toyota', 'Yaris', 'OX12JD', '100')
    # Run the scraper over the listing pages
price_array, attributes, url_ids, urls, category = listings.run(listings, pages=3, start_page=30, delay=2)
# Format output
print('='*8)
print('PRICE DATA')
print(price_array[:10])
print(' →(Viewing 10 of {})'.format(len(price_array)))
print('')
print('ATTRIBUTES')
print(attributes[:4])
print(' →(Viewing 4 of {})'.format(len(attributes)))
print('='*8)
print('Array sizes {} {} {} {}'.format(len(price_array), len(attributes), len(url_ids), len(urls)))
print('')
print('CAT:')
print(category[:40])
print(len(category))
# Cleaning data
clean = DataCleaner(price_array, attributes, url_ids, urls, category)
# Display data frame
df = clean.get_df()
print(df.iloc[:5, :-1]) # 5 rows, remove url column
# Save results
df.to_csv('yaris{}.csv'.format(len(df)))
# Analyse results
MarketValue(df)
if __name__ == '__main__':
main()
|
lukexyz/Market-Analysis-Project
|
main.py
|
Python
|
mit
| 1,243
|
from setuptools import setup, find_packages
version = '0.2'
setup(name='arduino-sketch',
version=version,
description="Compile and upload Arduino sketches from command line",
long_description="""""",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Embedded Systems",
],
keywords='arduino',
author='Oliver Tonnhofer',
author_email='olt@bogosoft.com',
url='https://github.com/ktt-ol/arduino-sketch',
license='MIT License',
packages=find_packages(),
package_data={'arduino_sketch': ['*.ini', 'Arduino.mk']},
zip_safe=False,
install_requires=[
],
entry_points="""
# -*- Entry points: -*-
[console_scripts]
arduino-sketch = arduino_sketch.app:main
""",
)
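
# Once installed (e.g. via `pip install .`), the console_scripts entry point
# above exposes the `arduino-sketch` command on the PATH.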
|
ktt-ol/arduino-sketch
|
setup.py
|
Python
|
mit
| 1,135
|
import os
from elrond.ui import SaveAs
from elrond.util import Object
class Callback(Object):
def f(self, selection):
print 'selection =', selection
chooser.exit()
callback = Callback()
chooser = SaveAs()
chooser.callback = callback.f
chooser.get_selection(path=os.environ['ELROND_HOME'], filename='build.sh')
chooser.show()
chooser.run()
# $Id:$
#
# Local Variables:
# indent-tabs-mode: nil
# python-continuation-offset: 2
# python-indent: 8
# End:
# vim: ai et si sw=8 ts=8
|
cdsi/elrond
|
bin/elrond-saveas.py
|
Python
|
mit
| 524
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ActionGroupsOperations(object):
"""ActionGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2018_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
action_group_name, # type: str
action_group, # type: "_models.ActionGroupResource"
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Create a new action group or update an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group: The action group to create or use for the update.
:type action_group: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group, 'ActionGroupResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
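    # Request lifecycle shared by the operations below: format the URL from
    # path arguments, attach api-version and headers, serialize the body if
    # any, run the request through the client pipeline, map error status
    # codes, then deserialize the typed response model.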
def get(
self,
resource_group_name, # type: str
action_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Get an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
action_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
action_group_name, # type: str
action_group_patch, # type: "_models.ActionGroupPatchBody"
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Updates an existing action group's tags. To update other fields use the CreateOrUpdate method.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group_patch: Parameters supplied to the operation.
:type action_group_patch: ~$(python-base-namespace).v2018_03_01.models.ActionGroupPatchBody
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group_patch, 'ActionGroupPatchBody')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def list_by_subscription_id(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ActionGroupList"]
"""Get a list of all action groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2018_03_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription_id.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups'} # type: ignore
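    # Paging note: ItemPaged lazily drives the closures above. get_next()
    # performs the HTTP call and extract_data() yields the items of the
    # returned ActionGroupList; the continuation token is None here, so the
    # whole listing arrives in a single page.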
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ActionGroupList"]
"""Get a list of all action groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2018_03_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups'} # type: ignore
def enable_receiver(
self,
resource_group_name, # type: str
action_group_name, # type: str
enable_request, # type: "_models.EnableRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Enable a receiver in an action group. This changes the receiver's status from Disabled to
Enabled. This operation is only supported for Email or SMS receivers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param enable_request: The receiver to re-enable.
:type enable_request: ~$(python-base-namespace).v2018_03_01.models.EnableRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.enable_receiver.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(enable_request, 'EnableRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 409]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
enable_receiver.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe'} # type: ignore
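
# Hedged usage sketch (not part of the generated module; client construction
# details assumed): this operations group is normally reached through a
# service client attribute rather than instantiated directly, e.g.:
#
#   from azure.mgmt.monitor import MonitorManagementClient
#   client = MonitorManagementClient(credential, subscription_id)
#   group = client.action_groups.get('my-resource-group', 'my-action-group')
#   for ag in client.action_groups.list_by_resource_group('my-resource-group'):
#       print(ag.name)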
|
Azure/azure-sdk-for-python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2018_03_01/operations/_action_groups_operations.py
|
Python
|
mit
| 25,115
|
"""
A module for plotting experimental results
"""
__author__ = 'wittawat'
from builtins import range
import freqopttest.ex.exglobal as exglo
import freqopttest.glo as glo
import matplotlib.pyplot as plt
import autograd.numpy as np
def plot_prob_stat_above_thresh(ex, fname, h1_true, func_xvalues, xlabel,
func_title=None):
"""
    Plot the empirical probability that the statistic is above the threshold.
This can be interpreted as type-1 error (when H0 is true) or test power
(when H1 is true). The plot is against the specified x-axis.
- ex: experiment number
- fname: file name of the aggregated result
- h1_true: True if H1 is true
    - func_xvalues: function taking the results dictionary and returning the
      values to be used for the x-axis.
- xlabel: label of the x-axis.
- func_title: a function: results dictionary -> title of the plot
    Return the loaded results.
"""
results = glo.ex_load_result(ex, fname)
f_pval = lambda job_result: job_result['test_result']['h0_rejected']
#f_pval = lambda job_result: job_result['h0_rejected']
vf_pval = np.vectorize(f_pval)
pvals = vf_pval(results['test_results'])
repeats, _, n_methods = results['test_results'].shape
mean_rejs = np.mean(pvals, axis=0)
#std_pvals = np.std(pvals, axis=0)
#std_pvals = np.sqrt(mean_rejs*(1.0-mean_rejs))
xvalues = func_xvalues(results)
#ns = np.array(results[xkey])
#te_proportion = 1.0 - results['tr_proportion']
#test_sizes = ns*te_proportion
line_styles = exglo.func_plot_fmt_map()
method_labels = exglo.get_func2label_map()
func_names = [f.__name__ for f in results['method_job_funcs'] ]
for i in range(n_methods):
te_proportion = 1.0 - results['tr_proportion']
fmt = line_styles[func_names[i]]
#plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
method_label = method_labels[func_names[i]]
plt.plot(xvalues, mean_rejs[:, i], fmt, label=method_label)
'''
else:
# h0 is true
z = stats.norm.isf( (1-confidence)/2.0)
for i in range(n_methods):
phat = mean_rejs[:, i]
conf_iv = z*(phat*(1-phat)/repeats)**0.5
#plt.errorbar(test_sizes, phat, conf_iv, fmt=line_styles[i], label=method_labels[i])
plt.plot(test_sizes, mean_rejs[:, i], line_styles[i], label=method_labels[i])
'''
ylabel = 'Test power' if h1_true else 'Type-I error'
plt.ylabel(ylabel)
plt.xlabel(xlabel)
    plt.xticks(np.hstack(xvalues))
alpha = results['alpha']
"""
if not h1_true:
# plot Wald interval if H0 is true
# https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
z = stats.norm.isf( (1-confidence)/2.0)
gap = z*(alpha*(1-alpha)/repeats)**0.5
lb = alpha-gap
ub = alpha+gap
plt.plot(test_sizes, np.repeat(lb, len(test_sizes)), '--', linewidth=2,
label='99%-Conf', color='k')
plt.plot(test_sizes, np.repeat(ub, len(test_sizes)), '--', linewidth=2, color='k')
plt.ylim([lb-0.005, ub+0.005])
"""
plt.legend(loc='best')
title = '%s. %d trials. $\\alpha$ = %.2g.'%( results['prob_label'],
repeats, alpha) if func_title is None else func_title(results)
plt.title(title)
#plt.grid()
return results
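
# Hedged usage sketch (the experiment number, file name, and 'sample_sizes'
# key are hypothetical):
#
#   results = plot_prob_stat_above_thresh(
#       ex=1, fname='ex1_results.p', h1_true=True,
#       func_xvalues=lambda r: r['sample_sizes'], xlabel='sample size n')
#   plt.show()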
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
results = glo.ex_load_result(ex, fname)
value_accessor = lambda job_results: job_results['time_secs']
vf_pval = np.vectorize(value_accessor)
    # results['test_results'] is an array of dictionaries of the form
    # {'test_result': <dict returned by perform_test(te)>, 'time_secs': ...}
times = vf_pval(results['test_results'])
repeats, _, n_methods = results['test_results'].shape
time_avg = np.mean(times, axis=0)
time_std = np.std(times, axis=0)
xvalues = func_xvalues(results)
#ns = np.array(results[xkey])
#te_proportion = 1.0 - results['tr_proportion']
#test_sizes = ns*te_proportion
line_styles = exglo.func_plot_fmt_map()
method_labels = exglo.get_func2label_map()
func_names = [f.__name__ for f in results['method_job_funcs'] ]
for i in range(n_methods):
te_proportion = 1.0 - results['tr_proportion']
fmt = line_styles[func_names[i]]
#plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
method_label = method_labels[func_names[i]]
plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
label=method_label)
ylabel = 'Time (s)'
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.gca().set_yscale('log')
plt.xlim([np.min(xvalues), np.max(xvalues)])
plt.xticks( xvalues, xvalues)
plt.legend(loc='best')
title = '%s. %d trials. '%( results['prob_label'],
repeats ) if func_title is None else func_title(results)
plt.title(title)
#plt.grid()
return results
|
wittawatj/interpretable-test
|
freqopttest/plot.py
|
Python
|
mit
| 5,055
|
import pytest
import testinfra
def test_uninstall_rpm(Command):
cmd = Command('yum remove -y apache-storm')
@pytest.mark.parametrize('name', [
'apache-storm',
])
def test_packages_should_not_be_installed(Package, name):
p = Package(name)
assert p.is_installed == False
def test_opt_apache_storm_does_not_exist(File):
f = File('/opt/apache-storm')
assert f.exists == False
assert f.is_directory == False
def test_etc_sysconfig_storm_does_not_exist(File):
f = File('/etc/sysconfig/storm')
assert f.exists == False
@pytest.mark.parametrize('name', [
'storm-nimbus',
'storm-drpc',
'storm-logviewer',
'storm-supervisor',
'storm-ui',
])
def test_etc_initd_scripts_do_not_exist(File, name):
f = File('/etc/init.d/{0}'.format(name))
assert f.exists == False
def test_var_run_storm_does_not_exist(File):
f = File('/var/run/storm')
assert f.exists == False
def test_var_log_apache_storm_does_not_exist(File):
f = File('/var/log/apache-storm')
assert f.exists == False
@pytest.mark.parametrize('name', [
'storm-nimbus',
'storm-drpc',
'storm-logviewer',
'storm-supervisor',
'storm-ui',
])
def test_service_is_stopped(Service, name):
svc = Service(name)
assert svc.is_running == False
assert svc.is_enabled == False
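
# Note: these tests rely on testinfra's capitalized fixtures (Command,
# Package, File, Service), the style used before the unified `host` fixture;
# pytest injects them per target host.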
|
codylane/storm-installer
|
tests/test_uninstall.py
|
Python
|
mit
| 1,320
|
# region Description
"""
test_network.py: Unit tests for Raw-packet network classes
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from sys import path
from os.path import dirname, abspath
import unittest
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Main class - NetworkTest
class NetworkTest(unittest.TestCase):
# region Properties
path.append(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))))
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import RawEthernet, RawARP, RawIPv4, RawUDP, RawDNS, RawICMPv4, RawDHCPv4
from raw_packet.Utils.network import RawIPv6, RawICMPv6, RawDHCPv6
base: Base = Base()
ethernet: RawEthernet = RawEthernet()
arp: RawARP = RawARP()
ipv4: RawIPv4 = RawIPv4()
ipv6: RawIPv6 = RawIPv6()
udp: RawUDP = RawUDP()
dns: RawDNS = RawDNS()
icmpv4: RawICMPv4 = RawICMPv4()
dhcpv4: RawDHCPv4 = RawDHCPv4()
icmpv6: RawICMPv6 = RawICMPv6()
dhcpv6: RawDHCPv6 = RawDHCPv6()
# endregion
# region Test RawEthernet methods
def test_ethernet_init(self):
self.assertIn('00:18:de', self.ethernet.macs)
def test_ethernet_make_random_mac(self):
self.assertTrue(self.base.mac_address_validation(self.ethernet.make_random_mac()))
def test_ethernet_convert_mac(self):
# Convert string MAC address to bytes
self.assertEqual(self.ethernet.convert_mac('30:31:32:33:34:35', True, 41), b'012345')
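        # (0x30..0x35 are the ASCII codes of '0'..'5', hence b'012345'.)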
# Convert bytes MAC address to string
self.assertEqual(self.ethernet.convert_mac(b'012345', True, 41), '30:31:32:33:34:35')
# Bad MAC address string
self.assertIsNone(self.ethernet.convert_mac('30:31:32:33:34:356', False, 41))
# Bad MAC address string
self.assertIsNone(self.ethernet.convert_mac('30:31:32:33:34567', False, 41))
# Bad MAC address bytes
self.assertIsNone(self.ethernet.convert_mac(b'01234', False, 41))
def test_ethernet_get_mac_prefix(self):
# Prefix from MAC address string
self.assertEqual(self.ethernet.get_mac_prefix('ab:cd:ef:01:23:45', 3, True, 42), 'ABCDEF')
# Prefix from MAC address bytes
self.assertEqual(self.ethernet.get_mac_prefix(b'012345', 3, True, 42), '303132')
# Bad MAC address string
self.assertIsNone(self.ethernet.get_mac_prefix('30:31:32:33:34:356', 3, False, 42))
# Bad MAC address string
self.assertIsNone(self.ethernet.get_mac_prefix('30:31:32:33:34567', 3, False, 42))
# Bad MAC address bytes
self.assertIsNone(self.ethernet.get_mac_prefix(b'01234', 3, False, 42))
def test_ethernet_parse_header(self):
# Normal packet
self.assertEqual(self.ethernet.parse_header(b'6789@A012345\x08\x00', True, 43),
{'destination': '36:37:38:39:40:41', 'source': '30:31:32:33:34:35', 'type': 2048})
# Bad packet
self.assertIsNone(self.ethernet.parse_header(b'6789@A012345\x08\x00\x01', False, 43))
def test_ethernet_make_header(self):
# MAC addresses string
self.assertEqual(self.ethernet.make_header('30:31:32:33:34:35', '36:37:38:39:40:41', 2048, True, 44),
b'6789@A012345\x08\x00')
# Bad first MAC address bytes
self.assertIsNone(self.ethernet.make_header('30:31:32:33:34567', '36:37:38:39:40:41', 2048, False, 44))
# Bad second MAC address bytes
self.assertIsNone(self.ethernet.make_header('30:31:32:33:34:56', '36:37:38:39:40123', 2048, False, 44))
# Bad network type
self.assertIsNone(self.ethernet.make_header('30:31:32:33:34:56', '36:37:38:39:40:41', 123123, False, 44))
# endregion
# region Test RawARP methods
def test_arp_parse_packet(self):
# Normal packet
self.assertEqual(self.arp.parse_packet(b'\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8\x01\x01\x01' +
b'#Eg\x89\x0b\xc0\xa8\x01\x02', True, 45),
{'hardware-type': 1, 'protocol-type': 2048, 'hardware-size': 6, 'protocol-size': 4,
'opcode': 1, 'sender-mac': '01:23:45:67:89:0a', 'sender-ip': '192.168.1.1',
'target-mac': '01:23:45:67:89:0b', 'target-ip': '192.168.1.2'})
# Bad packet
self.assertIsNone(self.arp.parse_packet(b'\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8\x01\x01\x01' +
b'#Eg\x89\x0b\xc0\xa8\x01\x02\x03', False, 45))
def test_arp_make_packet(self):
# Normal
self.assertEqual(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 4,
True, 46),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8' +
b'\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')
# Bad ethernet src MAC address
self.assertIsNone(self.arp.make_packet('01:23:45:67:890ab', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 4,
False, 46))
# Bad ethernet dst MAC address
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:890ab', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 4,
False, 46))
# Bad sender MAC address
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0a', '01:23:45:67:890ab',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 4,
False, 46))
# Bad target MAC address
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0a', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:890ab', '192.168.1.2', 1, 1, 2048, 6, 4,
False, 46))
# Bad sender IP address
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.300', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 4,
False, 46))
# Bad target IP address
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.400', 1, 1, 2048, 6, 4,
False, 46))
# Bad ARP opcode
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 123123, 1, 2048, 6, 4,
False, 46))
# Bad hardware type
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 123123, 2048, 6, 4,
False, 46))
# Bad protocol type
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 123123, 6, 4,
False, 46))
# Bad hardware size
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 123123, 4,
False, 46))
# Bad protocol size
self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', 1, 1, 2048, 6, 123123,
False, 46))
def test_arp_make_request(self):
# Normal
self.assertEqual(self.arp.make_request('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', True, 47),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n' +
b'\xc0\xa8\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')
def test_arp_make_response(self):
# Normal
self.assertEqual(self.arp.make_response('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
'192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', True, 48),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x02\x01#Eg\x89\n' +
b'\xc0\xa8\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')
# endregion
# region Test RawIPv4 methods
def test_ipv4_make_random_ip(self):
self.assertTrue(self.base.ip_address_validation(self.ipv4.make_random_ip()))
def test_ipv4_parse_header(self):
# Normal
self.assertEqual(self.ipv4.parse_header(b'E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf6}' +
b'\xc0\xa8\x01\x01\xc0\xa8\x01\x02', True, 49),
{'version': 4, 'length': 5, 'dscp_ecn': 0, 'total-length': 28, 'identification': 256,
'flags': 0, 'fragment-offset': 0, 'time-to-live': 64, 'protocol': 17, 'checksum': 63101,
'source-ip': '192.168.1.1', 'destination-ip': '192.168.1.2'})
# Bad packet
self.assertIsNone(self.ipv4.parse_header(b'\x61\x00\x00\x1c\x8d/\x00\x00@\x11jN' +
b'\xc0\xa8\x01\x01\xc0\xa8\x01\x02', False, 49))
# Bad packet
self.assertIsNone(self.ipv4.parse_header(b'\x61\x00\x00\x1c\x8d/\x00\x00@\x11jN' +
b'\xc0\xa8\x01\x01\xc0\xa8\x01', False, 49))
def test_ipv4_make_header(self):
# Normal
self.assertEqual(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=8, transport_protocol_type=17, ttl=64,
identification=1, exit_on_failure=True, exit_code=50),
b'E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf6}\xc0\xa8\x01\x01\xc0\xa8\x01\x02')
# Bad source IP
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.300', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=8, transport_protocol_type=17, ttl=64,
identification=1, exit_on_failure=False, exit_code=50))
# Bad destination IP
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.400', data_len=0,
transport_protocol_len=8, transport_protocol_type=17, ttl=64,
identification=1, exit_on_failure=False, exit_code=50))
# Bad identification
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=8, transport_protocol_type=17, ttl=64,
identification=123123, exit_on_failure=False, exit_code=50))
# Bad data length
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=123123,
transport_protocol_len=8, transport_protocol_type=17, ttl=64,
identification=1, exit_on_failure=False, exit_code=50))
# Bad transport protocol header length
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=123123, transport_protocol_type=17, ttl=64,
identification=1, exit_on_failure=False, exit_code=50))
# Bad transport protocol type
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=8, transport_protocol_type=123123, ttl=64,
identification=1, exit_on_failure=False, exit_code=50))
# Bad ttl
self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
transport_protocol_len=8, transport_protocol_type=17, ttl=123123,
identification=1, exit_on_failure=False, exit_code=50))
# endregion
# region Test RawIPv6 methods
def test_ipv6_make_random_ip(self):
# Normal
self.assertTrue(self.base.ipv6_address_validation(self.ipv6.make_random_ip(octets=3,
prefix='fd00::',
exit_on_failure=True,
exit_code=51)))
# Bad prefix
self.assertIsNone(self.ipv6.make_random_ip(octets=1, prefix='fd00:::', exit_on_failure=False, exit_code=51))
# Bad octets count
self.assertIsNone(self.ipv6.make_random_ip(octets=123, prefix='fd00::', exit_on_failure=False, exit_code=51))
def test_ipv6_pack_addr(self):
# Normal
self.assertEqual(self.ipv6.pack_addr(ipv6_address='3132:3334::1', exit_on_failure=True, exit_code=52),
b'1234\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
# Bad IPv6 address
self.assertIsNone(self.ipv6.pack_addr(ipv6_address='fd00:::1', exit_on_failure=False, exit_code=52))
def test_ipv6_parse_header(self):
# Normal
self.assertEqual(self.ipv6.parse_header(b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x02', True, 53),
{'version': 6, 'traffic-class': 0, 'flow-label': 0, 'payload-length': 8, 'next-header': 17,
'hop-limit': 64, 'source-ip': 'fd00::1', 'destination-ip': 'fd00::2'})
# Bad packet
self.assertIsNone(self.ipv6.parse_header(b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00', False, 53))
# Bad packet
self.assertIsNone(self.ipv6.parse_header(b'E\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x02', False, 53))
def test_ipv6_make_header(self):
# Normal
self.assertEqual(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
flow_label=0, payload_len=8, next_header=17, hop_limit=64,
exit_on_failure=True, exit_code=54),
b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02')
# Bad source IP
self.assertIsNone(self.ipv6.make_header(source_ip='fd00:::1', destination_ip='fd00::2', traffic_class=0,
flow_label=0, payload_len=8, next_header=17, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad destination IP
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00:::2', traffic_class=0,
flow_label=0, payload_len=8, next_header=17, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad traffic class
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=123123123,
flow_label=0, payload_len=8, next_header=17, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad flow label
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
flow_label=123123123123, payload_len=8, next_header=17, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad payload len
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
flow_label=0, payload_len=123123123123, next_header=17, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad next header
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
flow_label=0, payload_len=8, next_header=123123123123123, hop_limit=64,
exit_on_failure=False, exit_code=54))
# Bad hop limit
self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
flow_label=0, payload_len=8, next_header=17, hop_limit=123123123123123,
exit_on_failure=False, exit_code=54))
# endregion
# region Test RawUDP methods
def test_udp_parse_header(self):
# Normal
self.assertEqual(self.udp.parse_header(packet=b'\x14\xe9\x14\xe9\x00\x08\xdc\x07',
exit_on_failure=True, exit_code=55),
{'source-port': 5353, 'destination-port': 5353, 'length': 8, 'checksum': 56327})
# Bad packet length
self.assertIsNone(self.udp.parse_header(packet=b'\x14\xe9\x14\xe9\x00\x08\xdc',
exit_on_failure=False, exit_code=55))
def test_udp_make_header(self):
# Normal
self.assertEqual(self.udp.make_header(source_port=5353, destination_port=5353, data_length=0,
exit_on_failure=True, exit_code=56), b'\x14\xe9\x14\xe9\x00\x08\x00\x00')
# Bad source port
self.assertIsNone(self.udp.make_header(source_port=123123, destination_port=5353, data_length=0,
exit_on_failure=False, exit_code=56))
# Bad destination port
self.assertIsNone(self.udp.make_header(source_port=5353, destination_port=123123, data_length=0,
exit_on_failure=False, exit_code=56))
# Bad data length
self.assertIsNone(self.udp.make_header(source_port=5353, destination_port=5353, data_length=123123,
exit_on_failure=False, exit_code=56))
def test_udp_make_header_with_ipv6_checksum(self):
# Normal
self.assertEqual(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2', port_src=5353,
port_dst=5353, payload_len=0, payload_data=b'',
exit_on_failure=True, exit_code=57),
b'\x14\xe9\x14\xe9\x00\x08\xdc\x07')
# Bad source IPv6 address
self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00:::1', ipv6_dst='fd00::2',
port_src=5353, port_dst=5353, payload_len=0,
payload_data=b'', exit_on_failure=False,
exit_code=57))
# Bad destination IPv6 address
self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00:::2',
port_src=5353, port_dst=5353, payload_len=0,
payload_data=b'', exit_on_failure=False,
exit_code=57))
# Bad source port
self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2',
port_src=123123, port_dst=5353, payload_len=0,
payload_data=b'', exit_on_failure=False,
exit_code=57))
# Bad destination port
self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2',
port_src=5353, port_dst=123123, payload_len=0,
payload_data=b'', exit_on_failure=False,
exit_code=57))
# Bad payload length
self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2',
port_src=5353, port_dst=5353, payload_len=123123,
payload_data=b'', exit_on_failure=False,
exit_code=57))
# endregion
# region Test RawDNS methods
def test_dns_get_top_level_domain(self):
# Normal
self.assertEqual(self.dns.get_top_level_domain(name='www.test.com'), 'test.com')
# Bad name
self.assertEqual(self.dns.get_top_level_domain(name='test'), 'test')
def test_dns_pack_dns_name(self):
# Normal
self.assertEqual(self.dns.pack_dns_name(name='test.com', exit_on_failure=True, exit_code=65),
b'\x04test\x03com\x00')
# Bad name
self.assertIsNone(self.dns.pack_dns_name(name='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'.com', exit_on_failure=False, exit_code=65))
def test_dns_parse_packet(self):
self.assertEqual(self.dns.parse_packet(packet=b'\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00' +
b'\x04test\x03com\x00\x00\x01\x00\x01\x04test\x03com\x00' +
b'\x00\x01\x00\x01\x00\x00\xff\xff\x00\x04\xc0\xa8\x01\x01',
exit_on_failure=True, exit_code=67),
{'additional-rrs': 0, 'answer-rrs': 1, 'authority-rrs': 0, 'questions': 1, 'flags': 33152,
'transaction-id': 1,
'answers': [
{'address': '192.168.1.1',
'class': 1,
'name': 'test.com.',
'ttl': 65535,
'type': 1}],
'queries': [{'class': 1,
'name': 'test.com.',
'type': 1}]})
def test_dns_unpack_dns_name(self):
self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x03www\x04test\x03com\x00'), 'www.test.com.')
self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x04mail\xc0\x11', name='pop3.test.com'),
'mail.test.com')
self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\xc0\x10', name='test.com'), 'test.com')
self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x03www\xc0\x0c', name='test.com'), 'www.test.com')
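        # The 0xc0-prefixed bytes above are DNS name-compression pointers
        # (RFC 1035, section 4.1.4): they reference an offset in the original
        # packet, which is why a fallback `name` must be supplied here.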
def test_dns_make_ipv4_request_packet(self):
# Normal
self.assertEqual(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x006\x01\x00\x00\x00@\x11\xf6c\xc0\xa8\x01\x01' +
b'\xc0\xa8\x01\x02\x14\xe9\x005\x00"\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00' +
b'\x04test\x03com\x00\x00\x01\x00\x01')
# Bad source MAC address
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:890ab',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad destination MAC address
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:890ab',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad source IPv4 address
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.300', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad destination IPv4 address
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.400',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad source UDP port
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=123123, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad destination UDP port
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=123123, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad transaction id
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=123123123,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad query type
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 123123, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad query class
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 123123, 'name': 'test.com'}],
flags=0))
# Bad flags
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=123123))
        # Bad queries: missing 'class' key
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'name': 'test.com'}],
flags=0))
        # Bad queries: missing 'type' key
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'class': 1, 'name': 'test.com'}],
flags=0))
        # Bad queries: missing 'name' key
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2',
ip_ttl=64, ip_ident=1,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'class': 1, 'type': 1}],
flags=0))
def test_dns_make_ipv6_request_packet(self):
# Normal
self.assertEqual(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00"\x11@\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x14\xe9\x005\x00"B)\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00' +
b'\x04test\x03com\x00\x00\x01\x00\x01')
# Bad source IPv6 address
self.assertIsNone(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00:::1', ip_dst='fd00::2', ip_ttl=64,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
# Bad destination IPv6 address
self.assertIsNone(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00:::2', ip_ttl=64,
udp_src_port=5353, udp_dst_port=53, transaction_id=1,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
flags=0))
def test_dns_make_response_packet(self):
# Normal IPv4 response
self.assertEqual(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64,
ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1,
flags=0x8180,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 1, 'class': 1,
'ttl': 65535, 'address': '192.168.1.1'}],
name_servers={}, exit_on_failure=True),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x00F\x01\x00\x00\x00@\x11\xf6S\xc0\xa8\x01\x01' +
b'\xc0\xa8\x01\x02\x005\x14\xe9\x002\xb5{\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00' +
b'\x04test\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\xff\xff\x00\x04\xc0' +
b'\xa8\x01\x01')
# Normal IPv6 response
self.assertEqual(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=53, udp_dst_port=5353, transaction_id=1,
flags=0x8180,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 28, 'class': 1,
'ttl': 65535, 'address': 'fd00::1'}],
name_servers={}, exit_on_failure=True),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00>\x11@\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x005\x14\xe9\x00>\x034\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x04' +
b'test\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x1c\x00\x01\x00\x00\xff\xff\x00\x10\xfd\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
# Bad MAC address
self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:890ab',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 28, 'class': 1,
'ttl': 65535, 'address': 'fd00::1'}],
name_servers={}, exit_on_failure=False))
# Bad IP address
self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00:::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 28, 'class': 1,
'ttl': 65535, 'address': 'fd00::1'}],
name_servers={}, exit_on_failure=False))
# Bad UDP port
self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=123123, udp_dst_port=5353, transaction_id=1, flags=0,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 28, 'class': 1,
'ttl': 65535, 'address': 'fd00::1'}],
name_servers={}, exit_on_failure=False))
# Bad IPv4 address in answer
self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 1, 'class': 1,
'ttl': 65535, 'address': '192.168.1.300'}],
name_servers={}, exit_on_failure=False))
# Bad IPv6 address in answer
self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1,
udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0,
queries=[{'type': 1, 'class': 1, 'name': 'test.com'}],
answers_address=[{'name': 'test.com', 'type': 28, 'class': 1,
'ttl': 65535, 'address': 'fd00:::1'}],
name_servers={}, exit_on_failure=False))
# endregion
# endregion
# region Test RawICMPv4 methods
def test_icmpv4_make_host_unreachable_packet(self):
# Normal
self.assertEqual(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ident=1),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x000\x01\x00\x00\x00@\x01\xf8y\xc0\xa8\x00' +
b'\x01\xc0\xa8\x00\x02\x03\x01\xfc\xfe\x00\x00\x00\x00E\x00\x00\x1c\x01\x00\x00\x00@\x01' +
b'\xf8\x8d\xc0\xa8\x00\x02\xc0\xa8\x00\x01')
# Bad MAC address
self.assertIsNone(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0ab',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ident=1))
# Bad IP address
self.assertIsNone(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1111', ip_dst='192.168.0.2',
ip_ident=1))
def test_icmpv4_make_udp_port_unreachable_packet(self):
# Normal
self.assertEqual(self.icmpv4.make_udp_port_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
udp_src_port=5353, udp_dst_port=5353,
ip_ident=1),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x008\x01\x00\x00\x00@\x01\xf8q\xc0\xa8\x00\x01' +
b'\xc0\xa8\x00\x02\x03\x03\xd3"\x00\x00\x00\x00E\x00\x00$\x01\x00\x00\x00@\x11\xf8u\xc0\xa8' +
b'\x00\x02\xc0\xa8\x00\x01\x14\xe9\x14\xe9\x00\x08\x00\x00')
def test_icmpv4_make_ping_request_packet(self):
# Normal
self.assertEqual(self.icmpv4.make_ping_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ident=1, data=b'0123456789'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x00&\x01\x00\x00\x00@\x01\xf8\x83\xc0\xa8\x00' +
b'\x01\xc0\xa8\x00\x02\x08\x00\xf2\xf5\x00\x00\x00\x000123456789')
def test_icmpv4_make_redirect_packet(self):
# Normal
self.assertEqual(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ttl=64, ip_ident=1,
gateway_address='192.168.0.1',
payload_ip_src='192.168.0.1',
payload_ip_dst='192.168.0.2'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x008\x01\x00\x00\x00@\x01\xf8q\xc0\xa8\x00\x01' +
b'\xc0\xa8\x00\x02\x05\x019\xe3\xc0\xa8\x00\x01E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf8}\xc0' +
b'\xa8\x00\x01\xc0\xa8\x00\x02\x005\x005\x00\x08\x00\x00')
# Bad gateway address
self.assertIsNone(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ttl=64, ip_ident=1,
gateway_address='192.168.0.1111',
payload_ip_src='192.168.0.1',
payload_ip_dst='192.168.0.2'))
# Bad payload IP address
self.assertIsNone(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.0.1', ip_dst='192.168.0.2',
ip_ttl=64, ip_ident=1,
gateway_address='192.168.0.1',
payload_ip_src='192.168.0.1111',
payload_ip_dst='192.168.0.2'))
# endregion
# region Test RawDHCPv4 methods
    def test_dhcpv4_make_discover_packet(self):
# Normal
self.assertEqual(self.dhcpv4.make_discover_packet(ethernet_src_mac='01:23:45:67:89:0a',
client_mac='01:23:45:67:89:0a',
ip_ident=1, transaction_id=1,
host_name='dhcp.discover.test',
exit_on_failure=True,
exit_code=76),
b'\xff\xff\xff\xff\xff\xff\x01#Eg\x89\n\x08\x00E\x00\x02<\x01\x00\x00\x00@\x11w\xb2\x00' +
b'\x00\x00\x00\xff\xff\xff\xff\x00D\x00C\x02(\x00\x00\x01\x01\x06\x00\x00\x00\x00\x01\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89' +
b'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00c\x82Sc5\x01\x01\x0c\x12dhcp.discover.test7\xfe\x01\x02\x03\x04\x05' +
b'\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c' +
b'\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefg' +
b'hijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e' +
b'\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4' +
b'\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba' +
b'\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0' +
b'\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6' +
b'\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc' +
b'\xfd\xfe\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00')
def test_dhcpv4_make_request_packet(self):
# Normal
self.assertEqual(self.dhcpv4.make_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
client_mac='01:23:45:67:89:0a',
ip_ident=1, transaction_id=1,
requested_ip='192.168.1.1',
host_name='dhcp.request.test',
exit_on_failure=True,
exit_code=77),
b'\xff\xff\xff\xff\xff\xff\x01#Eg\x89\n\x08\x00E\x00\x01J\x01\x00\x00\x00@\x11x\xa4\x00' +
b'\x00\x00\x00\xff\xff\xff\xff\x00D\x00C\x016\x00\x00\x01\x01\x06\x00\x00\x00\x00\x01\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89' +
b'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00c\x82Sc5\x01\x032\x04\xc0\xa8\x01\x01\x0c\x11dhcp.request.test7\x07' +
b'\x01\x02\x03\x06\x1c\x0f\x1a\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
def test_dhcpv4_make_response_packet(self):
# DHCPv4 Offer
self.assertEqual(self.dhcpv4.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_ident=1, transaction_id=1,
dhcp_message_type=2, your_client_ip='192.168.1.2',
exit_on_failure=True,
exit_code=78),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x01F\x01\x00\x00\x00@\x11\xb6\xfe\xc0\xa8\x01' +
b'\x01\xff\xff\xff\xff\x00C\x00D\x012\x00\x00\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\xc0\xa8\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89\x0b\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00c\x82Sc5\x01\x026\x04\xc0\xa8\x01\x013\x04\x00\x00\xff\xff\x01\x04\xff\xff\xff' +
b'\x00\x03\x04\xc0\xa8\x01\x01\x06\x04\xc0\xa8\x01\x01\xff\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
# DHCPv4 ACK
self.assertEqual(self.dhcpv4.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ip_src='192.168.1.1', ip_ident=1, transaction_id=1,
dhcp_message_type=5, your_client_ip='192.168.1.2',
exit_on_failure=True,
exit_code=78),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x01F\x01\x00\x00\x00@\x11\xb6\xfe\xc0\xa8\x01' +
b'\x01\xff\xff\xff\xff\x00C\x00D\x012\x00\x00\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\xc0\xa8\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89\x0b\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00c\x82Sc5\x01\x056\x04\xc0\xa8\x01\x013\x04\x00\x00\xff\xff\x01\x04\xff\xff\xff' +
b'\x00\x03\x04\xc0\xa8\x01\x01\x06\x04\xc0\xa8\x01\x01\xff\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
# endregion
# region Test RawICMPv6 methods
def test_icmpv6_make_option(self):
# Normal
self.assertEqual(self.icmpv6.make_option(option_type=1, option_value=b'test_option_value'),
b'\x01\x03\x00\x00\x00\x00\x00test_option_value')
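        # The expected bytes decode as type=0x01 and length=0x03, where the
        # length is counted in 8-octet units (RFC 4861), i.e. 24 bytes total:
        # 2 header bytes + 5 bytes of zero padding + the 17-byte value.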
def test_icmpv6_make_router_solicit_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_router_solicit_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='33:33:00:00:00:02',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
ipv6_flow=0x835d1,
need_source_link_layer_address=True,
source_link_layer_address=None),
b'33\x00\x00\x00\x02\x01#Eg\x89\n\x86\xdd`\x085\xd1\x00\x10:\xff\xfd\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x02\x85\x00\xb0\x1a\x00\x00\x00\x00\x01\x01\x01#Eg\x89\n')
def test_icmpv6_make_router_advertisement_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_router_advertisement_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
dns_address='fd00::1', domain_search='test.local',
prefix='fd00::/64'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x0bGU\x00\x80:\xff\xfd\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x02\x86\x00\xb3>@\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x04@\xc0\xff\xff' +
b'\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x01\x01\x01#Eg\x89\n\x05\x01\x00\x00\x00\x00\x05\xdc\x19\x03\x00\x00\x00' +
b'\x00\x17p\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x1f\x04\x00\x00' +
b'\x00\x00\x17p\x04test\x05local\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x01' +
b'\x00\x00\x00\x00\xea`')
def test_icmpv6_make_neighbor_solicitation_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_neighbor_solicitation_packet(ethernet_src_mac='01:23:45:67:89:0a',
ipv6_src='fd00::1'),
b'33\x00\x00\x00\x01\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00 :\xff\xfd\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x01\x87\x00\xac\x05\x00\x00\x00\x00\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x01\x02\x01\x01#Eg\x89\n')
def test_icmpv6_make_neighbor_advertisement_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac='01:23:45:67:89:0a',
ipv6_src='fd00::1',
target_ipv6_address='fd00::2'),
b'33\x00\x00\x00\x01\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00 :\xff\xfd\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x01\x88\x00\x8d\x06 \x00\x00\x00\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x02\x02\x01\x01#Eg\x89\n')
def test_icmpv6_make_echo_request_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_echo_request_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
id=1, sequence=1),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00@:\xff\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x80\x00\x8ek\x00\x01\x00\x01\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c' +
b'\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' +
b'!"#$%&\'()*+,-./01234567')
def test_icmpv6_make_echo_reply_packet(self):
# Normal
self.assertEqual(self.icmpv6.make_echo_reply_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
id=1, sequence=1),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00@:\xff\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x81\x00\x8dk\x00\x01\x00\x01\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c' +
b'\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' +
b'!"#$%&\'()*+,-./01234567')
# endregion
# region Test RawDHCPv6 methods
    def test_dhcpv6_make_duid(self):
# Normal
self.assertEqual(self.dhcpv6._make_duid(mac_address='01:23:45:67:89:0a'),
b'\x00\x03\x00\x01\x01#Eg\x89\n')
def test_dhcpv6_make_solicit_packet(self):
# Normal
self.assertEqual(self.dhcpv6.make_solicit_packet(ethernet_src_mac='01:23:45:67:89:0a',
ipv6_src='fd00::1',
transaction_id=1,
client_mac_address='01:23:45:67:89:0a',
option_request_list=[23, 24]),
b'33\x00\x01\x00\x02\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00H\x11@\xfd\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x01\x00\x02\x02"\x02#\x00H.\x01\x01\x00\x00\x01\x00\x03\x00\x18\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00' +
b'\x00\x00\x08\x00\x02\x00\x00\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x06\x00\x04' +
b'\x00\x17\x00\x18')
def test_dhcpv6_make_relay_forw_packet(self):
# Normal
self.assertEqual(self.dhcpv6.make_relay_forw_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2', ipv6_flow=1,
hop_count=10, link_addr='fd00::2',
peer_addr='fd00::3'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x01\x00*\x11@\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x02"\x02#\x00*\xfb?\x0c\n\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x02\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03')
def test_dhcpv6_make_advertise_packet(self):
# Normal
self.assertEqual(self.dhcpv6.make_advertise_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
transaction_id=1,
dns_address='fd00::1',
domain_search='test.local',
ipv6_address='fd00::2'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\n\x1b\x82\x00\x84\x11@\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x02#\x02"\x00\x84n\xf4\x02\x00\x00\x01\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg' +
b'\x89\x0b\x00\x02\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x14\x00\x00\x00\x17\x00\x10\xfd' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x18\x00\x0c\x04test\x05' +
b'local\x00\x00R\x00\x04\x00\x00\x00<\x00\x03\x00(\x00\x00\x00\x01\x00\x00T`\x00\x00\x87' +
b'\x00\x00\x05\x00\x18\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xff' +
b'\xff\xff\xff\xff\xff\xff\xff')
def test_dhcpv6_make_reply_packet(self):
# Normal
self.assertEqual(self.dhcpv6.make_reply_packet(ethernet_src_mac='01:23:45:67:89:0a',
ethernet_dst_mac='01:23:45:67:89:0b',
ipv6_src='fd00::1', ipv6_dst='fd00::2',
transaction_id=1,
dns_address='fd00::1',
domain_search='test.local',
ipv6_address='fd00::2'),
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\n\x1b\x82\x00\x84\x11@\xfd\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
b'\x00\x00\x02\x02#\x02"\x00\x84i\xf4\x07\x00\x00\x01\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg' +
b'\x89\x0b\x00\x02\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x14\x00\x00\x00\x17\x00\x10\xfd' +
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x18\x00\x0c\x04test\x05' +
b'local\x00\x00R\x00\x04\x00\x00\x00<\x00\x03\x00(\x00\x00\x00\x01\x00\x00T`\x00\x00\x87' +
b'\x00\x00\x05\x00\x18\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xff' +
b'\xff\xff\xff\xff\xff\xff\xff')
# endregion
# endregion
|
Vladimir-Ivanov-Git/raw-packet
|
raw_packet/Tests/Unit_tests/Utils/test_network.py
|
Python
|
mit
| 74,709
|
# coding: utf-8
import flask
import auth
import config
import model
import util
from app import app
dropbox_config = dict(
access_token_method='POST',
access_token_url='https://api.dropbox.com/1/oauth2/token',
authorize_url='https://www.dropbox.com/1/oauth2/authorize',
base_url='https://www.dropbox.com/1/',
consumer_key=config.CONFIG_DB.dropbox_app_key,
consumer_secret=config.CONFIG_DB.dropbox_app_secret,
)
dropbox = auth.create_oauth_app(dropbox_config, 'dropbox')
@app.route('/api/auth/callback/dropbox/')
def dropbox_authorized():
response = dropbox.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (response['access_token'], '')
me = dropbox.get('account/info')
user_db = retrieve_user_from_dropbox(me.data)
return auth.signin_user_db(user_db)
@dropbox.tokengetter
def get_dropbox_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/dropbox/')
def signin_dropbox():
return auth.signin_oauth(dropbox, 'https')
def retrieve_user_from_dropbox(response):
auth_id = 'dropbox_%s' % response['uid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return auth.create_user_db(
auth_id=auth_id,
name=response['display_name'],
username=response['display_name'],
)
|
gmist/1businka2
|
main/auth/dropbox.py
|
Python
|
mit
| 1,429
|
#!/usr/bin/env python
from FilteredCollection import FilteredCollection
__author__ = 'kinpa200296'
|
kinpa200296/python_labs
|
lab2/mycollections/lab2_task10/__init__.py
|
Python
|
mit
| 101
|
# -*- coding: utf-8 -*-
"""
Get European weather alarms as offered by Meteoalarm.eu
"""
import StringIO
import re
import urllib2
import xml.dom.minidom
import datetime
import json
import argparse
import logging
import logging.handlers
import unicodedata
import contextlib
awareness_type_dict = {
'1': 'wind',
'2': 'snow/ice',
'3': 'thunderstorms',
'4': 'fog',
'5': 'highTemperature',
'6': 'lowTemperature',
'7': 'coastalEvent',
'8': 'forestFire',
'9': 'avalanches',
'10': 'rain',
'11': 'flood',
'12': 'rain/flood'
}
awareness_level_dict = {
'': 'informational',
'1': 'low',
'2': 'medium',
'3': 'high',
'4': 'critical'
}
weather_alarms = "http://www.meteoalarm.eu/documents/rss/{}.rss"
reg_exp = re.compile('<img(?P<group>.*?)>')
countries_to_retrieve = []
# Sanitize string to remove characters forbidden by Orion
def sanitize(str_in):
aux = re.sub(r"[<(>)\"\'=;-]", "", str_in)
return unicodedata.normalize('NFD', aux).encode('ascii', 'ignore')
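# For example, given this script's Python 2 semantics:
#   sanitize(u'Cataluña <Este>') -> 'Cataluna Este'
# (forbidden characters are dropped and accents are stripped to ASCII).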
def get_weather_alarms(country):
source = weather_alarms.format(country.lower())
logger.debug("Going to GET %s", source)
req = urllib2.Request(url=source)
f = urllib2.urlopen(req)
xml_data = f.read()
final_data = xml_data
DOMTree = xml.dom.minidom.parseString(final_data).documentElement
out = []
items = DOMTree.getElementsByTagName('item')[1:]
alarm_index = -1
for item in items:
description = item.getElementsByTagName(
'description')[0].firstChild.nodeValue
# Enable description parsing
        description = description.replace('&nbsp;', '')  # assumed '&nbsp;': stripping real spaces here would break attribute and alt-text parsing below
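        # Re-close each HTML <img ...> tag as <img ...></img> so that minidom
        # can parse the RSS description fragment as XML below.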
description = re.sub(reg_exp, '<img\g<group>></img>', description)
zone = item.getElementsByTagName(
'title')[0].firstChild.nodeValue.strip()
uid = item.getElementsByTagName('guid')[0].firstChild.nodeValue
pub_date_str = item.getElementsByTagName(
'pubDate')[0].firstChild.nodeValue
pub_date = datetime.datetime.strptime(
pub_date_str[:-6], '%a, %d %b %Y %H:%M:%S').isoformat()
        # Encode the description to UTF-8 bytes, since it is a unicode string
parsed_content = xml.dom.minidom.parseString(
description.encode('utf-8')).documentElement
rows = parsed_content.getElementsByTagName('tr')
for row in rows:
columns = row.getElementsByTagName('td')
for column in columns:
# img column contains the awareness level and type
img_aux = column.getElementsByTagName('img')
if img_aux.length > 0:
awareness_str = img_aux[0].getAttribute('alt')
alarm_data = parse_alarm(awareness_str)
if alarm_data['level'] > 1:
alarm_index += 1
obj = {
'type': 'Alert',
'category': {
'type': 'Property',
'value': 'weather'
},
'id': 'WeatherAlert-{}-{}'.format(uid, alarm_index),
'subCategory': {
'type': 'Property',
'value': alarm_data['awt']
},
'severity': {
'type': 'Property',
'value': alarm_data['levelColor']
},
'address': {
'type': 'Property',
'value': {
'type': 'PostalAddress',
'addressCountry': country.upper(),
'addressRegion': sanitize(zone)
}
},
'alertSource': {
'type': 'Property',
'value': 'http://www.meteoalarm.eu'
},
'dateIssued': {
'value': pub_date,
'type': 'DateTime'
}
}
out.append(obj)
else:
dates = column.getElementsByTagName('i')
if dates.length > 0:
valid_from_str = dates[0].firstChild.nodeValue
valid_to_str = dates[1].firstChild.nodeValue
valid_from = datetime.datetime.strptime(
valid_from_str, '%d.%m.%Y %H:%M %Z').isoformat()
valid_to = datetime.datetime.strptime(
valid_to_str, '%d.%m.%Y %H:%M %Z').isoformat()
out[alarm_index]['validFrom'] = {
'type': 'DateTime',
'value': valid_from
}
out[alarm_index]['validTo'] = {
'type': 'DateTime',
'value': valid_to
}
out = remove_duplicates(out)
return out
def remove_duplicates(array_data):
# Dictionary for duplicate checking
alarms_duplicates = {}
out = []
for data in array_data:
key = ('{address[value][addressCountry]}{address[value][addressRegion]}'
'{severity[value]}{subCategory[value]}'
'{validFrom[value]}{validTo[value]}').format(**data)
if key not in alarms_duplicates:
alarms_duplicates[key] = data
out.append(data)
return out
def parse_alarm(alarm_string):
elements = alarm_string.split(' ')
awt = elements[0].split(':')[1]
level = elements[1].split(':')[1]
return {
'level': int(level) if level else -1,
'levelColor': awareness_level_dict.get(level, ''),
'awt': awareness_type_dict.get(awt, '')
}
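# Example: the <img alt="..."> text in the feed looks like 'awt:10 level:2'
# (an assumption based on how get_weather_alarms uses it), so:
#   parse_alarm('awt:10 level:2')
#   -> {'level': 2, 'levelColor': 'medium', 'awt': 'rain'}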
def setup_logger():
global logger
LOG_FILENAME = 'harvest_weather_alarms.log'
# Set up a specific logger with our desired output level
logger = logging.getLogger('WeatherAlarms')
logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=2000000, backupCount=3)
formatter = logging.Formatter('%(levelname)s %(asctime)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def persist_entities(data):
data_to_be_persisted = data
data_obj = {
'actionType': 'APPEND',
'entities': data_to_be_persisted
}
data_as_str = json.dumps(data_obj)
headers = {
'Content-Type': 'application/json',
'Content-Length': len(data_as_str)
}
if fiware_service:
headers['Fiware-Service'] = fiware_service
if fiware_service_path:
headers['Fiware-Servicepath'] = fiware_service_path
req = urllib2.Request(
url=(
orion_service +
'/v2/op/update'),
data=data_as_str,
headers=headers)
try:
with contextlib.closing(urllib2.urlopen(req)) as f:
logger.debug('Entities successfully created')
except urllib2.URLError as e:
logger.error('Error!!!')
logger.error(
'Error while POSTing data to Orion: %d %s',
e.code,
e.read())
logger.debug('Data which failed: %s', data_as_str)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Weather alarm harvester')
parser.add_argument('--service', metavar='service',
type=str, help='FIWARE Service', required=True)
parser.add_argument('--service-path', metavar='service_path',
type=str, nargs='?', help='FIWARE Service Path')
parser.add_argument('--endpoint', metavar='endpoint',
type=str, required=True, help='Context Broker end point. Example. http://orion:1030')
parser.add_argument('countries', metavar='countries', type=str, nargs='+',
                        help='Country codes separated by spaces')
args = parser.parse_args()
fiware_service_path = None
if args.service:
fiware_service = args.service
print('Fiware-Service: ' + fiware_service)
if args.service_path:
fiware_service_path = args.service_path
print('Fiware-Servicepath: ' + fiware_service_path)
if args.endpoint:
orion_service = args.endpoint
print('Context Broker: ' + orion_service)
for s in args.countries:
countries_to_retrieve.append(s)
setup_logger()
for c in countries_to_retrieve:
logger.debug("Going to retrieve data from country: %s", c)
alarms = get_weather_alarms(c)
logger.debug("Going to persist data from country: %s", c)
persist_entities(alarms)
|
Fiware/dataModels
|
specs/Weather/WeatherAlert/harvest/meteoalarm_harvest.py
|
Python
|
mit
| 9,117
|
from PIL import Image
from StringIO import StringIO
from django.core.files.uploadedfile import SimpleUploadedFile
def resize_attachment(file_):
size = (960, 9999)
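    # PIL's thumbnail() keeps the aspect ratio within this bounding box, so
    # the width is capped at 960px while 9999 leaves the height effectively
    # unconstrained.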
try:
temp = StringIO()
image = Image.open(file_)
image.thumbnail(size, Image.ANTIALIAS)
image.save(temp, 'jpeg')
temp.seek(0)
return SimpleUploadedFile(file_.name, temp.read(), content_type='image/jpeg')
    except Exception:
        # Not a valid image (or the resize failed): fall back to the original file.
        return file_
|
ilvar/lotien
|
pages/utils.py
|
Python
|
mit
| 475
|
import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.136_single_number', fromlist='*')
class Test(unittest.TestCase):
def test_singleNumber(self):
s = solutions.Solution()
nums = [1, 2, 3, 3, 2]
self.assertEqual(s.singleNumber(nums), 1)
nums = [1, 2, 3, 3, 2, 4, 4, 1, 5]
self.assertEqual(s.singleNumber(nums), 5)
nums = [1, 2, 3, 3, 2, 4, 4, 1, 5, -1, 5]
self.assertEqual(s.singleNumber(nums), -1)
nums = [1]
self.assertEqual(s.singleNumber(nums), 1)
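        # These cases all have exactly one value that appears once while every
        # other value appears twice; a typical solution XORs all values
        # (x ^ x == 0, x ^ 0 == x), which is presumably what
        # solutions/136_single_number implements.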
if __name__ == '__main__':
unittest.main()
|
abawchen/leetcode
|
tests/136.py
|
Python
|
mit
| 617
|
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 19/08/2016'
|
tonybeltramelli/Deep-Lyrics
|
modules/__init__.py
|
Python
|
mit
| 69
|
from deluge.log import LOG as log
from deluge.ui.client import client
from deluge import component
from deluge.plugins.pluginbase import WebPluginBase
from common import get_resource
class WebUI(WebPluginBase):
scripts = [get_resource("sqsnotify.js")]
def enable(self):
log.debug("SQSNotify Web plugin enabled!")
def disable(self):
log.debug("SQSNotify Web plugin disabled!")
|
AbleCoder/deluge-sqsnotify
|
sqsnotify/webui.py
|
Python
|
mit
| 409
|
"""
Each geolocation service you might use, such as Google Maps, Bing Maps, or
Yahoo BOSS, has its own class in ``geopy.geocoders`` abstracting the service's
API. Geocoders each define at least a ``geocode`` method, for resolving a
location from a string, and may define a ``reverse`` method, which resolves a
pair of coordinates to an address. Each Geocoder accepts any credentials
or settings needed to interact with its service, e.g., an API key or
locale, during its initialization.
To geolocate a query to an address and coordinates::
>>> from geopy.geocoders import GoogleV3
>>> geolocator = GoogleV3()
>>> address, (latitude, longitude) = geolocator.geocode("175 5th Avenue NYC")
>>> print(address, latitude, longitude)
175 5th Avenue, New York, NY 10010, USA 40.7410262 -73.9897806
To find the address corresponding to a set of coordinates::
>>> from geopy.geocoders import GoogleV3
>>> geolocator = GoogleV3()
>>> address, (latitude, longitude) = geolocator.reverse("40.752067, -73.977578")
>>> print(address, latitude, longitude)
77 East 42nd Street, New York, NY 10017, USA 40.7520802 -73.9775683
Locators' ``geocode`` and ``reverse`` methods require the argument ``query``,
and also accept at least the argument ``exactly_one``, which defaults to ``True``.
Geocoders may have additional attributes, e.g., Bing accepts ``user_location``,
the effect of which is to bias results near that location. ``geocode``
and ``reverse`` methods may return three types of values:
- When there are no results found, returns ``None``.
- When the method's ``exactly_one`` argument is ``True`` and at least one
result is found, returns a :class:`geopy.location.Location` object, which
can be iterated over as:
(address<String>, (latitude<Float>, longitude<Float>))
Or can be accessed as `Location.address`, `Location.latitude`,
`Location.longitude`, `Location.altitude`, and `Location.raw`. The
last contains the geocoder's unparsed response for this result.
- When ``exactly_one`` is False, and there is at least one result, returns a
list of :class:`geopy.location.Location` objects, as above:
[Location, [...]]
If a service is unavailable, otherwise returns a non-OK response, or does not
respond within the allotted timeout, you will receive one of the
`Exceptions`_ detailed below.
Every geocoder accepts an argument ``format_string`` that defaults to '%s' where
the input string to geocode is interpolated. For example, if you only need to
geocode locations in Cleveland, Ohio, you could do::
>>> from geopy.geocoders import GeocoderDotUS
>>> geolocator = GeocoderDotUS(format_string="%s, Cleveland OH")
>>> address, (latitude, longitude) = geolocator.geocode("11111 Euclid Ave")
>>> print(address, latitude, longitude)
11111 Euclid Ave, Cleveland, OH 44106 41.506784 -81.608148
"""
from geopy.geocoders.arcgis import ArcGIS
from geopy.geocoders.bing import Bing
from geopy.geocoders.googlev3 import GoogleV3
from geopy.geocoders.dot_us import GeocoderDotUS
from geopy.geocoders.geonames import GeoNames
from geopy.geocoders.placefinder import YahooPlaceFinder
from geopy.geocoders.openmapquest import OpenMapQuest
from geopy.geocoders.mapquest import MapQuest
from geopy.geocoders.smartystreets import LiveAddress
from geopy.geocoders.osm import Nominatim
|
XBMC-Addons/script.module.geopy
|
lib/geopy/geocoders/__init__.py
|
Python
|
mit
| 3,357
|
"""
Deprecated: This is included only to support the use of the old v1 client
class. It will be removed once v2 is at parity with v1. Do not use this for any
new functionality.
A simple JSON REST request abstraction layer that is used by the
``dropbox.client`` and ``dropbox.session`` modules. You shouldn't need to use
this.
"""
import io
import pkg_resources
import six
import socket
import ssl
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('Dropbox python client requires urllib3.')
if six.PY3:
url_encode = urllib.parse.urlencode
else:
url_encode = urllib.urlencode
SDK_VERSION = "3.27"
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RESTResponse(io.IOBase):
"""
Responses to requests can come in the form of ``RESTResponse``. These are
thin wrappers around the socket file descriptor.
:meth:`read()` and :meth:`close()` are implemented.
It is important to call :meth:`close()` to return the connection
back to the connection pool to be reused. If a connection
is not closed by the caller it may leak memory. The object makes a
best-effort attempt upon destruction to call :meth:`close()`,
but it's still best to explicitly call :meth:`close()`.
"""
def __init__(self, resp):
# arg: A urllib3.HTTPResponse object
self.urllib3_response = resp
self.status = resp.status
self.version = resp.version
self.reason = resp.reason
self.strict = resp.strict
self.is_closed = False
def __del__(self):
# Attempt to close when ref-count goes to zero.
self.close()
def __exit__(self, typ, value, traceback):
# Allow this to be used in "with" blocks.
self.close()
# -----------------
# Important methods
# -----------------
def read(self, amt=None):
"""
Read data off the underlying socket.
Parameters
amt
Amount of data to read. Defaults to ``None``, indicating to read
everything.
Returns
Data off the socket. If ``amt`` is not ``None``, at most ``amt`` bytes are returned.
An empty string when the socket has no data.
Raises
``ValueError``
If the ``RESTResponse`` has already been closed.
"""
if self.is_closed:
raise ValueError('Response already closed')
return self.urllib3_response.read(amt)
BLOCKSIZE = 4 * 1024 * 1024 # 4MB at a time just because
def close(self):
"""Closes the underlying socket."""
# Double closing is harmless
if self.is_closed:
return
# Mark as closed and release the connection (exactly once)
self.is_closed = True
self.urllib3_response.release_conn()
@property
def closed(self):
return self.is_closed
# ---------------------------------
# Backwards compat for HTTPResponse
# ---------------------------------
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
# Some compat functions showed up recently in urllib3
try:
urllib3.HTTPResponse.flush
urllib3.HTTPResponse.fileno
def fileno(self):
return self.urllib3_response.fileno()
def flush(self):
return self.urllib3_response.flush()
except AttributeError:
pass
def create_connection(address):
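    # A minimal stand-in for the stdlib's socket.create_connection(): try each
    # resolved address in turn and return the first socket that connects.
    # Unlike the stdlib helper, it supports no timeout or source_address.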
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def json_loadb(data):
if sys.version_info >= (3,):
data = data.decode('utf8')
return json.loads(data)
class RESTClientObject(object):
def __init__(self, max_reusable_connections=8, mock_urlopen=None):
"""
Parameters
max_reusable_connections
max connections to keep alive in the pool
mock_urlopen
an optional alternate urlopen function for testing
This class uses ``urllib3`` to maintain a pool of connections. We attempt
to grab an existing idle connection from the pool, otherwise we spin
up a new connection. Once a connection is closed, it is reinserted
into the pool (unless the pool is full).
SSL settings:
- Certificates validated using Dropbox-approved trusted root certs
- TLS v1.0 (newer TLS versions are not supported by urllib3)
- Default ciphersuites. Choosing ciphersuites is not supported by urllib3
- Hostname verification is provided by urllib3
"""
self.mock_urlopen = mock_urlopen
self.pool_manager = urllib3.PoolManager(
num_pools=4, # only a handful of hosts. api.dropbox.com, api-content.dropbox.com
maxsize=max_reusable_connections,
block=False,
timeout=60.0, # long enough so datastores await doesn't get interrupted
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=TRUSTED_CERT_FILE,
ssl_version=ssl.PROTOCOL_TLSv1,
)
def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False,
is_json_request=False):
"""Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
headers = headers or {}
headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION
if post_params is not None:
if body:
raise ValueError("body parameter cannot be used with post_params parameter")
if is_json_request:
body = json.dumps(post_params)
headers["Content-type"] = "application/json"
else:
body = params_to_urlencoded(post_params)
headers["Content-type"] = "application/x-www-form-urlencoded"
# Handle StringIO instances, because urllib3 doesn't.
if hasattr(body, 'getvalue'):
body = str(body.getvalue())
# Reject any headers containing newlines; the error from the server isn't pretty.
for key, value in headers.items():
if isinstance(value, six.string_types) and '\n' in value:
raise ValueError("headers should not contain newlines (%s: %s)" %
(key, value))
try:
# Grab a connection from the pool to make the request.
            # We return it to the pool when the caller calls close() on the response
urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
r = urlopen(
method=method,
url=url,
body=body,
headers=headers,
preload_content=False
)
r = RESTResponse(r) # wrap up the urllib3 response before proceeding
except socket.error as e:
raise RESTSocketError(url, e)
except urllib3.exceptions.SSLError as e:
raise RESTSocketError(url, "SSL certificate error: %s" % e)
if r.status not in (200, 206):
raise ErrorResponse(r, r.read())
return self.process_response(r, raw_response)
def process_response(self, r, raw_response):
if raw_response:
return r
else:
s = r.read()
try:
resp = json_loadb(s)
except ValueError:
raise ErrorResponse(r, s)
r.close()
return resp
def GET(self, url, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("GET", url, headers=headers, raw_response=raw_response)
def POST(self, url, params=None, headers=None, raw_response=False, is_json_request=False):
assert type(raw_response) == bool
return self.request("POST", url,
post_params=params, headers=headers, raw_response=raw_response,
is_json_request=is_json_request)
def PUT(self, url, body, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
"""
A class with all static methods to perform JSON REST requests that is used internally
by the Dropbox Client API. It provides just enough gear to make requests
and get responses as JSON data (when applicable). All requests happen over SSL.
"""
IMPL = RESTClientObject()
@classmethod
def request(cls, *n, **kw):
"""Perform a REST request and parse the response.
Parameters
method
An HTTP method (e.g. ``'GET'`` or ``'POST'``).
url
The URL to make a request to.
post_params
A dictionary of parameters to put in the body of the request.
This option may not be used if the body parameter is given.
body
The body of the request. Typically, this value will be a string.
It may also be a file-like object. The body
parameter may not be used with the post_params parameter.
headers
A dictionary of headers to send with the request.
raw_response
Whether to return a :class:`RESTResponse` object. Default ``False``.
It's best enabled for requests that return large amounts of data that you
would want to ``.read()`` incrementally rather than loading into memory. Also
use this for calls where you need to read metadata like status or headers,
or if the body is not JSON.
Returns
The JSON-decoded data from the server, unless ``raw_response`` is
set, in which case a :class:`RESTResponse` object is returned instead.
Raises
:class:`ErrorResponse`
The returned HTTP status is not 200, or the body was
not parsed from JSON successfully.
:class:`RESTSocketError`
A ``socket.error`` was raised while contacting Dropbox.
"""
return cls.IMPL.request(*n, **kw)
@classmethod
def GET(cls, *n, **kw):
"""Perform a GET request using :meth:`RESTClient.request()`."""
return cls.IMPL.GET(*n, **kw)
@classmethod
def POST(cls, *n, **kw):
"""Perform a POST request using :meth:`RESTClient.request()`."""
return cls.IMPL.POST(*n, **kw)
@classmethod
def PUT(cls, *n, **kw):
"""Perform a PUT request using :meth:`RESTClient.request()`."""
return cls.IMPL.PUT(*n, **kw)
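# A minimal usage sketch (hypothetical endpoint and token, shown only to
# illustrate the interface; the deprecated v1 API itself is long gone):
#   resp = RESTClient.GET('https://api.dropbox.com/1/account/info',
#                         headers={'Authorization': 'Bearer <access-token>'})
#   print(resp)  # JSON-decoded dict, since raw_response defaults to False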
class RESTSocketError(socket.error):
"""A light wrapper for ``socket.error`` that adds some more information."""
def __init__(self, host, e):
msg = "Error connecting to \"%s\": %s" % (host, str(e))
socket.error.__init__(self, msg)
# Dummy class for docstrings, see doco.py.
class _ErrorResponse__doc__(Exception):
"""Exception raised when :class:`DropboxClient` exeriences a problem.
For example, this is raised when the server returns an unexpected
non-200 HTTP response.
"""
_status__doc__ = "HTTP response status (an int)."
_reason__doc__ = "HTTP response reason (a string)."
_headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
_body__doc__ = "HTTP response body (string or JSON dict)."
_error_msg__doc__ = "Error message for developer (optional)."
_user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
"""
Raised by :meth:`RESTClient.request()` for requests that:
- Return a non-200 HTTP response, or
- Have a non-JSON response body, or
- Have a malformed/missing header in the response.
Most errors that Dropbox returns will have an error field that is unpacked and
placed on the ErrorResponse exception. In some situations, a user_error field
will also come back. Messages under user_error are worth showing to an end-user
of your app, while other errors are likely only useful for you as the developer.
"""
def __init__(self, http_resp, body):
"""
Parameters
http_resp
The :class:`RESTResponse` which errored
body
Body of the :class:`RESTResponse`.
            The reason we can't simply call ``http_resp.read()`` to
            get the body is that ``read()`` is not idempotent:
            since it can't be called more than once,
            we have to pass the string body in separately.
"""
self.status = http_resp.status
self.reason = http_resp.reason
self.body = body
self.headers = http_resp.getheaders()
http_resp.close() # won't need this connection anymore
try:
self.body = json_loadb(self.body)
self.error_msg = self.body.get('error')
self.user_error_msg = self.body.get('user_error')
except ValueError:
self.error_msg = None
self.user_error_msg = None
def __str__(self):
if self.user_error_msg and self.user_error_msg != self.error_msg:
# one is translated and the other is English
msg = "%r (%r)" % (self.user_error_msg, self.error_msg)
elif self.error_msg:
msg = repr(self.error_msg)
elif not self.body:
msg = repr(self.reason)
else:
msg = "Error parsing response body or headers: " +\
"Body - %.100r Headers - %r" % (self.body, self.headers)
return "[%d] %s" % (self.status, msg)
def params_to_urlencoded(params):
"""
    Returns an application/x-www-form-urlencoded 'str' representing the key/value pairs in 'params'.
    Keys and values are str()'d before calling urllib.urlencode, with the exception of unicode
    objects, which are utf8-encoded.
"""
def encode(o):
if isinstance(o, six.text_type):
return o.encode('utf8')
else:
return str(o)
utf8_params = {encode(k): encode(v) for k, v in six.iteritems(params)}
return url_encode(utf8_params)
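# Example: params_to_urlencoded({'path': u'/España'}) should yield
# 'path=%2FEspa%C3%B1a' -- the unicode value is UTF-8 encoded before
# percent-encoding.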
|
smarx/dropbox-sdk-python
|
dropbox/rest.py
|
Python
|
mit
| 15,065
|
from flask import render_template, redirect, url_for
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return redirect(url_for('main.index'))
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
TheNathanBlack/tell-me-to
|
app/main/errors.py
|
Python
|
mit
| 272
|
# -*- coding: utf-8 -*-
"""
Colormaps
=========
Different colormaps.
"""
import matplotlib.pyplot as plt
from matplotlib import cm
import WrightTools as wt
from WrightTools import datasets
fig, gs = wt.artists.create_figure(width="double", cols=[1, 1, "cbar"], nrows=3)
p = datasets.wt5.v1p0p1_MoS2_TrEE_movie
data = wt.open(p)
data.level(0, 2, -3)
data.convert("eV")
data.ai0.symmetric_root(2)
data = data.chop("w1=wm", "w2", at={"d2": [-600, "fs"]})[0]
data.ai0.normalize()
data.ai0.clip(min=0, replace="value")
def fill_row(row, cmap):
# greyscale
ax = plt.subplot(gs[row, 0])
ax.pcolor(data, cmap=wt.artists.grayify_cmap(cmap))
# color
ax = plt.subplot(gs[row, 1])
ax.pcolor(data, cmap=cmap)
# cbar
cax = plt.subplot(gs[row, 2])
wt.artists.plot_colorbar(cax=cax, label=cmap.name, cmap=cmap)
wt.artists.set_ax_labels(cax, yticks=False)
cmap = wt.artists.colormaps["default"]
fill_row(0, cmap)
cmap = wt.artists.colormaps["wright"]
fill_row(1, cmap)
cmap = cm.viridis
fill_row(2, cmap)
# label
wt.artists.set_fig_labels(xlabel=data.w1__e__wm.label, ylabel=data.w2.label, col=slice(0, 1))
|
wright-group/WrightTools
|
examples/colormaps.py
|
Python
|
mit
| 1,140
|
DEBUG = False
BORDER = '#'
FILLED = '■'
EMPTY = ' '
GAP = 4 # gap between spawn zone and sides
ROWS = 25
COLUMNS = 25
FPS = 60
FRAME_LENGTH = 1 / FPS
FALL_SPEED = 30 if not DEBUG else 0
|
BobWhitelock/termtris
|
termtris/config.py
|
Python
|
mit
| 191
|
# coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filebrowser.fields import FileBrowseField
class Entry(models.Model):
created = models.DateTimeField(_(u'Data de Criação'), auto_now_add=True)
modified = models.DateTimeField(_(u'Data de Modificação'), auto_now=True)
description = models.TextField(_(u'Objeto da Licitação'))
process = models.CharField(_(u'Processo Licitatório Nº'), max_length=20)
price = models.CharField(_(u'Tomada de Preços Nº'), max_length=20)
attach = FileBrowseField(_(u'Arquivo'), max_length=200,
directory="licitacao/",
extensions=[".pdf", ".doc"])
def admin_attach(self):
if self.attach:
return "<a href='%s'>Baixar</a>" % self.attach.url
else:
return "Nenhum arquivo encontrado"
admin_attach.allow_tags = True
admin_attach.short_description = _(u'Arquivo')
def __unicode__(self):
return unicode(self.process)
class Meta:
verbose_name = _(u'Licitação')
verbose_name_plural = _(u'Licitações')
ordering = ['-created', 'description', 'process', 'price']
|
klebercode/modelpage
|
modelpage/bid/models.py
|
Python
|
mit
| 1,226
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# General imports
import copy
from numbers import Number
from math import sqrt
# imports from pycast
from pycastobject import PyCastObject
from decorators import optimized
from timeseries import MultiDimensionalTimeSeries
def sign(a, b):
"""Return a with the algebraic sign of b"""
return (b/abs(b)) * a
def pythag(a, b):
"""Computer c = (a^2 + b^2)^0.5 without destructive underflow or overflow
It solves the Pythagorean theorem a^2 + b^2 = c^2
"""
absA = abs(a)
absB = abs(b)
if absA > absB:
return absA * sqrt(1.0 + (absB / float(absA)) ** 2)
elif absB == 0.0:
return 0.0
else:
return absB * sqrt(1.0 + (absA / float(absB)) ** 2)
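# Sanity check for pythag: pythag(3.0, 4.0) == 5.0, and unlike
# sqrt(a*a + b*b) it stays finite for inputs such as pythag(1e200, 1e200),
# where squaring would overflow a double.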
class Matrix(PyCastObject):
"""A Matrix instance stores all relevant data of a matrix.
It provides a number of Matrix operations, such as multiplication,
transformation and inversion.
"""
# default number of digits after decimal point which are printed
defaultStringPrecision = 3
def __init__(self, columns, rows, oneDimArray=None, rowBased=True, isOneDimArray=True):
"""Initialize the Matrix with the given number of columns and rows.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
        :param list oneDimArray: The values for the Matrix as a one
                                    dimensional list. Depending on the
                                    rowBased parameter, the first n values
                                    form either the first row (rowBased=True,
                                    n = the number of columns) or the first
                                    column (rowBased=False, n = the number of
                                    rows). The length of oneDimArray has to be
                                    columns * rows.
                                    If isOneDimArray is False this should be a
                                    two dimensional list.
:param boolean rowBased: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False)
:param boolean isOneDimArray: Indicates whether the parameter
oneDimArray is a one dimensional array or
a two dimensional array.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- len(oneDimArray) != columns * rows
"""
if columns < 1 or rows < 1:
raise ValueError("At least one row and one column is necessary")
super(Matrix, self).__init__()
self._columns = columns
self._rows = rows
if oneDimArray is None:
self.matrix = [[0.0 for i in xrange(rows)] for j in xrange(columns)]
elif isOneDimArray:
if len(oneDimArray) != columns * rows:
raise ValueError("""Size of array does not fit in Matrix
with %d rows and %d columns""" % (rows, columns))
if rowBased:
self.matrix = []
for j in xrange(columns):
self.matrix.append([])
for i in xrange(rows):
self.matrix[j].append(oneDimArray[i * columns + j])
else:
self.matrix = [[oneDimArray[j * rows + i] for i in xrange(rows)] for j in xrange(columns)]
else:
self._initialize_with_array(oneDimArray, rowBased)
self._stringPrecision = Matrix.defaultStringPrecision
def __str__(self):
"""Return a String representation of the :py:obj:`self`
The number of digits after the decimal point can be specified using
:py:meth:`self.set_str_precision` """
rep = "%d x %d Matrix\n" % (self.get_height(), self.get_width())
# get value with the most digits before the decimal point.
max_val = max(max(abs(min(row)), max(row)) for row in self.matrix)
# set width for each entry.
# places before decimal place, places after decimal place +
# decimal point, sign and one empty space.
width = len(str(int(max_val))) + self._stringPrecision + 3
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
val = float(self.get_value(col, row))
rep += "{num: {width}.{prec}f}".format(num=val, width=width, prec=self._stringPrecision)
rep += "\n"
return rep
def __eq__(self, otherMatrix):
"""Return if :py:obj:`self` and the other Matrix are equal
Matrices are equal to each other if:
- the values are equal at all positions.
:return: :py:const:`True` if Matrix objects are equal,
            :py:const:`False` otherwise.
:rtype: boolean
"""
if self.matrix != otherMatrix.matrix:
return False
return True
def __ne__(self, otherMatrix):
"""Return if :py:obj:`self` and the other Matrix are not equal"""
return not self == otherMatrix
def _initialize_with_array(self, data, rowBased=True):
"""Set the matrix values from a two dimensional list."""
if rowBased:
self.matrix = []
if len(data) != self._rows:
raise ValueError("Size of Matrix does not match")
for col in xrange(self._columns):
self.matrix.append([])
for row in xrange(self._rows):
if len(data[row]) != self._columns:
raise ValueError("Size of Matrix does not match")
self.matrix[col].append(data[row][col])
else:
if len(data) != self._columns:
raise ValueError("Size of Matrix does not match")
for col in data:
if len(col) != self._rows:
raise ValueError("Size of Matrix does not match")
self.matrix = copy.deepcopy(data)
@classmethod
def from_timeseries(cls, timeSeries):
"""Create a new Matrix instance from a TimeSeries or MultiDimensionalTimeSeries
:param TimeSeries timeSeries: The TimeSeries, which should be used to
create a new Matrix.
:return: A Matrix with the values of the timeSeries. Each row of
the Matrix represents one entry of the timeSeries.
The time of an entry is ignored in the matrix.
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError`, if the timeSeries is empty.
"""
width = 1
if isinstance(timeSeries, MultiDimensionalTimeSeries):
width = timeSeries.dimension_count()
matrixData = [[] for dummy in xrange(width)]
for entry in timeSeries:
for col in xrange(1, len(entry)):
matrixData[col - 1].append(entry[col])
if not matrixData[0]:
raise ValueError("Cannot create Matrix from empty Timeseries")
mtrx = Matrix.from_two_dim_array(len(matrixData), len(matrixData[0]), matrixData)
# mtrx.initialize(matrixData, rowBased=False)
return mtrx
@classmethod
def from_two_dim_array(cls, cols, rows, twoDimArray):
"""Create a new Matrix instance from a two dimensional array.
:param integer columns: The number of columns for the Matrix.
:param integer rows: The number of rows for the Matrix.
:param list twoDimArray: A two dimensional column based array
with the values of the matrix.
:raise: Raises an :py:exc:`ValueError` if:
- columns < 1 or
- rows < 1
- the size of the parameter does not match with the size of
the Matrix.
"""
return Matrix(cols, rows, twoDimArray, rowBased=False, isOneDimArray=False)
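    # Usage sketch (added; this helper is hypothetical, not part of pycast):
    # the same four values interpreted row-based vs. column-based.
    @staticmethod
    def _construction_demo():
        rowBased = Matrix(2, 2, [1, 2, 3, 4], rowBased=True)    # rows [1, 2] and [3, 4]
        colBased = Matrix(2, 2, [1, 2, 3, 4], rowBased=False)   # rows [1, 3] and [2, 4]
        assert rowBased.get_value(1, 0) == 2   # column 1, row 0
        assert colBased.get_value(1, 0) == 3
        return rowBased, colBased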
def initialize(self, datalist, rowBased=True):
"""Initialize :py:obj:`self` with the values stored in the two dimensional list.
:param list datalist: A list representing the matrix rows
containing lists representing the columns for each row.
The values in the List must be numeric
        :param boolean rowBased: Indicates whether the datalist is row or
column based. Has to be True if datalist[i] is the i'th row,
or False if datalist[i] is the i'th column
:raise: Raises an :py:exc:`ValueError` if the size of the parameter
does not match with the size of the Matrix.
:note: The values in the list are not checked for the correct type.
"""
self._initialize_with_array(datalist, rowBased)
def to_multi_dim_timeseries(self):
"""Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries
"""
ts = MultiDimensionalTimeSeries(dimensions=self.get_width())
for row in xrange(self.get_height()):
newEntry = []
for col in xrange(self.get_width()):
newEntry.append(self.get_value(col, row))
ts.add_entry(row, newEntry)
return ts
def get_array(self, rowBased=True):
"""Return a two dimensional list with the values of the :py:obj:`self`.
        :param boolean rowBased: Indicates whether the returned list should be
row or column based. Has to be True if list[i] should be the i'th
row, False if list[i] should be the i'th column.
:return: Returns a list representing the matrix rows
containing lists representing the columns for each row.
:rtype: list
"""
if rowBased:
array = []
for row in xrange(self._rows):
newRow = []
for col in xrange(self._columns):
newRow.append(self.get_value(col, row))
array.append(newRow)
return array
return copy.deepcopy(self.matrix)
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
        :param list matrix_list: A one dimensional list containing the
                                 values for the Matrix. Depending on the
                                 rowBased parameter, either the rows are
                                 combined or the columns.
        :param boolean rowBased: Only necessary if the matrix_list is given.
                                 Indicates whether the matrix_list combines
                                 rows together (rowBased=True) or columns
                                 (rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix
def set_value(self, column, row, value):
"""Set the value of the Matrix at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
:param numeric value: The new value at the given column/row
        :raise: Raises an :py:exc:`IndexError` if the index is out of range.
"""
self.matrix[column][row] = value
def get_value(self, column, row):
"""Return the value of :py:obj:`self` at the specified column and row.
:param integer column: The index for the column (starting at 0)
:param integer row: The index for the row (starting at 0)
        :raise: Raises an :py:exc:`IndexError` if the index is out of range.
"""
return self.matrix[column][row]
def get_height(self):
"Return the number of rows of the Matrix"
return self._rows
def get_width(self):
"""Return the number of columns of the Matrix"""
return self._columns
def set_string_precision(self, precision):
"""Set the number of digits after the decimal point used to print the Matrix
:param integer precision: The number of digits to which the values
should be rounded when the Matrix is printed.
:raise: Raises an :py:exc:`ValueError` if precision is negative.
"""
if precision < 0:
raise ValueError("precision cannot be negative")
self._stringPrecision = precision
def invers(self):
"""Return the invers matrix, if it can be calculated
:return: Returns a new Matrix containing the invers
:rtype: Matrix
:raise: Raises an :py:exc:`ValueError` if the matrix is not inversible
        :note: Only a square matrix with a determinant != 0 can be inverted.
:todo: Reduce amount of create and copy operations
"""
if self._columns != self._rows:
raise ValueError("A square matrix is needed")
mArray = self.get_array(False)
appList = [0] * self._columns
# add identity matrix to array in order to use gauss jordan algorithm
for col in xrange(self._columns):
mArray.append(appList[:])
mArray[self._columns + col][col] = 1
        # create new Matrix and execute gauss jordan algorithm
exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
gjResult = exMatrix.gauss_jordan()
# remove identity matrix from left side
# TODO Implement slicing directly for Matrix
gjResult.matrix = gjResult.matrix[self._columns:]
gjResult._columns = len(gjResult.matrix)
return gjResult
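    # Minimal check sketch (added; this helper is hypothetical): invert a
    # 2x2 matrix and verify A * A^-1 is the identity up to rounding.
    @staticmethod
    def _invers_demo():
        a = Matrix(2, 2, [4.0, 7.0, 2.0, 6.0])   # rows [4, 7] and [2, 6]
        identity = a * a.invers()
        assert abs(identity.get_value(0, 0) - 1.0) < 1e-9
        assert abs(identity.get_value(1, 0)) < 1e-9
        return identity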
def __copy__(self):
"""Return a new clone of the Matrix
:return: Returns a Matrix containing the same data and
configuration as self.
It does not copy super classes, but the
optimization status (True/False) is copied
:rtype: Matrix
"""
mtrx = Matrix.from_two_dim_array(self._columns, self._rows, self.matrix)
        # copy of immutable Boolean.
mtrx.optimizationEnabled = self.optimizationEnabled
return mtrx
def __mul__(self, other):
"""Return the result of the matrixmultiplication or a multiple of the matrix
:param Matrix or Number other: The matrix, which should be multiplied.
:return: Returns a new Matrix with the result of the multiplication
:rtype: Matrix
        :raise: Raises an :py:exc:`ValueError` if the number of columns of
                the Matrix does not match the number of rows of the given
                matrix.
:raise: Raises an :py:exc:`TypeError` if the input parameter is not
a Matrix or a number
"""
if isinstance(other, Matrix):
return self.matrix_multiplication(other)
elif isinstance(other, Number):
return self.multiply(other)
else:
raise TypeError("Can't multiply Matrix with type %s" % type(other).__name__)
def __rmul__(self, other):
"""Return multiple of Matrix
:return: Returns a
:raise: Raises an :py:exc:`ValueError` if the input parameter is not a number
"""
if isinstance(other, Number):
return self.multiply(other)
else:
raise TypeError("Can't multiply Matrix with type %s" % type(other).__name__)
def is_matrix_mult_possible(self, matrix):
"""Return True if :py:obj:`self` can be multiplied with the other matrix, False otherwise"""
if self._columns != matrix.get_height():
return False
return True
@optimized
def matrix_multiplication(self, matrix):
"""Multiply :py:obj:`self` with the given matrix and return result matrix.
        :param Matrix matrix: The matrix, which should be multiplied.
:return: Returns a new Matrix with the result of the multiplication
:rtype: Matrix
:note: Make sure, that the matrices can be multiplied.
The number of columns of the Matrix instance must match with
the number of rows of the Matrix given as parameter.
Use is_matrix_mult_possible(matrix) to test.
"""
resultMatrix = Matrix(matrix.get_width(), self.get_height())
for r_row in xrange(self._rows):
for r_col in xrange(matrix.get_width()):
#blockwise matrix multiplication hack
if isinstance(self.get_array()[0][0], Matrix):
blocksize = self.get_array()[0][0].get_width()
valueT = Matrix(blocksize, blocksize)
else:
valueT = 0
for column in xrange(matrix.get_height()):
valueT += self.get_value(column, r_row) * matrix.get_value(r_col, column)
resultMatrix.set_value(r_col, r_row, valueT)
return resultMatrix
def matrix_multiplication_blockwise(self, matrix, blocksize):
"""
http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication
"""
#Create the blockwise version of self and matrix
selfBlockwise = self.matrix_to_blockmatrix(blocksize)
matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)
return (selfBlockwise * matrixBlockwise).flatten()
def flatten(self):
"""
        If the current Matrix has block matrices as elements, this method
        flattens it into a single Matrix consisting only of the second-level
        elements
[[[1 2] [[3 4] to [[1 2 3 4]
[5 6]] [7 8]]] [5 6 7 8]]
"""
blocksize = self.get_array()[0][0].get_width()
width = self.get_width() * blocksize
columnsNew = [[] for dummy in xrange(width)]
for row in self.get_array():
index = 0
for submatrix in row:
for column in submatrix.get_array(False):
columnsNew[index] += column
index += 1
columnsFlat = sum(columnsNew, [])
return Matrix(width, len(columnsNew[0]), columnsFlat, rowBased=False)
def matrix_to_blockmatrix(self, blocksize):
"""
        Turns an n*m Matrix into an (n/blocksize)*(m/blocksize) Matrix.
Each element is another blocksize*blocksize matrix.
"""
if self.get_width() % blocksize or self.get_height() % blocksize:
raise ValueError("Number of rows and columns have to be evenly dividable by blocksize")
selfBlocks = []
for columnIndex in range(0, self.get_width() - 1, blocksize):
for rowIndex in range(0, self.get_height() - 1, blocksize):
currentBlock = []
for blockRows in self.get_array(False)[columnIndex:columnIndex + blocksize]:
currentBlock += blockRows[rowIndex:rowIndex + blocksize]
selfBlocks.append(Matrix(blocksize, blocksize, currentBlock, rowBased=False))
return Matrix(self.get_width() / blocksize, self.get_height() / blocksize, selfBlocks, rowBased=False)
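    # Sketch (added; this helper is hypothetical): blockwise multiplication
    # agrees with the plain product when both dimensions divide evenly by
    # the blocksize.
    @staticmethod
    def _blockwise_demo():
        a = Matrix(4, 4, [float(i) for i in xrange(16)])
        b = Matrix(4, 4, [float(i % 5) for i in xrange(16)])
        plain = a * b
        assert plain == a.matrix_multiplication_blockwise(b, 2)
        return plain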
# def matrix_multiplication_scipy(self, matrix):
# a = np.matrix(self.get_array())
# b = np.matrix(matrix.get_array())
# c = (a*b)
# c_list = c.tolist()
# result = Matrix(len(c_list[0]), len(c_list), None)
# result.initialize(c_list)
# return result
def multiply(self, multiplicator):
"""Return a new Matrix with a multiple.
:param Number multiplicator: The number to calculate the multiple
        :return: The Matrix with the multiple.
:rtype: Matrix
"""
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) * multiplicator)
return result
def transform(self):
"""Return a new transformed matrix.
:return: Returns a new transformed Matrix
:rtype: Matrix
"""
t_matrix = Matrix(self._rows, self._columns)
for col_i, col in enumerate(self.matrix):
for row_i, entry in enumerate(col):
t_matrix.set_value(row_i, col_i, entry)
return t_matrix
def gauss_jordan(self):
"""Reduce :py:obj:`self` to row echelon form.
:return: Returns :py:obj:`self` in row echelon form for convenience.
:rtype: Matrix
        :raise: Raises an :py:exc:`ValueError` if:
            - the matrix does not have more columns than rows
              (rows >= columns) or
            - the matrix is not invertible.
            In this case :py:obj:`self` is not changed.
"""
mArray = self.get_array(rowBased=False)
width = self.get_width()
height = self.get_height()
        if not height < width:
            raise ValueError("Matrix must have more columns than rows")
# Start with complete matrix and remove in each iteration
# the first row and the first column
for offset in xrange(height):
# Switch lines, if current first value is 0
if mArray[offset][offset] == 0:
for i in xrange(offset + 1, height):
if mArray[offset][i] != 0:
                        # swap row `offset` with row `i` (storage is column-based)
                        tmp = []
                        for j in xrange(offset, width):
                            tmp.append(mArray[j][offset])
                        for j in xrange(offset, width):
                            mArray[j][offset] = mArray[j][i]
                            mArray[j][i] = tmp[j - offset]
                        break
currentRow = [mArray[j][offset] for j in xrange(offset, width)]
            divider = float(currentRow[0])
            # If no row is found with a value != 0,
            # the matrix is not invertible
            if divider == 0:
                raise ValueError("Matrix is not invertible")
            transformedRow = []
            # Divide the current row by its first element
            for value in currentRow:
                transformedRow.append(value / divider)
# put transformed row back into matrix
for j in xrange(offset, width):
mArray[j][offset] = transformedRow[j - offset]
# subtract multiples of the current row, from all remaining rows
# in order to become a 0 at the current first column
for i in xrange(offset + 1, height):
multi = mArray[offset][i]
for j in xrange(offset, width):
mArray[j][i] = mArray[j][i] - mArray[j][offset] * multi
for i in xrange(1, height):
            # subtract multiples of the i-th row from all rows above
for j in xrange(0, i):
multi = mArray[i][j]
for col in xrange(i, width):
mArray[col][j] = mArray[col][j] - mArray[col][i] * multi
self.matrix = mArray
return self
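    # Sketch (added; this helper is hypothetical): solve A x = b by reducing
    # the augmented matrix [A | b]; the last column then holds the solution x.
    @staticmethod
    def _gauss_jordan_demo():
        # A = [[2, 1], [1, 3]], b = [3, 5]  ->  x = [0.8, 1.4]
        augmented = Matrix(3, 2, [2.0, 1.0, 3.0, 1.0, 3.0, 5.0])
        augmented.gauss_jordan()
        return [augmented.get_value(2, row) for row in xrange(2)]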
def __add__(self, matrix):
"""Return a new Matrix instance with the result of the addition
:param Matrix matrix: The matrix, which should be added to the instance
:return: A new Matrix with the same size with the result of the addition
:rtype: Matrix
:raise: Raises a :py:exc:`ValueError` if the size of the instance does
not match with the size of the parameter matrix
"""
if self.get_height() != matrix.get_height() or self.get_width() != matrix.get_width():
raise ValueError("Size of matrix does not match")
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) + matrix.get_value(col, row))
return result
def __sub__(self, matrix):
"""Return a new Matrix instance with the result of the subtraction
:param Matrix matrix: The matrix, which should be subtracted from the instance
:return: A new Matrix with the same size with the result of the subtraction
:rtype: Matrix
:raise: Raises a :py:exc:`ValueError` if the size of the instance does
not match with the size of the parameter matrix
"""
if self.get_height() != matrix.get_height() or self.get_width() != matrix.get_width():
raise ValueError("Size of matrix does not match")
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) - matrix.get_value(col, row))
return result
def __div__(self, divider):
"""Return a new Matrix, where all values are divided by the divider
:param integer divider: The divider to divide all values of the matrix
:return: A new Matrix, where all values are divided by the divider
:rtype: Matrix
"""
result = Matrix(self.get_width(), self.get_height())
for row in xrange(self.get_height()):
for col in xrange(self.get_width()):
result.set_value(col, row, self.get_value(col, row) / float(divider))
return result
def householder(self):
"""Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
        :note: Currently the algorithm only works for square matrices
:todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal.
Due to rounding errors, this is currently not ensured
"""
# copy instance to transform it to bidiagonal form.
bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
# build identity matrix, which is used to calculate householder transformations
identityMatrixRow = Matrix(self.get_height(), self.get_height())
for i in xrange(self.get_height()):
identityMatrixRow.set_value(i, i, 1.0)
identityMatrixCol = Matrix(self.get_width(), self.get_width())
for i in xrange(self.get_width()):
identityMatrixCol.set_value(i, i, 1.0)
# zero out the k'th column and row
for k in xrange(self.get_width() - 1):
# vector with the values of the k'th column (first k-1 rows are 0)
x = Vector(self.get_height())
y = Vector(self.get_height())
if k > 0:
x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
s = 0.0
for i in xrange(k, self.get_height()):
val = bidiagMatrix.get_value(k, i)
x.set_value(0, i, val)
s += (val ** 2)
s = sqrt(s)
# y must have same length as x
y.set_value(0, k, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
# calculate w = (x-y)/(|x-y|)
w = tmp / norm
# uk is the k'th householder matrix for the column
uk = identityMatrixRow - 2 * (w * w.transform())
bidiagMatrix = uk * bidiagMatrix
if k == 0:
# set u in first iteration.
u = uk
else:
u = u * uk
            # zero out the row
if k < self.get_width() - 2:
x = Vector(self.get_width())
y = Vector(self.get_width())
x.set_value(0, k, bidiagMatrix.get_value(k, k))
y.set_value(0, k, bidiagMatrix.get_value(k, k))
s = 0.0
for i in xrange(k + 1, bidiagMatrix.get_width()):
val = bidiagMatrix.get_value(i, k)
x.set_value(0, i, val)
s += (val ** 2)
# length of vector x ignoring the k'th value
s = sqrt(s)
# y must have same length as x, since k'th value is equal
# set k+1 value to s
y.set_value(0, k + 1, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
w = tmp / norm
# vk is the k'th householder matrix for the row
vk = identityMatrixCol - (2 * (w * w.transform()))
bidiagMatrix = bidiagMatrix * vk
if k == 0:
# set v in first iteration
v = vk
else:
v = vk * v
return (u, bidiagMatrix, v)
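    # Sketch (added; this helper is hypothetical): u * b * v should reproduce
    # the original matrix entry-wise up to floating-point rounding, with b
    # (nearly) bidiagonal.
    @staticmethod
    def _householder_demo():
        m = Matrix(3, 3, [4.0, 1.0, 2.0, 1.0, 3.0, 0.0, 2.0, 0.0, 5.0])
        u, b, v = m.householder()
        return u * b * v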
def svd(self, maxIteration=50):
"""Return the singular value decomposition of the Matrix instance
        :param integer maxIteration: The maximum number of iterations,
which are executed in the qr decomposition
:return: A tuple with Matrices u, sigma, v with
so that u * sigma * v^T = self
:rtype: tuple
:raise: Raises a :py:exc:`ValueError` if the Matrix object has
more columns than rows
        :note: Translation of the FORTRAN implementation of the SVD given
in the NUMERICAL RECIPES IN FORTRAN 77. THE ART OF SCIENTIFIC
COMPUTING.
The algorithm is not yet numerical stable, so the results may
not be in all cases as expected.
"""
if(self.get_width() > self.get_height()):
raise ValueError("Matrix has more columns than rows.")
eps = 1.e-15
tol = 1.e-64 / eps
a = self.get_array(False)
m = len(a[0])
n = len(a)
v = []
for k in xrange(n):
v.append([0.0] * n)
# output diagonal
w = [0.0] * n
# upper diagonal (for bidiagonal form)
rv1 = [0.0] * n
# Householder Reduction to bidiagional form
g = 0.0
anorm = 0.0
for i in xrange(n):
l = i + 1
rv1[i] = g
s = 0.0
# calculate length of relevant row vector in matrix (part of i'th column)
s = sum(a[i][k] ** 2 for k in xrange(i, m))
if s <= tol:
g = 0.0
else:
f = a[i][i]
# square root to get actual length of vector
g = sqrt(s) if f < 0 else -sqrt(s)
h = f * g - s
a[i][i] = f - g
for j in xrange(l, n):
s = sum(a[i][k] * a[j][k] for k in xrange(i, m))
f = s / h
for k in xrange(i, m):
a[j][k] += (f * a[i][k])
w[i] = g
# calculate length of relevant column vector in matrix (part of i'th row)
s = 0.0
s = sum(a[k][i] ** 2 for k in xrange(l, n))
if s <= tol:
g = 0.0
else:
f = a[l][i]
g = sqrt(s) if f < 0 else -sqrt(s)
h = f * g - s
a[l][i] = f - g
for k in xrange(l, n):
rv1[k] = a[k][i] / h
for j in xrange(l, m):
s = sum(a[k][j] * a[k][i] for k in xrange(l, n))
for k in xrange(l, n):
a[k][j] += (s * rv1[k])
anorm = max(anorm, (abs(w[i]) + abs(rv1[i])))
# Accumulation of right hand transformations
for i in xrange(n - 1, -1, -1):
if g != 0.0:
for j in xrange(l, n):
v[i][j] = a[j][i] / (g * a[i + 1][i])
for j in xrange(l, n):
s = sum(a[k][i] * v[j][k] for k in xrange(l, n))
for k in xrange(l, n):
v[j][k] += (s * v[i][k])
for j in xrange(l, n):
v[j][i] = 0.0
v[i][j] = 0.0
v[i][i] = 1.0
g = rv1[i]
l = i
# Accumulation of left hand transformations
for i in xrange(n - 1, -1, -1):
l = i + 1
g = w[i]
for j in xrange(l, n):
a[j][i] = 0.0
if g != 0.0:
for j in xrange(l, n):
s = sum(a[i][k] * a[j][k] for k in xrange(l, m))
f = s / (a[i][i] * g)
for k in xrange(i, m):
a[j][k] += f * a[i][k]
for j in xrange(i, m):
a[i][j] /= g
else:
for j in xrange(i, m):
a[i][j] = 0.0
a[i][i] += 1.0
eps *= anorm
# Diagonalization of the bidiagonal form.
# Loop over singular values and over allowed iterations
for k in xrange(n - 1, -1, -1):
for dummy in xrange(maxIteration):
for l in xrange(k, -1, -1):
convergenceTest = False
if abs(rv1[l]) <= eps:
convergenceTest = True
break
if abs(w[l - 1]) <= eps:
# convergenceTest = False (already default)
break
if not convergenceTest:
c = 0.0
s = 1.0
nm = l - 1
for i in xrange(l, k + 1):
f = s * rv1[i]
rv1[i] = c * rv1[i]
if abs(f) <= eps:
break
g = w[i]
h = pythag(f, g)
w[i] = h
c = g / h
s = -f / h
for j in xrange(m):
y = a[nm][j]
z = a[i][j]
a[nm][j] = (y * c) + (z * s)
a[i][j] = -(y * s) + (z * c)
z = w[k]
if l == k:
# convergence
if z < 0.0:
w[k] = -z
for j in xrange(n):
v[k][j] = -v[k][j]
break
x = w[l]
y = w[k - 1]
g = rv1[k - 1]
h = rv1[k]
f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y)
g = pythag(f, 1.0)
f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x
c = 1.0
s = 1.0
for i in xrange(l + 1, k + 1):
g = rv1[i]
y = w[i]
h = s * g
g = c * g
z = pythag(f, h)
rv1[i - 1] = z
c = f / z
s = h / z
f = (x * c) + (g * s)
g = -x * s + g * c
h = y * s
y = y * c
for jj in xrange(n):
x = v[i - 1][jj]
z = v[i][jj]
v[i - 1][jj] = (x * c) + (z * s)
v[i][jj] = -(x * s) + (z * c)
z = pythag(f, h)
w[i - 1] = z
if z != 0.0:
z = 1.0 / z
c = f * z
s = h * z
f = (c * g) + (s * y)
x = -s * g + c * y
for jj in xrange(m):
y = a[i - 1][jj]
z = a[i][jj]
a[i - 1][jj] = (y * c) + (z * s)
a[i][jj] = -(y * s) + (z * c)
rv1[l] = 0.0
rv1[k] = f
w[k] = x
# Build Matrix instances for the result
uM = Matrix.from_two_dim_array(len(a), len(a[0]), a)
diagMatrix = Matrix(len(w), len(w))
for i in xrange(len(w)):
diagMatrix.set_value(i, i, w[i])
vM = Matrix.from_two_dim_array(len(v), len(v[0]), v)
return uM, diagMatrix, vM
def pseudoinverse(self):
"""Return the pseudoinverse (Moore-Penrose-Inverse).
The singular value decomposition is used to calculate the pseudoinverse.
"""
transform = False
if self.get_width() > self.get_height():
transform = True
u, sigma, v = self.transform().svd()
else:
u, sigma, v = self.svd()
# calculate inverse of sigma
for i in xrange(min(sigma.get_height(), sigma.get_width())):
val = sigma.get_value(i, i)
# divide only if the value is not 0 or close to zero (rounding errors)
eps = 1.e-15
if eps < val or val < -eps:
sigma.set_value(i, i, 1 / val)
if transform:
return (v * sigma * u.transform()).transform()
else:
return v * sigma * u.transform()
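# Sketch (added; this helper is hypothetical): the pseudoinverse solves the
# least squares problem min ||A x - b|| via x = pinv(A) * b. Per the note in
# svd(), results can deviate slightly for numerically awkward inputs.
def _pseudoinverse_demo():
    a = Matrix(2, 3, [1.0, 1.0, 1.0, 2.0, 1.0, 3.0])   # rows [1, 1], [1, 2], [1, 3]
    b = Matrix(1, 3, [1.0, 2.0, 3.0])                  # column vector [1, 2, 3]
    return a.pseudoinverse() * b   # approximately [0, 1]: intercept 0, slope 1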
class Vector(Matrix):
"""A vector instance is a Matrix, which only has 1 column"""
def __init__(self, rows):
"""Initiliate a vector with the given number of rows.
All values of this vector are 0.0"""
super(Vector, self).__init__(1, rows)
@classmethod
def initialize_from_matrix(cls, matrix, column):
"""Create vector from matrix
:param Matrix matrix: The Matrix, which should be used to create the vector.
:param integer column: The column of the matrix, which should be used
to create the new vector.
:raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column.
"""
vec = Vector(matrix.get_height())
for row in xrange(matrix.get_height()):
vec.set_value(0, row, matrix.get_value(column, row))
return vec
def norm(self):
"""Calculates the norm (length) of the vector
:return: Return the length of the vector
:rtype: float
"""
return sqrt(sum(i[0] ** 2 for i in self.get_array()))
def unify(self):
"""Unifies the vector. The length of the vector will be 1.
:return: Return the instance itself
:rtype: Vector
"""
length = float(self.norm())
for row in xrange(self.get_height()):
self.set_value(0, row, self.get_value(0, row) / length)
return self
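# Sketch (added; this helper is hypothetical): unify() scales a vector to
# unit length in place, so norm() afterwards is 1.0 up to rounding.
def _vector_unify_demo():
    v = Vector(2)
    v.set_value(0, 0, 3.0)
    v.set_value(0, 1, 4.0)
    v.unify()          # entries become 0.6 and 0.8
    return v.norm()    # 1.0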
|
T-002/pycast
|
pycast/common/matrix.py
|
Python
|
mit
| 40,814
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djangopress.pages.models
class Migration(migrations.Migration):
dependencies = [
('pages', '0008_auto_20150730_0004'),
]
operations = [
migrations.AddField(
model_name='page',
name='image',
field=models.ImageField(null=True, upload_to=djangopress.pages.models.page_file_path, blank=True),
),
]
|
codefisher/djangopress
|
djangopress/pages/migrations/0009_page_image.py
|
Python
|
mit
| 487
|
from web import app
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5320, debug=True)
|
xavierskip/LANinfo
|
runserver.py
|
Python
|
mit
| 100
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Steamfront documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 25 17:45:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Steamfront'
copyright = '2017, Callum Bartlett'
author = 'Callum Bartlett'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Steamfrontdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Steamfront.tex', 'Steamfront Documentation',
'Callum Bartlett', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'steamfront', 'Steamfront Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Steamfront', 'Steamfront Documentation',
author, 'Steamfront', 'One line description of project.',
'Miscellaneous'),
]
|
4Kaylum/Steamfront
|
docs/conf.py
|
Python
|
mit
| 5,020
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import MonolingualDataset
from fairseq.tasks.language_modeling import LanguageModelingConfig, LanguageModelingTask
from tests import utils as test_utils
class TestLMContextWindow(unittest.TestCase):
def test_eval_dataloader(self):
dictionary = test_utils.dummy_dictionary(10)
assert len(dictionary) == 14 # 4 extra special symbols
assert dictionary.pad() == 1
dataset = test_utils.TestDataset(
[
torch.tensor([4, 5, 6, 7], dtype=torch.long),
torch.tensor([8, 9, 10, 11], dtype=torch.long),
torch.tensor([12, 13], dtype=torch.long),
]
)
dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)
config = LanguageModelingConfig(tokens_per_sample=4)
task = LanguageModelingTask(config, dictionary)
eval_dataloader = task.eval_lm_dataloader(
dataset=dataset,
batch_size=1,
context_window=2,
num_workers=0,
)
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]
batch = next(eval_dataloader)
assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
assert batch["target"][0].tolist() == [1, 1, 12, 13]
if __name__ == "__main__":
unittest.main()
|
pytorch/fairseq
|
tests/test_lm_context_window.py
|
Python
|
mit
| 1,865
|
from helpers import database as db
from subapps.auth.models import login
from helpers.erorrs import BadRequest
def register(user, pas):
sql_insert_query = 'INSERT INTO users'
username = "'" + str(user) + "'"
password = "'" + str(pas) + "'"
sel_q = "SELECT id FROM users WHERE username="+username
res = db.query(sel_q)
mes = res.statusmessage
res.close()
if mes[-1] != "0":
raise BadRequest("username already used")
fields = ["id", "username", "password"]
field_str = " ("+', '.join(fields)+") VALUES "
val_str = "(" + "(SELECT MAX(id) from users)+1, "
val_str += ', '.join([username, password])+")"
res = db.save(sql_insert_query+field_str+val_str)
token = login.login([user, pas])
return token
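# Hedged sketch (added, not part of this repo): the string-built SQL above is
# open to SQL injection. With a plain psycopg2 cursor the same logic can use
# parameterized queries; `cur` and this helper name are hypothetical.
def register_parameterized(cur, user, pas):
    cur.execute("SELECT id FROM users WHERE username = %s", (user,))
    if cur.fetchone() is not None:
        raise BadRequest("username already used")
    cur.execute(
        "INSERT INTO users (id, username, password) "
        "VALUES ((SELECT MAX(id) FROM users) + 1, %s, %s)",
        (user, pas))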
|
rapkin/data-vis
|
subapps/auth/models/register.py
|
Python
|
mit
| 771
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elitedata', '0002_auto_20150617_1312'),
]
operations = [
migrations.AddField(
model_name='stationcommodity',
name='demand_level',
field=models.CharField(default='low', max_length=100),
preserve_default=False,
),
]
|
Jingyu-Yao/elitetraderoutes
|
elitedata/migrations/0003_stationcommodity_demand_level.py
|
Python
|
mit
| 470
|
"""
Django settings for testproj project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent # $ getAPathArgument=Path(..)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-hqg4$wqk3894#_4p$ibwzpg5+&dvx)%6q45v0yq=-43c886(($'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'testapp.apps.TestappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testproj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
github/codeql
|
python/ql/test/library-tests/frameworks/rest_framework/testproj/settings.py
|
Python
|
mit
| 3,331
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
from six.moves import range
def kinetic_energy(velocity):
return 0.5 * tf.reduce_sum(tf.multiply(velocity, velocity), axis=1)
def hamiltonian(position_z, position_eps, velocity_z, velocity_eps, log_posterior):
"""Computes the Hamiltonian of the current position, velocity pair
H = U(x) + K(v)
U is the potential energy and is = -log_posterior(x)
Parameters
----------
    position_z, position_eps : tf.Variable
        Position or state vectors (samples from the target distribution)
    velocity_z, velocity_eps : tf.Variable
        Auxiliary velocity variables
    log_posterior
        Function from position to log posterior; the potential energy
        is its negation
    Returns
    -------
    hamiltonian : float
"""
#print('position.shape', position.shape)
#print('velocity.shape', position.shape)
energy_function = tf.squeeze(-log_posterior(position_z, position_eps))
return energy_function + kinetic_energy(velocity_z) + kinetic_energy(velocity_eps)
def metropolis_hastings_accept(energy_prev, energy_next):
#ediff = energy_prev - energy_next
ediff = tf.squeeze(tf.subtract(energy_prev, energy_next))
#ediff = tf.subtract(energy_prev, energy_next)
#print('energy_prev', [e.shape for e in energy_prev])
#print('energy_prev', energy_prev)
#print('energy_next.shape', energy_next.shape)
#print('energy_prev.shape', energy_prev.shape)
#print('energy_next.shape', energy_next.shape)
#print('ediff.shape', ediff.shape)
#print('tf.exp(ediff).shape', tf.exp(ediff).shape)
return (tf.exp(ediff) - tf.random_uniform(tf.shape(ediff))) >= 0.0
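# Worked note (added): the test above accepts a proposal with probability
# min(1, exp(E_prev - E_next)); drawing u ~ Uniform(0, 1) and checking
# exp(E_prev - E_next) - u >= 0 implements exactly that rule. A plain NumPy
# sketch of the same acceptance probability:
def _mh_accept_probability(energy_prev, energy_next):
    return np.minimum(1.0, np.exp(energy_prev - energy_next))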
def simulate_dynamics(initial_pos_z, initial_pos_eps, initial_vel_z, initial_vel_eps, stepsize, n_steps, log_posterior):
def leapfrog(pos_z, pos_eps, vel_z, vel_eps, step, i):
# TODO: Check whether reduce_sum is correct
dE_dpos = tf.gradients(tf.squeeze(-log_posterior(pos_z, pos_eps)), [pos_z, pos_eps]) #[0]
new_vel_z = vel_z - step * dE_dpos[0]
new_pos_z = pos_z + step * new_vel_z
new_vel_eps = vel_eps - step * dE_dpos[1]
new_pos_eps = pos_eps + step * new_vel_eps
return [new_pos_z, new_pos_eps, new_vel_z, new_vel_eps, step, tf.add(i, 1)]
def condition(pos_z, pos_eps, vel_z, vel_eps, step, i):
return tf.less(i, n_steps)
dE_dpos = tf.gradients(tf.squeeze(-log_posterior(initial_pos_z, initial_pos_eps)), [initial_pos_z, initial_pos_eps])
stepsize = tf.reshape(stepsize, [-1, 1])
#print('*** critical point ***')
#print('dE_dpos.shape', dE_dpos.shape)
#print('stepsize.shape', stepsize.shape)
#print('initial_vel.shape', initial_vel.shape)
vel_half_step_z = initial_vel_z - 0.5 * tf.reshape(stepsize, [-1, 1]) * dE_dpos[0]
pos_full_step_z = initial_pos_z + tf.reshape(stepsize, [-1, 1]) * vel_half_step_z
vel_half_step_eps = initial_vel_eps - 0.5 * tf.reshape(stepsize, [-1, 1]) * dE_dpos[1]
pos_full_step_eps = initial_pos_eps + tf.reshape(stepsize, [-1, 1]) * vel_half_step_eps
#print('vel_half_step.shape', vel_half_step.shape)
#print('pos_full_step.shape', pos_full_step.shape)
#print('*** critical point ***')
i = tf.constant(0)
final_pos_z, final_pos_eps, new_vel_z, new_vel_eps, _, _ = tf.while_loop(condition, leapfrog, [pos_full_step_z, pos_full_step_eps, vel_half_step_z, vel_half_step_eps, stepsize, i], parallel_iterations=1)
dE_dpos = tf.gradients(tf.squeeze(-log_posterior(final_pos_z, final_pos_eps)), [final_pos_z, final_pos_eps])
final_vel_z = new_vel_z - 0.5 * stepsize * dE_dpos[0]
final_vel_eps = new_vel_eps - 0.5 * stepsize * dE_dpos[1]
return final_pos_z, final_pos_eps, final_vel_z, final_vel_eps
def hmc_step(initial_pos_z, initial_pos_eps, log_posterior, step_size, num_steps):
initial_vel_z = tf.random_normal(tf.shape(initial_pos_z))
initial_vel_eps = tf.random_normal(tf.shape(initial_pos_eps))
final_pos_z, final_pos_eps, final_vel_z, final_vel_eps = simulate_dynamics(initial_pos_z, initial_pos_eps, initial_vel_z, initial_vel_eps, step_size, num_steps, log_posterior)
#print('initial_pos.shape', initial_pos.shape)
#print('initial_vel.shape', initial_vel.shape)
#print('final_pos.shape', final_pos.shape)
#print('final_vel.shape', final_vel.shape)
#print('step_size.shape', step_size.shape)
    energy_prev = hamiltonian(initial_pos_z, initial_pos_eps, initial_vel_z, initial_vel_eps, log_posterior)
energy_next = hamiltonian(final_pos_z, final_pos_eps, final_vel_z, final_vel_eps, log_posterior)
accept = metropolis_hastings_accept(energy_prev, energy_next)
#print('accept.shape', accept.shape)
new_pos_z = tf.where(accept, final_pos_z, initial_pos_z)
new_pos_eps = tf.where(accept, final_pos_eps, initial_pos_eps)
return new_pos_z, new_pos_eps, accept
def hmc_updates(
accept,
stepsize,
avg_acceptance_rate,
target_acceptance_rate=0.9,
stepsize_inc=1.02,
stepsize_dec=0.98,
stepsize_min=0.0001,
stepsize_max=0.5,
avg_acceptance_slowness=0.9):
# DEBUG
#print('*** Critical part ***')
#print('stepsize.shape', stepsize.shape)
#print('accept.shape', accept.shape)
#print('avg_acceptance_rate.shape', avg_acceptance_rate.shape)
new_stepsize_ = tf.where(avg_acceptance_rate > target_acceptance_rate, stepsize_inc*stepsize, stepsize_dec*stepsize)
#print('new_stepsize_.shape', new_stepsize_.shape)
new_stepsize = tf.maximum(tf.minimum(new_stepsize_, stepsize_max), stepsize_min)
#print('new_stepsize.shape', new_stepsize.shape)
#new_acceptance_rate = tf.add(avg_acceptance_slowness * avg_acceptance_rate, (1.0 - avg_acceptance_slowness) * tf.reduce_mean(tf.to_float(accept)))
new_acceptance_rate = tf.add(avg_acceptance_slowness * avg_acceptance_rate, (1.0 - avg_acceptance_slowness) * tf.to_float(accept))
#print('new_acceptance_rate.shape', new_acceptance_rate.shape)
#print('*** Critical part ***')
return new_stepsize, new_acceptance_rate
|
stefanwebb/tensorflow-models
|
tensorflow_models/evaluation/hmc_relaxed_bernoulli.py
|
Python
|
mit
| 7,019
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from twisted.internet import reactor, defer
def not_call_deferred(result):
print("not_call_deferred {}".format(result));
def getDummyData(inputData):
print('getDummyData called')
deferred = defer.Deferred()
    # deferred.callback will in turn invoke the callbacks that were added to the deferred
reactor.callLater(2, deferred.callback, inputData * 3)
    # if the callback passed here were not_call_deferred instead, the callbacks added via deferred.addCallback would not fire
# reactor.callLater(2, not_call_deferred, inputData * 3)
return deferred
def cbPrintData(result):
print('cbPrintData Result received: {}'.format(result))
return "deliver next" # 这个返回会作为下一个callback的参数
def cbPrintData2(result):
print('cbPrintData2 Result received: {}'.format(result))
    # has no effect as written: this new deferred is never fired
deferred = defer.Deferred()
deferred.addCallback(cbPrintData, "new callback")
return deferred
# never executes, but why? (see the note at the end of the file)
def cbPrintData3(result):
print('cbPrintData Result received: {}'.format(result))
return "deliver next next"
deferred = getDummyData(3)
print("lidong deferred : ", deferred)
deferred.addCallback(cbPrintData)
deferred.addCallback(cbPrintData2)
deferred.addCallback(cbPrintData3)
# manually set up the end of the process by asking the reactor to
# stop itself in 4 seconds time
reactor.callLater(4, reactor.stop)
# start up the Twisted reactor (event loop handler) manually
reactor.run()
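# Note (added): cbPrintData3 never runs because cbPrintData2 returns a fresh
# Deferred that is never fired. Returning a Deferred from a callback pauses
# the outer callback chain until that inner Deferred fires. A hedged fix
# sketch, inside cbPrintData2 before the return:
#
#     reactor.callLater(1, deferred.callback, "inner result")
#     return deferred
#
# Once the inner deferred fires, its result is passed on to cbPrintData3.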
|
qrsforever/workspace
|
python/learn/twisted/onecallback.py
|
Python
|
mit
| 1,472
|
# Copyright 2017 Merkulov Alexey
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Merkulov Alexey'
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from . import views
from . import login_api
from . import submit_api
from . import query_api
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'api/search$', query_api.search_api),
url(r'api/search/$', query_api.search_api),
url(r'api/query-questions$', query_api.query_questions),
url(r'api/answers/(?P<question_id>[0-9]+)$', query_api.answers),
url(r'api/answers/(?P<question_id>[0-9]+)/$', query_api.answers),
url(r'api/last-questions/(?P<list_type>[a-z]+)/(?P<start_id>[0-9]+)$',
query_api.last_questions),
    url(r'api/last-questions/(?P<list_type>[a-z]+)/(?P<start_id>[0-9]+)/$',
        query_api.last_questions),
url(r'api/sorted-questions/(?P<sort_type>[a-z]+)$',
query_api.sorted_questions),
url(r'api/sorted-questions/(?P<sort_type>[a-z]+)/$',
query_api.sorted_questions),
url(r'api/post-api$', submit_api.post_api),
url(r'api/post-api/$', submit_api.post_api),
url(r'api/logout$', login_api.logout_ajax),
url(r'api/logout/$', login_api.logout_ajax),
url(r'api/check-logined$', login_api.check_logined),
url(r'api/check-logined/$', login_api.check_logined),
url(r'api/simple-login$', login_api.simple_login),
url(r'api/simple-login/$', login_api.simple_login),
url(r'api/registration$', login_api.registration),
url(r'api/registration/$', login_api.registration),
url(r'^api/', views.unknown_api),
url(r'^', views.reactindex),
]
|
steelart/ask-navalny
|
django-backend/askp/urls.py
|
Python
|
mit
| 2,757
|
"""
Tools for generating masks.
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
__version__ = '0.1.0'
from numpy import max, iterable, squeeze, zeros, zeros_like, any
def generate_masks(input_size, output_size=1, observed=None):
"""
Generates some basic input and output masks.
If C{input_size} is an integer, the number of columns of the mask will be
that integer. If C{input_size} is a list or tuple, a mask with multiple channels
is created, which can be used with RGB images, for example.
By default, the input region will cover the upper half of the mask, also known as a
*causal neighborhood*. If any of the channels is observed, the input region in that
channel will cover a full square neighborhood around the output region.
Examples:
>>> input_mask, output_mask = generate_masks(8, 2)
>>> input_mask, output_mask = generate_masks([3, 7, 7], 1, [1, 0, 0])
@type input_size: C{int} / C{list}
@param input_size: determines the size of the input region
@type output_size: C{int}
@param output_size: determines the size of the output region
@type observed: C{list}
@param observed: can be used to indicate channels which are observed
@rtype: C{tuple}
@return: one input mask and one output mask
"""
if not iterable(input_size):
if iterable(observed):
input_size = [input_size] * len(observed)
else:
input_size = [input_size]
if observed is None:
observed = [False] * len(input_size)
if len(observed) != len(input_size):
raise ValueError("Incompatible `input_size` and `observed`.")
num_channels = len(input_size)
num_cols = max(input_size)
num_rows = num_cols if any(observed) else (num_cols + 1) // 2 + output_size // 2
input_mask = zeros([num_rows, num_cols, num_channels], dtype='bool')
output_mask = zeros_like(input_mask)
tmp1 = (num_cols + 1) // 2
tmp2 = output_size // 2
tmp3 = (output_size + 1) // 2
for k in range(num_channels):
offset = tmp1 - (input_size[k] + 1) // 2
if observed[k]:
input_mask[
offset:num_cols - offset,
offset:num_cols - offset, k] = True
else:
input_mask[offset:tmp1 + tmp2, offset:num_cols - offset, k] = True
for i in range(output_size):
input_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:, k] = False
output_mask[
tmp1 + tmp2 - i - 1,
tmp1 - tmp3:tmp1 + output_size // 2, k] = True
if input_mask.shape[2] == 1:
input_mask.resize(input_mask.shape[0], input_mask.shape[1])
output_mask.resize(output_mask.shape[0], output_mask.shape[1])
return input_mask, output_mask
|
lucastheis/cmt
|
code/cmt/python/tools/masks.py
|
Python
|
mit
| 2,635
|
import unittest
import lob
# Setting the API key
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
class BankAccountFunctions(unittest.TestCase):
def setUp(self):
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
self.addr = lob.Address.list(count=1).data[0]
def test_list_bankAccounts(self):
bankAccounts = lob.BankAccount.list()
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(bankAccounts.object, 'list')
def test_list_bankAccounts_limit(self):
bankAccounts = lob.BankAccount.list(count=2)
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(len(bankAccounts.data), 2)
def test_list_bankAccounts_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.list, count=1000)
def test_create_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.create)
def test_create_bankAccount(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address=self.addr.id,
account_address=self.addr.id,
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_lob_obj(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address=self.addr,
account_address=self.addr,
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_inline(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address= {
'name': 'Lob1',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
},
account_address= {
'name': 'Lob2',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
},
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
        self.assertEqual(bankAccount.bank_address.name, 'Lob1')
        self.assertEqual(bankAccount.account_address.name, 'Lob2')
def test_retrieve_bankAccount(self):
bankAccount = lob.BankAccount.retrieve(id=lob.BankAccount.list().data[0].id)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
def test_retrieve_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.retrieve, id='test')
def test_delete_bankAccount(self):
ba = lob.BankAccount.list().data[0].id
delBa = lob.BankAccount.delete(id=ba)
self.assertEqual(ba, delBa.id)
|
ami/lob-python
|
tests/test_bankaccount.py
|
Python
|
mit
| 3,374
|
"""ShortUrlApi API implemented using Google Cloud Endpoints."""
import appengine_config
import endpoints
from endpoints import api_config
from protorpc import remote
from api import service_impl
from api.proto_api import CreateRequest
from api.proto_api import GetRequest
from api.proto_api import RegisterRequest
from api.proto_api import Response
_AUTH_CONFIG = api_config.ApiAuth(allow_cookie_auth=True)
@endpoints.api(name='pgurin', version='v1',
description='Prediction Short URL API',
title='PGurin service',
auth=_AUTH_CONFIG, owner_name='PredictionGuru',
owner_domain='pgur.in')
class ShortUrlApi(remote.Service):
"""Class which defines pgurin API v1."""
# pylint: disable=R0201
@endpoints.method(RegisterRequest, Response,
path='register', name='pgur.register')
def register(self, request):
"""
Register device store url as well as default url in order to redirect,
        if no local application found or desktop / some other device browser.
"""
success, reason, token = service_impl.register(request)
return Response(success=success, reason=reason, token=token)
@endpoints.method(RegisterRequest, Response,
path='update', name='pgur.update')
def update(self, request):
"""
Update device store url as well as default url in order to redirect,
if no local application found or desktop / some other device browser.
"""
success, reason = service_impl.update(request)
return Response(success=success, reason=reason)
@endpoints.method(CreateRequest, Response,
path='url/create', name='pgur.create')
def url_create(self, request):
"""Create Short url from given details"""
success, reason, url_uid = service_impl.create(request)
if success:
short_url = appengine_config.WEBSITE
if request.is_claim:
short_url += 'claim/'
short_url += url_uid
return Response(
short_url=short_url, url_uid=url_uid, success=success)
return Response(success=success, reason=reason)
@endpoints.method(GetRequest, Response,
path='get', name='pgur.get')
def get(self, request):
"""Return data from given ip address or url id."""
success, reason, data, url_uid = service_impl.get(request)
return Response(
success=success, reason=reason, data=data, url_uid=url_uid)
|
PredictionGuru/pgurin
|
api/service.py
|
Python
|
mit
| 2,579
|
import psycopg2
import psycopg2.extras
import twitter
import os
def insert_tweet_into_db(timestamp, text):
    # unimplemented stub; the string below matches Twitter's created_at
    # format (PostgreSQL to_timestamp-style pattern)
    'DY Mon DD HH24:MI:SS +0000 YYYY'
def deconstruct_twitter_json(tweet_dict):
time_created = tweet_dict['created_at']
text = tweet_dict['text']
return (time_created, text)
def get_credentials():
credential_file = os.path.join(os.path.split(__file__)[0], 'secret','api.key')
with open (credential_file,'rU') as f:
consumer_key = f.readline().strip()
consumer_secret = f.readline().strip()
access_key = f.readline().strip()
access_secret = f.readline().strip()
return (consumer_key, consumer_secret, access_key, access_secret)
def realDonaldJTrumpDetails():
user_id = 'realDonaldTrump'
return user_id
def generate_api_object():
(consumer_key, consumer_secret, access_key, access_secret) = get_credentials()
api = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=access_key,
access_token_secret=access_secret)
return api
def extract_tweets_from_user(username, output_file_name):
import pickle
api = generate_api_object()
stored_tweet_objects = []
max_id = None
prev_max_id = 0
while(prev_max_id != max_id):
djt_timeline = api.GetUserTimeline(screen_name=username,
count=200,
max_id=max_id)
stored_tweet_objects.extend(djt_timeline)
prev_max_id = max_id
max_id = djt_timeline[-1].id
pickle.dump(stored_tweet_objects,open(output_file_name,'wb'))
if(__name__ == '__main__'):
import pickle
import codecs
pickle_path = os.path.join(os.getcwd(),'sentiment','DJT Twitter Status Objects.obj')
stored_djt_statuses = pickle.load(open(pickle_path,'rb'))
seen_already = set()
filtered_statuses = []
for status in stored_djt_statuses:
if(status.id not in seen_already):
filtered_statuses.append(status)
seen_already.add(status.id)
print len(filtered_statuses), len(stored_djt_statuses)
strings_to_replace = [(u'\u2026','...')]
print u'\u2026'
with codecs.open('trump_mini_corpus.txt','w',encoding='ascii') as f:
for status in filtered_statuses:
text = status.text.encode('ascii','ignore')
try:
f.write('\t'.join([str(status.id), text.replace('\n',' ')]) + '\n')
except:
print text
|
jdwinkler/trumpbot
|
py_twitter_testbed.py
|
Python
|
mit
| 2,600
|
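get_credentials above reads secret/api.key as four lines in a fixed order: consumer key, consumer secret, access token key, access token secret. A minimal sketch that writes a placeholder file in that layout (the values are fakes, not real credentials):

# writes the four-line key file layout that get_credentials expects;
# the values here are placeholders, not real credentials
lines = ['CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_TOKEN_KEY', 'ACCESS_TOKEN_SECRET']
with open('api.key', 'w') as f:
    f.write('\n'.join(lines) + '\n')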
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.contrib import admin
from filebrowser.sites import site
from mysite import views
urlpatterns = patterns("",
url(r"^$", TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r"^pending$", TemplateView.as_view(template_name="pending_approval.html")),
url(r"^register$", views.register_advertiser),
url(r"^contracts/", include('contracts.urls')),
(r'^admin/filebrowser/', include(site.urls)),
(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
(r'^admin/', include(admin.site.urls)), # admin site
url(r"^accounts/", include("account.urls")),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
zurawiki/advert-crm
|
mysite/urls.py
|
Python
|
mit
| 1,022
|
# coding: utf-8
from StringIO import StringIO
from PIL import Image
DEFAULT_FORMAT = 'PNG'
CONTENT_TYPES = {
'PNG': 'image/png',
'JPEG': 'image/jpeg',
}
EXT_NAMES = {
'PNG': 'png',
'JPEG': 'jpg',
}
def get_content_type_from_format(format):
return CONTENT_TYPES[format]
def get_ext_from_format(format):
return EXT_NAMES[format]
def get_image_format(content):
data = StringIO(content)
try:
image = Image.open(data)
    except (IOError, SyntaxError, ValueError):
        data.close()
        return None
    image_format = image.format or DEFAULT_FORMAT
    if image_format not in CONTENT_TYPES:
        image_format = DEFAULT_FORMAT
    data.close()
    return image_format
def resize_image(content, resize_to, force_fit_width=False, never_stretch=False):
    if force_fit_width:
        # when force-fit-width mode is requested, never_stretch is ignored
        never_stretch = False
    data = StringIO(content)
    try:
        image = Image.open(data)
    except (IOError, SyntaxError, ValueError):
        data.close()
        return None, None, None
image_format = image.format or DEFAULT_FORMAT
if image_format not in CONTENT_TYPES:
image_format = DEFAULT_FORMAT
size_x = image.size[0]
size_y = image.size[1]
    if never_stretch and size_x <= resize_to and size_y <= resize_to:
        # in no-stretch mode, when both dimensions are already smaller than
        # the target size, keep the original size; still re-encode at the
        # original size (to normalize EXIF data, etc.)
        pass
    else:
        # normally, resize so that the long edge matches resize_to
        if size_x >= size_y or force_fit_width:
            # width is the larger side, or force-fit-width mode is on
resize_y = size_y * resize_to / size_x
resize_x = resize_to
else:
resize_x = size_x * resize_to / size_y
resize_y = resize_to
        if resize_x > size_x:
            # enlarging
            import logging
logging.info(u'the image size is too small. stretch it. ({}, {}) -> ({}, {})'.format(size_x, size_y, resize_x, resize_y))
image = image.resize((resize_x, resize_y))
        else:
            # when shrinking, ANTIALIAS gives higher quality
            #image = image.resize((resize_x, resize_y), resample=Image.ANTIALIAS)
image.thumbnail((resize_x, resize_y), Image.ANTIALIAS)
output = StringIO()
if image_format == 'JPEG':
#image.save(output, image_format, quality=80, optimize=True, progressive=True)
image.save(output, image_format, quality=90)
else:
image.save(output, image_format)
output_buffer = output.getvalue()
output.close()
width = image.size[0]
height = image.size[1]
del image
data.close()
return output_buffer, image_format, (width, height)
def calc_size(content):
data = StringIO(content)
try:
image = Image.open(data)
except (IOError, SyntaxError, ValueError):
data.close()
return None
width = image.size[0]
height = image.size[1]
del image
data.close()
return (width, height)
|
alt-core/sheetbot
|
convert_image.py
|
Python
|
mit
| 3,337
|
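A minimal driver sketch for the resize_image helper above, assuming the module is importable as convert_image and a local sample.jpg exists; the filename and the 640-pixel long-edge target are illustrative:

# hypothetical driver for convert_image.resize_image; 'sample.jpg' and the
# 640-pixel long-edge target are illustrative values
from convert_image import get_ext_from_format, resize_image

with open('sample.jpg', 'rb') as f:
    content = f.read()

resized, fmt, size = resize_image(content, 640, never_stretch=True)
if resized is None:
    print('not a decodable image')
else:
    with open('sample_resized.' + get_ext_from_format(fmt), 'wb') as out:
        out.write(resized)
    print('resized to %dx%d as %s' % (size[0], size[1], fmt))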
from sys import stdin, stdout
for cse, n in enumerate(stdin.read().split()):
if int(n):
stdout.write('Case {}: {}\n'.format(cse+1, int(n)//2))
|
arash16/prays
|
UVA/vol-115/11597.py
|
Python
|
mit
| 151
|
import discord
from discord.ext import commands
import urllib.request
import json
import redisInterface
class xplog():
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def logxp(self, ctx, id, character, event, experience : int):
if "dungeon master" in [y.name.lower() for y in ctx.message.author.roles]:
if self.bot.db.get_val('xp_log') == '':
to_db = {}
else:
to_db = self.bot.db.from_json(self.bot.db.get_val('xp_log'))
name = id[id.find('!')+1:id.find('>')]
if name not in to_db.keys():
to_db[name] = {}
if character not in to_db[name].keys():
to_db[name][character] = {}
if 'events' not in to_db[name][character].keys():
to_db[name][character]['events'] = {}
if 'total' not in to_db[name][character].keys():
to_db[name][character]['total'] = 0
to_db[name][character]['events'][event] = experience
to_db[name][character]['total'] += experience
self.bot.db.set_val('xp_log', self.bot.db.to_json(to_db))
await self.bot.say("{} has been logged for {} XP to {}.".format(event, experience, character))
else:
await self.bot.say("You do not have Dungeon Master permissions!")
@commands.command(pass_context=True)
async def delxp(self, ctx, id, character, event):
if "dungeon master" in [y.name.lower() for y in ctx.message.author.roles]:
if self.bot.db.get_val('xp_log') == '':
to_db = {}
else:
to_db = self.bot.db.from_json(self.bot.db.get_val('xp_log'))
name = id[id.find('!')+1:id.find('>')]
if name not in to_db.keys():
return await self.bot.say("Player does not exist!")
if character not in to_db[name].keys():
return await self.bot.say("Character does not exist!")
if 'events' not in to_db[name][character].keys():
return await self.bot.say("Character has not done any events!")
if event not in to_db[name][character]['events'].keys():
return await self.bot.say("Character did not attend that event!")
to_db[name][character]['total'] -= to_db[name][character]['events'][event]
experience = to_db[name][character]['events'][event]
to_db[name][character]['events'].pop(event)
self.bot.db.set_val('xp_log', self.bot.db.to_json(to_db))
await self.bot.say("{} has been removed from {}'s memories.".format(event, experience, character))
else:
await self.bot.say("You do not have Dungeon Master permissions!")
@commands.command(pass_context=True)
async def getxp(self, ctx, character):
xp_log = self.bot.db.from_json(self.bot.db.get_val('xp_log'))
if xp_log.get(ctx.message.author.id) != None:
player = xp_log[ctx.message.author.id]
output = ""
if player.get(character) != None:
for k,v in player[character]['events'].items():
output += f"{k}: {v}\n"
output += "Total XP: {}".format(player[character]['total'])
embed = discord.Embed(title=character, description=output)
return await self.bot.say(embed=embed)
else:
pass
await self.bot.say("That character does not exist!")
@commands.command(pass_context=True)
async def DMgetxp(self, ctx, id, character):
if "dungeon master" in [y.name.lower() for y in ctx.message.author.roles]:
xp_log = self.bot.db.from_json(self.bot.db.get_val('xp_log'))
name = id[id.find('!')+1:id.find('>')]
if xp_log.get(name) != None:
player = xp_log[name]
output = ""
if player.get(character) != None:
for k,v in player[character]['events'].items():
output += f"{k}: {v}\n"
output += "Total XP: {}".format(player[character]['total'])
embed = discord.Embed(title=character, description=output)
return await self.bot.say(embed=embed)
else:
pass
await self.bot.say("That character does not exist!")
else:
await self.bot.say("You do not have Dungeon Master permissions!")
def setup(bot):
bot.add_cog(xplog(bot))
|
Eylesis/Botfriend
|
Cogs/XPlog.py
|
Python
|
mit
| 4,757
|
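For reference, the xp_log blob that logxp builds in the backing store is a nested dict keyed by player id and then character name. A small sketch of that shape, with invented ids and values:

# illustrative shape of the serialized xp_log structure; the Discord user id
# (parsed out of the <@!id> mention) and all names/values are invented
xp_log = {
    '123456789012345678': {
        'Thorin': {
            'events': {'Goblin Raid': 300, 'Dragon Hunt': 950},
            'total': 1250,  # kept in sync by logxp/delxp
        },
    },
}
assert xp_log['123456789012345678']['Thorin']['total'] == sum(
    xp_log['123456789012345678']['Thorin']['events'].values())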
# 2014.04.29
# S.Rodney
# HST Filter transmission curves: plotting and such
import numpy as np
from matplotlib.pylab import *  # noqa: F403 -- clf, subplot, plot, setp, gcf and gca are used unqualified below
import os
topdir = os.path.abspath( '.' )
try :
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST_CANDELS')
w435, f435 = np.loadtxt( 'ACS_WFC_F435W.dat', unpack=True )
w606, f606 = np.loadtxt( 'ACS_WFC_F606W.dat', unpack=True )
w625, f625 = np.loadtxt( 'ACS_WFC_F625W.dat', unpack=True )
w814, f814 = np.loadtxt( 'ACS_WFC_F814W.dat', unpack=True )
w350, f350 = np.loadtxt( 'WFC3_UVIS_F350LP.dat', unpack=True )
w606u, f606u = np.loadtxt( 'WFC3_UVIS_F606W.dat', unpack=True )
w763u, f763u = np.loadtxt( 'WFC3_UVIS_F763M.dat', unpack=True )
w845u, f845u = np.loadtxt( 'WFC3_UVIS_F845M.dat', unpack=True )
w127, f127 = np.loadtxt( 'WFC3_IR_F127M.dat', unpack=True )
w125, f125 = np.loadtxt( 'WFC3_IR_F125W.dat', unpack=True )
w160, f160 = np.loadtxt( 'WFC3_IR_F160W.dat', unpack=True )
w153, f153 = np.loadtxt( 'WFC3_IR_F153M.dat', unpack=True )
w139, f139 = np.loadtxt( 'WFC3_IR_F139M.dat', unpack=True )
w140, f140 = np.loadtxt( 'WFC3_IR_F140W.dat', unpack=True )
os.chdir( sndataroot+'/filters/Bessell90')
wB, fB = np.loadtxt( 'Bessell90_B.dat', unpack=True )
wV, fV = np.loadtxt( 'Bessell90_V.dat', unpack=True )
wR, fR = np.loadtxt( 'Bessell90_R.dat', unpack=True )
wI, fI = np.loadtxt( 'Bessell90_I.dat', unpack=True )
except KeyError :
pass
finally :
os.chdir(topdir)
def filtername2datfile( filtername, camera=None):
""" Given an abbreviated filter name, returns the name of the .dat file
containing the transmission curve.
"""
fname = filtername.upper()
    if fname.startswith('F1') : return( 'WFC3_IR_%s.dat'%fname )
    elif fname=='F350LP' :
        return( 'WFC3_UVIS_%s.dat'%fname )
    elif camera and 'UV' in camera.upper():
        return( 'WFC3_UVIS_%s.dat'%fname )
    elif camera and 'ACS' in camera.upper():
        return( 'ACS_WFC_%s.dat'%fname )
    else :
        print("Must specify a camera for filter %s."%fname)
        return(None)
def computeScaling( filt1, filt2, camera1=None, camera2=None ) :
"""determine the flux scaling factor that should be multiplied to
filt1 to match the throughput of filt2. This returns just a
single number, effectively assuming the source SED is flat across
the bandpass, so that we just need to correct for total
throughput, not for the shape of the filter.
"""
from scipy import integrate as scint
if filt1.lower().startswith('f') :
filt1 = filtername2datfile( filt1, camera=camera1 )
if filt2.lower().startswith('f') :
filt2 = filtername2datfile( filt2, camera=camera2 )
if not filt1.endswith('.dat') or not filt2.endswith('.dat') :
print("Must specify a filter name (e.g. F160W) or a .dat file.")
return( None )
# read in the transmission curves for filters 1 and 2
topdir = os.path.abspath( '.' )
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST')
w1, f1 = np.loadtxt( filt1, unpack=True )
w2, f2 = np.loadtxt( filt2, unpack=True )
os.chdir( topdir )
# integrate
int1 = scint.simps( f1, w1 )
int2 = scint.simps( f2, w2 )
# divide
return( int2 / int1 )
def computeScaling2to1( filt1, filt2, filt3,
camera1=None, camera2=None, camera3=None) :
"""Determine the flux scaling factor for matching the sum of filt1+filt2
to filt3. This returns the value that should be multiplied to
(filt1+filt2) to match the throughput of filt3. This returns just a
single number, effectively assuming the source SED is flat across
the bandpass, so that we just need to correct for total
throughput, not for the shape of the filter.
"""
from scipy import integrate as scint
if filt1.lower().startswith('f') :
filt1 = filtername2datfile( filt1, camera=camera1 )
if filt2.lower().startswith('f') :
filt2 = filtername2datfile( filt2, camera=camera2 )
if filt3.lower().startswith('f') :
filt3 = filtername2datfile( filt3, camera=camera3 )
if not (filt1.endswith('.dat') and filt2.endswith('.dat')
and filt3.endswith('.dat') ):
print("Must specify a filter name (e.g. F160W) or a .dat file.")
return( None )
# read in the transmission curves for filters
topdir = os.path.abspath( '.' )
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST')
w1, f1 = np.loadtxt( filt1, unpack=True )
w2, f2 = np.loadtxt( filt2, unpack=True )
w3, f3 = np.loadtxt( filt3, unpack=True )
os.chdir( topdir )
# integrate
int1 = scint.simps( f1, w1 )
int2 = scint.simps( f2, w2 )
int3 = scint.simps( f3, w3 )
# sum and divide
return( int3 / (int1+int2) )
def plotmedbands( z = 2, day=5 ):
from hstsntools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.dat', day=day )
w1az = w1a * (1+z)
f1az = f1a / f1a.max() / 2.
clf()
ax1 = subplot(3,1,1)
plot(w125, f125, 'b--', label='F125W')
plot(w127, f127, 'b-', label='F127M')
plot(w1az, f1az, 'r-', label='_nolegend_')
ax1.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax1.set_xlim( 9000, 20000 )
ax1.text(9500,0.2, 'SNIa\nz=%.1f\nt=%i'%(z,day), color='r',ha='left',va='bottom')
setp(ax1.get_xticklabels(), visible=False)
setp(ax1.get_yticklabels(), visible=False)
ax2 = subplot(3,1,2, sharex=ax1, sharey=ax1)
plot(w140, f140, 'g--',label='F140W')
plot(w139, f139, 'g-',label='F139M')
plot(w1az, f1az, 'r-', label='_nolegend_')
ax2.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax2.set_xlim( 9000, 20000 )
setp(ax2.get_xticklabels(), visible=False)
setp(ax2.get_yticklabels(), visible=False)
ax2.set_ylabel('Flux / Transmission (arbitrary units)')
ax3= subplot(3,1,3, sharex=ax1, sharey=ax1)
plot(w160, f160, 'm--',label='F160W')
plot(w153, f153, 'm-',label='F153M')
plot(w1az, f1az, 'r-',label='_nolegend_')
ax3.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
setp(ax3.get_yticklabels(), visible=False)
ax1.set_xlim( 9000, 20000 )
    ax1.set_xlabel('observed wavelength (Angstroms)')
fig = gcf()
fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotbroadbandz( zvals=[1,1.5,2.0], day=0 ):
""" show how broad bands cover the SED at high z"""
from hstsnpipe import tools
from tools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=day )
print("SALT2")
# w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/models/SALT2/SALT2.Guy10_UV2IR/salt2_template_0.dat', day=day )
#w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/models/SALT2/SALT2.Guy10_UV2IR/salt2_template_1.dat', day=day )
#wII, fII = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/non1a/SDSS-000018.DAT', day=0 )
#wIb, fIb = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/non1a/SDSS-000020.DAT', day=0 )
clf()
i = 0
for z in zvals:
i+=1
w1az = w1a * (1+z)
f1az = f1a / f1a.max() / 2.
#wII = wII * (1+z)
#fII = fII / fII.max() / 2.
#wIb = wIb * (1+z)
#fIb = fIb / fIb.max() / 2.
ax = subplot(3,1,i)
plot(w350, f350, 'b--', label='F350LP(W)')
plot(w125, f125, 'g--', label='F125W(J)')
plot(w160, f160, 'r--', label='F160W(H)')
plot(w1az, f1az, 'k-', label='_nolegend_')
#ax.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax.set_xlim( 3000, 20000 )
ax.text(0.98,0.95, 'z=%.1f'%(z), color='k',ha='right',va='top',transform=ax.transAxes)
setp(ax.get_yticklabels(), visible=False)
if i==1 :
top = ax.get_ylim()[1]
ax.text(16000,top, 'F160W(H)', color='r',ha='center',va='bottom')
ax.text(12500,top, 'F125W(J)', color='g',ha='center',va='bottom')
ax.text(3500,top, 'F350LP(W)', color='b',ha='left',va='bottom')
if i<3 :
setp(ax.get_xticklabels(), visible=False)
if i==2 :
ax.set_ylabel('Flux or Transmission (arbitrary units)')
if i==3 :
ax.set_xlabel('observed wavelength (Angstroms)')
fig = gcf()
fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotBVRI( ):
""" show how broad ACS bands cover the SN SED """
from hstsnpipe import tools
from tools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=0 )
clf()
f1a = f1a / f1a.max()
plot(wB, fB, 'b--', label='B')
plot(wV, fV, 'g--', label='V')
plot(wR, fR, 'r--', label='R')
plot(wI, fI, 'k--', label='I')
plot(w435, f435, 'b-', label='F435W')
plot(w606, f606, 'g-', label='F606W')
plot(w625, f625, 'r-', label='F625W')
plot(w814, f814, 'k-', label='F814W')
plot(w1a, f1a, 'k-', label='_nolegend_')
ax = gca()
ax.set_xlim( 3000, 10000 )
#setp(ax.get_yticklabels(), visible=False)
|
srodney/hstsntools
|
filters.py
|
Python
|
mit
| 9,454
|
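The scaling in computeScaling reduces to a ratio of integrated throughputs. A self-contained sketch with two synthetic top-hat bandpasses (invented curves, not real HST filters) shows the idea without needing SNDATA_ROOT:

# synthetic demonstration of the throughput-ratio scaling used above;
# the two top-hat "filters" are invented, not real transmission curves
import numpy as np
from scipy import integrate as scint

w = np.linspace(10000., 18000., 801)
f1 = np.where((w >= 12000) & (w <= 14000), 0.5, 0.0)  # fake broad filter
f2 = np.where((w >= 12500) & (w <= 13500), 0.4, 0.0)  # fake medium filter
scale = scint.simps(f2, w) / scint.simps(f1, w)
# multiplying flux through filter 1 by `scale` matches filter 2 for a flat SED
print('scale factor: %.3f' % scale)  # approximately 0.4 here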
# -*- coding: utf-8 -*-
from django import forms
from djspace.core.models import GenericChoice
from djspace.registration.choices import GRADUATE_DEGREE
from djspace.registration.choices import UNDERGRADUATE_DEGREE
from djspace.registration.models import Faculty
from djspace.registration.models import Graduate
from djspace.registration.models import GrantsOfficer
from djspace.registration.models import HighSchool
from djspace.registration.models import Professional
from djspace.registration.models import TechnicalAdvisor
from djspace.registration.models import Undergraduate
from djtools.fields import STATE_CHOICES
try:
AFFILIATES = GenericChoice.objects.filter(
tags__name__in=['WSGC Affiliates', 'College or University'],
).filter(active=True).order_by('ranking', 'name')
except Exception:
AFFILIATES = GenericChoice.objects.none()
PROGRAMS = GenericChoice.objects.filter(
tags__name__in=['Programs'],
).filter(active=True).order_by('ranking', 'name')
class HighSchoolForm(forms.ModelForm):
"""A form for high school registrants."""
class Meta:
"""Attributes about the form and options."""
model = HighSchool
exclude = ('user', 'date_created', 'date_updated', 'updated_by')
class UndergraduateForm(forms.ModelForm):
"""A form to collect undergraduate information."""
def __init__(self, *args, **kwargs):
"""Override the initialization method to set affiliate choices."""
super(UndergraduateForm, self).__init__(*args, **kwargs)
self.fields['wsgc_affiliate'].queryset = GenericChoice.objects.filter(
tags__name__in=['College or University'],
).order_by('ranking', 'name')
class Meta:
"""Attributes about the form and options."""
model = Undergraduate
exclude = ('user', 'status')
fields = [
'wsgc_affiliate',
'wsgc_affiliate_other',
'studentid',
'class_year',
'major',
'major_other',
'secondary_major_minor',
'secondary_major_minor_other',
'current_cumulative_gpa',
'gpa_in_major',
'gpa_scale',
'cumulative_college_credits',
'month_year_of_graduation',
'highschool_name',
'highschool_city',
'highschool_state',
'cv',
'cv_authorize',
]
widgets = {
'current_cumulative_gpa': forms.TextInput(
attrs={'placeholder': 'eg. 3.87'},
),
'gpa_in_major': forms.TextInput(attrs={'placeholder': 'eg. 3.87'}),
'gpa_scale': forms.TextInput(attrs={'placeholder': 'eg. 4.00'}),
'cumulative_college_credits': forms.TextInput(
attrs={'placeholder': 'eg. 86.0'},
),
'month_year_of_graduation': forms.TextInput(
attrs={'placeholder': 'eg. 05/2015'},
),
}
def clean(self):
"""Error handling for various scenarios."""
cleaned_data = self.cleaned_data
# WSGC Affiliate
wsgc_affiliate = cleaned_data.get('wsgc_affiliate')
wsgc_affiliate_other = cleaned_data.get('wsgc_affiliate_other')
        if wsgc_affiliate and wsgc_affiliate.name == 'Other' and not wsgc_affiliate_other:
self._errors['wsgc_affiliate_other'] = self.error_class(
["Required field."],
)
# majors
major = cleaned_data.get('major')
major_other = cleaned_data.get('major_other')
secondary_major_minor = cleaned_data.get('secondary_major_minor')
secondary_major_minor_other = cleaned_data.get(
'secondary_major_minor_other',
)
if major == 'Other':
if major_other == '':
self._errors['major_other'] = self.error_class(
["Required field."],
)
if secondary_major_minor == 'Other':
if secondary_major_minor_other == '':
self._errors['secondary_major_minor_other'] = self.error_class(
["Required field."],
)
return cleaned_data
class GraduateForm(forms.ModelForm):
"""A form to collect graduate information."""
def __init__(self, *args, **kwargs):
"""Override the initialization method to set affiliate choices."""
super(GraduateForm, self).__init__(*args, **kwargs)
self.fields['wsgc_affiliate'].queryset = GenericChoice.objects.filter(
tags__name__in=['College or University'],
).order_by('ranking', 'name')
cumulative_college_credits = forms.CharField(label="Total credits")
month_year_of_graduation = forms.CharField(
label="Month and year of graduation",
max_length=7,
widget=forms.TextInput(attrs={'placeholder': 'eg. 05/2015'}),
)
undergraduate_degree = forms.TypedChoiceField(
choices=UNDERGRADUATE_DEGREE,
widget=forms.RadioSelect(),
)
degree_program = forms.TypedChoiceField(
label="Graduate degree program",
choices=GRADUATE_DEGREE,
widget=forms.RadioSelect(),
)
class Meta:
"""Attributes about the form and options."""
model = Graduate
exclude = ('user', 'status')
fields = [
'major',
'major_other',
'secondary_major_minor',
'secondary_major_minor_other',
'gpa_in_major',
'gpa_scale',
'cumulative_college_credits',
'month_year_of_graduation',
'undergraduate_degree',
'wsgc_affiliate',
'wsgc_affiliate_other',
'studentid',
'degree_program',
'degree_program_other',
'concentration_area',
'graduate_gpa',
'graduate_scale',
'graduate_graduation_year',
'cv',
'cv_authorize',
]
widgets = {
# undergraduate
'gpa_in_major': forms.TextInput(attrs={'placeholder': 'eg. 3.87'}),
'gpa_scale': forms.TextInput(attrs={'placeholder': 'eg. 4.00'}),
'cumulative_college_credits': forms.TextInput(
attrs={'placeholder': 'eg. 86.0'},
),
# graduate
'graduate_gpa': forms.TextInput(attrs={'placeholder': 'eg. 3.87'}),
'graduate_scale': forms.TextInput(attrs={'placeholder': 'eg. 4.00'}),
'graduate_graduation_year': forms.TextInput(
attrs={'placeholder': 'eg. 2015'},
),
}
def clean(self):
"""Error handling for various scenarios."""
cleaned_data = self.cleaned_data
# WSGC Affiliate
wsgc_affiliate = cleaned_data.get('wsgc_affiliate')
wsgc_affiliate_other = cleaned_data.get('wsgc_affiliate_other')
        if wsgc_affiliate and wsgc_affiliate.name == 'Other' and not wsgc_affiliate_other:
self._errors['wsgc_affiliate_other'] = self.error_class(
["Required field."],
)
# majors
major = cleaned_data.get('major')
major_other = cleaned_data.get('major_other')
secondary_major_minor = cleaned_data.get('secondary_major_minor')
secondary_major_minor_other = cleaned_data.get(
'secondary_major_minor_other',
)
degree_program = cleaned_data.get('degree_program')
degree_program_other = cleaned_data.get('degree_program_other')
if major == 'Other':
if major_other == '':
self._errors['major_other'] = self.error_class(
["Required field."],
)
if secondary_major_minor == 'Other':
if secondary_major_minor_other == '':
self._errors['secondary_major_minor_other'] = self.error_class(
["Required field."],
)
if degree_program == 'Other':
if degree_program_other == '':
self._errors['degree_program_other'] = self.error_class(
["Required field."],
)
return cleaned_data
class ProfessionalForm(forms.ModelForm):
"""A form to collect professional information."""
wsgc_affiliate = forms.ModelChoiceField(
label="WSGC Affiliate",
queryset=AFFILIATES,
)
sponsoring_organization_state = forms.CharField(
required=False,
widget=forms.Select(choices=STATE_CHOICES),
)
sponsoring_organization_postal_code = forms.CharField(
label="Postal Code",
required=False,
max_length=10,
)
class Meta:
"""Attributes about the form and options."""
model = Professional
exclude = ('user', 'status')
def clean(self):
"""Error handling for various scenarios."""
cd = self.cleaned_data
wa = cd.get('wsgc_affiliate')
# sponsoring organisation data are required if wsgc affiliate
# is "Other" (id = 49)
if wa and wa.id == 49:
if not cd.get('sponsoring_organization_name'):
self._errors['sponsoring_organization_name'] = self.error_class(
["Required field"],
)
if not cd.get('sponsoring_organization_address1'):
self._errors['sponsoring_organization_address1'] = self.error_class(
["Required field"],
)
if not cd.get('sponsoring_organization_city'):
self._errors['sponsoring_organization_city'] = self.error_class(
["Required field"],
)
if not cd.get('sponsoring_organization_state'):
self._errors['sponsoring_organization_state'] = self.error_class(
["Required field"],
)
if not cd.get('sponsoring_organization_postal_code'):
self._errors['sponsoring_organization_postal_code'] = self.error_class(
["Required field"],
)
if not cd.get('sponsoring_organization_contact'):
self._errors['sponsoring_organization_contact'] = self.error_class(
["Required field"],
)
return cd
class FacultyForm(forms.ModelForm):
"""A form to collect faculty information."""
wsgc_affiliate = forms.ModelChoiceField(
label="WSGC Affiliate",
queryset=AFFILIATES,
)
class Meta:
"""Attributes about the form and options."""
model = Faculty
exclude = ('user', 'status')
def clean(self):
"""Error handling for affiliate fields."""
cleaned_data = self.cleaned_data
# WSGC Affiliate
wsgc_affiliate = cleaned_data.get('wsgc_affiliate')
wsgc_affiliate_other = cleaned_data.get('wsgc_affiliate_other')
        if wsgc_affiliate and wsgc_affiliate.name == 'Other' and not wsgc_affiliate_other:
self.add_error('wsgc_affiliate_other', "Required field.")
return cleaned_data
class GrantsOfficerForm(forms.ModelForm):
"""A form to collect grants officer information."""
wsgc_affiliate = forms.ModelChoiceField(
label="WSGC Affiliate", queryset=AFFILIATES,
)
title = forms.CharField(label="Title")
class Meta:
"""Attributes about the form and options."""
model = GrantsOfficer
exclude = ('user', 'status')
def clean(self):
"""Error handling for affiliate fields."""
cleaned_data = self.cleaned_data
# WSGC Affiliate
wsgc_affiliate = cleaned_data.get('wsgc_affiliate')
wsgc_affiliate_other = cleaned_data.get('wsgc_affiliate_other')
if wsgc_affiliate and wsgc_affiliate.name == 'Other' and not wsgc_affiliate_other:
self.add_error('wsgc_affiliate_other', "Required field.")
return cleaned_data
class TechnicalAdvisorForm(forms.ModelForm):
"""A form to collect technical advisor information."""
wsgc_affiliate = forms.ModelChoiceField(
label="WSGC Affiliate", queryset=AFFILIATES,
)
title = forms.CharField(label="Title")
programs = forms.ModelMultipleChoiceField(
label="Programs",
queryset=PROGRAMS,
help_text='Check all that apply',
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
"""Attributes about the form and options."""
model = TechnicalAdvisor
exclude = ('user', 'status')
def clean(self):
"""Error handling for affiliate fields."""
cleaned_data = self.cleaned_data
# WSGC Affiliate
wsgc_affiliate = cleaned_data.get('wsgc_affiliate')
wsgc_affiliate_other = cleaned_data.get('wsgc_affiliate_other')
if wsgc_affiliate and wsgc_affiliate.name == 'Other' and not wsgc_affiliate_other:
self.add_error('wsgc_affiliate_other', "Required field.")
return cleaned_data
|
carthage-college/django-djspace
|
djspace/registration/forms.py
|
Python
|
mit
| 12,987
|
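The clean() methods above repeat one pattern: when a choice field is set to 'Other', its companion free-text field becomes required. A hedged sketch of how that check could be factored out; OtherRequiresDetailMixin is an invented name, not part of this codebase:

# hypothetical refactoring sketch; OtherRequiresDetailMixin is an invented name
class OtherRequiresDetailMixin(object):
    """Require `<field>_other` whenever `<field>` is set to 'Other'."""

    other_pairs = ()  # e.g. (('major', 'major_other'),)

    def clean(self):
        cleaned_data = super(OtherRequiresDetailMixin, self).clean()
        for choice_field, detail_field in self.other_pairs:
            choice = cleaned_data.get(choice_field)
            # the choice may be a GenericChoice instance or a plain string
            name = getattr(choice, 'name', choice)
            if name == 'Other' and not cleaned_data.get(detail_field):
                self.add_error(detail_field, "Required field.")
        return cleaned_data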
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'My Name',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'My email.',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['pyconstant'],
'scripts': [],
'name': 'projectname'
}
setup(**config)
|
adamkal/pyconstant
|
setup.py
|
Python
|
mit
| 428
|
from crow2.irc import main
from crow2 import hook, log
from crow2.events.handlerclass import handlerclass, instancehandler
import re
# TODO: rewrite back to using re_gen; re_gen needs work first
space = r"\ +"
prefix = """
(?P<prefix>
{servername} | {nick} (?:!{user})? (?:@{host})?
)
"""
message = re.compile("""
^
(?:
:{prefix}{space}
)?
(?P<command>
    [a-zA-Z]+ | [0-9]{{3}}
)
(?P<params>
{space}.+
)
$
""".format(prefix=prefix, space=space), flags=re.VERBOSE)
params = re.compile("""
(?:
[^:]
)
""".format(), flags=re.VERBOSE)
@handlerclass(hook.connection.made)
class IRCProtocol(object):
def __init__(self, event):
pass
@instancehandler.conn.disconnect
def disconnected(self, event):
self.delete()
@instancehandler.conn.received.preparer
def line_received(self, event):
if not len(event.message):
event.cancel()
return
event.command = "derp"
@instancehandler.conn.received(name="derp")
def derp_received(self, event):
pass
@hook.connection.received.preparer
def irc_log(event):
log.msg("irc message: %r" % event.line)
|
lahwran/crow2
|
crow2/irc/protocol.py
|
Python
|
mit
| 1,223
|
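One subtlety in templates like the message regex above: str.format treats every {...} as a replacement field, so regex quantifiers have to be written with doubled braces. A small standalone check:

# demonstrates why the numeric-reply quantifier above is written {{3}}:
# str.format turns '{{3}}' into the literal regex quantifier '{3}'
import re

template = "^(?P<command>[a-zA-Z]+|[0-9]{{3}}){space}"
pattern = re.compile(template.format(space=r"\ +"))
assert pattern.match("433 ")      # numeric IRC reply
assert pattern.match("PRIVMSG ")  # word command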
#http://stackoverflow.com/questions/4662759/how-to-change-folder-icons-with-python-on-windows
import os
import ctypes
from ctypes import POINTER, Structure, c_wchar, c_int, sizeof, byref
from ctypes.wintypes import BYTE, WORD, DWORD, LPWSTR, LPSTR
##import win32api
# in case it wasn't apparent, this will only work on Windows.
HICON = c_int
LPTSTR = LPWSTR
TCHAR = c_wchar
MAX_PATH = 260
FCSM_ICONFILE = 0x00000010
FCS_FORCEWRITE = 0x00000002
SHGFI_ICONLOCATION = 0x000001000
class GUID(Structure):
_fields_ = [
('Data1', DWORD),
('Data2', WORD),
('Data3', WORD),
('Data4', BYTE * 8)]
class SHFOLDERCUSTOMSETTINGS(Structure):
_fields_ = [
('dwSize', DWORD),
('dwMask', DWORD),
('pvid', POINTER(GUID)),
('pszWebViewTemplate', LPTSTR),
('cchWebViewTemplate', DWORD),
('pszWebViewTemplateVersion', LPTSTR),
('pszInfoTip', LPTSTR),
('cchInfoTip', DWORD),
('pclsid', POINTER(GUID)),
('dwFlags', DWORD),
('pszIconFile', LPTSTR),
('cchIconFile', DWORD),
('iIconIndex', c_int),
('pszLogo', LPTSTR),
('cchLogo', DWORD)]
class SHFILEINFO(Structure):
_fields_ = [
('hIcon', HICON),
('iIcon', c_int),
('dwAttributes', DWORD),
('szDisplayName', TCHAR * MAX_PATH),
('szTypeName', TCHAR * 80)]
def seticon(folderpath, iconpath, iconindex, relative=1):
"""Set folder icon.
>>> seticon(".", "C:\\Windows\\system32\\SHELL32.dll", 10)
"""
shell32 = ctypes.windll.shell32
    # paths are already text on Python 3, so no mbcs decode is needed here
    folderpath = os.path.abspath(folderpath)
    if relative:
        iconpath = os.path.join('.', os.path.basename(iconpath))
    else:
        iconpath = os.path.abspath(iconpath)
fcs = SHFOLDERCUSTOMSETTINGS()
fcs.dwSize = sizeof(fcs)
fcs.dwMask = FCSM_ICONFILE
fcs.pszIconFile = iconpath
fcs.cchIconFile = 0
fcs.iIconIndex = iconindex
hr = shell32.SHGetSetFolderCustomSettings(byref(fcs), folderpath,
FCS_FORCEWRITE)
if hr:
raise WindowsError()#win32api.FormatMessage(hr))
sfi = SHFILEINFO()
hr = shell32.SHGetFileInfoW(folderpath, 0, byref(sfi), sizeof(sfi),
SHGFI_ICONLOCATION)
if hr == 0:
raise WindowsError()#win32api.FormatMessage(hr))
index = shell32.Shell_GetCachedImageIndexW(sfi.szDisplayName, sfi.iIcon, 0)
if index == -1:
raise WindowsError()
shell32.SHUpdateImageW(sfi.szDisplayName, sfi.iIcon, 0, index)
def seticon_unicode(folderpath, iconpath, iconindex, relative=1):
    # a variant of seticon that assumes the incoming strings are already
    # unicode; confirmed working.
"""Set folder icon.
>>> seticon(".", "C:\\Windows\\system32\\SHELL32.dll", 10)
"""
shell32 = ctypes.windll.shell32
folderpath = os.path.abspath(folderpath)
if relative:
iconpath = os.path.join('.',os.path.basename(iconpath))
else:
iconpath = os.path.abspath(iconpath)
fcs = SHFOLDERCUSTOMSETTINGS()
fcs.dwSize = sizeof(fcs)
fcs.dwMask = FCSM_ICONFILE
fcs.pszIconFile = iconpath
fcs.cchIconFile = 0
fcs.iIconIndex = iconindex
hr = shell32.SHGetSetFolderCustomSettings(byref(fcs), folderpath,
FCS_FORCEWRITE)
if hr:
raise WindowsError()#win32api.FormatMessage(hr))
sfi = SHFILEINFO()
hr = shell32.SHGetFileInfoW(folderpath, 0, byref(sfi), sizeof(sfi),
SHGFI_ICONLOCATION)
if hr == 0:
raise WindowsError()#win32api.FormatMessage(hr))
shell32.SHUpdateImageA(sfi.szDisplayName, 0, 0, 0)
## index = shell32.Shell_GetCachedImageIndexW(sfi.szDisplayName, sfi.iIcon, 0)
## if index == -1:
## raise WindowsError()
##
## shell32.SHUpdateImageW(sfi.szDisplayName, sfi.iIcon, 0, index)
if __name__=='__main__':
import sys
    if len(sys.argv)==3:
        # usage: iconchange.py <folder_path> <icon_path>
        seticon_unicode(sys.argv[1], sys.argv[2], 0)
|
NeverDecaf/Alastore
|
iconchange.py
|
Python
|
mit
| 4,145
|
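A minimal driver for the helpers above (Windows only; the icon source is illustrative):

# hypothetical usage of the module above; the icon source is illustrative
import os
from iconchange import seticon_unicode

if os.name == 'nt':
    # give the current folder the fourth icon from shell32.dll
    seticon_unicode('.', r'C:\Windows\system32\SHELL32.dll', 3, relative=0)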
"""
Support for Speedtest.net based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import re
import sys
from datetime import timedelta
from subprocess import check_output
import homeassistant.util.dt as dt_util
from homeassistant.components.sensor import DOMAIN
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
from homeassistant.util import Throttle
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms[\r\n]+'
r'Download:\s(\d+\.\d+)\sMbit/s[\r\n]+'
r'Upload:\s(\d+\.\d+)\sMbit/s[\r\n]+')
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MINUTE = 'minute'
CONF_HOUR = 'hour'
CONF_DAY = 'day'
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if the last scan was less than this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Speedtest sensor."""
data = SpeedtestData(hass, config)
dev = []
for sensor in config[CONF_MONITORED_CONDITIONS]:
if sensor not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', sensor)
else:
dev.append(SpeedtestSensor(data, sensor))
add_devices(dev)
def update(call=None):
"""Update service for manual updates."""
data.update(dt_util.now())
for sensor in dev:
sensor.update()
hass.services.register(DOMAIN, 'update_speedtest', update)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
"""Implements a speedtest.net sensor."""
def __init__(self, speedtest_data, sensor_type):
self._name = SENSOR_TYPES[sensor_type][0]
self.speedtest_client = speedtest_data
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
@property
def name(self):
"""The name of the sensor."""
return '{} {}'.format('Speedtest', self._name)
@property
def state(self):
"""Returns the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Gets the latest data and updates the states."""
data = self.speedtest_client.data
if data is not None:
if self.type == 'ping':
self._state = data['ping']
elif self.type == 'download':
self._state = data['download']
elif self.type == 'upload':
self._state = data['upload']
class SpeedtestData(object):
"""Gets the latest data from speedtest.net."""
def __init__(self, hass, config):
self.data = None
self.hass = hass
self.path = hass.config.path
track_time_change(self.hass, self.update,
minute=config.get(CONF_MINUTE, 0),
hour=config.get(CONF_HOUR, None),
day=config.get(CONF_DAY, None))
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, now):
"""Gets the latest data from speedtest.net."""
_LOGGER.info('Executing speedtest')
re_output = _SPEEDTEST_REGEX.split(
check_output([sys.executable, self.path(
'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
self.data = {'ping': round(float(re_output[1]), 2),
'download': round(float(re_output[2]), 2),
'upload': round(float(re_output[3]), 2)}
|
coteyr/home-assistant
|
homeassistant/components/sensor/speedtest.py
|
Python
|
mit
| 3,949
|
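The _SPEEDTEST_REGEX split above turns speedtest-cli's --simple output into [prefix, ping, download, upload, suffix]. A quick standalone check against a canned output string with invented values:

# standalone check of the parsing regex above; the sample output is made up
import re

_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms[\r\n]+'
                              r'Download:\s(\d+\.\d+)\sMbit/s[\r\n]+'
                              r'Upload:\s(\d+\.\d+)\sMbit/s[\r\n]+')
sample = 'Ping: 21.3 ms\nDownload: 93.5 Mbit/s\nUpload: 11.2 Mbit/s\n'
parts = _SPEEDTEST_REGEX.split(sample)
print({'ping': float(parts[1]),
       'download': float(parts[2]),
       'upload': float(parts[3])})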
from __future__ import absolute_import
from __future__ import unicode_literals
import subprocess
import sys
from xiami_downloader._compat import request
def get_downloader(name=None):
if not name:
name = {
'win32': 'urllib2'
}.get(sys.platform, 'wget')
return {
'urllib2': urllib2_downloader,
'wget': wget_downloader
}.get(name, None)
def urllib2_downloader(url, dest, headers):
req = request.Request(url)
for h in headers:
req.add_header(h, headers[h])
response = request.urlopen(req)
length = int(response.headers['Content-Length'])
downloaded = 0.0
with open(dest, 'wb') as output:
while True:
chunk = response.read(8192)
if not chunk:
break
downloaded += len(chunk)
output.write(chunk)
percent = float(downloaded) / length * 100
sys.stdout.write('\r{:5.1f}%'.format(percent))
sys.stdout.flush()
sys.stdout.write('\n')
def wget_downloader(url, dest, headers):
wget_opts = ['wget', url, '-O', dest]
for h in headers:
wget_opts.append('--header=%s:%s' % (h, headers[h]))
subprocess.check_call(wget_opts)
|
timothyqiu/xiami-downloader
|
xiami_downloader/adapters.py
|
Python
|
mit
| 1,238
|
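A short sketch of how the adapter registry above is meant to be called; the URL and headers are placeholders:

# hypothetical caller for the registry above; URL and headers are placeholders
from xiami_downloader.adapters import get_downloader

download = get_downloader()  # urllib2 on win32, wget elsewhere
if download is not None:
    download('http://example.com/song.mp3', 'song.mp3',
             {'User-Agent': 'Mozilla/5.0'})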
__author__ = 'brad'
import pygame
from resourceman import ResourceManager
from spritesheet import Spritesheet
from gameobject import GameObject
import xml.etree.ElementTree as ET
class Map(object):
def __init__(self, filename, tile_set):
"""Takes an XML world file and a tile set and creates a map from it"""
# Load the tile set
self.resource_manager = ResourceManager()
self.object_tiles = {}
self.object_properties = {}
self.tile_set = Spritesheet(tile_set) # Could eventually allow for multiple tile sets by making a list
# TODO: Make spritesheet size independent of class
self.resource_manager.add_spritesheet_strip_offsets('tile_set', self.tile_set, (1, 1), 600, 24, (16, 16), 1, 1)
# Read the XML file
tree = ET.parse(filename)
root = tree.getroot()
self.width = int(root.get("width"))
self.height = int(root.get("height"))
self.tile_width = int(root.get("tilewidth"))
self.tile_height = int(root.get("tileheight"))
# Read in the special tiles
self.special_tiles = {}
for tileset in root.findall("tileset"):
for tile in tileset.findall("tile"):
tile_properties = {}
for tile_property in tile.find("properties").findall("property"):
tile_properties[tile_property.get("name")] = tile_property.get("value")
self.special_tiles[int(tile.get("id"))] = tile_properties
# Read in the layers
self.layers = {}
for layer in root.findall("layer"):
# print(layer.get("name"))
tile_list = []
for tile in layer.find("data").findall("tile"):
# print(tile.get("gid"))
tile_list.append(int(tile.get("gid")))
self.layers[layer.get("name")] = tile_list
# Read in the collision boxes
self.object_layers = {}
for layer in root.findall("objectgroup"):
rect_list = []
rect_index = ""
for object_rect in layer.findall("object"):
try:
                    current_rect = pygame.Rect(int(object_rect.get("x")), # /self.tile_width,
int(object_rect.get("y")), # /self.tile_height,
int(object_rect.get("width")), # /self.tile_width,
int(object_rect.get("height"))) # /self.tile_height))
rect_list.append(current_rect)
rect_index = str(current_rect[0]) + " " + str(current_rect[1]) + " " + str(current_rect[2]) + " " + str(current_rect[3])
except TypeError:
print("There was a problem loading object at " + str(int(object_rect.get("x"))/self.tile_width)
+ ", " + str(int(object_rect.get("y"))/self.tile_height))
current_properties = {}
if object_rect.get("type") is not None:
current_properties["type"] = object_rect.get("type")
if object_rect.find("properties") is not None:
for object_property in object_rect.find("properties").findall("property"):
current_properties[object_property.get("name")] = object_property.get("value")
if "layer" not in current_properties.keys():
current_properties["layer"] = 0
self.object_properties[rect_index] = current_properties
self.object_layers[layer.get("name")] = rect_list
for layer_property in layer.findall("property"):
if layer_property.get("name") == "tile":
self.object_tiles[layer.get("name")] = int(layer_property.get("value"))
def get_tile_index(self, layer_name, x_tile, y_tile):
index = self.width*y_tile+x_tile
try:
return self.layers[layer_name][index]-1
except IndexError:
return 0
def build_world(self, scene, view_rect=None, current_frame=0):
# This will be deprecated shortly
# print("Starting build")
# self.clear_collisions(scene)
objects = 0
tiles = 0
if view_rect is None:
row = 0
for layer_name in self.layers.keys():
while row < self.height:
for tile in xrange(0, self.width):
current_tile = self.get_tile_index(layer_name, tile, row)
if current_tile != -1:
is_special_tile = False
for special_tile in self.special_tiles.keys():
if current_tile == special_tile:
is_special_tile = True
if is_special_tile:
scene.insert_object(GameObject(self.resource_manager.get_images('tile_set')
[current_tile], -1000, object_type=layer_name,
properties=self.special_tiles[current_tile],
tile_id=current_tile, sync=True),
(16*tile, 16*row))
else:
scene.insert_object(GameObject(self.resource_manager.get_images('tile_set')
[current_tile], -1000, object_type=layer_name,
tile_id=current_tile),
(16*tile, 16*row))
# print(str(row) + " " + str(tile))
row += 1
else:
tile_rect = (view_rect.x/self.tile_width, view_rect.y/self.tile_height,
view_rect.width/self.tile_width, view_rect.height/self.tile_height)
# print(str(view_rect[0]) + " " + str(view_rect[1]) + " " + str(view_rect[2]) + " " + str(view_rect[3]))
# Build the map tiles
for layer_name in self.layers.keys():
row = 0
while row < tile_rect[3]:
for tile in xrange(0, tile_rect[2]):
current_tile = self.get_tile_index(layer_name, tile_rect[0]+tile, tile_rect[1]+row)
if current_tile != -1:
is_special_tile = False
for special_tile in self.special_tiles.keys():
if current_tile == special_tile:
is_special_tile = True
if is_special_tile:
animated = False
frames = 0
# for object_property in self.special_tiles[current_tile].keys():
# if object_property == "animate":
# animated = True
# frames = int(self.special_tiles[current_tile][object_property])
if "animate" in self.special_tiles[current_tile]:
animated = True
frames = int(self.special_tiles[current_tile]["animate"])
if animated:
images = []
for x in xrange(current_tile, current_tile + frames):
images.append(self.resource_manager.get_images('tile_set')[x])
scene.insert_object(GameObject(images, -1000, object_type=layer_name,
properties=self.special_tiles[current_tile],
tile_id=current_tile, animate=True,
current_frame=current_frame, sync=True),
(16*(tile_rect[0]+tile), 16*(tile_rect[1]+row)))
else:
scene.insert_object(GameObject(self.resource_manager.get_images('tile_set')
[current_tile], -1000, object_type=layer_name,
properties=self.special_tiles[current_tile],
tile_id=current_tile),
(16*(tile_rect[0]+tile), 16*(tile_rect[1]+row)))
else:
# Allow it to determine whether objects already exist and just make them visible if they do
scene.insert_object(GameObject(self.resource_manager.get_images('tile_set')
[current_tile], -1000, object_type=layer_name,
tile_id=current_tile),
(16*(tile_rect[0]+tile), 16*(tile_rect[1]+row)))
tiles += 1
# print(str(row) + " " + str(tile))
row += 1
# Build the object layers
for layer_name in self.object_layers.keys():
for object_rect in self.object_layers[layer_name]:
if object_rect.colliderect(view_rect): # tile_rect):
rect_index = str(object_rect[0]) + " " + str(object_rect[1]) + " " + str(object_rect[2]) + " " + str(object_rect[3])
scene.insert_object(GameObject(collision_rect=pygame.Rect(0, 0, object_rect[2], object_rect[3]),
handle_collisions=True, object_type=layer_name, visible=False,
properties=self.object_properties[rect_index],
layer=self.object_properties[rect_index]["layer"]),
(object_rect[0], object_rect[1]))
objects += 1
# print("Added " + str(tiles) + " tiles")
# print("Added " + str(objects) + " objects")
# print("Ending build")
def clear_tiles(self, scene, view_rect, kill_all=False):
# print("Starting to clear tiles")
# handle_all_collisions = scene.handle_all_collisions
# scene.handle_all_collisions = True
# scene.update_collisions()
# scene.handle_all_collisions = handle_all_collisions
self.clear_objects(scene, view_rect)
objects = 0
for coordinate in scene.coordinate_array.keys():
for game_object in scene.coordinate_array[coordinate]:
if game_object.object_type == "Map Tiles":
object_rect = pygame.Rect(scene.check_position(game_object), (game_object.rect.width,
game_object.rect.height))
# try:
# object_rect = scene.collision_array[game_object]
# except KeyError:
# print("an object failed to clear")
if not object_rect.colliderect(view_rect):
if kill_all:
scene.remove_object(game_object)
else:
game_object.visible = False
objects += 1
# scene.update_collisions()
# print("There were " + str(objects) + " tiles")
# print("Ending clear tiles")
# @staticmethod
# def clear_collisions(scene, kill_all=False):
# # print("Starting to clear objects")
# objects = 0
# for coordinate in scene.coordinate_array.keys():
# for game_object in scene.coordinate_array[coordinate]:
# if not game_object.persistent and game_object.object_type == "Regular Collisions":
# scene.remove_object(game_object)
# objects += 1
# # print("There were " + str(objects) + " objects")
# # print("Ending clear objects")
@staticmethod
def clear_objects(scene, view_rect):
for coordinate in scene.coordinate_array.keys():
for game_object in scene.coordinate_array[coordinate]:
if not game_object.persistent and game_object.object_type != "Map Tiles":
object_rect = pygame.Rect(scene.check_position(game_object), (game_object.rect.width,
game_object.rect.height))
if not object_rect.colliderect(view_rect):
scene.remove_object(game_object)
|
branderson/PyZelda
|
src/engine/map.py
|
Python
|
mit
| 13,218
|
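For orientation, the Map constructor above expects a Tiled-style XML document. A minimal sketch of the structure its find/findall calls read, with invented ids and sizes:

# minimal Tiled-style document matching the find/findall calls above;
# all ids and sizes here are invented
import xml.etree.ElementTree as ET

sample = """<map width="2" height="1" tilewidth="16" tileheight="16">
  <tileset><tile id="5"><properties>
    <property name="animate" value="4"/>
  </properties></tile></tileset>
  <layer name="Map Tiles"><data><tile gid="1"/><tile gid="6"/></data></layer>
  <objectgroup name="Regular Collisions">
    <object x="0" y="0" width="16" height="16" type="wall"/>
  </objectgroup>
</map>"""
root = ET.fromstring(sample)
gids = [int(t.get("gid")) for t in root.find("layer").find("data").findall("tile")]
print(root.get("width"), gids)  # 2 [1, 6]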
import pytest
from django.core.urlresolvers import reverse
pytestmark = pytest.mark.django_db
def test_public_urls(client):
public_urls = [
reverse("page-home"),
"/nimda/login/",
]
for url in public_urls:
response = client.get(url)
assert response.status_code == 200
|
pythonindia/junction
|
tests/integrations/test_permissions.py
|
Python
|
mit
| 316
|
"""
This is an example showing how to call the mgd2d solver.
"""
import numpy as np
import time
from mgd2d import V_cycle
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
#analytical solution
def Uann(x,y):
return (x**3-x)*(y**3-y)
#RHS corresponding to above
def source(x,y):
return 6*x*y*(x**2+ y**2 - 2)
#input
max_cycles = 50 #maximum number of V cycles
nlevels = 8 #number of grid levels. 1 means no multigrid, 2 means one coarse grid. etc
NX = 1*2**(nlevels-1) #Nx and Ny are given as function of grid levels
NY = 1*2**(nlevels-1) #
tol = 1e-10
#the grid has one layer of ghost cells to help apply the boundary conditions
uann=np.zeros([NX+2,NY+2])#analytical solution
u =np.zeros([NX+2,NY+2])#approximation
f =np.zeros([NX+2,NY+2])#RHS
#calculate the RHS and exact solution
DX=1.0/NX
DY=1.0/NY
xc=np.linspace(0.5*DX,1-0.5*DX,NX)
yc=np.linspace(0.5*DY,1-0.5*DY,NY)
XX,YY=np.meshgrid(xc,yc,indexing='ij')
uann[1:NX+1,1:NY+1]=Uann(XX,YY)
f[1:NX+1,1:NY+1] =source(XX,YY)
print('mgd2d.py solver:')
print('NX:',NX,', NY:',NY,', tol:',tol,'levels: ',nlevels)
#start solving
tb=time.time()
##V cycle
for it in range(1,max_cycles+1):
u,res=V_cycle(NX,NY,nlevels,u,f)
rtol=np.max(np.max(np.abs(res)))
if(rtol<tol):
break
error=uann[1:NX+1,1:NY+1]-u[1:NX+1,1:NY+1]
print(' cycle: ',it,', L_inf(res.)= ',rtol,',L_inf(true error): ',np.max(np.max(np.abs(error))))
print('Elapsed time: ',time.time()-tb,' seconds')
error=uann[1:NX+1,1:NY+1]-u[1:NX+1,1:NY+1]
print('L_inf (true error): ',np.max(np.max(np.abs(error))))
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(XX, YY, u[1:NX+1,1:NY+1],cmap=cm.coolwarm,
linewidth=0, antialiased=False)
plt.show()
|
AbhilashReddyM/GeometricMultigrid
|
example_Vcycle.py
|
Python
|
mit
| 1,821
|
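The NX = 1*2**(nlevels-1) sizing above is what lets every coarsening step halve the grid exactly. A quick sanity check of the resulting level sizes:

# the finest grid must halve cleanly at every coarsening step; this mirrors
# the NX = 1*2**(nlevels-1) sizing used above
nlevels = 8
NX = 1 * 2 ** (nlevels - 1)
sizes = [NX // 2 ** k for k in range(nlevels)]
print(sizes)  # [128, 64, 32, 16, 8, 4, 2, 1]
assert all(s % 2 == 0 for s in sizes[:-1])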
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-std=c++11',
'-x', 'c++',
'-I./',
'-I../',
'-Igoogletest/googletest/include',
'-isystem', '/usr/local/include',
'-isystem', '/usr/lib/llvm-3.8/bin/../lib/clang/3.8.0/include',
'-isystem', '/usr/include/x86_64-linux-gnu',
'-isystem', '/usr/include'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
mrdunk/game_of_tides_4
|
backend/.ycm_extra_conf.py
|
Python
|
mit
| 5,364
|
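MakeRelativePathsInFlagsAbsolute above handles both the split form ('-I', 'path') and the joined form ('-Ipath'). A hedged two-line sketch of the joined-form branch, for illustration only:

# hedged sketch of the joined '-Ipath' rewriting done above; this tiny helper
# only mirrors that one branch and is not part of the file itself
import os

def absolutize_joined_include(flag, working_directory):
    path = flag[len('-I'):]
    return '-I' + os.path.join(working_directory, path)

print(absolutize_joined_include('-Igoogletest/googletest/include',
                                '/home/user/proj'))
# -I/home/user/proj/googletest/googletest/include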
from jobber.app import app
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
|
hackcyprus/jobber
|
runserver.py
|
Python
|
mit
| 93
|
#!/usr/bin/env python
# encoding: utf-8
"""
Configuration logic.
"""
import configparser
import logging
import RedditWallpaperChooser.utils
__author__ = 'aldur'
logger = logging.getLogger(__name__)
SECTION_REDDIT = "reddit"
REDDIT_SUBREDDITS = "subreddits"
REDDIT_RESULT_LIMIT = "result_limit"
REDDIT_SORTING = "sorting"
REDDIT_TIME = "time"
SECTION_WALLPAPER = "wallpaper"
WALLPAPER_SIZE = "size"
WALLPAPER_ASPECT_RATIO = "aspect_ratio"
WALLPAPER_FOLDER = "output_folder"
_default_config = {
SECTION_REDDIT: {
REDDIT_SUBREDDITS: "spaceporn, skyporn, earthporn, wallpapers, wallpaper",
REDDIT_SORTING: "hot",
REDDIT_RESULT_LIMIT: "100",
REDDIT_TIME: "month",
},
SECTION_WALLPAPER: {
WALLPAPER_SIZE: "1920x1080",
WALLPAPER_ASPECT_RATIO: "16:9",
WALLPAPER_FOLDER: "wallpapers",
},
}
parser = None
def as_dictionary():
"""
:return: The parsed configuration as a dictionary.
"""
assert parser, "Parse the configuration first."
return {
section: {
k: v for k, v in parser.items(section)
} for section in parser.sections()
}
def parse_config(config_path):
"""
Read the .ini configuration.
Store the configuration in the "config" global variable.
:param config_path: Path to configuration file.
"""
global parser
if parser is not None:
logger.debug("Configuration already loaded, skipping.")
return
parser = configparser.ConfigParser()
parser.read_dict(_default_config)
if config_path:
with open(config_path) as config_file:
logger.info("Loading configuration from: '%s'.", config_path)
parser.read_file(config_file)
else:
logger.info("No configuration path provided. Loading default values.")
def write_default_config(config_path):
"""
Helper function to store default configuration.
:param config_path: Path on which configuration will be stored.
"""
logger.info("Storing default configuration to %s.", config_path)
default_parser = configparser.ConfigParser()
default_parser.read_dict(_default_config)
with open(config_path, "w") as config_file:
default_parser.write(config_file)
def get_size():
"""
Parse the configuration for target image size.
:return: An image size.
"""
assert parser is not None
size = parser.get(SECTION_WALLPAPER, WALLPAPER_SIZE)
assert not size or "x" in size, "Malformed image size."
if not size:
return None
size = size.split("x")
assert len(size) == 2, "Malformed image size."
return RedditWallpaperChooser.utils.Size(int(size[0]), int(size[1]))
def get_ratio():
"""
Parse the configuration for target image ratio.
    :return: The required image ratio (as float).
"""
ratio = parser.get(SECTION_WALLPAPER, WALLPAPER_ASPECT_RATIO)
assert not ratio or ":" in ratio, "Malformed image ratio."
if not ratio:
return None
ratio = ratio.split(":")
assert len(ratio) == 2, "Malformed image ratio."
return round(float(ratio[0]) / float(ratio[1]), 5)
|
aldur/RedditWallpaperChooser
|
RedditWallpaperChooser/config.py
|
Python
|
mit
| 3,161
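A minimal usage sketch for the config module above, assuming the package is importable as RedditWallpaperChooser.config; passing no path makes parse_config fall back to the built-in defaults.

from RedditWallpaperChooser import config

config.parse_config(None)      # no path: only _default_config is loaded
print(config.as_dictionary())  # {'reddit': {...}, 'wallpaper': {...}}
print(config.get_size())       # utils.Size(1920, 1080) from the default "1920x1080"
print(config.get_ratio())      # 1.77778, i.e. 16:9 rounded to 5 decimals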
|
#!/usr/bin/python
#Neil A. Patel
#20150112
import zipfile
file = open("channel/90052.txt", "r")
zfile = zipfile.ZipFile('channel.zip', 'r')
line = "nothing"
while "nothing" in line:
for line in file:
if "nothing" in line:
if line[-3] == " ":
nothingNum = line[-2:]
elif line[-4] == " ":
nothingNum = line[-3:]
elif line[-5] == " ":
nothingNum = line[-4:]
else:
nothingNum = line[-5:]
            file.close()
print zfile.getinfo(nothingNum+".txt").comment
file = open(("channel/"+nothingNum+".txt"), "r")
else:
print line
print zfile.getinfo(nothingNum+".txt").comment
|
neil92/MiscScripts2
|
PythonChallengeCom/Problem6.py
|
Python
|
mit
| 631
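The script above walks a chain of zip member comments. A self-contained sketch of the zipfile feature it relies on (member name and content are illustrative): per-member comments live in the archive's central directory and are read back via ZipFile.getinfo().comment.

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('90052.txt', 'Next nothing is 94191')
    zf.getinfo('90052.txt').comment = b'*'  # comments are bytes, written on close

with zipfile.ZipFile(buf, 'r') as zf:
    print(zf.getinfo('90052.txt').comment)  # b'*'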
|
from modules.chart_module import ChartModule
import tornado.web
import logging
class LineChartModule(ChartModule):
def render(self, header, color, chart_data, chart_id="linechart"):
self.chart_id = chart_id
self.chart_data = chart_data
return self.render_string('modules/linechart.html',
header=header, color=color, chart_id=self.chart_id)
def chart_options(self):
return super(LineChartModule, self).chart_options()
def embedded_javascript(self):
options = self.chart_options()
return '''
var ctx = document.getElementById("{2}").getContext("2d");
var myLineChart = new Chart(ctx,{{
type:'line',
data:{1},
options:{0}
}});
'''.format(options, self.chart_data, self.chart_id)
|
antsankov/cufcq-new
|
modules/linechart_module.py
|
Python
|
mit
| 842
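A standalone sketch of the brace handling in embedded_javascript above: the positional placeholders {0}/{1}/{2} are filled by str.format, while the doubled braces escape to the literal braces Chart.js needs; the options and data strings here are illustrative.

options = '{"responsive": true}'
chart_data = '{"labels": [], "datasets": []}'
chart_id = 'linechart'
js = '''
var ctx = document.getElementById("{2}").getContext("2d");
var myLineChart = new Chart(ctx,{{
    type:'line',
    data:{1},
    options:{0}
}});
'''.format(options, chart_data, chart_id)
print(js)  # valid Chart.js initialisation, single braces restored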
|
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import scipy.io as io
from math import sqrt, atan, cos, sin, pi, atan2
import numpy as np
from nutils import *
cc = list('gbcmy')
def plot_opt_splitting2(e, w, W):
df = 0.01
w_span = np.arange(w,W+2.0*pi*df,2.0*pi*df)
J_span2 = np.empty((len(w_span),))
iters2 = np.empty((len(w_span),))
J_span3 = np.empty((len(w_span),))
J_span4 = np.empty((len(w_span),))
w_tick3 = np.logspace(np.log10(w), np.log10(W), num=4, endpoint=True)
w_tick4 = np.logspace(np.log10(w), np.log10(W), num=5, endpoint=True)
J_span2[0] = J(e, w_span[0], w_span[-1])
J_span3[0] = max( J(e, w_span[0], np.sqrt(w_span[0]*w_span[-1])), J(e, np.sqrt(w_span[0]*w_span[-1]), w_span[-1]) )
J_span4[0] = max( J(e, w_tick3[0], w_tick3[1]), J(e, w_tick3[1], w_tick3[2]), J(e, w_tick3[2], w_tick3[-1]) )
for i in range(1,len(w_span)-1):
J_span2[i] = max( J(e, w_span[0], w_span[i]), J(e, w_span[i], w_span[-1]) )
J_span3[i] = max( J(e, w_span[0], w_span[i]), J(e, w_span[i], np.sqrt(w_span[i]*w_span[-1])), J(e, np.sqrt(w_span[i]*w_span[-1]), w_span[-1]) )
w_tick3 = np.logspace(np.log10(w_span[i]), np.log10(W), num=4, endpoint=True)
J_span4[i] = max( J(e, w_span[0], w_span[i]), J(e, w_tick3[0], w_tick3[1]), J(e, w_tick3[1], w_tick3[2]), J(e, w_tick3[2], w_tick3[-1]) )
J_span2[-1] = J(e, w_span[0], w_span[-1])
J_span3[-1] = J(e, w_span[0], w_span[-1])
J_span4[-1] = J(e, w_span[0], w_span[-1])
#with plot.PyPlot( 'opt_splitting', figsize=(10,10)) as plt:
##plt.title('Splitting frequency range')
#plt.semilogx(w_span/(2.0*pi), J_span2)
#plt.semilogx(w_span/(2.0*pi), J_span3)
#plt.semilogx(w_span/(2.0*pi), J_span4)
#plt.xlabel('frequency (log scale)')
#plt.ylabel(r'$\mathcal{J} (\tau^\ast$)')
#plt.xlim((w/(2*pi),W/(2*pi)))
##plt.xticks( [w/(2*pi),(w+W)/(4*pi),W/(2*pi)] )
##plt.plot( np.mean([w,W])/(2.0*pi), max(J(e, w_span[0], np.mean([w,W])), J(e, np.mean([w,W]), w_span[-1])), 'x', markersize=5 )
#plt.legend([r'$n_p = 2$', r'$n_p = 3$', r'$n_p = 4$'])
return w_span, J_span2, J_span3, J_span4
def J(e, w, W, tau=False):
if not tau:
tau = opt_tau_anal(e,w,W)
r = 0.5*np.sqrt(1.0 + (tau.real/tau.imag)**2)
c1_im = tau.real/(2.0*tau.imag) - ((tau.imag+e*tau.real)*w)/((w-tau.real)**2+(e*w+tau.imag)**2)
#cN_im = tau.real/(2.0*tau.imag) - ((tau.imag+e*tau.real)*W)/((W-tau.real)**2+(e*W+tau.imag)**2)
R = np.sqrt(tau.real**2+tau.imag**2)*np.sqrt((e**2+1.0))/(2.0*abs(tau.real*e+tau.imag))
C_im = e*(tau.real**2+tau.imag**2)/(2.0*tau.imag*(tau.real*e+tau.imag))
#c1_re = 0.5 - ((1.0+e**2)*w**2+(e*tau.imag-tau.real)*w)/((w-tau.real)**2+(e*w+tau.imag)**2)
#cN_re = 0.5 - ((1.0+e**2)*W**2+(e*tau.imag-tau.real)*W)/((W-tau.real)**2+(e*W+tau.imag)**2)
#c1 = c1_re+1j*c1_im
#cN = cN_re+1j*cN_im
#print (abs(c1)-abs(cN))
return np.sqrt(r**2/(R**2-C_im**2+2.0*C_im*c1_im))
def opt_tau_anal(e,w,W):
r = sqrt(w*W*(1.0+e**2))
th = atan(-sqrt( (e**2*(W+w)**2+(W-w)**2) /(4.0*w*W) ))
#th = atan(sqrt( (e**2*(W+w)**2+(W-w)**2) /(4.0*w*W) ))
tau_anal = r*cos(th) + 1j*(r*sin(th))
#print('DEBUG -- test tau')
#print( tau_anal.real )
#print( 2.0*w*W/(w+W) )
#print( -sqrt((e**2*(w+W)**2+(W-w)**2)*w*W)/(w+W) )
#print(tau_anal.imag)
#print('-----------------')
return tau_anal
def plot_tau_im(e, w, W, imgtype='png'):
col = list('bgrcmy')
mark = list('ovs*Dh')
NOP = 100
e_span = np.linspace(0.0,1.0,NOP)
tau_im = np.empty((NOP,))
fmin = np.array([1.0, 3.0, 6.0, 9.0])
fmax = (W/(2.0*pi))*np.ones((len(fmin),))
kk = 0
my_leg = []
with plot.PyPlot( 'opt_tau_im', figsize=(10,10), imgtype=imgtype) as plt:
for f,F in zip(fmin, fmax):
w = 2.0*pi*f
W = 2.0*pi*F
for i in range(NOP):
tau = opt_tau_anal(e_span[i],w,W)
tau_im[i] = tau.imag
#plt.title('Imaginary part of '+r'$\tau^\ast$')
plt.plot(e_span, tau_im/W, color=col[kk], linewidth=2.0, marker=mark[kk], markevery=5, markersize=10)
#plt.plot(e, opt_tau_anal(e,w,W).imag/W, 'rx', markersize=10, mew=1 )
plt.xlabel('damping parameter '+r'$\epsilon$', fontsize=20)
#plt.ylabel('relative imaginary part of '+r'$\tau^\ast$')
plt.ylabel('imaginary part of '+r'$\tau^\ast$'+' (scaled)', fontsize=20)
my_leg = my_leg+['f = ['+str(round(f,1))+','+str(round(F,1))+']']
plt.legend(my_leg)
kk = kk + 1
def plot_obj_fun(e, w, W, imgtype='png'):
col = list('bgrcmy')
mark = list('ovs*Dh')
NOP = 100
e_span = np.linspace(0.0,1.0,NOP)
J_span = np.empty((NOP,))
fmin = np.array([1.0, 3.0, 6.0, 9.0])
fmax = (W/(2.0*pi))*np.ones((len(fmin),))
kk = 0
my_leg = []
with plot.PyPlot( 'obj_fun', figsize=(10,10), imgtype=imgtype) as plt:
for f,F in zip(fmin, fmax):
w = 2.0*pi*f
W = 2.0*pi*F
for i in range(NOP):
tau = opt_tau_anal(e_span[i],w,W)
J_span[i] = J(e_span[i], w, W)
#plt.title('Objective function value')
plt.plot(e_span, J_span, color=col[kk], linewidth=2.0, marker=mark[kk], markevery=5, markersize=10)
#plt.plot(e, J(e, w, W), 'rx', markersize=10, mew=1 )
plt.xlabel('damping parameter '+r'$\epsilon$', fontsize=20)
#plt.ylabel(r'$\mathcal{J} (\tau^\ast$)')
            plt.ylabel('GMRES-bound in Corollary 2.7', fontsize=20)
my_leg = my_leg+['f = ['+str(round(f,1))+','+str(round(F,1))+']']
plt.legend(my_leg)
kk = kk + 1
def plot_num_tau(om, tau_anal, imgtype='png'):
my_eps = 1e-8
#dd = -om[0].imag/om[0].real
alpha1 = om.real
beta1 = om.imag
alpha_max = np.max(alpha1)
step1 = 0.007*alpha_max
step2 = 0.007*alpha_max
alpha2 = np.arange(my_eps, alpha_max+my_eps, step1)
beta2 = np.arange(-my_eps, -alpha_max-my_eps, -step2)
cf = np.empty((len(alpha2),len(beta2)))
c_fac = np.empty((len(alpha2),len(beta2),len(alpha1)))
for i in range(len(alpha2)):
for j in range(len(beta2)):
tau = alpha2[i]+1j*beta2[j]
for k in range(len(alpha1)):
omk = alpha1[k]+1j*beta1[k]
eta = omk/(omk-tau)
c_re = ((0.0 - np.conj(tau))/(tau - np.conj(tau)) - eta).real
c_im = ((0.0 - np.conj(tau))/(tau - np.conj(tau)) - eta).imag
radius = abs((tau - 0)/(tau - np.conj(tau)))
c_fac[i,j,k] = radius/np.sqrt(c_re**2+c_im**2)
cf[i,j] = np.max(c_fac[i,j,:])
with plot.PyPlot( 'opt_tau', figsize=(10,10), imgtype=imgtype) as plt:
xx, yy = np.meshgrid(alpha2/alpha_max, beta2/alpha_max, sparse=False, indexing='ij')
plt.contourf(xx,yy,cf, 20)
plt.axis('equal')
plt.xlabel('real part (relative)', fontsize=16)
plt.ylabel('imag part (relative)', fontsize=16)
plt.xlim((0,1))
plt.ylim((0,-1))
plt.colorbar()
ind_tau = np.argmin(cf)
i_ind, j_ind = np.unravel_index(ind_tau, cf.shape)
tau_num = alpha2[i_ind]+1j*beta2[j_ind]
#plt.plot(tau_num.real/alpha_max, tau_num.imag/alpha_max, linestyle='None', markersize=15, linewidth=3.0, color='y', marker='x', mew=2)
plt.plot(tau_anal.real/alpha_max, tau_anal.imag/alpha_max, markersize=15, linewidth=3.0, color='w', marker='x', mew=2)
NOP = 1000
#th = np.linspace(0.0, atan2(tau_anal.imag,tau_anal.real), NOP)
th = np.linspace(0.0, -pi/2.0, NOP)
x_anal = abs(tau_anal/alpha_max)*np.cos(th)
y_anal = abs(tau_anal/alpha_max)*np.sin(th)
plt.plot(x_anal, y_anal, 'w--')
#plt.plot([tau_anal.real/alpha_max,tau_anal.real/alpha_max], [0.0,-1.0], 'w--' )
#plt.plot([0.0,tau_anal.real], [0.0,tau_anal.imag], 'w--' )
plt.plot(om.real/alpha_max, om.imag/alpha_max, linestyle='None', markersize=10, linewidth=3.0, color='k', marker='x', mew=1)
return tau_num
def plot_circles_on_circle(A, B, om, tau, dd, plot_spec=False, rot=False):
NOP = 100
th = np.linspace(0.0,2.0*pi,NOP)
Nom = len(om)
col = list('r')
j = -1
for k in range(1,Nom-1):
j=j+1
if (j>4):
j=0
col.append(cc[j])
col.append('r')
eta = om/(om-tau)
#dd = -om[0].imag/om[0].real
C = 0.0 + 1j*( (dd*abs(tau)**2)/(2.0*tau.imag*(tau.imag+dd*tau.real)) )
R = sqrt( abs(tau)**2*(dd**2+1.0)/(4.0*(tau.imag+dd*tau.real)**2) )
X = R*np.cos(th)+C.real
Y = R*np.sin(th)+C.imag
with plot.PyPlot( 'circles', figsize=(10,10)) as plt:
plt.plot(X, Y, 'k')
plt.plot(C.real, C.imag, 'kx', markersize=10)
for k in range(0,Nom):
ck = -np.conj(tau)/(tau-np.conj(tau)) - eta[k]
r = abs(tau/(tau-np.conj(tau)))
x = r*np.cos(th)+ck.real
y = r*np.sin(th)+ck.imag
if rot is not False:
tmp = x + 1j*y
tmp = tmp*rot[k]
ck = ck*rot[k]
plt.plot(tmp.real, tmp.imag, col[k]+'--')
plt.plot(ck.real, ck.imag, col[k]+'x', markersize=10)
else:
plt.plot(x, y, col[k]+'--')
plt.plot(ck.real, ck.imag, col[k]+'x', markersize=10)
if plot_spec:
n = A.shape[0]
I = sparse.identity(n).tocsc()
P = (A - tau*B).tocsc()
Pinv = sparse.linalg.inv(P)
vals, vecs = sparse.linalg.eigs(A.tocsc()*Pinv.tocsc()-eta[k]*I,k=n-2)
plt.plot(vals.real, vals.imag, col[k]+'x', markersize=4)
plt.axhline(linewidth=0.5, color='k')
plt.axvline(linewidth=0.5, color='k')
plt.axis('equal')
def plot_msconvergence(resvec):
Nom = resvec.shape[1]
it = resvec.shape[0]
col = list('r')
j = -1
for k in range(1,Nom-1):
j=j+1
if (j>4):
j=0
col.append(cc[j])
col.append('r')
x_as = np.linspace(0,it,it)
my_leg = []
with plot.PyPlot( 'conv_pmsgmres', figsize=(10,10)) as plt:
for k in range(Nom):
plt.semilogy(x_as, resvec[:,k]/resvec[0,k],col[k])
my_leg = my_leg+['f'+str(k)]
plt.title('Convergence of pmsGMRES')
plt.xlabel('Number of matrix-vector multiplications')
plt.ylabel('Relative residual norm')
plt.ylim((1e-8,1))
plt.legend(my_leg)
plt.grid()
def plot_meconvergence(resvec):
it = len(resvec)
x_as = np.linspace(0,it,it)
with plot.PyPlot( 'conv_megmres', figsize=(10,10)) as plt:
plt.semilogy(x_as, resvec[:]/resvec[0])
plt.title('Convergence of global GMRES')
plt.xlabel('Number of operator applications')
plt.ylabel('Relative residual norm')
plt.ylim((1e-8,1))
plt.grid()
def plot_ritzvals(H, om=False):
if om is not False:
Nom = len(om)
col = list('r')
j = -1
for k in range(1,Nom-1):
j=j+1
if (j>4):
j=0
col.append(cc[j])
col.append('r')
I = np.eye(H.shape[0])
with plot.PyPlot( 'ritz_vals', figsize=(10,10)) as plt:
if om is not False:
for k in range(Nom):
vals = np.linalg.eigvals(H - om[k]*I)
plt.plot(vals.real, vals.imag, col[k]+'x', markersize=4)
plt.axhline(linewidth=0.5, color='k')
plt.axvline(linewidth=0.5, color='k')
plt.axis('equal')
else:
vals = np.linalg.eigvals(H)
plt.plot(vals.real, vals.imag, 'bx', markersize=4)
plt.axhline(linewidth=0.5, color='k')
plt.axvline(linewidth=0.5, color='k')
plt.axis('equal')
#plt.xlim( (-2,2) )
#plt.ylim( (-1.5,2.8) )
|
ManuelMBaumann/opt_tau
|
num_exper/plot_misc.py
|
Python
|
mit
| 12,667
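A minimal sketch of the two core entry points above, assuming plot_misc and its dependencies are importable; the damping e = 0.05 and the 1-10 Hz band are illustrative values, not ones taken from the experiments.

from math import pi
from plot_misc import opt_tau_anal, J

e = 0.05
w, W = 2.0 * pi * 1.0, 2.0 * pi * 10.0  # angular frequency band
tau = opt_tau_anal(e, w, W)  # analytic optimal shift (negative imaginary part)
print(tau)
print(J(e, w, W))            # GMRES bound evaluated at the optimal tau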
|
from datetime import datetime, timedelta
import os
import tempfile
import unittest
from flask import Flask
from lark.auth.models import User, Client, Grant, Token
from lark.auth.api import lark_admin_api
from lark.auth.database import db
class ModelTests(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
self.db_fd, self.path = tempfile.mkstemp()
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///%s" % (self.path)
app.register_blueprint(lark_admin_api, url_prefix='/admin')
self.app = app
self.request_ctx = self.app.test_request_context()
self.request_ctx.push()
db.create_all()
def tearDown(self):
self.request_ctx.pop()
os.close(self.db_fd)
os.unlink(self.path)
def create_user(self, username='voidfiles', password='awesome', external_access_token='abc', remote_user_id=3):
user_data = {
'username': username,
'password': password,
'external_access_token': external_access_token,
'remote_user_id': remote_user_id,
}
user = User.create_user(**user_data)
db.session.add(user)
db.session.commit()
return user
def test_user(self):
user = self.create_user()
assert User.get_for_oauth2('voidfiles', 'awesome', {}, {}).id == user.id
def create_client(self, user, name='TextApp', description='An awesome test app', client_id='123', client_secret='abc',
is_confidential=True, default_scope=['email', 'user'],
redirect_uris=['http://example.com', 'http://example.com/2']):
client_data = {
'user': user,
'name': name,
'description': description,
'client_id': client_id,
'client_secret': client_secret,
'_default_scopes': ' '.join(default_scope),
'_redirect_uris': ' '.join(redirect_uris),
}
client = Client(**client_data)
db.session.add(client)
db.session.commit()
return client
def test_client(self):
user = self.create_user()
client = self.create_client(user)
assert client.user.id == user.id
assert client.redirect_uris == ['http://example.com', 'http://example.com/2']
assert client.default_scopes == ['email', 'user']
assert client.default_redirect_uri == 'http://example.com'
assert Client.get_for_oauth2('123').client_id == client.client_id
def test_grant(self):
user = self.create_user()
client = self.create_client(user)
        print(user)
        print(client)
data = {
'user_id': user.id,
'user': user,
'client': client,
'client_id': client.client_id,
'code': '101112',
'redirect_uri': 'http://example.com',
'_scopes': 'email user',
'expires': datetime.utcnow() + timedelta(seconds=10),
}
grant = Grant(**data)
db.session.add(grant)
db.session.commit()
grant = Grant.get_for_oauth2(client.client_id, '101112')
assert grant.user.id == user.id
assert grant.client_id == client.client_id
assert grant.scopes == ['email', 'user']
assert Grant.get_for_oauth2(client.client_id, '101112').id == grant.id
class Request(object):
scopes = ['email', 'user']
redirect_uri = 'http://example.com'
def __init__(self, user):
self.user = user
request = Request(user=user)
current_user = lambda: request.user
grant = Grant.set_for_oauth2(current_user, 'abcdef', {'code': '123'}, request)
assert grant.scopes == ['email', 'user']
assert grant.client_id == 'abcdef'
assert grant.code == '123'
def test_token(self):
user = self.create_user()
client = self.create_client(user)
token_data = {
'user': user,
'client': client,
'token_type': 'bearer',
'access_token': '123',
'refresh_token': 'abc',
'expires': datetime.utcnow() + timedelta(seconds=3600),
'_scopes': 'email user',
}
token = Token(**token_data)
db.session.add(token)
db.session.commit()
token = Token.get_for_oauth2(access_token='123')
assert token.user.id == user.id
assert token.client.client_id == client.client_id
assert token.scopes == ['email', 'user']
class Request(object):
scopes = ['email', 'user']
def __init__(self, user, client):
self.user = user
self.client = client
request = Request(user=user, client=client)
token_data = {
'expires_in': 3600,
'access_token': 'abc',
'refresh_token': '123',
'token_type': 'Bearer',
'scope': ['email', 'user'],
}
token = Token.set_for_oauth2(token_data, request)
assert token.user.id == user.id
assert token.client.client_id == client.client_id
assert token.scopes == ['email', 'user']
token = Token.get_for_oauth2(access_token='abc')
assert token.user.id == user.id
assert token.client.client_id == client.client_id
assert token.scopes == ['email', 'user']
token = Token.get_for_oauth2(refresh_token='123')
assert token.user.id == user.id
assert token.client.client_id == client.client_id
assert token.scopes == ['email', 'user']
if __name__ == '__main__':
unittest.main()
|
voidfiles/lark
|
lark/auth/tests/test_models.py
|
Python
|
mit
| 5,731
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="pie.textfont", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/pie/textfont/_family.py
|
Python
|
mit
| 593
|
import scrapy
class ResultPage(scrapy.Item):
apartments = scrapy.Field()
page = scrapy.Field()
|
pawod/gis-berlin-rents
|
is24crawler/resultpage.py
|
Python
|
mit
| 105
|
import gym
from gym.wrappers import SkipWrapper
import pickle as pick
# from gym.utils.replay_buffer import ReplayBuffer #IN CASE WE NEED IT LATER!!!
from gym.utils.json_utils import json_encode_np
from PIL import Image
import pygame
import sys
import time
import matplotlib
import matplotlib.pyplot as plt
import random
import io
import os
import numpy as np
from collections import deque
from pygame.locals import HWSURFACE, DOUBLEBUF, RESIZABLE, VIDEORESIZE
from threading import Thread
from gym import spaces
from viewer import SimpleImageViewer
try:
matplotlib.use('GTK3Agg')
except Exception:
pass
RECORD_EVERY = 1 #record every n frames (should be >= 1)
SCORE_THRESHOLD = 3500 #reasonably hard to achieve score. However the score is honestly oddly set up
HORIZ_DOWNSAMPLE = 1 # Leave at 1. Other values and you can't see some thin, but critical, parts of the environment
VERT_DOWNSAMPLE = 1 #1 or 2. I find it harder to do the laser gates when set to 2, but should theoretically be possible
SPEED = 0 # 0 or 1 at most. I find 1 difficult
FPS = 5
RECORD_FILE = './records.txt'
RECORD_FOLDER = './records/'
# FILE_EPISODE_DIVIDER = None#'\n<end_eps>----<end_eps>\n'
def downsample(state):
state = state[:195] # crop
    state = state[::VERT_DOWNSAMPLE,::HORIZ_DOWNSAMPLE] # downsample by the configured vertical/horizontal factors
return state.astype(np.uint8)
class PreproWrapper(gym.Wrapper):
def __init__(self, env, prepro, shape, high=255):
"""
Args:
env: (gym env)
prepro: (function) to apply to a state for preprocessing
shape: (list) shape of obs after prepro
            overwrite_render: (bool) if True, render is overwritten to visualize the effect of prepro
            grey_scale: (bool) if True, assume grey scale, else black and white
high: (int) max value of state after prepro
"""
super(PreproWrapper, self).__init__(env)
self.viewer = None
self.prepro = prepro
self.observation_space = spaces.Box(low=0, high=high, shape=shape)
self.high = high
def _step(self, action):
"""
        Overrides the environment's _step function to apply preprocessing
"""
obs, reward, done, info = self.env.step(action)
self.obs = self.prepro(obs)
return self.obs, reward, done, info
def _reset(self):
self.obs = self.prepro(self.env.reset())
return self.obs
def _render(self, mode='human', close=False):
"""
        Override the _render function to visualize preprocessing
"""
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
img = self.obs
if mode == 'rgb_array':
return img
elif mode == 'human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = SimpleImageViewer()
self.viewer.imshow(img)
'''
General class used to record and retrieve episodes in a numpy format. Unless immediate_flush is set to true,
the general usage of this class should follow:
for each episode:
while episode is not over, for each SARSD tuple:
Recorder.buffer_SARSD(...)
Recorder.record_eps()
... do stuff
for episode in Recorder.read_episode():
do stuff on episode
'''
class Recorder():
def __init__(self, record_file = RECORD_FILE, immediate_flush = False, score_threshold = SCORE_THRESHOLD):
self.record_file = record_file
self.SARSD_keys = ['prev_obs', 'obs', 'act', 'rew', 'done']
self.record_buffer = dict()
for key in self.SARSD_keys:
self.record_buffer[key] = []
self.imm_flush = immediate_flush
if not immediate_flush:
self.current_buffer_score = 0
self.sc_thresh = score_threshold
'''
Buffers a SARSD tuple but does NOT write to a file unless immediate flushing was set to true
'''
def buffer_SARSD(self, prev_obs, obs, action, rew, env_done, info):
obs = obs.astype(np.int8)
#print(obs.shape)
#print(100928.0/sys.getsizeof(obs), 'x improved')
#prev_obs = prev_obs.astype(np.float32) #float32 is faster on gpu, supposedly
SARSD = (prev_obs, action, rew, obs, env_done)
if(self.imm_flush):
            with open(self.record_file, 'ab') as f:
                pick.dump(SARSD, f)  # immediate flushing pickles one tuple per call; debugging use only
else:
self.current_buffer_score += rew
#self.record_buffer.append(SARSD)
self.record_buffer['prev_obs'].append(prev_obs)
self.record_buffer['obs'].append(obs)
self.record_buffer['act'].append(action)
self.record_buffer['rew'].append(rew)
self.record_buffer['done'].append(env_done)
def rec_file_path(self, rec_key):
if not os.path.exists(RECORD_FOLDER):
os.mkdir(RECORD_FOLDER)
return RECORD_FOLDER + rec_key + '_record.txt'
def get_key_from_path(self, fp):
file_name = fp.split('/')[-1]
key = file_name.split('_record.txt')[0]
return key
'''
    Record Episode
Call to actually store the buffered episode in the record file. This should be called
at the end of every episode (unless the recorder is configured to immediately flush data).
'''
def record_eps(self):
if not self.imm_flush:
if len(self.record_buffer['rew']) > 0:
print('recording from buffer...')
if self.current_buffer_score >= self.sc_thresh:
for key in self.SARSD_keys:
with open(self.rec_file_path(key), 'a') as f:
obj = np.array(self.record_buffer[key])
np.save(f, obj)
#f.write(FILE_EPISODE_DIVIDER) #TODO???
print('%s recorded' %(key))
else:
print("score too low to bother recording -- score = %i" % (self.current_buffer_score))
print('...emptying buffer')
for key in self.SARSD_keys:
del self.record_buffer[key][:]
else:
print("NOTE: Using immediate buffer flushing, do not use record pls")
return
self.current_buffer_score = 0
'''
Does not support immediate flushing. Immediate flushing should really just be used for debugging.
Returns: a generator over dicts with self.SARSD_keys as the keys, each mapping to their respective data
Usage:
for episode in Recorder.read_eps():
rewards = episode['rew']
episode_score = sum(rewards)
for t in range(len(rewards)):
            SARSD_t = [episode[key][t] for key in Recorder.SARSD_keys]
'''
def read_eps(self):
        file_names = [self.rec_file_path(key) for key in self.SARSD_keys]
        # one open read handle per SARSD component, keyed by component name
        file_d = {self.get_key_from_path(fn): io.open(fn, 'rb') for fn in file_names}
        while True:
full_eps_dict = dict()
for key in self.SARSD_keys:
try:
eps_data = np.load(file_d[key])
full_eps_dict[key] = eps_data
except IOError as e:
                    for f in file_d.values():
                        f.close()
                    return  # read is finished
yield full_eps_dict
#
# Not ours, and not 100% sure what it's doing. Copied from utils.play
def display_arr(screen, arr, video_size, transpose):
arr_min, arr_max = arr.min(), arr.max()
arr = 255.0 * (arr - arr_min) / (arr_max - arr_min)
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1) if transpose else arr)
pyg_img = pygame.transform.scale(pyg_img, video_size)
screen.blit(pyg_img, (0,0))
def record_game(env, record_file, frames_to_record = RECORD_EVERY , transpose=True, fps=FPS, zoom=None, callback=None, keys_to_action=None):
"""
For our purposes, modify frames_to_record if you want to not record every single frame. The default value of 1 records every frame.
This method was largely copied from gym.utils.play however it has some modifications to record the data
Arguments
---------
env: gym.Env
Environment to use for playing.
transpose: bool
If True the output of observation is transposed.
Defaults to true.
fps: int
Maximum number of steps of the environment to execute every second.
        Defaults to the module-level FPS value (5 in this script).
zoom: float
Make screen edge this many times bigger
callback: lambda or None
Callback if a callback is provided it will be executed after
every step. It takes the following input:
obs_t: observation before performing action
obs_tp1: observation after performing action
action: action that was executed
rew: reward that was received
done: whether the environemnt is done or not
info: debug info
keys_to_action: dict: tuple(int) -> int or None
Mapping from keys pressed to action performed.
For example if pressed 'w' and space at the same time is supposed
to trigger action number 2 then key_to_action dict would look like this:
{
# ...
                tuple(sorted((ord('w'), ord(' ')))): 2
# ...
}
If None, default key_to_action mapping for that env is used, if provided.
"""
recorder = Recorder()
obs_s = env.observation_space
assert type(obs_s) == gym.spaces.box.Box
assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])
if keys_to_action is None:
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
assert False, env.spec.id + " does not have explicit key to action mapping, " + \
"please specify one manually"
relevant_keys = set(sum(map(list, keys_to_action.keys()),[]))
if transpose:
video_size = env.observation_space.shape[1], env.observation_space.shape[0]
else:
video_size = env.observation_space.shape[0], env.observation_space.shape[1]
if zoom is not None:
video_size = int(video_size[0] * zoom), int(video_size[1] * zoom)
video_size = (video_size[0], video_size[1])
pressed_keys = []
running = True
env_done = True
screen = pygame.display.set_mode(video_size)
clock = pygame.time.Clock()
while running:
if env_done:
env_done = False
obs = env.reset()
recorder.record_eps() #Records it all at the end of the montezuma episode
else:
try:
action = keys_to_action[tuple(sorted(pressed_keys))]
prev_obs = obs
obs, rew, env_done, info = env.step(action)
if callback is not None:
callback(prev_obs, obs, action, rew, env_done, info)
time = clock.get_rawtime()
if(time % frames_to_record == 0):
recorder.buffer_SARSD(prev_obs, obs, action, rew, env_done, info)
except KeyError:
print('Don\'t push too many keys guys')
if obs is not None:
if len(obs.shape) == 2:
obs = obs[:, :, None]
if obs.shape[2] == 1:
obs = obs.repeat(3, axis=2)
display_arr(screen, obs, transpose=transpose, video_size=video_size)
# process pygame events
for event in pygame.event.get():
# test events, set key states
if event.type == pygame.KEYDOWN:
if event.key in relevant_keys:
pressed_keys.append(event.key)
elif event.key == 27:
running = False
elif event.type == pygame.KEYUP:
if event.key in relevant_keys:
pressed_keys.remove(event.key)
elif event.type == pygame.QUIT:
running = False
elif event.type == VIDEORESIZE:
video_size = event.size
screen = pygame.display.set_mode(video_size)
print(video_size)
pygame.display.flip()
clock.tick(fps)
pygame.quit()
if __name__ == '__main__':
env = gym.make('MontezumaRevenge-v0')
wrapper = SkipWrapper(SPEED) # 0 = don't skip
env = wrapper(env)
env = PreproWrapper(env, prepro=lambda x: downsample(x), shape=(105, 80, 3))
record_game(env, RECORD_FILE, zoom=4)
|
jemdwood/cs234_proj
|
record_mont.py
|
Python
|
mit
| 11,368
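A minimal replay sketch following the Recorder docstring above, assuming episodes have already been recorded to the files under RECORD_FOLDER.

rec = Recorder()
for episode in rec.read_eps():
    rewards = episode['rew']
    print('episode score:', sum(rewards))
    for t in range(len(rewards)):
        sarsd_t = [episode[key][t] for key in rec.SARSD_keys]  # one SARSD tuple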
|
import mock
from shpkpr.vault import resolve_secrets
@mock.patch("shpkpr.cli.options.hvac.Client")
def test_resolve_secrets(mock_vault_client_class):
mock_vault_data = {
'secret/my_project/my_path': {
'my_key': 'some_secret_info'
}
}
mock_rendered_template = {
'secrets': {
'MY_SECRET_USING_REL_PATH': {'source': 'my_project/my_path:my_key'},
'MY_SECRET_USING_FULL_PATH': {'source': 'secret/my_project/my_path:my_key'},
}
}
def read_vault_data(path):
secrets = mock_vault_data.get(path, None)
return dict(data=secrets) if secrets else None
mock_vault_client = mock_vault_client_class.return_value
mock_vault_client.read.side_effect = read_vault_data
result = resolve_secrets(mock_vault_client, mock_rendered_template)
assert 'MY_SECRET_USING_REL_PATH' not in result
assert result['MY_SECRET_USING_FULL_PATH'] == 'some_secret_info'
|
shopkeep/shpkpr
|
tests/test_resolve_secrets.py
|
Python
|
mit
| 961
|
from GraphWidget import GraphWidget
from motioncapture.app import scrapcap
from motioncapture.gui.GL.Shapes import *
import cv2
import numpy as np
class CalibrationGraphWidget(GraphWidget):
def __init__(self):
GraphWidget.__init__(self)
def timerEvent(self, event):
GraphWidget.timerEvent(self, event)
self.drawList = []
colors = [[255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255]]
        for camera in scrapcap.cameras:
            rotation, translation = camera.rotation, camera.translation
            if rotation is not None:
                # camera centre in world coordinates: C = -R^T * t
                rotMat = np.array(cv2.Rodrigues(rotation)[0])
                position = -np.matrix(rotMat).T * np.matrix(translation)
                rotatedPosition = np.array(cv2.Rodrigues(np.array([-90,0,0], np.float32))[0]).dot(position)
                self.drawList.append(makeDrawFunction(drawCircle, position, 1, colors.pop(0)))
|
g-rauhoeft/scrap-cap
|
motioncapture/gui/CalibrationGraphWidget.py
|
Python
|
mit
| 926
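A standalone sketch of the pose math used above: OpenCV's Rodrigues turns a rotation vector into a 3x3 matrix R, and the camera centre in world coordinates is C = -R^T t; the pose values here are made up.

import cv2
import numpy as np

rvec = np.array([[0.1], [-0.2], [0.05]], dtype=np.float32)   # illustrative rotation vector
tvec = np.array([[1.0], [2.0], [5.0]], dtype=np.float32)     # illustrative translation
R = cv2.Rodrigues(rvec)[0]   # 3x3 rotation matrix
position = -R.T @ tvec       # camera centre in world coordinates
print(position.ravel())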
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'swagger_ui.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
|
cuongnb14/swagger-ui
|
swagger_ui/users/apps.py
|
Python
|
mit
| 276
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2021 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from re import compile
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import bounceStarter, indirectStarter
from ..util import tagre
from .common import _ComicControlScraper, _WordPressScraper, _WPNaviIn
class Lackadaisy(_ParserScraper):
url = 'https://www.lackadaisy.com/comic.php'
stripUrl = url + '?comicid=%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//div[@id="content"]/img'
prevSearch = '//div[@class="prev"]/a'
nextSearch = '//div[@class="next"]/a'
help = 'Index format: n'
starter = bounceStarter
def namer(self, imageUrl, pageUrl):
# Use comic id for filename
num = pageUrl.rsplit('=', 1)[-1]
ext = imageUrl.rsplit('.', 1)[-1]
return 'lackadaisy_%s.%s' % (num, ext)
class LastResort(_WordPressScraper):
url = 'http://www.lastres0rt.com/'
stripUrl = url + 'comic/%s/'
firstStripUrl = stripUrl % 'that-sound-you-hear-is-a-shattered-stereotype'
class LazJonesAndTheMayfieldRegulators(_ParserScraper):
baseUrl = 'https://www.lazjones.com/'
url = baseUrl + 'regulators'
stripUrl = baseUrl + 'comic/%s'
firstStripUrl = stripUrl % 'chapter1_00'
imageSearch = '//img[contains(@src, "comic/pages/")]'
prevSearch = '//a[contains(text(), "Previous")]'
class LazJonesAndTheMayfieldRegulatorsSideStories(LazJonesAndTheMayfieldRegulators):
name = 'LazJonesAndTheMayfieldRegulators/SideStories'
baseUrl = 'https://www.lazjones.com/'
url = baseUrl + 'comics'
stripUrl = baseUrl + 'comic/%s'
firstStripUrl = stripUrl % 'journal01'
def getPrevUrl(self, url, data):
# Fix broken navigation links
if url == self.url and data.xpath(self.prevSearch + '/@href')[0] == self.stripUrl % 'summer00':
return self.stripUrl % 'summer21'
return super(LazJonesAndTheMayfieldRegulators, self).getPrevUrl(url, data)
class LeastICouldDo(_ParserScraper):
url = 'https://leasticoulddo.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % '20030210'
imageSearch = '//div[@id="content-comic"]//img'
prevSearch = '//a[@rel="prev"]'
latestSearch = '//a[@id="latest-comic"]'
starter = indirectStarter
help = 'Index format: yyyymmdd'
class LetsSpeakEnglish(_ComicControlScraper):
url = 'http://www.marycagle.com'
class LifeAintNoPonyFarm(_WordPressScraper):
url = ('https://web.archive.org/web/20181221154155/'
'http://sarahburrini.com/en/')
firstStripUrl = url + 'comic/my-first-webcomic/'
multipleImagesPerStrip = True
endOfLife = True
class LifeAsRendered(_ParserScraper):
# Reverse navigation doesn't work properly, so search forward instead
stripUrl = 'https://kittyredden.com/LAR/%s/'
url = stripUrl % '0100'
firstStripUrl = stripUrl % '05extra'
imageSearch = '//figure[@class="wp-block-image"]//img'
prevSearch = '//a[img[@alt="Next"]]'
textSearch = '//div[@class="entry-content"]//text()'
adult = True
endOfLife = True
nav = {
'0140': '0200',
'0272': '02ss00',
'02SS14': '0300',
'0367': '03ss00',
'03ss10': '0400',
'0408': '0409',
'0409': '0410',
'0421': '0422',
'0449': '0450',
'0458': '0460',
'0460': '04ss00',
'04ss00': '04ss01',
'04ss10': '0500',
'0500': '0501',
'0508': '0509',
'0558': '0559',
'0577': '05extra',
}
def namer(self, imageUrl, pageUrl):
# Fix inconsistent filenames
filename = imageUrl.rsplit('/', 1)[-1]
return filename.replace('ReN', 'N').replace('N01P', 'A02S')
def fetchUrls(self, url, data, urlSearch):
# Fix missing image link
if 'LAR/0403' in url and urlSearch == self.imageSearch:
return [self.stripUrl.rstrip('/') % 'A04/A04P03.png']
return super(LifeAsRendered, self).fetchUrls(url, data, urlSearch)
def getPrevUrl(self, url, data):
# Fix broken navigation links
page = url.rstrip('/').rsplit('/', 1)[-1]
if page in self.nav:
return self.stripUrl % self.nav[page]
return super(LifeAsRendered, self).getPrevUrl(url, data)
def fetchText(self, url, data, textSearch, optional):
# Save final summary text
if url == self.firstStripUrl:
url = self.stripUrl % 'the-end'
data = self.getPage(url)
return super(LifeAsRendered, self).fetchText(url, data, textSearch, optional)
return None
class LilithsWord(_ComicControlScraper):
url = 'http://www.lilithword.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'prologue-page-00'
def namer(self, imageUrl, pageUrl):
return imageUrl.rsplit('/', 1)[-1].split('-', 1)[1]
class LittleGamers(_BasicScraper):
url = 'http://www.little-gamers.com/'
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2000/12/01/99'
imageSearch = compile(tagre("img", "src", r'(http://little-gamers\.com/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(http://www\.little-gamers\.com/[^"]+)', before="comic-nav-prev-link"))
help = 'Index format: yyyy/mm/dd/name'
class LittleTales(_ParserScraper):
url = 'http://www.little-tales.com/'
stripUrl = url + 'index.php?Strip=%s'
firstStripUrl = stripUrl % '1'
url = stripUrl % '450'
imageSearch = '//img[contains(@src, "strips/")]'
prevSearch = '//a[./img[@alt="BACK"]]'
nextSearch = '//a[./img[@alt="FORWARD"]]'
starter = bounceStarter
nav = {
'517': '515',
'449': '447',
}
def namer(self, imageUrl, pageUrl):
page = pageUrl.rsplit('=', 1)[-1]
ext = imageUrl.rsplit('.', 1)[-1]
return page + '.' + ext
def getPrevUrl(self, url, data):
# Skip missing pages with broken navigation links
page = url.rsplit('=', 1)[1]
if page in self.nav:
return self.stripUrl % self.nav[page]
return super(LittleTales, self).getPrevUrl(url, data)
class LoadingArtist(_ParserScraper):
url = 'http://www.loadingartist.com/latest'
imageSearch = '//div[@class="comic"]//img'
prevSearch = "//a[contains(concat(' ', @class, ' '), ' prev ')]"
class LoFiJinks(_WPNaviIn):
baseUrl = 'https://hijinksensue.com/comic/'
url = baseUrl + 'learning-to-love-again/'
firstStripUrl = baseUrl + 'lo-fijinks-everything-i-know-anout-james-camerons-avatar-movie/'
endOfLife = True
class LookingForGroup(_ParserScraper):
url = 'https://www.lfg.co/'
stripUrl = url + 'page/%s/'
firstStripUrl = stripUrl % '1'
imageSearch = '//div[@id="comic-img"]//img'
prevSearch = '//a[@class="comic-nav-prev"]'
latestSearch = '//div[@id="feature-lfg-footer"]/a[contains(@href, "page/")]'
starter = indirectStarter
help = 'Index format: nnn'
def namer(self, imageUrl, pageUrl):
page = pageUrl.rstrip('/').rsplit('/', 1)[-1]
return page.replace('2967', '647')
|
webcomics/dosage
|
dosagelib/plugins/l.py
|
Python
|
mit
| 7,222
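For reference, a hedged sketch of what a new entry in this plugin module typically looks like, following the patterns above; the domain and XPath expressions are placeholders, not a real comic.

class LoremIpsumComic(_ParserScraper):
    url = 'https://example.com/'
    stripUrl = url + 'comic/%s/'
    firstStripUrl = stripUrl % '1'
    imageSearch = '//div[@id="comic"]//img'
    prevSearch = '//a[@rel="prev"]'
    help = 'Index format: n'

    def namer(self, imageUrl, pageUrl):
        # Name files after the page number, keeping the image extension.
        num = pageUrl.rstrip('/').rsplit('/', 1)[-1]
        ext = imageUrl.rsplit('.', 1)[-1]
        return 'loremipsum_%s.%s' % (num, ext)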
|
# -*- coding: utf-8 -*-
import six
from calendar import timegm
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse # python3
from flask import url_for, request
from . import marshal
__all__ = ['String', 'FormattedString', 'Url', 'DateTime', 'Float',
'Integer', 'Arbitrary', 'Nested', 'List', 'Raw', 'Boolean',
'Fixed', 'Price']
class MarshallingException(Exception):
'''This is an encapsulating Exception in case of marshalling error.'''
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super(MarshallingException, self).__init__(six.text_type(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, 'strip') and hasattr(obj, '__iter__')
def get_value(key, obj, default=None):
'''Helper for pulling a keyed value off various types of objects.'''
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
def to_marshallable_type(obj):
    '''Helper for converting an object to a dictionary, unless it is already
    a dictionary, an indexable object, or a simple type'''
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
return dict(obj.__dict__)
class Raw(object):
'''Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
'''
def __init__(self, default=None, attribute=None):
self.attribute = attribute
self.default = default
def format(self, value):
'''Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
'''
return value
def output(self, key, obj):
'''Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
'''
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
'''Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
'''
def __init__(self, nested, allow_null=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
super(Nested, self).__init__(**kwargs)
def output(self, key, obj):
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested)
class List(Raw):
'''
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
'''
def __init__(self, cls_or_instance, **kwargs):
super(List, self).__init__(**kwargs)
error_msg = ('The type of the list elements must be a subclass of '
'Raw')
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
return [
self.container.output(idx,
val if (isinstance(val, dict)
or (self.container.attribute
and hasattr(val, self.container.attribute)))
and not isinstance(self.container, Nested)
and not type(self.container) is Raw
else value)
for idx, val in enumerate(value)
]
def output(self, key, data):
value = get_value(key if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
'''
Marshal a value as a string. Uses ``six.text_type`` so values will
be converted to :class:`unicode` in python2 and :class:`str` in
python3.
'''
def format(self, value):
try:
return six.text_type(value)
except ValueError as ve:
raise MarshallingException(ve)
class Integer(Raw):
''' Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
'''
def __init__(self, default=0, **kwargs):
super(Integer, self).__init__(default=default, **kwargs)
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingException(ve)
class Boolean(Raw):
'''
Field for outputting a boolean value.
Empty collections such as ``''``, ``{}``, ``[]``, etc. will be converted to
``False``.
'''
def format(self, value):
return bool(value)
class FormattedString(Raw):
'''
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString('Hello {name}')
}
data = {
'name': 'Doug',
}
marshal(data, fields)
'''
def __init__(self, src_str):
'''
:param string src_str: the string to format with the other
values from the response.
'''
super(FormattedString, self).__init__()
self.src_str = six.text_type(src_str)
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingException(error)
class Url(Raw):
'''
A string representation of a Url
:param endpoint: Endpoint name. If endpoint is ``None``,
``request.endpoint`` is used instead
:type endpoint: str
:param absolute: If ``True``, ensures that the generated urls will have the
hostname included
:type absolute: bool
:param scheme: URL scheme specifier (e.g. ``http``, ``https``)
:type scheme: str
'''
def __init__(self, endpoint=None, absolute=False, scheme=None):
super(Url, self).__init__()
self.endpoint = endpoint
self.absolute = absolute
self.scheme = scheme
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
endpoint = self.endpoint if self.endpoint is not None else request.endpoint
o = urlparse(url_for(endpoint, _external=self.absolute, **data))
if self.absolute:
scheme = self.scheme if self.scheme is not None else o.scheme
return urlunparse((scheme, o.netloc, o.path, '', '', ''))
return urlunparse(('', '', o.path, '', '', ''))
except TypeError as te:
raise MarshallingException(te)
class Float(Raw):
'''
A double as IEEE-754 double precision.
ex : 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf
-inf
'''
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingException(ve)
class Arbitrary(Raw):
'''
A floating point number with an arbitrary precision
ex: 634271127864378216478362784632784678324.23432
'''
def format(self, value):
return six.text_type(MyDecimal(value))
class DateTime(Raw):
'''
Return a formatted datetime string in UTC. Supported formats are RFC 822
and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
format.
:param dt_format: ``'rfc822'`` or ``'iso8601'``
:type dt_format: str
'''
def __init__(self, dt_format='rfc822', **kwargs):
super(DateTime, self).__init__(**kwargs)
self.dt_format = dt_format
def format(self, value):
try:
if self.dt_format == 'rfc822':
return _rfc822(value)
elif self.dt_format == 'iso8601':
return _iso8601(value)
else:
raise MarshallingException(
'Unsupported date format %s' % self.dt_format
)
except AttributeError as ae:
raise MarshallingException(ae)
ZERO = MyDecimal()
class Fixed(Raw):
'''
A decimal number with a fixed precision.
'''
def __init__(self, decimals=5, **kwargs):
super(Fixed, self).__init__(**kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = MyDecimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingException('Invalid Fixed precision number.')
return six.text_type(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
'''Alias for :class:`~fields.Fixed`'''
Price = Fixed
def _rfc822(dt):
'''Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => 'Sat, 01 Jan 2011 00:00:00 -0000'
:param dt: The datetime to transform
:type dt: datetime
:return: A RFC 822 formatted date string
'''
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
'''Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => '2012-01-01T00:00:00'
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
'''
return dt.isoformat()
|
fernandojunior/anerp
|
anerp/lib/marshal/fields.py
|
Python
|
mit
| 12,980
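A small usage sketch, assuming the module is importable under the path shown above; output(key, obj) pulls the value, formats it, and falls back to the field default exactly as documented.

from anerp.lib.marshal import fields

print(fields.String().output('name', {'name': 42}))   # '42'
print(fields.Integer().output('n', {}))               # 0 (the field default)
print(fields.FormattedString('Hello {name}').output('greeting', {'name': 'Doug'}))  # 'Hello Doug'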
|
from . import TheInternetTestCase
from helium.api import start_chrome, S, get_driver, set_driver, write, click, \
Text, Link, kill_browser, wait_until
class ForgotPasswordTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/forgot_password"
def test_retrieve_password(self):
email_address = self._get_temporary_email_address()
write(email_address, into="E-mail")
click("Retrieve Password")
self.assertTrue(Text("Your e-mail's been sent!").exists())
set_driver(self.emailbox_driver)
wait_until(
self._refresh_and_check_if_exists,
timeout_secs=60, interval_secs=1
)
self.assertTrue(Text("no-reply@the-internet.herokuapp.com").exists())
kill_browser()
set_driver(self.test_case_driver)
def _get_temporary_email_address(self):
self.test_case_driver = get_driver()
start_chrome("http://temp-mail.org/")
self.emailbox_driver = get_driver()
email_address = S("#email").web_element.text
set_driver(self.test_case_driver)
return email_address
def _refresh_and_check_if_exists(self):
click("Refresh")
return Link("Forgot Password from the").exists()
|
bugfree-software/the-internet-solution-python
|
tests/test_forgot_password.py
|
Python
|
mit
| 1,125
|
#encoding:utf-8
subreddit = 'remotejs'
t_channel = '@r_remotejs'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
Fillll/reddit2telegram
|
reddit2telegram/channels/~inactive/r_remotejs/app.py
|
Python
|
mit
| 139
|
from django import template
register = template.Library()
@register.inclusion_tag('account/tags/login_tag.html')
def login_tag():
return {"test": "test"}
|
acdh-oeaw/totetiroler
|
account/templatetags/account_extras.py
|
Python
|
mit
| 161
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Base class for renderers
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
class RenderBase(object):
"""
Base class for form value renderers
"""
def __init__(self):
"""
Creates a renderer object for a simple text field
"""
super(RenderBase, self).__init__()
return
def render(self, context):
        assert False, "Render method not implemented for %s" % (type(self).__name__)
@classmethod
def encode(cls, field_value):
"""
Returns a string value as itself, for use as a textual form value
"""
return field_value
@classmethod
def decode(cls, field_value):
"""
Returns a textual form value as itself.
"""
return field_value
def decode_store(self, field_value, entityvals, property_uri):
"""
Decodes a supplied value and stores it into a field of
a supplied entity value dictionary
"""
v = self.decode(field_value)
entityvals[property_uri] = v
return v
# End.
|
gklyne/annalist
|
src/annalist_root/annalist/views/fields/render_base.py
|
Python
|
mit
| 1,335
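A hedged sketch of a concrete renderer built on the base class above; the context access is illustrative, not Annalist's real template context.

class UppercaseRenderer(RenderBase):
    """
    Render a text field value in upper case.
    """
    def render(self, context):
        # Assumes the field value is available as context["field_value"].
        return self.encode(context["field_value"]).upper()

    @classmethod
    def encode(cls, field_value):
        return str(field_value)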
|
import re
import subprocess
import DefaultModule
import locale
class RpmLint(DefaultModule.DefaultModule):
"""
Uses rpmlint utility and parses its output.
"""
_errors_to_ignore = [ ]
_errors_to_ignore_scl = [ "dir-or-file-in-opt" ]
_errors_multiplyer = { "E": 75, "W": 95 }
_log_file="rpmlint.log"
def _check_rpmlint_output(self, output):
error_types_present = { "E": {}, "W": {} }
        # the last line is the result summary; ignore it
for line in output.split("\n")[0:-1]:
# some messages contain line number
            match = re.match(r"^(?P<pkgname>[^:]+):(\d+:)?\s+(?P<errtype>[WE]):"
                             r"\s+(?P<errname>\S+)", line)
if match:
error_type = match.group("errtype")
error_message = match.group("errname")
                if self._scl_name and error_message in self._errors_to_ignore_scl:
continue
if error_message in self._errors_to_ignore:
continue
if not error_message in error_types_present[error_type]:
error_types_present[error_type][error_message] = 1
else:
error_types_present[error_type][error_message] += 1
self._log_warning(line)
score = 100
for err in ["E", "W"]:
            for _ in error_types_present[err]:
                score = score * self._errors_multiplyer[err] / 100
return score
def perform(self):
        self._touch_log()
        # accumulate across all packages so the final score is their average
        sub_score = 0
        files_checked = 0
        for package in self._packages:
try:
print("Calling %s %s" % ("rpmlint", package))
encoding = locale.getdefaultlocale()[1]
out = subprocess.check_output(["rpmlint", package]).decode(encoding)
except subprocess.CalledProcessError as e:
out = e.output
files_checked += 1
sub_score += self._check_rpmlint_output(out)
if files_checked > 0:
self._score = sub_score / files_checked
else:
self._score = 0
return {"score": self._score}
|
hhorak/rpmquality
|
rpmquality/modules/RpmLint.py
|
Python
|
mit
| 2,239
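A worked example of the scoring rule in _check_rpmlint_output above: each distinct error type multiplies the score by 75/100 and each distinct warning type by 95/100.

score = 100
for _ in range(2):   # two distinct rpmlint error types
    score = score * 75 / 100
for _ in range(1):   # one distinct rpmlint warning type
    score = score * 95 / 100
print(score)         # 53.4375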
|
from django.contrib.sitemaps import Sitemap
from scuole.regions.models import Region, RegionCohorts
class RegionCohortSitemap(Sitemap):
changefreq = 'yearly'
priority = 0.5
protocol = 'https'
limit = 1000
def items(self):
return Region.objects.all()
def location(self, obj):
from django.urls import reverse
url = reverse("cohorts:regions", kwargs={"slug": obj.slug})
return url
|
texastribune/scuole
|
scuole/cohorts/sitemaps_region.py
|
Python
|
mit
| 438
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# -----------------------------------------------------------------
# Package initialization file
# -----------------------------------------------------------------
## \package pts.evolve This is the main module of the pyevolve,
# every other module is above this namespace
#
# This package ...
#
# -----------------------------------------------------------------
__all__ = ["Consts", "Crossovers", "DBAdapters", "FunctionSlot",
"G1DBinaryString", "G1DList", "G2DBinaryString",
"G2DList", "GAllele", "GenomeBase", "GPopulation",
"SimpleGeneticAlgorithm", "GTree", "Initializators",
"Migration", "Mutators", "Network", "Scaling", "Selectors",
"Statistics", "Util"]
__version__ = '0.6'
__author__ = 'Christian S. Perone'
import constants
import sys
if sys.version_info[:2] < constants.CDefPythonRequire:
raise Exception("Python 2.5+ required, the version %s was found on your system !" % (sys.version_info[:2],))
del sys
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/evolve/__init__.py
|
Python
|
mit
| 1,376
|
'''
evaluate result
'''
from keras.models import load_model
from keras.utils import np_utils
import numpy as np
import os
import sys
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
# input sentence dimensions
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.gazetteer_length
IOB = conf.ner_IOB_decode
data = sys.argv[1]
best_epoch = sys.argv[2]
if data=="dev":
test_data = load_data.load_ner(dataset='eng.testa')
elif data == "test":
test_data = load_data.load_ner(dataset='eng.testb')
tokens = [len(x[0]) for x in test_data]
print(sum(tokens))
print('%s shape:'%data, len(test_data))
model_name = os.path.basename(__file__)[9:-3]
folder_path = './model/%s'%model_name
model_path = '%s/model_epoch_%s.h5'%(folder_path, best_epoch)
result = open('%s/predict.txt'%folder_path, 'w')
def convert(chunktags):
# convert BIOES to BIO
for p, q in enumerate(chunktags):
if q.startswith("E-"):
chunktags[p] = "I-" + q[2:]
elif q.startswith("S-"):
if p==0:
chunktags[p] = "I-" + q[2:]
elif q[2:]==chunktags[p-1][2:]:
chunktags[p] = "B-" + q[2:]
elif q[2:]!=chunktags[p-1][2:]:
chunktags[p] = "I-" + q[2:]
elif q.startswith("B-"):
if p==0:
chunktags[p] = "I-" + q[2:]
else:
if q[2:]!=chunktags[p-1][2:]:
chunktags[p] = "I-" + q[2:]
return chunktags
print('loading model...')
model = load_model(model_path)
print('loading model finished.')
for each in test_data:
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=[each], gram='tri')
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
# chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer(batch=[each])
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
prob = model.predict_on_batch([embed_index, hash_index, pos, gazetteer])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
chunktags = [IOB[j] for j in predict_label][:l]
word_pos_chunk = list(zip(*each))
# convert
word_pos_chunk = list(zip(*word_pos_chunk))
word_pos_chunk = [list(x) for x in word_pos_chunk]
# if data == "test":
# word_pos_chunk[3] = convert(word_pos_chunk[3])
word_pos_chunk = list(zip(*word_pos_chunk))
#convert
# if data == "test":
# chunktags = convert(chunktags)
# chunktags = prepare.gazetteer_lookup(each[0], chunktags, data)
for ind, chunktag in enumerate(chunktags):
result.write(' '.join(word_pos_chunk[ind])+' '+chunktag+'\n')
result.write('\n')
result.close()
print('epoch %s prediction done!' % best_epoch)
os.system('../tools/conlleval < %s/predict.txt'%folder_path)
|
danche354/Sequence-Labeling
|
ner/evaluate-senna-hash-pos-gazetteer-128-64-rmsprop5.py
|
Python
|
mit
| 3,327
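An illustrative check of the BIOES-to-BIO rewrite in convert() above (the tag sequence is made up): E- and S- collapse to I-, and a B- survives only when it follows a same-type chunk.

tags = ['B-PER', 'E-PER', 'S-PER', 'S-LOC', 'B-LOC', 'I-LOC']
print(convert(tags))
# ['I-PER', 'I-PER', 'B-PER', 'I-LOC', 'B-LOC', 'I-LOC']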
|