| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
piyanatk/sim | opstats/utils/settings.py | Python | mit | 1,196 | 0.000838
import numpy as np
from astropy.coordinates import EarthLocation, SkyCoord
__all__ = ['MWA_LOC', 'MWA_FIELD_EOR0', 'MWA_FIELD_EOR1', 'MWA_FIELD_EOR2',
'MWA_FREQ_EOR_ALL_40KHZ', 'MWA_FREQ_EOR_ALL_80KHZ',
'MWA_FREQ_EOR_HI_40KHZ', 'MWA_FREQ_EOR_HI_80KHZ',
           'MWA_FREQ_EOR_LOW_40KHZ', 'MWA_FREQ_EOR_LOW_80KHZ',
'HERA_ANT_DICT', 'F21']
F21 = 1420.405751786e6
MWA_LOC = EarthLocation(lat='-26d42m11.94986s', lon='116d40m14.93485s',
height=377.827)
MWA_FIELD_EOR0 = SkyCoord(ra='0.0h', dec='-30.0d')
MWA_FIELD_EOR1 = SkyCoord(ra='4.0h', dec='-30.0d')
MWA_FIELD_EOR2 = SkyCoord(ra='10.33h', dec='-10.0d')
MWA_FREQ_EOR_LOW_40KHZ = np.arange(138.895, 167.055, 0.04)
MWA_FREQ_EOR_HI_40KHZ = np.arange(167.055, 195.255, 0.04)
MWA_FREQ_EOR_ALL_40KHZ = np.arange(138.895, 195.255, 0.04)
MWA_FREQ_EOR_LOW_80KHZ = np.arange(138.915, 167.075, 0.08)
MWA_FREQ_EOR_HI_80KHZ = np.arange(167.075, 195.275, 0.08)
MWA_FREQ_EOR_ALL_80KHZ = np.arange(138.915, 195.275, 0.08)
HERA_ANT_DICT = {'hera19': 3, 'hera37': 4, 'hera61': 5, 'hera91': 6,
'hera127': 7, 'hera169': 8, 'hera217': 9, 'hera271': 10,
'hera331': 11}
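A minimal usage sketch for the constants above (not part of the original settings.py; the observation time is made up): assuming astropy is installed, the module's MWA_LOC and MWA_FIELD_EOR0 can drive a coordinate transform, e.g. the elevation of the EoR0 field as seen from the MWA.

from astropy.time import Time
from astropy.coordinates import AltAz

# Hypothetical illustration: transform the EoR0 field centre into the
# horizontal frame at the MWA site for an arbitrary UTC time.
obstime = Time('2015-09-21 18:00:00')
eor0_altaz = MWA_FIELD_EOR0.transform_to(AltAz(obstime=obstime, location=MWA_LOC))
print(eor0_altaz.alt.deg)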
alex/warehouse | tests/unit/i18n/test_init.py | Python | apache-2.0 | 1,717 | 0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import i18n
def test_sets_locale(monkeypatch):
locale_obj = pretend.stub()
    locale_cls = pretend.stub(
parse=pretend.call_recorder(lambda l: locale_obj),
)
    monkeypatch.setattr(i18n, "Locale", locale_cls)
request = pretend.stub(locale_name=pretend.stub())
assert i18n._locale(request) is locale_obj
assert locale_cls.parse.calls == [pretend.call(request.locale_name)]
def test_includeme():
config_settings = {}
config = pretend.stub(
add_request_method=pretend.call_recorder(lambda f, name, reify: None),
get_settings=lambda: config_settings,
)
i18n.includeme(config)
assert config.add_request_method.calls == [
pretend.call(i18n._locale, name="locale", reify=True),
]
assert config_settings == {
"jinja2.filters": {
"format_date": "warehouse.i18n.filters:format_date",
"format_datetime": "warehouse.i18n.filters:format_datetime",
"format_rfc822_datetime":
"warehouse.i18n.filters:format_rfc822_datetime",
"format_number": "warehouse.i18n.filters:format_number",
},
}
splice/splice-server | src/splice/common/rhic_serve_client.py | Python | gpl-2.0 | 3,508 | 0.005703
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
# Responsible for making a remote call to rhic_serve to fetch data for RHIC to Product mapping
#
import gzip
import httplib
import json
import logging
import time
import urllib
import StringIO
from django.conf import settings
from splice.common import config
from splice.common.connect import BaseConnection
from splice.common.exceptions import RequestException
_LOG = logging.getLogger(__name__)
def get_connection(host, port, cert, key, accept_gzip=False):
# Note: this method will be mocked out in unit tests
return BaseConnection(host, port, handler="", https=True, cert_file=cert, key_file=key, accept_gzip=accept_gzip)
def _form_url(url, last_sync=None, offset=None, limit=None):
query_params = {}
if last_sync:
query_params["modified_date__gt"] = last_sync,
if offset is not None:
query_params["offset"] = offset
if limit is not None:
query_params["limit"] = limit
if query_params:
data = urllib.urlencode(query_params, True)
url = url +"?" + data
return url
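# Illustrative example (added commentary, not in the original module; the host
# and values are made up): _form_url("https://rhic.example.com/api/v1/rhics/",
# offset=0, limit=100) returns the URL with "?offset=0&limit=100" appended
# (parameter order depends on dict ordering under Python 2).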
def get_single_rhic(host, port, url, uuid):
cfg = config.get_rhic_serve_config_info()
url = url + uuid + "/"
try:
conn = get_connection(host, port, cfg["client_cert"], cfg["client_key"])
return conn.GET(url)
except Exception, e:
_LOG.exception("Caught exception from 'get_single_rhic' with config info: %s" % (cfg))
raise
def get_all_rhics(host, port, url, last_sync=None, offset=None, limit=None, accept_gzip=True):
cfg = config.get_rhic_serve_config_info()
try:
conn = get_connection(host, port, cfg["client_cert"], cfg["client_key"], accept_gzip=accept_gzip)
url_with_params = _form_url(url, last_sync, offset, limit)
status, data = conn.GET(url_with_params)
if status == 200:
return data["objects"], data["meta"]
raise RequestException(status, data)
except Exception, e:
_LOG.exception("Caught exception from 'get_all_rhics' with config info: %s" % (cfg))
raise
if __name__ == "__main__":
from datetime import timedelta
from datetime import datetime
from dateutil.tz import tzutc
last_sync = datetime.now(tzutc()) - timedelta(days=30)
config.init(settings.SPLICE_CONFIG_FILE)
cfg = config.get_rhic_serve_config_info()
data, meta = get_all_rhics(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"],
offset=0, limit=1000,
last_sync=last_sync, accept_gzip=True)
print "--- Test Sync all RHICs ---"
print data
if len(data) > 0:
uuid = data[0]["uuid"]
print "\n---Test A Single RHIC ---\n"
print get_single_rhic(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"], uuid=uuid)
print "\n -- Test an unknown RHIC ---\n"
uuid = "1a1aa1aa-f6f4-45be-9d86-deb97a79d181"
print get_single_rhic(host=cfg["host"], port=cfg["port"], url=cfg["rhics_url"], uuid=uuid)
xkmato/tracpro | tracpro/charts/filters.py | Python | bsd-3-clause | 5,748 | 0.000696
from __future__ import unicode_literals
import copy
from dateutil.relativedelta import relativedelta
import six
from dash.utils import get_month_range
from django import forms
from django.forms.forms import DeclarativeFieldsMetaclass
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import fields as filter_fields
from . import utils
class FilterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.org = kwargs.pop('org')
super(FilterForm, self).__init__(*args, **kwargs)
# Create a shallow copy of the data to ensure that it is
# mutable. Some filters need the ability to overwrite the
# data that was passed in.
if self.data is not None:
self.data = copy.copy(self.data)
class Filter(six.with_metaclass(DeclarativeFieldsMetaclass, object)):
# The metaclass is what does the work to set up fields
# that are declared as attributes of the class.
pass
class DateRangeFilter(Filter):
DATE_WINDOW_CHOICES = (
('', ''),
('month', _("Current month")),
('30-days', _("Last 30 days")),
('60-days', _("Last 60 days")),
('90-days', _("Last 90 days")),
('6-months', _("Last 6 months")),
('12-months', _("Last 12 months")),
('custom', _("Custom range...")),
)
date_range = forms.ChoiceField(
label=_("Date range"),
choices=DATE_WINDOW_CHOICES)
start_date = filter_fields.FilterDateField(
label=_("Start date"),
required=False)
end_date = filter_fields.FilterDateField(
label=_("End date"),
required=False)
def clean(self):
self.cleaned_data = super(DateRangeFilter, self).clean()
window = self.cleaned_data.get('date_range')
if window == 'custom':
# Only apply additional checks if data did not have errors.
if 'start_date' not in self.errors and 'end_date' not in self.errors:
start_date = self.cleaned_data.get('start_date')
end_date = self.cleaned_data.get('end_date')
# Require at least one date filter.
if not start_date and not end_date:
self.add_error(
forms.ALL_FIELDS,
_("Please choose a start date or an end date."))
# Ensure date filter order makes sense.
elif (start_date and end_date) and start_date > end_date:
self.add_error(
'end_date',
_("End date must be after start date."))
# Set default values for start date and end date.
else:
self.cleaned_data.setdefault('start_date', None)
self.cleaned_data.setdefault('end_date', None)
self.data.setdefault('start_date', None)
self.data.setdefault('end_date', None)
else:
# Throw out user-submitted dates.
self.cleaned_data.pop('start_date', None)
self.cleaned_data.pop('end_date', None)
self.data.pop('start_date', None)
self.data.pop('end_date', None)
self._errors.pop('start_date', None)
self._errors.pop('end_date', None)
# Calculate the correct date window.
if window:
if window == 'month':
                # get_month_range() returns a tuple with datetimes representing
# midnight of the first day of the current month, and
# midnight of the first day of the following month.
start_date, end_date = get_month_range()
# Show the user the last day of the month,
# e.g., show June 1 to June 30 rather than June 1 to July 1.
end_date = end_date - relativedelta(days=1)
else:
number, unit = window.split('-') # e.g., 6-months
                end_date = utils.midnight(timezone.now())
start_date = end_date - relativedelta(**{unit: int(number)})
self.cleaned_data['start_date'] = start_date
self.cleaned_data['end_date'] = end_date
self.data['start_date'] = start_date
self.data['end_date'] = end_date
        # Pad the end_date by one day so that results for all times during
# the end_date are accounted for in the query.
end_date = self.cleaned_data.get('end_date')
if end_date is not None:
self.cleaned_data['end_date'] = end_date + relativedelta(days=1)
return self.cleaned_data
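# Worked example (added commentary, not part of the original module; the date is
# made up): for a submission with date_range='6-months' cleaned on 2015-08-15,
# the branch above sets end_date = utils.midnight(timezone.now()) (midnight of
# 2015-08-15) and start_date = end_date - relativedelta(months=6) (midnight of
# 2015-02-15); the final step then pads the reported end_date by one day so the
# whole last day is included in queries.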
class DataFieldFilter(Filter):
def __init__(self, *args, **kwargs):
super(DataFieldFilter, self).__init__(*args, **kwargs)
self.contact_fields = []
for data_field in self.org.datafield_set.visible():
field_name = 'contact_{}'.format(data_field.key)
self.contact_fields.append((field_name, data_field))
self.fields[field_name] = forms.CharField(
label='Contact: {}'.format(data_field.display_name),
required=False)
def filter_contacts(self, queryset=None):
"""Filter queryset to match all contact field search input."""
contacts = queryset if queryset is not None else self.org.contacts.all()
for name, data_field in self.contact_fields:
value = self.cleaned_data.get(name)
if value:
contacts = contacts.filter(
contactfield__field=data_field,
contactfield__value__icontains=value)
return contacts
ericmjl/bokeh | bokeh/io/output.py | Python | bsd-3-clause | 4,608 | 0.006293
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .notebook import run_notebook_hook
from .state import curstate
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'output_file',
'output_notebook',
'reset_output',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def output_file(filename, title="Bokeh Plot", mode=None, root_dir=None):
'''Configure the default output state to generate output saved
to a file when :func:`show` is called.
Does not change the current ``Document`` from ``curdoc()``. File and notebook
output may be active at the same time, so e.g., this does not clear the
effects of ``output_notebook()``.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked.
'''
curstate().output_file(
filename,
title=title,
mode=mode,
root_dir=root_dir
)
def output_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):
''' Configure the default output state to generate output in notebook cells
when :func:`show` is called. Note that, :func:`show` may be called multiple
times in a single cell to display multiple objects in the output cell. The
objects will be displayed in order.
Args:
resources (Resource, optional) :
How and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
load_timeout (int, optional) :
Timeout in milliseconds when plots assume load timed out (default: 5000)
notebook_type (string, optional):
Notebook type (default: jupyter)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
# verify notebook_type first in curstate().output_notebook
curstate().output_notebook(notebook_type)
run_notebook_hook(notebook_type, 'load', resources, verbose, hide_banner, load_timeout)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
curstate().reset()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
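A brief usage sketch for the API above (illustrative, not part of bokeh/io/output.py; the file name and data are made up):

from bokeh.io import output_file, reset_output
from bokeh.plotting import figure, show

output_file("demo.html", title="Demo", mode="cdn")  # subsequent show()/save() calls write here
p = figure()
p.line([1, 2, 3], [4, 5, 6])
show(p)
reset_output()  # clear the configured output state again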
partofthething/home-assistant | tests/components/pilight/test_init.py | Python | apache-2.0 | 14,678 | 0.000409
"""The tests for the pilight component."""
from datetime import timedelta
import logging
import socket
from unittest.mock import patch
from voluptuous import MultipleInvalid
from homeassistant.components import pilight
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
_LOGGER = logging.getLogger(__name__)
class PilightDaemonSim:
"""Class to fake the interface of the pilight python package.
Is used in an asyncio loop, thus the mock cannot be accessed to
    determine if methods were called?!
This is solved here in a hackish way by printing errors
that can be checked using logging.error mocks.
"""
callback = None
called = None
test_message = {
"protocol": "kaku_switch",
"uuid": "1-2-3-4",
"message": {"id": 0, "unit": 0, "off": 1},
}
def __init__(self, host, port):
"""Init pilight client, ignore parameters."""
def send_code(self, call): # pylint: disable=no-self-use
"""Handle pilight.send service callback."""
_LOGGER.error("PilightDaemonSim payload: %s", call)
def start(self):
"""Handle homeassistant.start callback.
Also sends one test message after start up
"""
_LOGGER.error("PilightDaemonSim start")
# Fake one code receive after daemon started
if not self.called:
self.callback(self.test_message)
self.called = True
def stop(self): # pylint: disable=no-self-use
"""Handle homeassistant.stop callback."""
_LOGGER.error("PilightDaemonSim stop")
def set_callback(self, function):
"""Handle pilight.pilight_received event callback."""
self.callback = function
_LOGGER.error("PilightDaemonSim callback: %s", function)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_failed_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket error."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.error) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_connection_timeout_error(mock_error, hass):
"""Try to connect at 127.0.0.1:5001 with socket timeout."""
with assert_setup_component(4):
with patch("pilight.pilight.Client", side_effect=socket.timeout) as mock_client:
assert not await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_no_protocol(hass):
"""Try to send data without protocol information, should give error."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call without protocol info, should raise an error
try:
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data={"noprotocol": "test", "value": 42},
blocking=True,
)
await hass.async_block_till_done()
except MultipleInvalid as error:
assert "required key not provided @ data['protocol']" in str(error)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code(mock_pilight_error, hass):
"""Try to send proper data."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
service_data["protocol"] = [service_data["protocol"]
|
]
assert str(service_data) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
async def test_send_code_fail(mock_pilight_error, hass):
"""Check IOError exception error message."""
    with assert_setup_component(4):
with patch("pilight.pilight.Client.send_code", side_effect=IOError):
assert await async_setup_component(
hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "Pilight send failed" in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_send_code_delay(mock_pilight_error, hass):
"""Try to send proper data with delay afterwards."""
with assert_setup_component(4):
assert await async_setup_component(
hass,
pilight.DOMAIN,
{pilight.DOMAIN: {pilight.CONF_SEND_DELAY: 5.0}},
)
# Call with protocol info, should not give error
service_data1 = {"protocol": "test11", "value": 42}
service_data2 = {"protocol": "test22", "value": 42}
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data1,
blocking=True,
)
await hass.services.async_call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data2,
blocking=True,
)
service_data1["protocol"] = [service_data1["protocol"]]
service_data2["protocol"] = [service_data2["protocol"]]
async_fire_time_changed(hass, dt_util.utcnow())
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data1) in str(error_log_call)
new_time = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, new_time)
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data2) in str(error_log_call)
@patch("homeassistant.components.pilight._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER", _LOGGER)
@patch("pilight.pilight.Client", PilightDaemonSim)
async def test_start_stop(mock_pilight_error, hass):
"""Check correct startup and stop of pilight daemon."""
with assert_setup_component(4):
assert await async_setup_component(hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
await hass.async_start()
await hass.async_block_till_done()
error_log_call = mock_pilight_error.call_args_list[-2]
assert "PilightDaemonSim callback" in str(error_log_call)
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim start" in str(error_log_call)
# Test stop
with patch.object(hass.loop, "stop"):
matpow2/opensnd | frame2.py | Python | gpl-3.0 | 18,104 | 0.005689
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of OpenSND.
# OpenSND is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenSND is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with OpenSND. If not, see <http://www.gnu.org/licenses/>.
from common import *
from objects import *
from images import *
from fonts import *
from sounds import *
from PySide.QtCore import Qt
import os
class Frame2(Frame):
name = 'Login'
index = 1
width = 800
height = 600
background = (0, 0, 0)
def initialize(self):
self.create_object(ValidChar_22, 123, 620)
self.create_object(Msg_16, 303, 699)
self.create_object(VerCheck_25, 337, 712)
self.create_object(IrcNick_27, 466, -94)
self.create_object(Newid_28, -2, -66)
self.create_object(Channel1_36, -232, 435)
self.create_object(Title_4, 0, 0)
self.create_object(GlobalName_6, 3, -48)
self.create_object(CheckVersion_8, 180, -42)
self.create_object(Ip2_9, 432, -109)
self.create_object(Port2_10, 462, -72)
self.create_object(Connect_11, 400, 455)
self.create_object(String2_12, 9, 575)
self.create_object(MooSock_13, 567, -65)
self.create_object(CheckUser_14, 666, -34)
self.create_object(StringParser_15, 700, -62)
self.create_object(Timeout_18, 371, -24)
self.create_object(RemoteIP_20, 155, -95)
self.create_object(EigeneIP_21, 173, -121)
self.create_object(ScreenshotNr_23, 588, 621)
self.create_object(Version_26, 532, 659)
self.create_object(SvrKills_29, 68, -112)
self.create_object(SvrDeaths_30, 66, -92)
self.create_object(SvrPoints_31, 63, -73)
self.create_object(SvrKills2_32, 116, -112)
self.create_object(SvrDeaths2_33, 113, -90)
self.create_object(SvrPoints2_34, 113, -71)
self.create_object(String12_37, 51, 515)
self.create_object(String11_17, 50, 514)
self.create_object(BinaryObject_38, 312, -123)
self.create_object(Name_5, 280, 394)
self.create_object(Ini_7, 150, -71)
self.create_object(Edit_19, 31, 641)
self.create_object(Edit2_24, 294, 655)
self.groups = {
'links' : True,
'Check name' : False,
'Get Info' : False,
'Access' : False,
'Check version' : False,
}
def on_start(self):
self.set_event_id(1)
self.get(Name_5).limit_size(15)
self.get(Name_5).set_focus(True)
self.get(Ini_7).set_filename((os.getcwd()+'\\')+'data.ini')
self.get(Ini_7).set_group('Data')
self.get(Ini_7).set_item('Name')
self.get(Name_5).set_value(left_string(self.get(Ini_7).get(), 15))
self.get(Ini_7).set_item('UID')
self.get(Newid_28).set_value(self.get(Ini_7).get())
self.values[10] = 0
self.values[6] = 0
self.get(Connect_11).values[0] = 0
self.show_cursor()
self.get(StringParser_15).add_delimiter('\r\n')
self.get(Edit2_24).load_file('Screenshot.pak')
add_encryption_key('\xf88\xfa2J\xdb\xae\x91=\xd5.\x99\xb3_y\x7f/U%0C\xd9')
self.set_event_id(2)
self.get(ScreenshotNr_23).set_value(self.get(Edit2_24).get_number())
self.set_event_id(3)
if self.get(Newid_28).text == '':
self.get(Newid_28).set_value('0')
self.set_event_id(4)
if self.get(Ini_7).get_value_item('Footsteps') == 1:
self.players[1].lives = 1
self.set_event_id(5)
if self.get(Ini_7).get_value_item('Footsteps') != 1:
self.players[1].lives = 0
self.set_event_id(6)
if (self.get(Ini_7).get_value_item('Music') == 1 and
self.get_global_value(0) == 0):
self.values[0] = 1
self.values[12] = 1
self.set_mod_volume(0, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(1, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(2, self.get(Ini_7).get_value_item('MusicVolume'))
            self.set_mod_volume(3, self.get(Ini_7).get_value_item('MusicVolume'))
self.set_mod_volume(4, self.get(Ini_7).get_value_item('MusicVolume'))
self.cross_fade_mod(0, 0, 3000)
self.set_event_id(7)
if self.get(Name_5).get_value() == '':
            self.get(Name_5).set_value('Player')
pass
def loop_name(self):
self.set_event_id(21)
if (self.groups['Check name'] and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '[' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != ']' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '!' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '$' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '+' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '*' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != "'" and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '#' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '/' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '\\' and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != '|'):
self.get(IrcNick_27).set_value(self.get(IrcNick_27).text+mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1))
self.set_event_id(22)
for loop_index in xrange(len(self.get(ValidChar_22).text)):
self.loop_indexes['ValidChar'] = loop_index
if self.loop_valid_char() == False: break
pass
def loop_valid_char(self):
self.set_event_id(23)
if (self.groups['Check name'] and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) == mid_string(self.get(ValidChar_22).text, self.get_loop_index('ValidChar'), 1)):
return False # 'ValidChar'
self.set_event_id(24)
if (self.groups['Check name'] and
self.get_loop_index('ValidChar') == len(self.get(ValidChar_22).text)-1 and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) == ' '):
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value('Space is an invalid character, use _ instead')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value('Space is an invalid character, use _ instead')
self.set_event_id(25)
if (self.groups['Check name'] and
self.get_loop_index('ValidChar') == len(self.get(ValidChar_22).text)-1 and
mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1) != ' '):
self.groups['Check name'] = False
self.get(Connect_11).set_transparency(0)
self.get(Connect_11).values[0] = 0
self.get(String11_17).set_value(mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1)+' is an invalid character')
self.get(CheckUser_14).set_value(0)
self.get(Name_5).set_read_only(False)
self.get(String12_37).set_value(mid_string(self.get(Name_5).get_value(), self.get_loop_index('Name'), 1)+' is an invalid character')
pass
def on_mouse_press(self,
fsxfreak/esys-pbi | src/pupil/pupil_src/player/player_methods.py | Python | mit | 13,738 | 0.012957
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os, cv2, csv_utils, shutil
import numpy as np
import collections
# logging
import logging
logger = logging.getLogger(__name__)
from file_methods import save_object, load_object
from version_utils import VersionFormat
from version_utils import read_rec_version
def correlate_data(data,timestamps):
'''
data: list of data :
each datum is a dict with at least:
timestamp: float
timestamps: timestamps list to correlate data to
this takes a data list and a timestamps list and makes a new list
with the length of the number of timestamps.
    Each slot contains a list that will have 0, 1 or more associated data points.
Finally we add an index field to the datum with the associated index
'''
timestamps = list(timestamps)
data_by_frame = [[] for i in timestamps]
frame_idx = 0
data_index = 0
data.sort(key=lambda d: d['timestamp'])
while True:
try:
datum = data[data_index]
# we can take the midpoint between two frames in time: More appropriate for SW timestamps
ts = ( timestamps[frame_idx]+timestamps[frame_idx+1] ) / 2.
            # or the time of the next frame: More appropriate for Start Of Exposure Timestamps (HW timestamps).
# ts = timestamps[frame_idx+1]
except IndexError:
            # we might lose a data point at the end but we don't care
break
if datum['timestamp'] <= ts:
datum['index'] = frame_idx
data_by_frame[frame_idx].append(datum)
data_index +=1
else:
frame_idx+=1
return data_by_frame
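# Illustrative example (added commentary, not in the original file; values are
# made up): with data = [{'timestamp': 0.4}, {'timestamp': 0.6}, {'timestamp': 1.2}]
# and timestamps = [0.0, 1.0, 2.0], correlate_data(data, timestamps) returns
# [[d0], [d1, d2], []]: the first datum falls before the 0/1 frame midpoint
# (0.5), the other two before the 1/2 midpoint (1.5), and each datum gains an
# 'index' key (0, 1 and 1 respectively).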
def update_recording_to_recent(rec_dir):
meta_info = load_meta_info(rec_dir)
update_meta_info(rec_dir,meta_info)
# Reference format: v0.7.4
rec_version = read_rec_version(meta_info)
# Convert python2 to python3
if rec_version < VersionFormat('0.8.7'):
update_recording_bytes_to_unicode(rec_dir)
if rec_version >= VersionFormat('0.7.4'):
pass
elif rec_version >= VersionFormat('0.7.3'):
update_recording_v073_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.5'):
update_recording_v05_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.4'):
update_recording_v04_to_v074(rec_dir)
elif rec_version >= VersionFormat('0.3'):
update_recording_v03_to_v074(rec_dir)
else:
        logger.error("This recording is too old. Sorry.")
return
# Incremental format updates
if rec_version < VersionFormat('0.8.2'):
update_recording_v074_to_v082(rec_dir)
if rec_version < VersionFormat('0.8.3'):
update_recording_v082_to_v083(rec_dir)
if rec_version < VersionFormat('0.8.6'):
update_recording_v083_to_v086(rec_dir)
if rec_version < VersionFormat('0.8.7'):
update_recording_v086_to_v087(rec_dir)
if rec_version < VersionFormat('0.9.1'):
update_recording_v087_to_v091(rec_dir)
# How to extend:
# if rec_version < VersionFormat('FUTURE FORMAT'):
# update_recording_v081_to_FUTURE(rec_dir)
def load_meta_info(rec_dir):
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
return meta_info
def update_meta_info(rec_dir, meta_info):
logger.info('Updating meta info')
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'w',newline='') as csvfile:
csv_utils.write_key_value_file(csvfile,meta_info)
def update_recording_v074_to_v082(rec_dir):
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.2'
update_meta_info(rec_dir,meta_info)
def update_recording_v082_to_v083(rec_dir):
logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
for d in pupil_data['gaze_positions']:
if 'base' in d:
d['base_data'] = d.pop('base')
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.3'
update_meta_info(rec_dir,meta_info)
def update_recording_v083_to_v086(rec_dir):
logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
for topic in pupil_data.keys():
for d in pupil_data[topic]:
d['topic'] = topic
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.6'
update_meta_info(rec_dir,meta_info)
def update_recording_v086_to_v087(rec_dir):
logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
meta_info_path = os.path.join(rec_dir,"info.csv")
def _clamp_norm_point(pos):
        '''realistic numbers for norm pos should be in this range.
        Grossly bigger or smaller numbers are the result of bad extrapolation
        and can cause an overflow error when denormalized and cast as int32.
'''
return min(100,max(-100,pos[0])),min(100,max(-100,pos[1]))
for g in pupil_data.get('gaze_positions', []):
if 'topic' not in g:
#we missed this in one gaze mapper
g['topic'] = 'gaze'
g['norm_pos'] = _clamp_norm_point(g['norm_pos'])
save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.8.7'
update_meta_info(rec_dir,meta_info)
def update_recording_v087_to_v091(rec_dir):
logger.info("Updating recording from v0.8.7 format to v0.9.1 format")
meta_info_path = os.path.join(rec_dir,"info.csv")
with open(meta_info_path,'r',encoding='utf-8') as csvfile:
meta_info = csv_utils.read_key_value_file(csvfile)
meta_info['Data Format Version'] = 'v0.9.1'
update_meta_info(rec_dir,meta_info)
def update_recording_bytes_to_unicode(rec_dir):
logger.info("Updating recording from bytes to unicode.")
# update to python 3
meta_info_path = os.path.join(rec_dir, "info.csv")
def convert(data):
if isinstance(data, bytes):
return data.decode()
elif isinstance(data, str) or isinstance(data, np.ndarray):
return data
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.items()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
for file in os.listdir(rec_dir):
if file.startswith('.') or os.path.splitext(file)[1] == '.mp4':
continue
rec_file = os.path.join(rec_dir, file)
try:
rec_object = load_object(rec_file)
            converted_object = convert(rec_object)
if converted_object != rec_object:
logger.info('Converted `{}` from bytes to unicode'.format(file))
save_object(rec_object, rec_file)
except (ValueError, IsADirectoryError):
continue
with open(meta_info_path, 'r'
slideinc/notifier | pavement.py | Python | bsd-3-clause | 1,537 | 0.003253
import errno
import os
from setuptools import Extension
from paver.easy import *
from paver.path import path
from paver.setuputils import setup
setup(
name="notifier",
description="A pre-connected mesh of servers for fast internal RPC",
version="1.0",
license="bsd",
author="Libor Michalek",
author_email="libor@pobox.com",
packages=["notifier"],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python",
"Topic :: System :: Networking",
],
install_requires=['gogreen', 'wirebin'],
)
MANIFEST = (
"LICENSE",
"setup.py",
"paver-minilib.zip",
)
@task
def manifest():
path('MANIFEST.in').write_lines('include %s' % x for x in MANIFEST)
@task
@needs('generate_setup', 'minilib', 'manifest', 'setuptools.command.sdist')
def sdist():
pass
@task
def clean():
    for p in map(path, ('notifier.egg-info', 'dist', 'build', 'MANIFEST.in')):
        if p.exists():
if p.isdir():
p.rmtree()
else:
p.remove()
for p in path(__file__).abspath().parent.walkfiles():
if p.endswith(".pyc") or p.endswith(".pyo"):
try:
p.remove()
except OSError, exc:
if exc.args[0] == errno.EACCES:
continue
raise
open-synergy/opnsynid-hr | hr_timesheet_workflow_policy/__openerp__.py | Python | agpl-3.0 | 733 | 0
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# Copyright 2022 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manifest-required-author
{
"name": "HR Timesheet - Workflow Policy",
"version": "8.0.1.0.0",
"category": "Human Resource",
"website": "https://simetri-sinergi.id",
"author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr_attendance_configuration_page"
|
,
"hr_
|
timesheet_sheet",
],
"data": [
"views/hr_attendance_config_setting_views.xml",
"views/hr_timesheet_sheet_views.xml",
],
}
sns-chops/multiphonon | multiphonon/backward/_sqe2dos_script_templates.py | Python | mit | 1,493 | 0
# -*- Python -*-
#
# Jiao Lin <jiao.lin@gmail.com>
#
plots_table = """
exp exp-sqe.h5
sim-singlephonon sp-sqe.h5
sim-multiphonon mp-sqe.h5
sim-multiple-scattering ms-sqe.h5
sim-correction sqe_correction.h5
exp-corrected-single-phonon corrected_sqe.h5
sim-total-inel total-inel-sqe.h5
exp-residual residual-sqe.h5
"""
plot_intermediate_result_sqe_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,9
from multiphonon.backward.plotutils import plot_intermediate_result_sqe as plot
plot(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
plot_intermediate_result_se_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12,9
from multiphonon.backward.plotutils import plot_intermediate_result_se as plot
plot(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
plot_dos_iteration_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 6,4.5
from multiphonon.backward.plotutils import plot_dos_iteration
plot_dos_iteration(curdir, %(total_rounds)d)
from matplotlib import pyplot as plt
plt.show()
"""
plot_residual_code = """#!/usr/bin/env python
import os
curdir = os.path.dirname(__file__)
from multiphonon.backward.plotutils import plot_residual
plot_residual(curdir)
from matplotlib import pyplot as plt
plt.show()
"""
quake0day/oj | kthPrimeNumber.py | Python | mit | 527 | 0.043643
class Solution:
"""
@param k: The number k.
    @return: The kth prime number as description.
"""
def kthPrimeNumber(self, k):
# write your code here
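        # Added commentary (not in the original solution): despite the name,
        # this builds the increasing sequence of numbers whose only prime
        # factors are 3, 5 and 7, starting from 1.  i3, i5 and i7 point at the
        # next values in q to multiply by 3, 5 and 7; each iteration appends
        # the smallest new product and advances every pointer that produced
        # it, which avoids duplicates.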
q = [1]
i3 = i5 = i7 = 0
while len(q)-1 < k:
m3, m5, m7 = q[i3] * 3, q[i5] * 5, q[i7] * 7
m = min(m3, m5, m7)
if m == m3:
i3 += 1
if m == m5:
i5 += 1
if m == m7:
i7 += 1
q.append(m)
return q[-1]
a = Solution()
print a.kthPrimeNumber(5)
alexgorban/models | research/slim/nets/nasnet/nasnet_utils.py | Python | apache-2.0 | 20,826 | 0.007395
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value, it's hinted from the activation distribution
# that the majority of activation values are in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
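# Added note (not in the original file): e.g. calc_reduction_layers(18, 2)
# returns [6, 12], placing reduction cells one third and two thirds of the way
# through an 18-cell network.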
@contrib_framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@contrib_framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@contrib_framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(input_tensor=x, axis=[1, 2])
else:
return tf.reduce_mean(input_tensor=x, axis=[2, 3])
@contrib_framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.compat.v2.nn.avg_pool2d(
input=net,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.compat.v2.nn.avg_pool2d(
input=path2,
ksize=[1, 1, 1, 1],
strides=stride_spec,
padding='VALID',
data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
@contrib_framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(input=net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
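# Added note (not in the original file): random_tensor = keep_prob + U(0, 1),
# so tf.floor() yields 1 with probability keep_prob and 0 otherwise for each
# example in the batch; scaling the surviving activations by 1 / keep_prob
# keeps the expected value of `net` unchanged during training.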
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
An example would be 'separable_3x3_4' -> (3, 4).
Args:
operation: String that corresponds to convolution operation.
Returns:
Tuple of (filter shape, num layers).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
roubert/python-phonenumbers | python/phonenumbers/shortdata/region_IE.py | Python | apache-2.0 | 1,071 | 0.008403
"""Auto-generated file, do not edit by hand. IE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IE = PhoneMetadata(id='IE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[159]\\d{2,5}', possible_number_pattern='\\d{3,6}'),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|1(?:11|23))', possible_number_pattern='\\d{6}', example_number='116000'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='112|999', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='11(?:2|6(?:00[06]|1(?:11|23)))|51210|999', possible_number_pattern='\\d{3,6}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    carrier_specific=PhoneNumberDesc(national_number_pattern='51210', possible_number_pattern='\\d{5}'),
short_data=True)
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Core/Statistics/VideoPlaybackCrawler.py | Python | apache-2.0 | 7,854 | 0.005857
#Embedded file name: ACEStream\Core\Statistics\VideoPlaybackCrawler.pyo
from time import strftime
import cPickle
import sys
import threading
import zlib
from ACEStream.Core.BitTornado.BT1.MessageID import CRAWLER_VIDEOPLAYBACK_INFO_QUERY, CRAWLER_VIDEOPLAYBACK_EVENT_QUERY
from ACEStream.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
from ACEStream.Core.Overlay.SecureOverlay import OLPROTO_VER_EIGHTH, OLPROTO_VER_TENTH
from ACEStream.Core.Statistics.Crawler import Crawler
from ACEStream.Core.Utilities.utilities import show_permid, show_permid_short
DEBUG = False
class VideoPlaybackCrawler:
__single = None
lock = threading.Lock()
@classmethod
def get_instance(cls, *args, **kargs):
if cls.__single is None:
cls.lock.acquire()
try:
if cls.__single is None:
cls.__single = cls(*args, **kargs)
finally:
cls.lock.release()
return cls.__single
def __init__(self):
if VideoPlaybackCrawler.__single is not None:
raise RuntimeError, 'VideoPlaybackCrawler is singleton'
crawler = Crawler.get_instance()
if crawler.am_crawler():
self._file = open('videoplaybackcrawler.txt', 'a')
self._file.write(''.join(('# ',
'*' * 80,
'\n# ',
strftime('%Y/%m/%d %H:%M:%S'),
' Crawler started\n')))
self._file.flush()
self._event_db = None
else:
self._file = None
self._event_db = VideoPlaybackDBHandler.get_instance()
def query_initiator(self, permid, selversion, request_callback):
if selversion >= OLPROTO_VER_TENTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_initiator', show_permid_short(permid), 'version', selversion
request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, 'SELECT key, timestamp, event FROM playback_event; DELETE FROM playback_event;', callback=self._after_event_request_callback)
elif selversion >= OLPROTO_VER_EIGHTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_initiator', show_permid_short(permid), 'version', selversion
request_callback(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, 'SELECT key, timestamp, piece_size, num_pieces, bitrate, nat FROM playback_info ORDER BY timestamp DESC LIMIT 50', callback=self._after_info_request_callback)
elif DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: query_info_initiator', show_permid_short(permid), 'unsupported overlay version'
def _after_info_request_callback(self, exc, permid):
if not exc:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: request send to', show_permid_short(permid)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
'INFO REQUEST',
show_permid(permid),
'\n')))
self._file.flush()
def handle_info_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_info_crawler_request', show_permid_short(permid), message
try:
cursor = self._event_db._db.execute_read(message)
except Exception as e:
reply_callback(str(e), error=1)
else:
if cursor:
reply_callback(zlib.compress(cPickle.dumps(list(cursor), 2), 9))
else:
reply_callback('error', error=2)
def handle_info_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
if error:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', error, message
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' INFO REPLY',
show_permid(permid),
str(error),
message,
'\n')))
self._file.flush()
else:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), cPickle.loads(message)
info = cPickle.loads(message)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' INFO REPLY',
show_permid(permid),
str(error),
str(info),
'\n')))
self._file.flush()
i = 0
for key, timestamp, piece_size, num_pieces, bitrate, nat in info:
i += 1
if i == 1:
sql = "\nSELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;\nDELETE FROM playback_event WHERE key = '%s';\n" % (key, key)
else:
sql = "\nSELECT timestamp, origin, event FROM playback_event WHERE key = '%s' ORDER BY timestamp ASC LIMIT 50;\nDELETE FROM playback_event WHERE key = '%s';\nDELETE FROM playback_info WHERE key = '%s';\n" % (key, key, key)
request_callback(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, sql, channel_data=key, callback=self._after_event_request_callback, frequency=0)
def _after_event_request_callback(self, exc, permid):
if not exc:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: request send to', show_permid_short(permid)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REQUEST',
show_permid(permid),
'\n')))
self._file.flush()
def handle_event_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
if error:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: hand
|
le_crawler_reply', error, message
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
                                        ' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
message,
'\n')))
self._file.flush()
elif selversion >= OLPROTO_VER_TENTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), len(message), 'bytes zipped'
info = cPickle.loads(zlib.decompress(message))
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
str(info),
'\n')))
self._file.flush()
elif selversion >= OLPROTO_VER_EIGHTH:
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_crawler_reply', show_permid_short(permid), cPickle.loads(message)
info = cPickle.loads(message)
self._file.write('; '.join((strftime('%Y/%m/%d %H:%M:%S'),
' EVENT REPLY',
show_permid(permid),
str(error),
str(channel_data),
str(info),
'\n')))
self._file.flush()
def handle_event_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
if DEBUG:
print >> sys.stderr, 'videoplaybackcrawler: handle_event_crawler_request', show_permid_short(permid), message
try:
cursor = self._event_db._db.execute_read(message)
except Exception as e:
reply_callback(str(e), error=1)
else:
if cursor:
reply_callback(zlib.compress(cPickle.dumps(list(cursor), 2), 9))
else:
reply_callback('error', error=2)
zoeren/pygeocoder | pygeolib.py | Python | bsd-3-clause | 4,972 | 0.000603
import sys
import collections
class GeocoderResult(collections.Iterator):
"""
A geocoder resultset to iterate through address results.
    Example:
results = Geocoder.geocode('paris, us')
for result in results:
print(result.formatted_address, result.location)
Provide shortcut to ease field retrieval, looking at 'types' in each
'address_components'.
Example:
result.country
result.postal_code
You can also choose a different property to display for each lookup type.
Example:
result.country__short_name
By default, use 'long_name' property of lookup type, so:
result.country
and:
result.country__long_name
are equivalent.
"""
attribute_mapping = {
"state": "administrative_area_level_1",
"province": "administrative_area_level_1",
"city": "locality",
"county": "administrative_area_level_2",
}
def __init__(self, data):
"""
Creates instance of GeocoderResult from the provided JSON data array
"""
self.data = data
self.len = len(self.data)
self.current_index = 0
self.current_data = self.data[0]
def __len__(self):
return self.len
def __iter__(self):
return self
def return_next(self):
if self.current_index >= self.len:
raise StopIteration
self.current_data = self.data[self.current_index]
self.current_index += 1
return self
def __getitem__(self, key):
"""
Accessing GeocoderResult by index will return a GeocoderResult
with just one data entry
"""
return GeocoderResult([self.data[key]])
def __unicode__(self):
return self.formatted_address
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
def __next__(self):
return self.return_next()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
def next(self):
return self.return_next()
@property
def count(self):
return self.len
@property
def coordinates(self):
"""
Return a (latitude, longitude) coordinate pair of the current result
"""
location = self.current_data['geometry']['location']
return location['lat'], location['lng']
@property
def latitude(self):
return self.coordinates[0]
@property
def longitude(self):
return self.coordinates[1]
@property
def raw(self):
"""
Returns the full result set in dictionary format
"""
return self.data
@property
def valid_address(self):
"""
Returns true if queried address is valid street address
"""
return self.current_data['types'] == ['street_address']
@property
def formatted_address(self):
return self.current_data['formatted_address']
def __getattr__(self, name):
lookup = name.split('__')
attribute = lookup[0]
if (attribute in GeocoderResult.attribute_mapping):
attribute = GeocoderResult.attribute_mapping[attribute]
try:
prop = lookup[1]
except IndexError:
prop = 'long_name'
for elem in self.current_data['address_components']:
if attribute in elem['types']:
return elem[prop]
class GeocoderError(Exception):
"""Base class for errors in the :mod:`pygeocoder` module.
Methods of the :class:`Geocoder` raise this when something goes wrong.
"""
#: See http://code.google.com/apis/maps/documentation/geocoding/index.html#StatusCodes
#: for information on the meaning of these status codes.
G_GEO_OK = "OK"
G_GEO_ZERO_RESULTS = "ZERO_RESULTS"
G_GEO_OVER_QUERY_LIMIT = "OVER_QUERY_LIMIT"
G_GEO_REQUEST_DENIED = "REQUEST_DENIED"
G_GEO_MISSING_QUERY = "INVALID_REQUEST"
def __init__(self, status, url=None, response=None):
"""Create an exception with a status and optional full response.
:param status: Either a ``G_GEO_`` code or a string explaining the
exception.
:type status: int or string
:param url: The query URL that resulted in the error, if any.
:type url: string
:param response: The actual response returned from Google, if any.
:type response: dict
|
"""
Exception.__init__(self, status) # Exception is an old-school class
self.status = status
self.url = url
self.response = response
def __str__(self):
"""R
|
eturn a string representation of this :exc:`GeocoderError`."""
return 'Error %s\nQuery: %s' % (self.status, self.url)
def __unicode__(self):
"""Return a unicode representation of this :exc:`GeocoderError`."""
return unicode(self.__str__())
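# A minimal usage sketch (not from pygeocoder itself), using a hypothetical
# Google-style payload trimmed to just the fields the accessors above read:
if __name__ == "__main__":
    sample = [{
        "formatted_address": "Paris, TX, USA",
        "types": ["locality", "political"],
        "geometry": {"location": {"lat": 33.66, "lng": -95.55}},
        "address_components": [
            {"types": ["locality"], "long_name": "Paris", "short_name": "Paris"},
            {"types": ["administrative_area_level_1"],
             "long_name": "Texas", "short_name": "TX"},
        ],
    }]
    result = GeocoderResult(sample)
    print(result.formatted_address)    # Paris, TX, USA
    print(result.coordinates)          # (33.66, -95.55)
    print(result.city)                 # Paris
    print(result.state__short_name)    # TX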
|
thedod/boilerplate-peewee-flask
|
application/sitepack/db.py
|
Python
|
gpl-3.0
| 204
| 0
|
from peewee import * # no other way to reach playhouse :(
from playhouse import flask_utils as peewee_flask_utils
from playhouse import signals as peewee_signals
database = peewee_flask_utils.FlaskDB()
|
jstitch/gift_circle
|
GiftCircle/circulo/apps.py
|
Python
|
gpl-3.0
| 89
| 0
|
from django.apps import AppConfig
class CirculoConfig(AppConfig):
name = 'circulo'
|
Valloric/hyde
|
hyde/ext/plugins/images.py
|
Python
|
mit
| 5,652
| 0.000885
|
# -*- coding: utf-8 -*-
"""
Contains classes to handle images related things
# Requires PIL
"""
from hyde.plugin import Plugin
import re
import Image
class ImageSizerPlugin(Plugin):
"""
Each HTML page is modified to add width and height for images if
they are not already specified.
"""
def __init__(self, site):
        super(ImageSizerPlugin, self).__init__(site)
self.cache = {}
    def _handle_img(self, resource, src, width, height):
"""Determine what should be added to an img tag"""
if height is not None and width is not None:
return "" # Nothing
if src is None:
self.logger.warn("[%s] has an img tag without src attribute" % resource)
return "" # Nothing
if src not in self.cache:
if src.startswith(self.site.config.media_url):
path = src[len(self.site.config.media_url):].lstrip("/")
path = self.site.config.media_root_path.child(path)
image = self.site.content.resource_from_relative_deploy_path(path)
elif re.match(r'([a-z]+://|//).*', src):
# Not a local link
return "" # Nothing
elif src.startswith("/"):
# Absolute resource
path = src.lstrip("/")
image = self.site.content.resource_from_relative_deploy_path(path)
else:
# Relative resource
path = resource.node.source_folder.child(src)
image = self.site.content.resource_from_path(path)
if image is None:
self.logger.warn(
"[%s] has an unknown image" % resource)
return "" # Nothing
if image.source_file.kind not in ['png', 'jpg', 'jpeg', 'gif']:
self.logger.warn(
"[%s] has an img tag not linking to an image" % resource)
return "" # Nothing
# Now, get the size of the image
try:
self.cache[src] = Image.open(image.path).size
except IOError:
self.logger.warn(
"Unable to process image [%s]" % image)
self.cache[src] = (None, None)
return "" # Nothing
self.logger.debug("Image [%s] is %s" % (src,
self.cache[src]))
new_width, new_height = self.cache[src]
if new_width is None or new_height is None:
return "" # Nothing
if width is not None:
return 'height="%d" ' % (int(width)*new_height/new_width)
elif height is not None:
return 'width="%d" ' % (int(height)*new_width/new_height)
return 'height="%d" width="%d" ' % (new_height, new_width)
def text_resource_complete(self, resource, text):
"""
        When the resource is generated, search for img tags and specify
        their sizes.
        Some img tags may be missed; this is not a perfect parser.
"""
try:
mode = self.site.config.mode
except AttributeError:
mode = "production"
if not resource.source_file.kind == 'html':
return
if mode.startswith('dev'):
self.logger.debug("Skipping sizer in development mode.")
return
pos = 0 # Position in text
img = None # Position of current img tag
state = "find-img"
while pos < len(text):
if state == "find-img":
img = text.find("<img", pos)
if img == -1:
break # No more img tag
pos = img + len("<img")
if not text[pos].isspace():
continue # Not an img tag
pos = pos + 1
tags = {"src": "",
"width": "",
"height": ""}
state = "find-attr"
continue
if state == "find-attr":
if text[pos] == ">":
# We get our img tag
insert = self._handle_img(resource,
tags["src"] or None,
tags["width"] or None,
tags["height"] or None)
img = img + len("<img ")
text = "".join([text[:img], insert, text[img:]])
state = "find-img"
pos = pos + 1
continue
attr = None
for tag in tags:
if text[pos:(pos+len(tag)+1)] == ("%s=" % tag):
attr = tag
pos = pos + len(tag) + 1
break
if not attr:
pos = pos + 1
continue
if text[pos] in ["'", '"']:
pos = pos + 1
state = "get-value"
continue
if state == "get-value":
if text[pos] == ">":
state = "find-attr"
continue
if text[pos] in ["'", '"'] or text[pos].isspace():
# We got our value
pos = pos + 1
state = "find-attr"
continue
tags[attr] = tags[attr] + text[pos]
pos = pos + 1
continue
return text
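# A minimal sketch (not part of the plugin) of the splice that
# text_resource_complete() performs once an image's size is known, assuming a
# hypothetical 640x480 image: attributes are inserted right after "<img ".
if __name__ == "__main__":
    html = '<p><img src="/media/img/logo.png" alt="logo"></p>'
    pos = html.find("<img") + len("<img ")
    print(html[:pos] + 'height="%d" width="%d" ' % (480, 640) + html[pos:])
    # -> <p><img height="480" width="640" src="/media/img/logo.png" alt="logo"></p>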
|
elenaod/PythonScrollingShooter
|
player.py
|
Python
|
gpl-3.0
| 743
| 0.004038
|
from bases import *
from inanimate import Bullet
class Player(AnimateObject):
def __init__(self, x=0, y=0, hits=MAX_HEALTH, timer=TIMER, damage=1, score=0):
        super(Player, self).__init__(x, y, hits, timer)
self.damage = damage
self.score = score
def shoot(self):
if self.x - 1 >= 0:
return Bullet(self.x - 1, self.y, "player", self.damage)
def read(self, data):
super(Player, self).read(data)
values = data.split(" ")
self.score = int(values[6])
self.damage = int(values[7])
def write(self):
data = super(Player, self).write()
data += " " + str(self.damage)
        data += " " + str(self.score)
return data
|
mikeboers/midx
|
midx/notify/__init__.py
|
Python
|
bsd-3-clause
| 51
| 0
|
from midx.notify.common import iter_modified_files
|
Ubiwhere/django-dbbackup
|
dbbackup/tests/testapp/urls.py
|
Python
|
bsd-3-clause
| 305
| 0
|
try:
    from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
# url(r'^admin/', include(admin.site.urls)),
)
except ImportError:
from django.conf.urls import include, url
urlpatterns = (
        # url(r'^admin/', include(admin.site.urls)),
)
|
rowanphipps/Cerberus
|
tests/conftest.py
|
Python
|
mit
| 320
| 0
|
"""Configuration for pytest."""
import json
def pytest_generate_tests(metafunc):
"""Configure pytest to call each of the tests once for each test case."""
if "test_case" in metafunc.fixturenames:
        tests = json.load(open("tests/test_data.json"))["tests"]
metafunc.parametrize("test_case", tests)
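# For reference, a hypothetical minimal tests/test_data.json that the hook
# above would accept; each entry is passed through verbatim as `test_case`, so
# its fields are whatever the individual tests expect, e.g.
#
#   {"tests": [{"name": "example-case", "document": {}, "valid": true}]}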
|
brutasse/django-le-social
|
le_social/openid/tests/views.py
|
Python
|
bsd-3-clause
| 714
| 0.002801
|
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from .. import views
class Common(object):
"""
Stuff shared by the two views.
"""
def get_return_url(self):
return reverse('openid_callback')
    def failure(self, message):
return HttpResponse(message)
class Begin(Common, views.Begin):
template_name = 'le_social/openid/openid.html'
begin = Begin.as_view()
class Callback(Common, views.Callback):
    def success(self):
openid_url = self.openid_response.identity_url
return HttpResponse('OpenID association: %s' % openid_url)
callback = Callback.as_view()
|
JustinWingChungHui/okKindred
|
emailer/migrations/0005_remove_email_creation_date.py
|
Python
|
gpl-2.0
| 358
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('emailer', '0004_auto_20150128_2202'),
]
operations = [
migrations.RemoveField(
model_name='email',
            name='creation_date',
),
]
|
3cky/netdata
|
collectors/python.d.plugin/litespeed/litespeed.chart.py
|
Python
|
gpl-3.0
| 5,647
| 0.001948
|
# -*- coding: utf-8 -*-
# Description: litespeed netdata python.d module
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
import glob
import os
import re
from collections import namedtuple
|
from bases.FrameworkServices.SimpleService import SimpleService
update_every = 10
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
'net_throughput_http', # net throughput
'net_throughput_https', # net throughput
'connections_http', # connections
'connections_https', # connections
'requests', # requests
'requests_processing', # requests
'pub_cache_hits', # cache
'private_cache_hits', # cache
'static_hits', # static
]
CHARTS = {
'net_throughput_http': {
'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
'litespeed.net_throughput', 'area'],
'lines': [
['bps_in', 'in', 'absolute'],
['bps_out', 'out', 'absolute', -1]
]
},
'net_throughput_https': {
'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
'litespeed.net_throughput', 'area'],
'lines': [
['ssl_bps_in', 'in', 'absolute'],
['ssl_bps_out', 'out', 'absolute', -1]
]
},
'connections_http': {
'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
'lines': [
['conn_free', 'free', 'absolute'],
['conn_used', 'used', 'absolute']
]
},
'connections_https': {
'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
'lines': [
['ssl_conn_free', 'free', 'absolute'],
['ssl_conn_used', 'used', 'absolute']
]
},
'requests': {
'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
'lines': [
['requests', None, 'absolute', 1, 100]
]
},
'requests_processing': {
'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
'lines': [
['requests_processing', 'processing', 'absolute']
]
},
'pub_cache_hits': {
'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
'lines': [
['pub_cache_hits', 'hits', 'absolute', 1, 100]
]
},
'private_cache_hits': {
'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
'lines': [
['private_cache_hits', 'hits', 'absolute', 1, 100]
]
},
'static_hits': {
'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
'lines': [
['static_hits', 'hits', 'absolute', 1, 100]
]
}
}
t = namedtuple('T', ['key', 'id', 'mul'])
T = [
t('BPS_IN', 'bps_in', 8),
t('BPS_OUT', 'bps_out', 8),
t('SSL_BPS_IN', 'ssl_bps_in', 8),
t('SSL_BPS_OUT', 'ssl_bps_out', 8),
t('REQ_PER_SEC', 'requests', 100),
t('REQ_PROCESSING', 'requests_processing', 1),
t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
t('STATIC_HITS_PER_SEC', 'static_hits', 100),
t('PLAINCONN', 'conn_used', 1),
t('AVAILCONN', 'conn_free', 1),
t('SSLCONN', 'ssl_conn_used', 1),
t('AVAILSSL', 'ssl_conn_free', 1),
]
RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
ZERO_DATA = {
'bps_in': 0,
'bps_out': 0,
'ssl_bps_in': 0,
'ssl_bps_out': 0,
'requests': 0,
'requests_processing': 0,
'pub_cache_hits': 0,
'private_cache_hits': 0,
'static_hits': 0,
'conn_used': 0,
'conn_free': 0,
'ssl_conn_used': 0,
'ssl_conn_free': 0,
}
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.path = self.configuration.get('path', '/tmp/lshttpd/')
self.files = list()
def check(self):
if not self.path:
self.error('"path" not specified')
return False
fs = glob.glob(os.path.join(self.path, '.rtreport*'))
if not fs:
self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
return None
self.debug('stats files:', fs)
for f in fs:
if not is_readable_file(f):
self.error('{0} is not readable'.format(f))
continue
self.files.append(f)
return bool(self.files)
def get_data(self):
"""
Format data received from http request
:return: dict
"""
data = dict(ZERO_DATA)
for f in self.files:
try:
with open(f) as b:
lines = b.readlines()
except (OSError, IOError) as err:
self.error(err)
return None
else:
parse_file(data, lines)
return data
def parse_file(data, lines):
for line in lines:
if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
continue
m = dict(RE.findall(line))
for v in T:
if v.key in m:
data[v.id] += float(m[v.key]) * v.mul
def is_readable_file(v):
return os.path.isfile(v) and os.access(v, os.R_OK)
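# A small self-check sketch (not part of the collector) of how parse_file()
# accumulates counters; the report lines below are made up but follow the
# "KEY: value" pattern the regex above expects.
if __name__ == '__main__':
    sample = dict(ZERO_DATA)
    parse_file(sample, [
        'BPS_IN: 1.5, BPS_OUT: 3.0, SSL_BPS_IN: 0.0, SSL_BPS_OUT: 0.0',
        'PLAINCONN: 10, AVAILCONN: 90, SSLCONN: 2, AVAILSSL: 8',
    ])
    # BPS_* values are scaled by 8 (bytes -> bits); connection counts by 1.
    print('%s %s' % (sample['bps_in'], sample['conn_used']))  # 12.0 10.0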
|
Mariaanisimova/pythonintask
|
ISTp/2014/VOLKO_S_D/task-1-46.py
|
Python
|
apache-2.0
| 123
| 0.013889
|
input ('Анук Эме - это псевдоним актрисы Франсуазы Юдит Сорья Дрейфус')
|
wenhuchen/ETHZ-Bootstrapped-Captioning
|
visual-concepts/eval.py
|
Python
|
bsd-3-clause
| 11,962
| 0.006437
|
from __future__ import division
from _init_paths import *
import os
import os.path as osp
import sg_utils as utils
import numpy as np
import skimage.io
import skimage.transform
import h5py
import pickle
import json
import math
import argparse
import time
import cv2
from collections import Counter
from json import encoder
"""
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
"""
encoder.FLOAT_REPR = lambda o: format(o, '.2f')
mean = np.array([[[ 103.939, 116.779, 123.68]]])
functional_words = ['a', 'on', 'of', 'the', 'in', 'with', 'and', 'is', 'to', 'an', 'two', 'at', 'next', 'are', 'it']
def scaleandtranspose(im, base_image_size):
    # Subtract the ILSVRC mean value
new_im = im - mean
# Upsample the image and swap the axes to Color x height x width
new_im = upsample_image(new_im, base_image_size, square=True)
return new_im.transpose((2,0,1))
def BGR2RGB(img):
assert img.shape[2] == 3
new_img = img.copy()
new_img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
return new_img
def clip(lo, x, hi):
return lo if x <= lo else hi if x >= hi else x
def data_crop(im, boxes):
# Make sure the larger edge is 720 in length
H, W = im.shape[0], im.shape[1]
bbox_img = im.copy()
crop_list = []
for box in boxes:
        # Careful that the order is height before width
leftup_x = clip(0, box[0], W)
leftup_y = clip(0, box[1], H)
rightbot_x = clip(0, box[0] + box[2], W)
rightbot_y = clip(0, box[1] + box[3], H)
crop_list.append(im[leftup_y:rightbot_y, leftup_x:rightbot_x, :])
cv2.rectangle(bbox_img, (leftup_x, leftup_y), (rightbot_x, rightbot_y), (0, 255, 0), 2)
return crop_list, bbox_img
def upsample_image(im, upsample_size, square=False):
h, w = im.shape[0], im.shape[1]
s = max(h, w)
if square:
I_out = np.zeros((upsample_size, upsample_size, 3), dtype=np.float)
else:
new_h = math.ceil(h/w * upsample_size) if w>=h else upsample_size
new_w = math.ceil(w/h * upsample_size) if h>=w else upsample_size
I_out = np.zeros((new_h, new_w, 3), dtype=np.float)
im = cv2.resize(im, None, None, fx = upsample_size/s, fy = upsample_size/s, interpolation=cv2.INTER_CUBIC)
I_out[:im.shape[0], :im.shape[1], :] = im
return I_out
def filter_out(concepts):
rank = Counter()
for concept in concepts:
rank.update(concept)
words = map(lambda arg: arg[0], rank.most_common(20))
return words
class DataLoader(object):
def __init__(self, coco_h5, coco_json):
self.h5 = h5py.File(coco_h5)
self.label_start_ix = self.h5['label_start_ix']
self.label_end_ix = self.h5['label_end_ix']
self.json_image = json.load(open(coco_json))['images']
self.image_num = len(json.load(open(coco_json))['images'])
self.ix_to_word = json.load(open(coco_json))['ix_to_word']
self.split_ix = {}
self.seq_length = 16
self.iterator = {}
for i, info in enumerate(self.json_image):
if info['split'] not in self.split_ix:
self.split_ix[info['split']] = [i]
else:
self.split_ix[info['split']].append(i)
self.reset_iterator()
def get_image_num(self, split):
if split == 'train':
return self.image_num - 10000
else:
return 5000
def reset_iterator(self):
for k in self.split_ix.keys():
self.iterator[k] = 0
def get_batch(self, split, batch_size=1, seq_per_img=5, seq_length=16):
images = np.zeros((batch_size, 256, 256, 3))
seq = np.zeros((seq_per_img, seq_length))
split_ix = self.split_ix[split]
max_ix = self.h5['labels'].shape[0]
max_index = len(split_ix)
wrapped = False
info = []
for i in range(batch_size):
ri = self.iterator[split]
ri_next = ri + 1
if ri_next >= max_index:
ri_next = 0
wrapped = True
self.iterator[split] = ri_next
ix = split_ix[ri]
ix1 = self.h5['label_start_ix'][ix]
ix2 = self.h5['label_end_ix'][ix]
ncaps = ix2 - ix1 + 1
assert ncaps > 0
if ncaps >= seq_per_img:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=False)
else:
rand_ix = np.random.choice(range(ix1, ix2+1), seq_per_img, replace=True)
for j, j_ix in enumerate(rand_ix):
if j_ix >= max_ix:
seq[j] = self.h5['labels'][-1, :seq_length]
else:
seq[j] = self.h5['labels'][j_ix, :seq_length]
im = self.h5['images'][ix].astype(np.float32)
images[i] = np.transpose(im, axes = (1, 2, 0))
info.append({'id': self.json_image[ix]['id'], 'file_path': self.json_image[ix]['file_path']})
return images, seq, info, wrapped
class TestModel(object):
def __init__(self, vocab_file):
# Set threshold_metric_name and output_metric_name
self.base_image_size = 565
self.vocab = utils.load_variables(vocab_file)
self.is_functional = np.array([x not in functional_words for x in self.vocab['words']])
self.threshold = 0.5
def load(self, prototxt_deploy, model_file):
self.net = caffe.Net(prototxt_deploy, model_file, caffe.TEST)
def forward(self, im, order):
        # Make sure the images passed in are in BGR order and height x width x channel order
self.net.forward(data=im)
# Retrieve the mil probability of the word
mil_probs = self.net.blobs['mil'].data
mil_probs = mil_probs.reshape((mil_probs.shape[0], mil_probs.shape[1]))
top_ind = np.argsort(-mil_probs, axis=-1)[:, :order + len(functional_words)]
# If not for regional features, just return the distribution
if order == 1000:
return self.net.blobs['mil'].data
        # Retrieve the sigmoid data from the sigmoid layer
fc8_conv_probs = self.net.blobs['fc8-conv-sigmoid'].data
fc8_conv = fc8_conv_probs.reshape((fc8_conv_probs.shape[0], fc8_conv_probs.shape[1], -1))
fc8_conv_arg = fc8_conv.argmax(axis=-1)
        # Retrieve the corresponding feature maps
feat_map = self.net.blobs['fc7-conv'].data
concepts, prob = [], []
att_feat = np.zeros((feat_map.shape[0], order, feat_map.shape[1]), dtype='float32')
feat_probs = np.zeros((feat_map.shape[0], order, 12, 12), dtype='float32')
# Loop over all the sorted indexes
        indexes = []
for i in range(top_ind.shape[0]):
tmp_concepts = []
for j in range(top_ind.shape[1]):
word_idx = top_ind[i, j]
prob_map = fc8_conv_probs[i, word_idx, :, :]
index = fc8_conv_arg[i, word_idx]
word = self.vocab['words'][word_idx]
if word not in functional_words:
                    if index not in indexes:
i1, i2 = divmod(index, 12)
att_feat[i, len(indexes)] = feat_map[i,:,i1,i2]
indexes.append(index)
feat_probs[i, len(tmp_concepts)] = prob_map
tmp_concepts.append(word)
if len(tmp_concepts) >= order:
break
concepts.append(tmp_concepts)
prob.append(mil_probs[i, top_ind[i]].tolist())
return concepts, prob, att_feat, feat_probs
if __name__ == "__main__":
parser = argparse.ArgumentParser("run visual concept extraction")
parser.add_argument("--test_json", type=str, required=True, help="test image json")
parser.add_argument("--dataset", type=str, required=True, help="the dataset to use")
parser.add_argument("--split", type=str, required=True, help="Choose a split to evaluate")
parser.add_argument("--order", type=int, default=20, help="test image json")
parser.add_argument("--gpuid", type=int, default=0, help="GPU id to run")
p
|
chubbymaggie/reverse
|
plasma/lib/arch/arm/utils.py
|
Python
|
gpl-3.0
| 4,197
| 0.004289
|
#!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from capstone import CS_GRP_CALL, CS_GRP_RET
from capstone.arm import (ARM_CC_EQ, ARM_CC_NE, ARM_CC_HS, ARM_CC_LO,
ARM_CC_MI, ARM_CC_PL, ARM_CC_VS, ARM_CC_VC, ARM_CC_HI,
ARM_CC_LS, ARM_CC_GE, ARM_CC_LT, ARM_CC_GT, ARM_CC_LE, ARM_CC_AL,
ARM_INS_EOR, ARM_INS_ADD, ARM_INS_ORR, ARM_INS_AND, ARM_INS_MOV,
ARM_INS_CMP, ARM_INS_SUB, ARM_INS_LDR, ARM_INS_B, ARM_INS_BLX,
ARM_INS_BL, ARM_INS_BX, ARM_REG_LR, ARM_OP_REG, ARM_REG_PC, ARM_INS_POP,
ARM_OP_IMM, ARM_OP_MEM, ARM_REG_SP)
JUMPS = {ARM_INS_B, ARM_INS_BX}
JUMPS_LINK = {ARM_INS_BL, ARM_INS_BLX}
OP_IMM = ARM_OP_IMM
OP_MEM = ARM_OP_MEM
OP_REG = ARM_OP_REG
# Warning: before adding new prolog check in lib.analyzer.has_prolog
PROLOGS = [
[b"\xe9\x2d"], # push registers
]
def is_cmp(i):
return i.id == ARM_INS_CMP
def is_jump(i):
# Suppose that the written register is the first operand
op = i.operands[0]
if op.type == ARM_OP_REG and op.value.reg == ARM_REG_PC:
return True
if i.id == ARM_INS_POP:
for o in i.operands:
if o.type == ARM_OP_REG and o.value.reg == ARM_REG_PC:
return True
return False
return i.id in JUMPS and not (op.type == ARM_OP_REG and \
op.value.reg == ARM_REG_LR)
def is_cond_jump(i):
return is_jump(i) and i.cc != ARM_CC_AL
def is_uncond_jump(i):
return is_jump(i) and i.cc == ARM_CC_AL
def is_ret(i):
op = i.operands[0]
return i.group(CS_GRP_RET) or i.id == ARM_INS_BX and \
op.type == ARM_OP_REG and op.value.reg == ARM_REG_LR
def is_call(i):
return i.group(CS_GRP_CALL) or i.id in JUMPS_LINK
OPPOSITES = [
[ARM_CC_EQ, ARM_CC_NE],
[ARM_CC_GE, ARM_CC_LT],
[ARM_CC_LE, ARM_CC_GT],
[ARM_CC_HI, ARM_CC_LS],
[ARM_CC_HS, ARM_CC_LO],
[ARM_CC_PL, ARM_CC_MI],
[ARM_CC_VS, ARM_CC_VC],
]
OPPOSITES = dict(OPPOSITES + [i[::-1] for i in OPPOSITES])
def invert_cond(i):
return OPPOSITES.get(i.cc, -1)
def get_cond(i):
return i.cc
COND_SYMB = {
ARM_CC_EQ: "==",
ARM_CC_NE: "!=",
ARM_CC_GE: ">=",
ARM_CC_LT: "<",
ARM_CC_LE: "<=",
ARM_CC_GT: ">",
ARM_CC_HI: "(unsigned) >",
ARM_CC_LS: "(unsigned) <=",
ARM_CC_HS: "(unsigned) >=",
ARM_CC_LO: "(unsigned) <",
ARM_CC_VS: "overflow",
ARM_CC_VC: "!overflow",
ARM_CC_PL: ">=",
ARM_CC_MI: "<",
}
INST_SYMB = {
ARM_INS_EOR: "^",
ARM_INS_ORR: "|",
ARM_INS_AND: "&",
ARM_INS_ADD: "+",
ARM_INS_MOV: "=",
ARM_INS_SUB: "-",
ARM_INS_CMP: "cmp",
ARM_INS_LDR: "=",
}
def cond_symbol(ty):
return COND_SYMB.get(ty, "UNKNOWN")
def inst_symbol(i):
return INST_SYMB.get(i.id, "UNKNOWN")
def guess_frame_size(analyzer, ad):
regsctx = analyzer.arch_analyzer.new_regs_context()
if regsctx is None:
return -1
while 1:
i = analyzer.disasm(ad)
if i is None or is_ret(i) or is_call(i) or is_cond_jump(i):
return 0
# Do only registers simulation
analyzer.arch_analyzer.analyze_operands(analyzer, regsctx, i, None, True)
if i.id == ARM_INS_SUB:
op = i.operands[0]
if op.type == ARM_OP_REG and op.value.reg == ARM_REG_SP:
return - analyzer.arch_analyzer.get_sp(regsctx)
ad += i.size
return -1
def search_jmptable_addr(analyzer, jump_i, inner_code):
return None
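# A tiny illustrative check (not part of PLASMA): the tables above map a
# capstone condition code to a C-like operator and to its opposite.
if __name__ == "__main__":
    print(cond_symbol(ARM_CC_EQ), "<->", cond_symbol(OPPOSITES[ARM_CC_EQ]))  # == <-> !=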
|
zamattiac/SHARE
|
providers/org/ncar/normalizer.py
|
Python
|
apache-2.0
| 3,649
| 0.00137
|
from share.normalize import ctx, tools
from share.normalize.parsers import Parser
from share.normalize.utils import format_address
class WorkIdentifier(Parser):
uri = tools.RunPython('get_ncar_identifier', ctx)
class Extra:
description = tools.Try(ctx.Related_URL.Description)
url_content_type = tools.Try(ctx.Related_URL.URL_Content_Type.Type)
def get_ncar_identifier(self, ctx):
return 'https://www.earthsystemgrid.org/dataset/{}.html'.format(ctx['Entry_ID'])
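# For reference, a hypothetical Entry_ID such as 'ucar.cgd.ccsm4' would yield
# 'https://www.earthsystemgrid.org/dataset/ucar.cgd.ccsm4.html' from
# get_ncar_identifier() above.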
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = tools.Delegate(Tag, ctx)
class PersonnelAgent(Parser):
schema = tools.GuessAgentType(
|
tools.RunPython('combine_first_last_name', ctx)
)
name = tools.RunPython('combine_first_last_name', ctx)
location = tools.RunPython('get_address', ctx['Contact_Address'])
class Extra:
role = tools.Try(ctx.Role)
url = tools.Try(ctx.Data_Center_URL)
def combine_first_last_name(self, ctx):
return ctx['First_Name'] + ' ' + ctx['Last_Name']
def get_address(self, ctx):
address = ctx['Address']
if isinstance(address, list):
address1 = address[0]
address2 = address[1]
return format_address(
self,
address1=address1,
address2=address2,
city=ctx['City'],
state_or_province=ctx['Province_or_State'],
postal_code=ctx['Postal_Code'],
country=ctx['Country']
)
return format_address(
self,
address1=ctx['Address'],
address2=address2,
city=ctx['City'],
state_or_province=ctx['Province_or_State'],
postal_code=ctx['Postal_Code'],
country=ctx['Country']
)
class IsAffiliatedWith(Parser):
related = tools.Delegate(PersonnelAgent, ctx)
class DataCenterAgent(Parser):
schema = tools.GuessAgentType(
ctx.Data_Center_Name.Long_Name,
default='organization'
)
name = ctx.Data_Center_Name.Long_Name
related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Try(ctx.Personnel))
class Extra:
data_center_short_name = ctx.Data_Center_Name.Short_Name
class AgentWorkRelation(Parser):
agent = tools.Delegate(DataCenterAgent, ctx)
class DataSet(Parser):
title = tools.Join(tools.Try(ctx.record.metadata.DIF.Entry_Title))
description = tools.Try(ctx.record.metadata.DIF.Summary.Abstract)
related_agents = tools.Map(
tools.Delegate(AgentWorkRelation),
tools.Try(ctx.record.metadata.DIF.Data_Center)
)
tags = tools.Map(
tools.Delegate(ThroughTags),
tools.Try(ctx.record.metadata.DIF.Metadata_Name),
tools.Try(ctx.record.header.setSpec)
)
identifiers = tools.Map(tools.Delegate(WorkIdentifier), tools.Try(ctx.record.metadata.DIF))
date_updated = tools.ParseDate(ctx.record.header.datestamp)
is_deleted = tools.RunPython('check_status', tools.Try(ctx.record.header['@status']))
class Extra:
status = tools.Try(ctx.record.header['@status'])
entry_id = tools.Try(ctx.record.metadata.DIF.Entry_ID)
metadata_name = tools.Try(ctx.record.metadata.DIF.Metadata_Name)
metadata_version = tools.Try(ctx.record.metadata.DIF.Metadata_Version)
last_dif_revision_date = tools.Try(ctx.record.metadata.DIF.Last_DIF_Revision_Date)
set_spec = ctx.record.header.setSpec
def check_status(self, status):
if status == 'deleted':
return True
return False
|
librasungirl/openthread
|
tests/toranj/test-033-mesh-local-prefix-change.py
|
Python
|
bsd-3-clause
| 3,956
| 0.000758
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description:
#
# This test covers the situation where a node attaches to a parent with a different mesh-local prefix. It verifies
# that the attaching node adopts the parent's mesh-local prefix and that the RLOC addresses on the node are correctly
# filtered (by wpantund).
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
node1 = wpan.Node()
node2 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
NET_NAME = 'ml-change'
CHANNEL = 11
PANID = '0x1977'
XPANID = '1020031510006016'
KEY = '0123456789abcdeffecdba9876543210'
ML_PREFIX_1 = 'fd00:1::'
ML_PREFIX_2 = 'fd00:2::'
# Form a network on node1
node1.form(
NET_NAME,
channel=CHANNEL,
panid=PANID,
xpanid=XPANID,
key=KEY,
mesh_local_prefix=ML_PREFIX_1,
)
# On node2, form a network with same parameters but a different mesh-local
# prefix
node2.form(
NET_NAME,
channel=CHANNEL,
panid=PANID,
xpanid=XPANID,
key=KEY,
mesh_local_prefix=ML_PREFIX_2,
)
# Node 2 is expected to attach to node1 and adopt the mesh-local prefix
# from node1
verify(node2.is_associated())
verify(
node2.get(wpan.WPAN_IP6_MESH_LOCAL_PREFIX) == node1.get(
wpan.WPAN_IP6_MESH_LOCAL_PREFIX))
# Ensure that there are only two addresses on the node2 (link-local and mesh-local address) and that RLOC
# address is correctly filtered (by wpantund).
verify(len(wpan.parse_list(node2.get(wpan.WPAN_IP6_ALL_ADDRESSES))) == 2)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
|
kfrye/SMM-MPI
|
utilities/results/ptdf_export.py
|
Python
|
gpl-3.0
| 2,213
| 0.004067
|
#!/usr/bin/python
import json
class Tests(object):
def __init__(self, tests, name):
self.tests = tests
self.name = name
self.export_tests()
def export_tests(self):
with open(self.name + ".ptdf", "w+") as ptdf_file:
ptdf_file.write("Application " + self.name + '\n')
ptdf_file.write('Resource "wyeast cluster" grid|machine\n')
ptdf_file.write('Resource "v3.3.1" build\n')
ptdf_file.write('Resource "Linux version 3.17.4-301.fc21.x86_64" environment\n')
ptdf_file.write('Resource "self generated" dataFiles\n')
ptdf_file.write('Resource "whole time" time\n')
ptdf_file.write('Resource ext4 fileSystem\n')
ptdf_file.write('Resource "self instrumentation" perfToolName\n')
            ptdf_file.write('Resource "time in seconds" metric\n')
for test_dictionary in self.tests:
execution = self.name.lower() + '-' + str(test_dictionary['START_TIME'])
ptdf_file.write("Execution " + execution + ' ' + self.name + '\n')
|
for key in test_dictionary:
if key != 'TIME_IN_SECONDS':
ptdf_file.write("ResourceAttribute " + execution + ' ' +
key.lower() + ' "' + str(test_dictionary[key]) + '" string\n')
ptdf_file.write('PerfResult ' + execution +
' "wyeast cluster,v3.3.1,Linux version 3.17.4-301.fc21.x86_64,self generated,' +
execution + ',whole time,ext4" "self instrumentation" "time in seconds" ' +
str(test_dictionary['TIME_IN_SECONDS']) + ' s ' +
str(test_dictionary['START_TIME']) + ' noValue\n')
class Matrix:
def __init__(self, tests):
self.all_data = tests
self.process_matrix()
def process_matrix(self):
for test_type in self.all_data:
test_name = test_type.upper()
data = self.all_data[test_type]
Tests(data, test_name)
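# For reference, a hypothetical minimal data.txt accepted by the classes above:
# each top-level key is a test type, every record needs at least START_TIME and
# TIME_IN_SECONDS, and every key except TIME_IN_SECONDS is also emitted as a
# ResourceAttribute line, e.g.
#
#   {"smm": [{"START_TIME": 1423000000, "TIME_IN_SECONDS": 12.5, "NODES": 4}]}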
with open("data.txt") as json_file:
json_data = json.load(json_file)
Matrix(json_data)
|
chisholm/cti-pattern-validator
|
stix2patterns/grammars/STIXPatternListener.py
|
Python
|
bsd-3-clause
| 91
| 0.010989
|
# Update or remove for 2.0.0
from ..v20.grammars.STIXPatternListener import *  # noqa: F401
|
kjedruczyk/phabricator-tools
|
py/aon/aoncmd_taskupdate.py
|
Python
|
apache-2.0
| 5,958
| 0
|
"""Update a task in maniphest.
you can use the 'task id' output from the 'arcyon task-create' command as input
to this command.
usage examples:
update task '99' with a new title, only show id:
$ arcyon task-update 99 -t 'title' --format-id
99
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# aoncmd_taskupdate
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import phlcon_maniphest
import phlcon_project
import phlcon_user
import phlsys_makeconduit
def getFromfilePrefixChars():
return ""
def setupParser(parser):
# make a list of priority names in increasing order of importance
priority_name_list = phlcon_maniphest.PRIORITIES.keys()
priority_name_list.sort(
key=lambda x: phlcon_maniphest.PRIORITIES[x])
priorities = parser.add_argument_group(
'optional priority arguments',
'use any of ' + textwrap.fill(
str(priority_name_list)))
output_group = parser.add_argument_group(
'Output format arguments',
'Mutually exclusive, defaults to "--format-summary"')
output = output_group.add_mutually_exclusive_group()
opt = parser.add_argument_group(
'Optional task arguments',
'You can supply these later via the web interface if you wish')
priorities.add_argument(
'--priority',
'-p',
choices=priority_name_list,
metavar="PRIORITY",
default=None,
type=str,
help="the priority or importance of the task")
parser.add_argument(
'id',
metavar='INT',
help='the id of the task',
type=str)
parser.add_argument(
'--title',
'-t',
metavar='STRING',
help='the short title of the task',
default=None,
type=str)
opt.add_argument(
'--description',
'-d',
metavar='STRING',
help='the long description of the task',
default=None,
type=str)
opt.add_argument(
'--owner',
'-o',
metavar='USER',
help='the username of the owner',
type=str)
|
    opt.add_argument(
'--ccs',
'-c',
nargs="*",
metavar='USER',
help='a list of usernames to cc on the task',
type=str)
opt.add_argument(
'--projects',
nargs="*",
metavar='PROJECT',
default=[],
help='a list of project names to add the task to',
type=str)
opt.add_argument(
'--comment',
'-m',
metavar='STRING',
help='an optional comment to make on the task',
default=None,
type=str)
output.add_argument(
'--format-summary',
action='store_true',
help='will print a human-readable summary of the result.')
output.add_argument(
'--format-id',
action='store_true',
help='will print just the id of the new task, for scripting.')
output.add_argument(
'--format-url',
action='store_true',
help='will print just the url of the new task, for scripting.')
phlsys_makeconduit.add_argparse_arguments(parser)
def process(args):
if args.title and not args.title.strip():
print('you must supply a non-empty title', file=sys.stderr)
return 1
conduit = phlsys_makeconduit.make_conduit(
args.uri, args.user, args.cert, args.act_as_user)
# create_task expects an integer
priority = None
if args.priority is not None:
priority = phlcon_maniphest.PRIORITIES[args.priority]
# conduit expects PHIDs not plain usernames
user_phids = phlcon_user.UserPhidCache(conduit)
if args.owner:
user_phids.add_hint(args.owner)
if args.ccs:
user_phids.add_hint_list(args.ccs)
owner = user_phids.get_phid(args.owner) if args.owner else None
ccs = [user_phids.get_phid(u) for u in args.ccs] if args.ccs else None
# conduit expects PHIDs not plain project names
projects = None
if args.projects:
project_to_phid = phlcon_project.make_project_to_phid_dict(conduit)
projects = [project_to_phid[p] for p in args.projects]
result = phlcon_maniphest.update_task(
conduit,
args.id,
args.title,
args.description,
priority,
owner,
ccs,
projects,
args.comment)
if args.format_id:
print(result.id)
elif args.format_url:
print(result.uri)
else: # args.format_summary:
message = (
"Updated task '{task_id}', you can view it at this URL:\n"
" {url}"
).format(
task_id=result.id,
url=result.uri)
print(message)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
jcalazan/glucose-tracker
|
glucosetracker/subscribers/migrations/0001_initial.py
|
Python
|
mit
| 1,627
| 0.00799
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Subscriber'
db.create_table(u'subscribers_subscriber', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('source_ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
))
db.send_create_signal(u'subscribers', ['Subscriber'])
def backwards(self, orm):
# Deleting model 'Subscriber'
db.delete_table(u'subscribers_subscriber')
models = {
u'subscribers.subscriber': {
'Meta': {'ordering': "['-created']", 'object_name': 'Subscriber'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
|
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'source_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'})
}
}
    complete_apps = ['subscribers']
|
cvegaj/ElectriCERT
|
venv3/lib/python3.6/site-packages/pbr/tests/test_commands.py
|
Python
|
gpl-3.0
| 3,688
| 0.000271
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
from testtools import content
from pbr.tests import base
class TestCommands(base.BaseTestCase):
def test_custom_build_py_command(self):
"""Test custom build_py command.
Test that a custom subclass of the build_py command runs when listed in
the commands [global] option, rather than the normal build command.
"""
stdout, stderr, return_code = self.run_setup('build_py')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Running custom build_py command.', stdout)
self.assertEqual(0, return_code)
def test_custom_deb_version_py_command(self):
"""Test custom deb_version command."""
stdout, stderr, return_code = self.run_setup('deb_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting deb version', stdout)
self.assertEqual(0, return_code)
def test_custom_rpm_version_py_command(self):
"""Test custom rpm_version command."""
stdout, stderr, return_code = self.run_setup('rpm_version')
self.addDetail('stdout', content.text_content(stdout))
self.addDetail('stderr', content.text_content(stderr))
self.assertIn('Extracting rpm version', stdout)
self.assertEqual(0, return_code)
def test_freeze_command(self):
"""Test that freeze output is sorted in a case-insensitive manner."""
stdout, stderr, return_code = self.run_pbr('freeze')
self.assertEqual(0, return_code)
pkgs = []
for l in stdout.split('\n'):
pkgs.append(l.split('==')[0].lower())
pkgs_sort = sorted(pkgs[:])
self.assertEqual(pkgs_sort, pkgs)
|
Roastero/Openroast
|
openroast/views/recipeeditorwindow.py
|
Python
|
gpl-3.0
| 19,366
| 0.001962
|
# -*- coding: utf-8 -*-
# Roastero, released under GPLv3
import os
import json
import time
import functools
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from openroast import tools
from openroast.views import customqtwidgets
from openroast import utils as utils
class RecipeEditor(QtWidgets.QDialog):
def __init__(self, recipeLocation=None):
super(RecipeEditor, self).__init__()
# Define main window for the application.
self.setWindowTitle('Openroast')
self.setMinimumSize(800, 600)
self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.create_ui()
self.recipe = {}
self.recipe["steps"] = [{'fanSpeed': 5, 'targetTemp': 150,
'sectionTime': 0}]
if recipeLocation:
self.load_recipe_file(recipeLocation)
self.preload_recipe_information()
else:
self.preload_recipe_steps(self.recipeSteps)
def create_ui(self):
"""A method used to create the basic ui for the Recipe Editor Window"""
# Create main layout for window.
self.layout = QtWidgets.QGridLayout(self)
self.layout.setRowStretch(1, 3)
# Create input fields.
self.create_input_fields()
self.layout.addLayout(self.inputFieldLayout, 0, 0, 1, 2)
# Create big edit boxes.
self.create_big_edit_boxes()
self.layout.addLayout(self.bigEditLayout, 1, 0, 1, 2)
# Create Bottom Buttons.
self.create_bottom_buttons()
self.layout.addLayout(self.bottomButtonLayout, 2, 0, 1, 2)
def create_input_fields(self):
"""Creates all of the UI components for the top of the Recipe Editor
Window."""
# Create layout for section.
self.inputFieldLayout = QtWidgets.QGridLayout()
# Create labels for fields.
recipeNameLabel = QtWidgets.QLabel("Recipe Name: ")
recipeCreatorLabel = QtWidgets.QLabel("Created by: ")
|
recipeRoastTypeLabel = QtWidgets.QLabel("Roast Type: ")
beanRegionLabel = QtWidgets.QLabel("Bean Region: ")
beanCountryLabel = QtWidgets.QLabel("Bean Country: ")
beanLinkLabel = QtWidgets.QLabel("Bean Link: ")
|
beanStoreLabel = QtWidgets.QLabel("Bean Store Name: ")
# Create input fields.
self.recipeName = QtWidgets.QLineEdit()
self.recipeCreator = QtWidgets.QLineEdit()
self.recipeRoastType = QtWidgets.QLineEdit()
self.beanRegion = QtWidgets.QLineEdit()
self.beanCountry = QtWidgets.QLineEdit()
self.beanLink = QtWidgets.QLineEdit()
self.beanStore = QtWidgets.QLineEdit()
# Remove focus from input boxes.
self.recipeName.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeCreator.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.recipeRoastType.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanRegion.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanCountry.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanLink.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
self.beanStore.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)
# Add objects to the inputFieldLayout
self.inputFieldLayout.addWidget(recipeNameLabel, 0, 0)
self.inputFieldLayout.addWidget(self.recipeName, 0, 1)
self.inputFieldLayout.addWidget(recipeCreatorLabel, 1, 0)
self.inputFieldLayout.addWidget(self.recipeCreator, 1, 1)
self.inputFieldLayout.addWidget(recipeRoastTypeLabel, 2, 0)
self.inputFieldLayout.addWidget(self.recipeRoastType, 2, 1)
self.inputFieldLayout.addWidget(beanRegionLabel, 3, 0)
self.inputFieldLayout.addWidget(self.beanRegion, 3, 1)
self.inputFieldLayout.addWidget(beanCountryLabel, 4, 0)
self.inputFieldLayout.addWidget(self.beanCountry, 4, 1)
self.inputFieldLayout.addWidget(beanLinkLabel, 5, 0)
self.inputFieldLayout.addWidget(self.beanLink, 5, 1)
self.inputFieldLayout.addWidget(beanStoreLabel, 6, 0)
self.inputFieldLayout.addWidget(self.beanStore, 6, 1)
def create_big_edit_boxes(self):
"""Creates the Bottom section of the Recipe Editor Window. This method
creates the Description box and calls another method to make the
recipe steps table."""
# Create big edit box layout.
self.bigEditLayout = QtWidgets.QGridLayout()
# Create labels for the edit boxes.
recipeDescriptionBoxLabel = QtWidgets.QLabel("Description: ")
recipeStepsLabel = QtWidgets.QLabel("Steps: ")
# Create widgets.
self.recipeDescriptionBox = QtWidgets.QTextEdit()
self.recipeSteps = self.create_steps_spreadsheet()
# Add widgets to layout.
self.bigEditLayout.addWidget(recipeDescriptionBoxLabel, 0, 0)
self.bigEditLayout.addWidget(self.recipeDescriptionBox, 1, 0)
self.bigEditLayout.addWidget(recipeStepsLabel, 0, 1)
self.bigEditLayout.addWidget(self.recipeSteps, 1, 1)
def create_bottom_buttons(self):
"""Creates the button panel on the bottom of the Recipe Editor
Window."""
# Set bottom button layout.
self.bottomButtonLayout = QtWidgets.QHBoxLayout()
self.bottomButtonLayout.setSpacing(0)
# Create buttons.
self.saveButton = QtWidgets.QPushButton("SAVE")
self.closeButton = QtWidgets.QPushButton("CLOSE")
# Assign object names to the buttons.
self.saveButton.setObjectName("smallButton")
self.saveButton.clicked.connect(self.save_recipe)
self.closeButton.setObjectName("smallButton")
self.closeButton.clicked.connect(self.close_edit_window)
# Create Spacer.
self.spacer = QtWidgets.QWidget()
self.spacer.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# Add widgets to the layout.
self.bottomButtonLayout.addWidget(self.spacer)
self.bottomButtonLayout.addWidget(self.closeButton)
self.bottomButtonLayout.addWidget(self.saveButton)
def create_steps_spreadsheet(self):
"""Creates Recipe Steps table. It does not populate the table in this
method."""
recipeStepsTable = QtWidgets.QTableWidget()
recipeStepsTable.setShowGrid(False)
recipeStepsTable.setAlternatingRowColors(True)
recipeStepsTable.setCornerButtonEnabled(False)
recipeStepsTable.horizontalHeader().setSectionResizeMode(1)
recipeStepsTable.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
# Steps spreadsheet
recipeStepsTable.setColumnCount(4)
recipeStepsTable.setHorizontalHeaderLabels(["Temperature",
"Fan Speed", "Section Time", "Modify"])
return recipeStepsTable
def close_edit_window(self):
"""Method used to close the Recipe Editor Window."""
self.close()
def preload_recipe_steps(self, recipeStepsTable):
"""Method that just calls load_recipe_steps() with a table specified and
uses the pre-existing loaded recipe steps in the object."""
steps = self.recipe["steps"]
self.load_recipe_steps(recipeStepsTable, steps)
def load_recipe_steps(self, recipeStepsTable, steps):
"""Takes two arguments. One being the table and the second being the
rows you'd like to add. It does not clear the table and simply adds the
rows on the bottom if there are exiting rows."""
# Create spreadsheet choices
fanSpeedChoices = [str(x) for x in range(1,10)]
targetTempChoices = ["Cooling"] + [str(x) for x in range(150, 551, 10)]
# loop through recipe and load each step
for row in range(len(steps)):
recipeStepsTable.insertRow(recipeStepsTable.rowCount())
# Temperature Value
sectionTempWidget = customqtwidgets.ComboBoxNoWheel()
sectionTempWidget.setObjectName("recipeEditCombo")
sectionTempWidget.addItems(targetTempChoices)
sectionTempWidget.insertSeparator(1)
if 'targetTemp' in steps[ro
|
Baguage/pyqualtrics
|
examples/import_panel_example.py
|
Python
|
apache-2.0
| 1,983
| 0.002017
|
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyqualtrics import Qualtrics
import os
user = None # os.environ["QUALTRICS_USER"]
token = None # os.environ["QUALTRICS_TOKEN"]
if __name__ == "__main__":
print "This is an example of panel import"
    print "Make sure you have set the QUALTRICS_USER, QUALTRICS_TOKEN and QUALTRICS_LIBRARY_ID environment variables"
    # Note: if user and token are None, the QUALTRICS_USER and QUALTRICS_TOKEN environment variables will be used instead
qualtrics = Qualtrics(user, token)
library_id = os.environ["QUALTRICS_LIBRARY_ID"]
panel_id = qualtrics.importJsonPanel(
library_id,
Name="New Panel Created by PyQualtrics library (DELETE ME)",
panel=[
{"Email": "pyqualtrics+1@gmail.com", "FirstName": "PyQualtrics", "LastName": "Library", "SubjectID": "123"},
{"Email": "pyqualtrics+2@gmail.com", "FirstName": "PyQualtrics2", "LastName": "Library2"}
],
headers=["Email", "FirstName", "LastName", "ExternalRef", "SubjectID"],
AllED=1)
if qualtrics.last_error_message:
print "Error creating panel: " + qualtrics.last_error_message
else:
print "Panel created successfully, PanelID: " + panel_id
|
aleaxit/pysolper
|
latrop/models.py
|
Python
|
apache-2.0
| 10,073
| 0.014196
|
# -*- coding: utf-8 -*-
"""
models
~~~~~~~~~~~~~~~~~~~~
Top-level models for the entire app.
:copyright: 2011 by Google, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
# standard Python library imports
import datetime
import logging
import urllib
# App Engine imports
from google.appengine.ext import db
from google.appengine.ext import blobstore
from django.utils import simplejson
# local imports
import timesince
# roles the system distinguishes for each user
USER_ROLES = ('Applicant', 'Permit Approver')
# Cases may be ordered lexicographically by state; the first three characters
# of the state string (value in the dict) will be stripped before display.
CASE_STATES = {'incomplete': '00 Incomplete',
'submitted': '10 Submitted For Review',
'under_review': '20 Review Under Way',
'needs_work': '30 Needs Work',
'approved': '40 Approved',
'denied': '50 Rejected',
}
# the case states in which an applicant can upload files and/or notes
APPLICANT_EDITABLE = set(CASE_STATES[x]
for x in 'incomplete submitted needs_work'.split())
# the kind of actions that cause a case to change
CASE_ACTIONS = ('Create', 'Update', 'Submit',
'Review', 'Reassign', 'Comment', 'Approve', 'Deny')
# documents an applicant must upload to submit a case for approver review
PURPOSES = (
'Site Diagram',
'Electrical Diagram',
'Diagram Notes'
)
class ModelEncoder(simplejson.JSONEncoder):
def default(self, obj):
"""Allow JSON encoding of a db.Model instance."""
try:
return obj.json()
except (AttributeError, TypeError):
return simplejson.JSONEncoder.default(self, obj)
class JurisModel(db.Model):
"""A db.Model with a jurisdiction attached (abstract base class)."""
juris = db.StringProperty(required=True)
timestamp = db.DateTimeProperty(auto_now_add=True, required=True)
@classmethod
def get_all(cls):
return cls.all().order('-timestamp')
@property
def timesince(self):
"""Readable form for this object's timestamp."""
return timesince.timesince(self.timestamp)
class LatropMessage(JurisModel):
"""A message received by the latrop."""
msg = db.StringProperty(required=True)
@classmethod
def create(cls, juris, msg):
obj = cls(juris=juris, msg=msg)
obj.put()
# TODO: the other models must be changed to be appropriate for the latrop
# (mandatory juris, factories, different methods, and so on).
class User(JurisModel):
"""A user of this permiting application."""
# TODO: add authentication mechanisms / tokens
# email works as the "primary key" to identify a user
email = db.EmailProperty(required=True)
# application logic ensures a role gets assigned when a new user logs in
# for the first time, but the User object is first created w/o a role
role = db.StringProperty(choices=USER_ROLES, required=False)
def json(self):
"""Return JSON-serializable form."""
return {'cls': 'User', 'email': self.email, 'role': self.role}
@classmethod
def get_by_email(cls, email):
return cls.all().filter('email = ', email).get()
@property
def can_upload(self):
return self.role == 'Applicant'
@property
def can_approve(self):
return self.role == 'Permit Approver'
def __eq__(self, other):
return other is not None and self.email == other.email
def __ne__(self, other):
return other is None or self.email != other.email
class Case(JurisModel):
"""A project for which approval is requested."""
address = db.StringProperty(required=True)
creation_date = db.DateProperty(required=True, auto_now_add=True)
owner = db.ReferenceProperty(User, required=True)
state = db.StringProperty(required=True, choices=CASE_STATES.values())
def json(self):
"""Return JSON-serializable form."""
return {'cls': 'Case', 'address': self.address,
'owner': self.owner.json(), 'state': self.state}
@classmethod
def query_by_owner(cls, user):
"""Returns a db.Query for all cases owned by this user."""
return cls.all().filter('owner = ', user)
@classmethod
def query_under_review(cls):
"""Returns a db.Query for all cases under review."""
return cls.all().filter('state = ', CASE_STATES['under_review'])
@classmethod
def query_submitted(cls):
"""Returns a db.Query for all cases in the submitted state."""
return cls.all().filter('state = ', CASE_STATES['submitted'])
@classmethod
def reviewed_by(cls, user):
"""Returns two lists: cases being reviewed by the user vs by other users."""
these_cases, other_cases = [], []
for case in cls.query_under_review().run():
if case.reviewer == user:
these_cases.append(case)
else:
other_cases.append(case)
return these_cases, other_cases
@classmethod
def create(cls, owner, **k):
"""Creates and returns a new case."""
case = cls(state=CASE_STATES['incomplete'], owner=owner, **k)
case.put()
CaseAction.make(action='Create', case=case, actor=owner)
return case
def submit(self, actor, notes):
"""Submits the case for review."""
self.state = CASE_STATES['submitted']
self.put()
CaseAction.make(action='Submit', case=self, actor=actor, notes=notes)
def review(self, approver):
"""Assigns the case for review by the given approver."""
previous_reviewer = self.reviewer
if previous_reviewer == approver:
# case was already under review by the given approver, no-op
return
# reviewer assignment or change requires actual action, state change
self.state = CASE_STATES['under_review']
self.put()
CaseAction.make(action='Review', case=self, actor=approver)
def approve(self, actor, notes):
"""Marks the case as approved."""
self.state = CASE_STATES['approved']
self.put()
CaseAction.make(action='Approve', case=self, actor=actor, notes=notes)
def comment(self, actor, notes):
"""Returns the case to the applicant requesting changes."""
self.state = CASE_STATES['needs_work']
self.put()
CaseAction.make(action='Comment', case=self, actor=actor, notes=notes)
@property
def visible_state(self):
"""Returns the display form of this case's state."""
return self.state[3:]
@property
def latest_action(self):
"""Returns the latest action recorded on this case."""
return CaseAction.query_by_case(self).order('-timestamp').get()
    @property
def last_modified(self):
"""Returns the timestamp at which this case was last modified."""
return datetime.datetime.now() - self.latest_action.timestamp
@property
def applicant_can_edit(self):
"""True iff an applicant can currently modify this case."""
        return self.state in APPLICANT_EDITABLE
@property
def reviewer(self):
"""Returns the case's current reviewer, or None."""
if self.state != CASE_STATES['under_review']:
return None
return CaseAction.query_by_case(self, 'Review').get().actor
@property
def submit_blockers(self):
"""Returns a list of the reasons the case may not yet be submitted (an
empty list if the case may be submitted).
"""
blockers = []
for purpose in PURPOSES:
if not self.get_document(purpose):
blockers.append('Missing %s' % purpose)
return blockers
def get_document(self, purpose):
"""Returns the document from this case for the given purpose."""
q = CaseAction.query_by_case(self, 'Update').filter('purpose =', purpose)
return q.get()
class CaseAction(JurisModel):
"""Immutable once fully created (by the `make` classmethod)."""
action = db.StringProperty(required=True, choices=CASE_ACTIONS)
case = db.ReferenceProperty(Case, required=True)
actor = db.ReferenceProperty(User, required=True)
purpose = db.StringProperty(required=False, choices=PURPOSES)
n
|
zaquestion/vendttp
|
server/CloseableThread.py
|
Python
|
gpl-2.0
| 447
| 0.035794
|
import threading
class CloseableThread(threading.Thread):
  running = False

  def __init__(self, group = None, target = None, name = None, args=(), kwargs={}):
    threading.Thread.__init__(self, group = group, target = target, name = name, args=args, kwargs=kwargs)

  def start(self):
    self.running = True
    threading.Thread.start(self)

  def stop(self, onStop=None):
    self.running = False
    if (onStop):
      onStop()
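
# --- Editor's usage sketch (not part of the original file) ---
# Minimal example of driving the thread; the worker function and the
# on-stop callback are illustrative placeholders.
if __name__ == "__main__":
  import time

  def worker():
    time.sleep(0.1)

  def on_stop():
    print("worker stopped")

  t = CloseableThread(target=worker)
  t.start()
  t.join()
  t.stop(onStop=on_stop)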
|
matrix-org/synapse
|
synapse/config/redis.py
|
Python
|
apache-2.0
| 1,857
| 0.000539
|
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.config._base import Config
from synapse.util.check_dependencies import check_requirements
class RedisConfig(Config):
section = "redis"
def read_config(self, config, **kwargs):
redis_config = config.get("redis") or {}
self.redis_enabled = redis_config.get("enabled", False)
if not self.redis_enabled:
return
check_requirements("redis")
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
self.redis_password = redis_config.get("password")
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
# Configuration for Redis when using workers. This *must* be enabled when
# using workers (unless using old style direct TCP configuration).
#
redis:
# Uncomment the below to enable Redis support.
#
#enabled: true
# Optional host and port to use to connect to redis. Defaults to
# localhost and 6379
#
#host: localhost
#port: 6379
# Optional password if configured on the Redis instance
#
#password: <secret_password>
"""
|
sroche0/mfl-pyapi
|
modules/stats.py
|
Python
|
gpl-3.0
| 929
| 0.003229
|
import bench
class Stats(bench.Bench):
def __init__(self, league):
bench.Bench.__init__(self)
self.league = league
self.type = 'stats'
def list(self, team=False, player=False):
"""
Lists all stats for the current season to date. Can be filtered by team or by player. Default will return stat
dump for whole league
:param team: Unique ID of the team to filter for
        :param player: Unique ID of the player to filter for
:return:
"""
def get_player_stats(self, player, week=False):
"""
        Lists the stat breakdown by week for a given player. Can also be filtered to only return a specific week or a
        range of weeks
:param player: Unique ID of the player to filter for
:param week: Optional. Can be a single week or a range ex: 1-4. If blank will default to season to date
:return:
"""
|
ludbek/amenu
|
setup.py
|
Python
|
mit
| 546
| 0.001832
|
from distutils.core import setup
setup(
name='amenu',
version='1.0.3',
author='ludbek',
author_email='sth.srn@gmail.com',
packages= ['amenu', 'amenu.migrations', 'amenu.templatetags'],
scripts=[],
url='https://github.com/ludbek/amenu',
license='LICENSE.txt',
description='A menu plugin for DjangoCMS.',
long_description=open('README.md').read(),
install_requires=[
"South == 1.0.1",
"django-cms >= 3.0.7",
"django
|
-classy-tags == 0.5.2",
],
include_package_data=True,
)
|
achillesrasquinha/spockpy
|
spockpy/app/config/server.py
|
Python
|
apache-2.0
| 381
| 0.013123
|
# imports - compatibility imports
from __future__ import absolute_import
# imports - standard imports
import os
# imports - module imports
from spockpy.app.config import BaseConfig
class ServerConfig(BaseConfig):
class Path(BaseConfig.Path):
ABSPATH_TEMPLATES = os.path.join(BaseConfig.Path.ABSPATH_VIEWS, 'templates')
HOST = '0.0.0.0'
    PORT = int(os.getenv('PORT', 3000))
|
DOV-Vlaanderen/pydov
|
tests/conftest.py
|
Python
|
mit
| 15,105
| 0
|
"""Module grouping session scoped PyTest fixtures."""
import datetime
import glob
import os
import tempfile
import owslib
import pytest
from _pytest.monkeypatch import MonkeyPatch
from owslib.etree import etree
from owslib.feature.schema import _construct_schema, _get_elements
from owslib.iso import MD_Metadata
from owslib.util import ResponseWrapper, findall
from owslib.wfs import WebFeatureService
import pydov
from pydov import Hooks
from pydov.util import owsutil
from pydov.util.caching import GzipTextFileCache, PlainTextFileCache
from pydov.util.dovutil import build_dov_url
def pytest_runtest_setup():
pydov.hooks = Hooks()
def pytest_configure(config):
config.addinivalue_line("markers",
"online: mark test that requires internet access")
@pytest.fixture(scope='module')
def monkeymodule():
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
@pytest.fixture(scope='module')
def mp_wfs(monkeymodule):
"""Monkeypatch the call to the remote GetCapabilities request.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
"""
def read(*args, **kwargs):
with open('tests/data/util/owsutil/wfscapabilities.xml', 'r',
encoding='utf-8') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeymodule.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture(scope='module')
def wfs(mp_wfs):
"""PyTest fixture providing an instance of a WebFeatureService based on
a local copy of a GetCapabilities request.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
Returns
-------
owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
return WebFeatureService(
url=build_dov_url('geoserver/wfs'), version="1.1.0")
@pytest.fixture()
def mp_remote_fc_notfound(monkeypatch):
"""Monkeypatch the call to get an inexistent remote featurecatalogue.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def __get_remote_fc(*args, **kwargs):
with open('tests/data/util/owsutil/fc_featurecatalogue_notfound.xml',
'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.owsutil, '__get_remote_fc', __get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_md(wfs, monkeymodule, request):
"""Monkeypatch the call to get the remote metadata of the layer.
This monkeypatch requires a module variable ``location_md_metadata``
with the path to the md_metadata file on disk.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_md(*args, **kwargs):
file_path = getattr(request.module, "location_md_metadata")
        with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return MD_Metadata(etree.fromstring(data).find(
'./{http://www.isotc211.org/2005/gmd}MD_Metadata'))
    monkeymodule.setattr(pydov.util.owsutil, 'get_remote_metadata',
__get_remote_md)
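# --- Editor's note (not part of the original conftest) ---
# A test module that uses the fixture above only has to declare the module
# variable it reads; the path below is illustrative:
#
# location_md_metadata = 'tests/data/types/boring/md_metadata.xml'
#
# def test_get_description(mp_remote_md, wfs):
#     ...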
@pytest.fixture(scope='module')
def mp_remote_fc(monkeymodule, request):
"""Monkeypatch the call to get the remote feature catalogue.
This monkeypatch requires a module variable
``location_fc_featurecatalogue`` with the path to the fc_featurecatalogue
file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_fc(*args, **kwargs):
file_path = getattr(request.module, "location_fc_featurecatalogue")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil, '__get_remote_fc',
__get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_describefeaturetype(monkeymodule, request):
"""Monkeypatch the call to a remote DescribeFeatureType.
This monkeypatch requires a module variable
``location_wfs_describefeaturetype`` with the path to the
wfs_describefeaturetype file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_describefeaturetype(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_describefeaturetype")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil,
'__get_remote_describefeaturetype',
__get_remote_describefeaturetype)
@pytest.fixture(scope='module')
def mp_get_schema(monkeymodule, request):
def __get_schema(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_describefeaturetype")
with open(file_path, 'r') as f:
data = f.read()
if not isinstance(data, bytes):
data = data.encode('utf-8')
root = etree.fromstring(data)
typename = root.find(
'./{http://www.w3.org/2001/XMLSchema}element').get('name')
if ":" in typename:
typename = typename.split(":")[1]
type_element = findall(
root,
"{http://www.w3.org/2001/XMLSchema}element",
attribute_name="name",
attribute_value=typename,
)[0]
complex_type = type_element.attrib["type"].split(":")[1]
elements = _get_elements(complex_type, root)
nsmap = None
if hasattr(root, "nsmap"):
nsmap = root.nsmap
return _construct_schema(elements, nsmap)
monkeymodule.setattr(pydov.search.abstract.AbstractSearch, '_get_schema',
__get_schema)
@pytest.fixture(scope='module')
def wfs_getfeature(request):
"""PyTest fixture providing a WFS GetFeature response.
This monkeypatch requires a module variable ``location_wfs_getfeature``
with the path to the wfs_getfeature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
str
WFS response of a GetFeature call to the dov-pub:Boringen layer.
"""
file_path = getattr(request.module, "location_wfs_getfeature")
with open(file_path, 'r') as f:
data = f.read()
return data
@pytest.fixture(scope='module')
def wfs_feature(request):
"""PyTest fixture providing an XML of a WFS feature element.
This monkeypatch requires a module variable ``location_wfs_feature``
with the path to the wfs_feature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
etree.Element
XML element representing a single record of the Boring WFS layer.
"""
file_path = getattr(request.module, "location_wfs_feature")
with open(file_path, 'r') as f:
return etree.fromstring(f.read())
@pytest.fixture(scope='module')
def mp_remote_wfs_feature(monkeymodule, request):
"""Monkeyp
|
gambl0r/roguebowl
|
rbowl.py
|
Python
|
unlicense
| 14,250
| 0.007509
|
import libtcod.libtcodpy as libtcod
from random import randint
SCREEN_WIDTH = 40
SCREEN_HEIGHT = 20
LIMIT_FPS = 20
MAP_WIDTH = 80
MAP_HEIGHT = 45
ROOM_MAX_SIZE = 10
ROOM_MIN_SIZE = 10
MAX_ROOMS = 30
MAX_ROOM_MONSTERS = 3
FOV_ALGO = libtcod.FOV_SHADOW
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 8
void_color = libtcod.Color(0, 0, 0)
color_pairs = {
"void": (libtcod.Color(0, 0, 0), libtcod.Color(0, 0, 0)),
"bg_wall": (libtcod.Color(25, 25, 25), libtcod.Color(50, 50, 25)),
"fg_wall": (libtcod.Color(50, 50, 50), libtcod.Color(75, 75, 50)),
"bg_floor": (libtcod.Color(50, 50, 50), libtcod.Color(75, 75, 50)),
"fg_floor": (libtcod.Color(75, 75, 75), libtcod.Color(100, 100, 75)),
"fg_stairs": (libtcod.Color(100, 100, 75), libtcod.Color(125, 125, 75)),
}
libtcod.console_set_custom_font('consolas_unicode_12x12.png', libtcod.FONT_LAYOUT_ASCII_INROW | libtcod.FONT_TYPE_GREYSCALE, nb_char_horiz=32, nb_char_vertic=64)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'rbowl', False)
libtcod.sys_set_fps(LIMIT_FPS)
class Game:
def __init__(self):
self.state = 'playing'
self.player_action = None
self.map = Map()
self.player = Object(self.map,
self.map.start_x,
self.map.start_y,
'@', 'player', blocks=True)
self.screen = Screen(self, self.map)
self.screen.move(self.map.start_x - SCREEN_WIDTH/2,
self.map.start_y - SCREEN_HEIGHT/2)
self.fov_recompute = True
self.con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
libtcod.console_set_default_foreground(self.con, libtcod.white)
self.pressed = set()
def run(self):
while not libtcod.console_is_window_closed():
self.screen.display(self.con)
for obj in self.map.objects:
obj.clear(self.con)
#handle keys and exit game if needed
action = self.handle_keys()
if action == 'exit':
break
if action is not None:
pass
def handle_keys(self):
key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED | libtcod.KEY_RELEASED)
if key.vk == libtcod.KEY_ESCAPE:
return 'exit'
elif key.vk == libtcod.KEY_CHAR:
if key.pressed:
self.pressed.add(key.c)
else:
try:
self.pressed.remove(key.c)
except KeyError:
pass
if ord('w') in self.pressed:
self.screen.move(0, -1)
elif ord('s') in self.pressed:
self.screen.move(0, 1)
elif ord('a') in self.pressed:
self.screen.move(-1, 0)
elif ord('d') in self.pressed:
self.screen.move(1, 0)
if self.state == 'playing':
            if libtcod.console_is_key_pressed(libtcod.KEY_UP):
self.player.move(0, -1)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_DOWN):
self.player.move(0, 1)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_LEFT):
self.player.move(-1, 0)
self.fov_recompute = True
elif libtcod.console_is_key_pressed(libtcod.KEY_RIGHT):
self.player.move(1, 0)
self.fov_recompute = True
else:
return None
return 'action'
class Rect:
"A rectangle on the map."
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x+w
self.y2 = y+h
def center(self):
center_x = (self.x1 + self.x2) / 2
center_y = (self.y1 + self.y2) / 2
return (center_x, center_y)
def intersect(self, other):
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y1)
class TileType:
"Types for tiles of the map, and its properties"
def __init__(self, char, fg_color_pair, bg_color_pair, blocked, block_sight=None):
self.char = char
self.fg_color, self.fg_color_lit = fg_color_pair
self.bg_color, self.bg_color_lit = bg_color_pair
self.blocked = blocked
self.block_sight = block_sight or blocked
tiletypes = {
'void': TileType(' ', color_pairs["void"], color_pairs["void"], True),
'floor': TileType('.', color_pairs["fg_floor"], color_pairs["bg_floor"], False),
'wall': TileType('#', color_pairs["fg_wall"], color_pairs["bg_wall"], True),
'up_stairs': TileType('<', color_pairs["fg_stairs"], color_pairs["bg_floor"], False),
'down_stairs': TileType('>', color_pairs["fg_stairs"], color_pairs["bg_floor"], False),
}
class Tile:
"Tile of the map, and its properties"
def __init__(self, type):
self.type = tiletypes[type]
self.explored = False
class Map:
def __init__(self, width=MAP_WIDTH, height=MAP_HEIGHT):
self.width = width
self.height = height
self.tiles = [[Tile('wall')
for y in range(self.height)]
for x in range(self.width)]
self.fov_map = libtcod.map_new(self.width, self.height)
self.objects = []
self.rooms = []
self.num_rooms = 0
for r in range(MAX_ROOMS):
w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
new_room = Rect(x, y, w, h)
failed = False
for other_room in self.rooms:
if new_room.intersect(other_room):
failed = True
break
if not failed:
self.create_room(new_room)
self.generate_room_objects(new_room)
(new_x, new_y) = new_room.center()
if self.num_rooms == 0:
self.start_x = new_x
self.start_y = new_y
else:
self.join_rooms(new_room, self.rooms[-1])
self.end_x = new_x
self.end_y = new_y
self.rooms.append(new_room)
self.num_rooms += 1
self.tiles[self.start_x][self.start_y].type = tiletypes['up_stairs']
self.tiles[self.end_x][self.end_y].type = tiletypes['down_stairs']
for y in range(self.height):
for x in range(self.width):
libtcod.map_set_properties(self.fov_map,
x, y,
not self.tiles[x][y].type.block_sight,
not self.tiles[x][y].type.blocked)
def add_object(self, obj):
self.objects.append(obj)
def remove_object(self, obj):
self.objects.remove(obj)
def generate_room_objects(self, room):
num_monsters = libtcod.random_get_int(0, 0, MAX_ROOM_MONSTERS)
for i in range(num_monsters):
x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
y = libtcod.random_get_int(0, room.y1+1, room.y2-1)
if self.is_blocked(x, y):
continue
pick = libtcod.random_get_int(0, 0, 100)
if pick < 50:
monster = Object(self, x, y, 'g', 'goblin', color=libtcod.green, blocks=True)
            elif pick < 80:
monster = Object(self, x, y, 'o', 'orc', color=libtcod.desaturated_green, blocks=True)
else:
monster = Object(self, x, y, 'T'
|
mre/tracker
|
face.py
|
Python
|
lgpl-3.0
| 3,699
| 0.012976
|
import cv, cv2
import rectangle
from numpy import concatenate
import logging
class Face(object):
def __init__(self, config = {}):
self.config = {
"top_offset" : 1.0,
"bottom_offset" : 1.0,
"left_offset" : 0.0,
"right_offset" : 0.0,
"haar_confidence" : 3,
"min_face_size" : (70,70),
"cascade_frontal" : "cascades/haarcascade_frontalface_default.xml",
"cascade_profile" : "cascades/haarcascade_profileface.xml"
}
self.set_config(config)
# Create the cascades. We use both, a frontal- and a profile face cascade
self.cascade_frontal = cv2.CascadeClassifier(self.config["cascade_frontal"])
self.cascade_profile = cv2.CascadeClassifier(self.config["cascade_profile"])
# Initially, we have no valid face detection.
self.face_positions = []
# In order to improve perfomance,
# keep the face position for a couple of frames.
# Find face again after a certain number of frames.
self.face_delay = 100
# Count how many frames have passed,
# since we last did a face detection
self.frames_passed = 0
def positions(self, img):
"""
Get all faces in an image.
Also apply some padding to remove the area next to the faces.
This improves both, performance and robustness of the hand search.
"""
        self.frames_passed += 1
# Speedup. Only redetect after a certain delay.
if self.faces_invalid():
self.recalculate(img)
return self.face_positions
def faces_invalid(self):
"""
Check if we can still use the old face positions or
        if the delay is over and we need to find the face again in the image.
"""
if not self.face_positions:
# No previous face detection. Declare invalid.
return True
if self.frames_passed > self.face_delay:
# The delay has passed. Invalidate previous detection
return True
# Everything ok. We can use the old detection.
return False
def recalculate(self, img):
"""
Try to redetect the face position.
"""
logging.debug("Face detector: Scanning...")
# Reset the frame counter
self.frames_passed = 0
# Invalidate previous detections
self.face_positions = None
rects = self.detect(img)
for r in rects:
x1, y1, x2, y2 = r
logging.info("Face detector: Found face at %s", r)
if rects != None:
self.face_positions = rects
def detect(self, img):
"""
Find blobs which match a given HAAR cascade.
"""
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#gray = cv2.equalizeHist(gray)
# Accumulate all detections in a list
r = self.detect_frontal(img) + self.detect_profile(img)
return r
def detect_frontal(self, img):
rects_frontal = self.cascade_frontal.detectMultiScale(img,
scaleFactor=1.1,
minNeighbors=self.config["haar_confidence"],
minSize=self.config["min_face_size"])
if len(rects_frontal) != 0:
# We found a frontal faces.
rects_frontal = rectangle.convert_from_wh(rects_frontal)
return rects_frontal.tolist()
else:
return []
def detect_profile(self, img):
# Detect faces turned sidewards
rects_profile = self.cascade_profile.detectMultiScale(img,
scaleFactor=1.2,
minNeighbors=self.config["haar_confidence"],
minSize=self.config["min_face_size"])
if len(rects_profile) != 0:
# OK, found profile faces.
rects_profile = rectangle.convert_from_wh(rects_profile)
return rects_profile.tolist()
else:
return []
def set_config(self, config):
"""
Load new settings at runtime
"""
for key in config:
self.config[key] = config[key]
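# --- Editor's usage sketch (not part of the original file) ---
# Assumes the HAAR cascade XML files from the default config exist on disk and
# that 'frame.png' is an image containing at least one face.
if __name__ == '__main__':
    detector = Face()
    frame = cv2.imread('frame.png')
    for (x1, y1, x2, y2) in detector.positions(frame):
        # Draw a green box around every detection and save the result.
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.imwrite('frame_faces.png', frame)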
|
acdh-oeaw/dig_ed_cat
|
digital_editions/settings/dev.py
|
Python
|
mit
| 533
| 0
|
from .base import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!i%7s@1+v&293zcy*kljuke=_l176nqpj2-3dtms()pw^et!we'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
tweiss1234/Cras
|
strings.py
|
Python
|
apache-2.0
| 824
| 0.003641
|
class Strings:
ADD_SUPERVISOR = "You are now connected with "
START_MONITORING = " wants to monitor you, click to allow"
ACCEPTED_MONITORING= "You started monitoring "
STOP_MONITORING_SUPERVISE = " canceled monitoring"
STOP_MONITORING_SUPERVISOR = " is no longer monitoring you"
APPLICATION_ANOMALY = " opened an unauthorized application"
    SPEED_ANOMALY = " drives faster than authorized. Speed: "
MONITOR_ACCEPTED = "Monitor accepted"
registration_id = "cRLRNiCkFPQ:APA91bENV-BxmM3iXhZ_DwlitVpI5nTvdqGhClq5K1M5sLIZ8aAca_EJnkX3MRl9p_tLGBGoUtvROR2gOVI5bDeTIegS-55C8DM-GAnGI0xdlHVTPM5P9fkSYEslS-EcOsK6Y6dAsPca"
    registration_other = "fWsYNQNkFfM:APA91bH_Rq5A1rYLapfmii62coTWgNvCMnqq1q8LIxsvNNByT-iPrU-EledwiKHyT7zzCFbPMkbqbZvdb-YVidkQq0u6jvOk_1RZsvK-Q1_XuM3gavyU63GvbgjNcgd5_Ws1Z-H4Xs_l"
|
FluidityProject/multifluids
|
tests/square-convection-parallel-trivial/src/genmesh.py
|
Python
|
lgpl-2.1
| 254
| 0
|
#!/usr/bin/env python
import fluidity.diagnostics.annulus_mesh as mesh
import fluidity.diagnostics.triangletools as tt
div = mesh.SliceCoordsConstant(0.0, 1.0, 3)
m = mesh.GenerateRectangleMesh(div, div)
tt.WriteTriangle(m, "square-structured-linear")
|
swiftstack/swift
|
swift/common/middleware/s3api/s3request.py
|
Python
|
apache-2.0
| 62,988
| 0
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
from collections import defaultdict, OrderedDict
from email.header import Header
from hashlib import sha1, sha256
import hmac
import re
import six
# pylint: disable-msg=import-error
from six.moves.urllib.parse import quote, unquote, parse_qsl
import string
from swift.common.utils import split_path, json, get_swift_info, \
close_if_possible, md5
from swift.common import swob
from swift.common.http import HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED, \
HTTP_NO_CONTENT, HTTP_UNAUTHORIZED, HTTP_FORBIDDEN, HTTP_NOT_FOUND, \
HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUEST_ENTITY_TOO_LARGE, \
HTTP_PARTIAL_CONTENT, HTTP_NOT_MODIFIED, HTTP_PRECONDITION_FAILED, \
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE, HTTP_LENGTH_REQUIRED, \
HTTP_BAD_REQUEST, HTTP_REQUEST_TIMEOUT, HTTP_SERVICE_UNAVAILABLE, \
HTTP_TOO_MANY_REQUESTS, HTTP_RATE_LIMITED, is_success
from swift.common.constraints import check_utf8
from swift.proxy.controllers.base import get_container_info
from swift.common.request_helpers import check_path_header
from swift.common.middleware.s3api.controllers import ServiceController, \
ObjectController, AclController, MultiObjectDeleteController, \
LocationController, LoggingStatusController, PartController, \
UploadController, UploadsController, VersioningController, \
UnsupportedController, S3AclController, BucketController, \
TaggingController
from swift.common.middleware.s3api.s3response import AccessDenied, \
InvalidArgument, InvalidDigest, BucketAlreadyOwnedByYou, \
RequestTimeTooSkewed, S3Response, SignatureDoesNotMatch, \
BucketAlreadyExists, BucketNotEmpty, EntityTooLarge, \
InternalError, NoSuchBucket, NoSuchKey, PreconditionFailed, InvalidRange, \
MissingContentLength, InvalidStorageClass, S3NotImplemented, InvalidURI, \
MalformedXML, InvalidRequest, RequestTimeout, InvalidBucketName, \
BadDigest, AuthorizationHeaderMalformed, SlowDown, \
AuthorizationQueryParametersError, ServiceUnavailable
from swift.common.middleware.s3api.exception import NotS3Request, \
BadSwiftRequest
from swift.common.middleware.s3api.utils import utf8encode, \
S3Timestamp, mktime, MULTIUPLOAD_SUFFIX
from swift.common.middleware.s3api.subresource import decode_acl, encode_acl
from swift.common.middleware.s3api.utils import sysmeta_header, \
    validate_bucket_name
from swift.common.middleware.s3api.acl_utils import handle_acl_header
# List of sub-resources that must be maintained as part of the HMAC
# signature string.
ALLOWED_SUB_RESOURCES = sorted([
'acl', 'delete', 'lifecycle', 'location', 'logging', 'notification',
'partNumber', 'policy', 'requestPayment', 'torrent', 'uploads', 'uploadId',
'versionId', 'versioning', 'versions', 'website',
    'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'response-content-language',
'response-content-type', 'response-expires', 'cors', 'tagging', 'restore'
])
MAX_32BIT_INT = 2147483647
SIGV2_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'
SIGV4_X_AMZ_DATE_FORMAT = '%Y%m%dT%H%M%SZ'
SERVICE = 's3' # useful for mocking out in tests
def _header_strip(value):
# S3 seems to strip *all* control characters
if value is None:
return None
stripped = _header_strip.re.sub('', value)
if value and not stripped:
# If there's nothing left after stripping,
# behave as though it wasn't provided
return None
return stripped
_header_strip.re = re.compile('^[\x00-\x20]*|[\x00-\x20]*$')
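# Editor's note (not part of the original module): for example,
# _header_strip('  \x01abc \x02 ') returns 'abc', while _header_strip('\x01\x02')
# returns None -- a value made up entirely of control characters is treated as
# if the header had not been provided at all.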
def _header_acl_property(resource):
"""
Set and retrieve the acl in self.headers
"""
def getter(self):
return getattr(self, '_%s' % resource)
def setter(self, value):
self.headers.update(encode_acl(resource, value))
setattr(self, '_%s' % resource, value)
def deleter(self):
self.headers[sysmeta_header(resource, 'acl')] = ''
return property(getter, setter, deleter,
doc='Get and set the %s acl property' % resource)
class HashingInput(object):
"""
wsgi.input wrapper to verify the hash of the input as it's read.
"""
def __init__(self, reader, content_length, hasher, expected_hex_hash):
self._input = reader
self._to_read = content_length
self._hasher = hasher()
self._expected = expected_hex_hash
def read(self, size=None):
chunk = self._input.read(size)
self._hasher.update(chunk)
self._to_read -= len(chunk)
short_read = bool(chunk) if size is None else (len(chunk) < size)
if self._to_read < 0 or (short_read and self._to_read) or (
self._to_read == 0 and
self._hasher.hexdigest() != self._expected):
self.close()
# Since we don't return the last chunk, the PUT never completes
raise swob.HTTPUnprocessableEntity(
                'The X-Amz-Content-SHA256 you specified did not match '
'what we received.')
return chunk
def close(self):
close_if_possible(self._input)
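def _hashing_input_example():
    # Editor's sketch (not part of the original module): happy-path use of
    # HashingInput -- the declared length and SHA-256 digest match the payload,
    # so read() returns the data without raising HTTPUnprocessableEntity.
    from io import BytesIO
    payload = b'hello world'
    body = HashingInput(BytesIO(payload), len(payload), sha256,
                        sha256(payload).hexdigest())
    assert body.read() == payload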
class SigV4Mixin(object):
"""
A request class mixin to provide S3 signature v4 functionality
"""
def check_signature(self, secret):
secret = utf8encode(secret)
user_signature = self.signature
derived_secret = b'AWS4' + secret
for scope_piece in self.scope.values():
derived_secret = hmac.new(
derived_secret, scope_piece.encode('utf8'), sha256).digest()
valid_signature = hmac.new(
derived_secret, self.string_to_sign, sha256).hexdigest()
return user_signature == valid_signature
@property
def _is_query_auth(self):
return 'X-Amz-Credential' in self.params
@property
def timestamp(self):
"""
Return timestamp string according to the auth type
The difference from v2 is v4 have to see 'X-Amz-Date' even though
it's query auth type.
"""
if not self._timestamp:
try:
if self._is_query_auth and 'X-Amz-Date' in self.params:
# NOTE(andrey-mp): Date in Signature V4 has different
# format
timestamp = mktime(
self.params['X-Amz-Date'], SIGV4_X_AMZ_DATE_FORMAT)
else:
if self.headers.get('X-Amz-Date'):
timestamp = mktime(
self.headers.get('X-Amz-Date'),
SIGV4_X_AMZ_DATE_FORMAT)
else:
timestamp = mktime(self.headers.get('Date'))
except (ValueError, TypeError):
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
if timestamp < 0:
raise AccessDenied('AWS authentication requires a valid Date '
'or x-amz-date header')
try:
self._timestamp = S3Timestamp(timestamp)
except ValueError:
# Must be far-future; blame clock skew
raise RequestTimeTooSkewed()
return self._timestamp
def _validate_expire_param(self):
"""
Validate X-Amz-Expires in query parameter
:raises: AccessDenied
:raises: AuthorizationQueryParametersError
        :raises: AccessDenied
"""
err = None
try:
expires = int(self.params['X-Amz-
|
Micronaet/micronaet-accounting
|
account_invoice_multipartner/multipartner.py
|
Python
|
agpl-3.0
| 2,506
| 0.004789
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class AccountInvoiceMultipartner(osv.osv):
''' Add more than one reference partner in account invoice
(only in report document, not in journal entry)
'''
_inherit = 'account.invoice'
# on change function:
def onchange_extra_address(self, cr, uid, ids, extra_address, partner_id,
context=None):
''' Set domain in partner_ids list when
'''
res = {}
if extra_address == 'contact' and partner_id:
res['domain'] = {'partner_ids': [('parent_id', '=', partner_id)]}
else:
res['domain'] = {'partner_ids': []}
res['value'] = {'partner_ids': False}
return res
_columns = {
        'extra_address': fields.selection([
('none', 'None'),
('contact', 'Contact'),
('partner', 'Partner'), ],
'Extra address', select=True, readonly=False, required=True),
'partner_ids': fields.many2many(
'res.partner', 'invoice_partner_rel', 'invoice_id', 'partner_id',
'Extra partner'),
}
_defaults = {
'extra_address': lambda *a: 'none',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
avihad/ARP-Storm
|
src/arp_open_flow/pox/misc/of_tutorial.py
|
Python
|
apache-2.0
| 4,582
| 0.006547
|
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This component is for use with the OpenFlow tutorial.
It acts as a simple hub, but can be modified to act like an L2
learning switch.
It's roughly similar to the one Brandon Heller did for NOX.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
log = core.getLogger()
class Tutorial (object):
"""
A Tutorial object is created for each switch that connects.
A Connection object for that switch is passed to the __init__ function.
"""
def __init__ (self, connection):
# Keep track of the connection to the switch so that we can
# send it messages!
self.connection = connection
# This binds our PacketIn event listener
connection.addListeners(self)
# Use this table to keep track of which ethernet address is on
# which switch port (keys are MACs, values are ports).
self.mac_to_port = {}
def resend_packet (self, packet_in, out_port):
"""
Instructs the switch to resend a packet that it had sent to us.
"packet_in" is the ofp_packet_in object the switch had sent to the
controller due to a table-miss.
"""
msg = of.ofp_packet_out()
msg.data = packet_in
# Add an action to send to the specified port
action = of.ofp_action_output(port = out_port)
msg.actions.append(action)
# Send message to switch
self.connection.send(msg)
def act_like_hub (self, packet, packet_in):
"""
Implement hub-like behavior -- send all packets to all ports besides
the input port.
"""
# We want to output to all ports -- we do that using the special
# OFPP_ALL port as the output port. (We could have also used
# OFPP_FLOOD.)
self.resend_packet(packet_in, of.OFPP_ALL)
# Note that if we didn't get a valid buffer_id, a slightly better
# implementation would check that we got the full data before
    # sending it (len(packet_in.data) should be == packet_in.total_len)).
def act_like_switch (self, packet, packet_in):
"""
Implement switch-like behavior.
"""
""" # DELETE THIS LINE TO START WORKING ON THIS (AND THE ONE BELOW!) #
    # Here's some pseudocode to start you off implementing a learning
# switch. You'll need to rewrite it as real Python code.
# Learn the port for the source MAC
self.mac_to_port ... <add or update entry>
    if the port associated with the destination MAC of the packet is known:
# Send packet out the associated port
self.resend_packet(packet_in, ...)
# Once you have the above working, try pushing a flow entry
# instead of resending the packet (comment out the above and
# uncomment and complete the below.)
log.debug("Installing flow...")
# Maybe the log statement should have source/destination/port?
#msg = of.ofp_flow_mod()
#
## Set fields to match received packet
#msg.match = of.ofp_match.from_packet(packet)
#
#< Set other fields of flow_mod (timeouts? buffer_id?) >
#
#< Add an output action, and send -- similar to resend_packet() >
else:
# Flood the packet out everything but the input port
# This part looks familiar, right?
self.resend_packet(packet_in, of.OFPP_ALL)
""" # DELETE THIS LINE TO START WORKING ON THIS #
def _handle_PacketIn (self, event):
"""
Handles packet in messages from the switch.
"""
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
# Comment out the following line and uncomment the one after
# when starting the exercise.
self.act_like_hub(packet, packet_in)
#self.act_like_switch(packet, packet_in)
def launch ():
"""
Starts the component
"""
def start_switch (event):
log.debug("Controlling %s" % (event.connection,))
Tutorial(event.connection)
core.openflow.addListenerByName("ConnectionUp", start_switch)
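# --- Editor's sketch (not part of the original tutorial file) ---
# One possible completion of the act_like_switch() exercise, using only the
# calls already shown above; it is an illustration, not the official solution.
# Attach it manually (Tutorial.act_like_switch = _example_act_like_switch) to try it.
def _example_act_like_switch (self, packet, packet_in):
  # Learn which switch port the source MAC address lives on.
  self.mac_to_port[packet.src] = packet_in.in_port
  if packet.dst in self.mac_to_port:
    # Destination already learned: forward out of that single port.
    self.resend_packet(packet_in, self.mac_to_port[packet.dst])
  else:
    # Destination unknown: flood out of every port except the ingress port.
    self.resend_packet(packet_in, of.OFPP_ALL)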
|
ccto/python-demos
|
06-requests/framework.py
|
Python
|
apache-2.0
| 303
| 0.030508
|
import requests
def getHTMLText(url):
try:
r=requests.get(url,timeout=30)
r.raise_for_status()
r.encoding=r.apparent_encoding
        return r.text
except:
return "产生异常"
if __name__=="_
|
_main__":
url="http://www.baidu.com"
print(getHTMLText(url))
|
dato-code/how-to
|
triple_apply_weighted_pagerank.py
|
Python
|
cc0-1.0
| 2,161
| 0.006478
|
import graphlab as gl
import time
def pagerank_update_fn(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
dst['pagerank'] += src['prev_pagerank'] * edge['weight']
return (src, edge, dst)
def sum_weight(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
src['total_weight'] += edge['weight']
return src, edge, dst
def normalize_weight(src, edge, dst):
if src['__id'] != dst['__id']: # ignore self-links
edge['weight'] /= src['total_weight']
return src, edge, dst
def pagerank_triple_apply(input_graph, reset_prob=0.15, threshold=1e-3,
max_iterations=20):
g = gl.SGraph(input_graph.vertices, input_graph.edges)
# compute normalized edge weight
    g.vertices['total_weight'] = 0.0
g = g.triple_apply(sum_weight, ['total_weight'])
g = g.triple_apply(normalize_weight, ['weight'])
del g.vertices['total_weight']
# initialize vertex field
g.vertices['prev_pagerank'] = 1.0
it = 0
total_l1_delta = len(g.vertices)
start = time.time()
while(total_l1_delta > threshold and it < max_iterations):
g.vertices['pagerank'] = 0.0
        g = g.triple_apply(pagerank_update_fn, ['pagerank'])
g.vertices['pagerank'] = g.vertices['pagerank'] * (1 - reset_prob) \
+ reset_prob
g.vertices['l1_delta'] = (g.vertices['pagerank'] - \
g.vertices['prev_pagerank']).apply(lambda x: abs(x))
total_l1_delta = g.vertices['l1_delta'].sum()
g.vertices['prev_pagerank'] = g.vertices['pagerank']
print 'Iteration %d: total pagerank changed in L1 = %f' % (it,\
total_l1_delta)
it = it + 1
print 'Triple apply pagerank finished in: %f secs' % (time.time() - start)
del g.vertices['prev_pagerank']
return g
# Load graph
g = gl.load_graph('http://snap.stanford.edu/data/email-Enron.txt.gz', 'snap')
g.edges['weight'] = 1.0
# Run triple apply sssp
pagerank_graph = pagerank_triple_apply(g)
print pagerank_graph
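# --- Editor's sketch (not part of the original script) ---
# The vertices of the returned SGraph carry the final 'pagerank' field, so the
# highest-ranked nodes can be listed with SFrame.topk (assuming the GraphLab
# Create API used above).
print pagerank_graph.vertices.topk('pagerank', k=10)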
|
NeuroRoboticTech/Jetduino
|
Software/Python/grove_electricity_sensor.py
|
Python
|
mit
| 2,751
| 0.003999
|
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Electricity Sensor (http://www.seeedstudio.com/wiki/Grove_-_Electricity_Sensor)
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Electricity Sensor to analog port A0
# SIG,NC,NC,GND
sensor = ARD_A0
jetduino.pinMode(sensor, INPUT_PIN)
# Vcc of the grove interface is normally 5v
grove_vcc = 5
while True:
try:
# Get sensor value
sensor_value = jetduino.analogRead(sensor)
# Calculate amplitude current (mA)
        amplitude_current = float(sensor_value) / 1024 * grove_vcc / 800 * 2000000
# Calculate effective value (mA)
effective_value = amplitude_current / 1.414
# minimum_current = 1 / 1024 * grove_vcc / 800 * 2000000 / 1.414 = 8.6(mA)
# Only for sinusoidal alternating current
print ("sensor_value", sensor_value)
print ("The amplitude of the current is", amplitude_current, "mA")
print ("The effective value of the current is", effective_value, "mA")
time.sleep(1)
except IOError:
print ("Error")
|
ecreall/nova-ideo
|
novaideo/core.py
|
Python
|
agpl-3.0
| 25,754
| 0.000427
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import venusian
from BTrees.OOBTree import OOBTree
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from substanced.util import renamer
from substanced.content import content
from dace.objectofcollaboration.principal.role import DACE_ROLES
from dace.objectofcollaboration.principal.util import get_access_keys
from dace.objectofcollaboration.entity import Entity
from dace.descriptors import (
SharedUniqueProperty,
CompositeUniqueProperty,
SharedMultipleProperty,
CompositeMultipleProperty)
from dace.util import getSite, get_obj, find_catalog
from pontus.schema import Schema
from pontus.core import VisualisableElement
from pontus.widget import (
Select2Widget)
from novaideo import _, ACCESS_ACTIONS
from novaideo.content.interface import (
IVersionableEntity,
IDuplicableEntity,
ISearchableEntity,
ICommentable,
IPrivateChannel,
IChannel,
ICorrelableEntity,
IPresentableEntity,
INode,
IEmojiable,
IPerson,
ISignalableEntity,
ISustainable,
IDebatable,
ITokenable)
BATCH_DEFAULT_SIZE = 8
SEARCHABLE_CONTENTS = {}
SUSTAINABLE_CONTENTS = {}
NOVAIDO_ACCES_ACTIONS = {}
ADVERTISING_CONTAINERS = {}
ON_LOAD_VIEWS = {}
class AnonymisationKinds(object):
anonymity = 'anonymity'
pseudonymity = 'pseudonymity'
@classmethod
def get_items(cls):
return {
cls.anonymity: _('Anonymity'),
cls.pseudonymity: _('Pseudonymity')
}
@classmethod
def get_title(cls, item):
items = cls.get_items()
return items.get(item, None)
class Evaluations():
support = 'support'
oppose = 'oppose'
def get_searchable_content(request=None):
if request is None:
request = get_current_request()
return getattr(request, 'searchable_contents', {})
class advertising_banner_config(object):
""" A function, class or method decorator which allows a
developer to create advertising banner registrations.
    An advertising banner is a panel. See pyramid_layout.panel_config.
"""
def __init__(self, name='', context=None, renderer=None, attr=None):
self.name = name
self.context = context
self.renderer = renderer
self.attr = attr
def __call__(self, wrapped):
settings = self.__dict__.copy()
def callback(context, name, ob):
config = context.config.with_package(info.module)
config.add_panel(panel=ob, **settings)
ADVERTISING_CONTAINERS[self.name] = {'title': ob.title,
'description': ob.description,
'order': ob.order,
'validator': ob.validator,
'tags': ob.tags
#TODO add validator ob.validator
}
info = venusian.attach(wrapped, callback, category='pyramid_layout')
        if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings['attr'] is None:
settings['attr'] = wrapped.__name__
settings['_info'] = info.codeinfo # fbo "action_method"
return wrapped
class access_action(object):
""" Decorator for creationculturelle access actions.
An access action allows to view an object"""
def __init__(self, access_key=None):
self.access_key = access_key
def __call__(self, wrapped):
def callback(scanner, name, ob):
if ob.context in ACCESS_ACTIONS:
ACCESS_ACTIONS[ob.context].append({'action': ob,
'access_key': self.access_key})
else:
ACCESS_ACTIONS[ob.context] = [{'action': ob,
'access_key': self.access_key}]
venusian.attach(wrapped, callback)
return wrapped
def can_access(user, context, request=None, root=None):
""" Return 'True' if the user can access to the context"""
declared = getattr(getattr(context, '__provides__', None),
'declared', [None])[0]
for data in ACCESS_ACTIONS.get(declared, []):
if data['action'].processsecurity_validation(None, context):
return True
return False
_marker = object()
def serialize_roles(roles, root=None):
result = []
principal_root = getSite()
if principal_root is None:
return []
if root is None:
root = principal_root
root_oid = str(get_oid(root, ''))
principal_root_oid = str(get_oid(principal_root, ''))
for role in roles:
if isinstance(role, tuple):
obj_oid = str(get_oid(role[1], ''))
result.append((role[0]+'_'+obj_oid).lower())
superiors = getattr(DACE_ROLES.get(role[0], _marker),
'all_superiors', [])
result.extend([(r.name+'_'+obj_oid).lower()
for r in superiors])
else:
result.append(role.lower()+'_'+root_oid)
superiors = getattr(DACE_ROLES.get(role, _marker),
'all_superiors', [])
result.extend([(r.name+'_'+root_oid).lower() for r in
superiors])
for superior in superiors:
if superior.name == 'Admin':
result.append('admin_'+principal_root_oid)
break
return list(set(result))
def generate_access_keys(user, root):
return get_access_keys(
user, root=root)
@implementer(ICommentable)
class Commentable(VisualisableElement, Entity):
""" A Commentable entity is an entity that can be comment"""
name = renamer()
comments = CompositeMultipleProperty('comments')
def __init__(self, **kwargs):
super(Commentable, self).__init__(**kwargs)
self.len_comments = 0
def update_len_comments(self):
result = len(self.comments)
result += sum([c.update_len_comments() for c in self.comments])
self.len_comments = result
return self.len_comments
def addtoproperty(self, name, value, moving=None):
super(Commentable, self).addtoproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments += 1
if self is not channel:
self.len_comments += 1
def delfromproperty(self, name, value, moving=None):
super(Commentable, self).delfromproperty(name, value, moving)
if name == 'comments':
channel = getattr(self, 'channel', self)
channel.len_comments -= 1
if self is not channel:
self.len_comments -= 1
@implementer(IDebatable)
class Debatable(VisualisableElement, Entity):
""" A Debatable entity is an entity that can be comment"""
channels = CompositeMultipleProperty('channels', 'subject')
def __init__(self, **kwargs):
super(Debatable, self).__init__(**kwargs)
@property
def channel(self):
channels = getattr(self, 'channels', [])
return channels[0] if channels else None
def get_channel(self, user):
return self.channel
def get_title(self, user=None):
return getattr(self, 'title', '')
def subscribe_to_channel(self, user):
channel = getattr(self, 'channel', None)
if channel and (user not in channel.members):
channel.addtoproperty('members', user)
def add_new_channel(self):
|
miljkovicivan/MicroComments
|
app/healthcheck.py
|
Python
|
bsd-3-clause
| 222
| 0
|
"""
Health check endpoint
"""
from flask import Blueprint
HEALTHCHECK = Blueprint('healthcheck', __name__)
@HEALTHCHECK.route('/comments/health_check')
def healthcheck():
"""
Returns 200
"""
return 'OK'
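

def create_app():
    """
    Editor's sketch (not part of the original module): registering the
    blueprint on a Flask app makes GET /comments/health_check return 'OK'.
    """
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(HEALTHCHECK)
    return app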
|
eviljeff/zamboni
|
mkt/files/models.py
|
Python
|
bsd-3-clause
| 11,351
| 0
|
import hashlib
import json
import os
import unicodedata
import uuid
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str
import commonware
from uuidfield.fields import UUIDField
import mkt
from mkt.site.storage_utils import copy_stored_file, move_stored_file
from mkt.site.decorators import use_master
from mkt.site.helpers import absolutify
from mkt.site.models import ModelBase, OnChangeMixin, UncachedManagerBase
from mkt.site.utils import smart_path, urlparams
log = commonware.log.getLogger('z.files')
# Acceptable extensions.
EXTENSIONS = ('.webapp', '.json', '.zip')
class File(OnChangeMixin, ModelBase):
STATUS_CHOICES = mkt.STATUS_CHOICES.items()
version = models.ForeignKey('versions.Version', related_name='files')
filename = models.CharField(max_length=255, default='')
size = models.PositiveIntegerField(default=0) # In bytes.
hash = models.CharField(max_length=255, default='')
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES,
default=mkt.STATUS_PENDING)
datestatuschanged = models.DateTimeField(null=True, auto_now_add=True)
reviewed = models.DateTimeField(null=True)
# Whether a webapp uses flash or not.
uses_flash = models.BooleanField(default=False, db_index=True)
class Meta(ModelBase.Meta):
db_table = 'files'
def __unicode__(self):
return unicode(self.id)
@property
def has_been_validated(self):
try:
self.validation
except FileValidation.DoesNotExist:
return False
else:
return True
def get_url_path(self, src):
url = os.path.join(reverse('downloads.file', args=[self.id]),
self.filename)
# Firefox's Add-on Manager needs absolute urls.
return absolutify(urlparams(url, src=src))
@classmethod
def from_upload(cls, upload, version, parse_data={}):
upload.path = smart_path(nfd_str(upload.path))
ext = os.path.splitext(upload.path)[1]
f = cls(version=version)
f.filename = f.generate_filename(extension=ext or '.zip')
f.size = storage.size(upload.path) # Size in bytes.
f.status = mkt.STATUS_PENDING
f.hash = f.generate_hash(upload.path)
f.save()
log.debug('New file: %r from %r' % (f, upload))
# Move the uploaded file from the temp location.
copy_stored_file(upload.path, os.path.join(version.path_prefix,
nfd_str(f.filename)))
if upload.validation:
FileValidation.from_json(f, upload.validation)
return f
@property
def addon(self):
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
version = Version.with_deleted.get(pk=self.version_id)
return Webapp.with_deleted.get(pk=version.addon_id)
def generate_hash(self, filename=None):
"""Generate a hash for a file."""
hash = hashlib.sha256()
with open(filename or self.file_path, 'rb') as obj:
for chunk in iter(lambda: obj.read(1024), ''):
hash.update(chunk)
return 'sha256:%s' % hash.hexdigest()
def generate_filename(self, extension=None):
"""
Files are in the format of: {app_slug}-{version}.{extension}
"""
parts = []
addon = self.version.addon
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
extension = extension or '.zip' if addon.is_packaged else '.webapp'
# Apparently we have non-ascii slugs leaking into prod :(
# FIXME.
parts.append(slugify(addon.app_slug) or 'app')
parts.append(self.version.version)
self.filename = '-'.join(parts) + extension
return self.filename
@property
def file_path(self):
if self.status == mkt.STATUS_DISABLED:
return self.guarded_file_path
else:
return self.approved_file_path
@property
def approved_file_path(self):
return os.path.join(settings.ADDONS_PATH, str(self.version.addon_id),
self.filename)
@property
def guarded_file_path(self):
return os.path.join(settings.GUARDED_ADDONS_PATH,
str(self.version.addon_id), self.filename)
@property
def signed_file_path(self):
return os.path.join(settings.SIGNED_APPS_PATH,
str(self.version.addon_id), self._signed())
@property
def signed_reviewer_file_path(self):
return os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
str(self.version.addon_id), self._signed())
def _signed(self):
split = self.filename.rsplit('.', 1)
split.insert(-1, 'signed')
return '.'.join(split)
@property
def extension(self):
return os.path.splitext(self.filename)[-1]
@classmethod
def mv(cls, src, dst, msg):
"""Move a file from src to dst."""
try:
if storage.exists(src):
log.info(msg % (src, dst))
move_stored_file(src, dst)
except UnicodeEncodeError:
log.error('Move Failure: %s %s' % (smart_str(src), smart_str(dst)))
def hide_disabled_file(self):
"""Move a disabled file to the guarded file path."""
if not self.filename:
return
src, dst = self.approved_file_path, self.guarded_file_path
self.mv(src, dst, 'Moving disabled file: %s => %s')
def unhide_disabled_file(self):
if not self.filename:
return
src, dst = self.guarded_file_path, self.approved_file_path
self.mv(src, dst, 'Moving undisabled file: %s => %s')
@use_master
def update_status(sender, instance, **kw):
if not kw.get('raw'):
try:
instance.version.addon.reload()
instance.version.addon.update_status()
if 'delete' in kw:
                instance.version.addon.update_version(ignore=instance.version)
else:
instance.version.addon.update_version()
except models.ObjectDoesNotExist:
pass
def update_status_delete(sender, instance, **kw):
kw['delete'] = True
return update_status(sender, instance, **kw)
models.signals.post_save.connect(
    update_status, sender=File, dispatch_uid='version_update_status')
models.signals.post_delete.connect(
update_status_delete, sender=File, dispatch_uid='version_update_status')
@receiver(models.signals.post_delete, sender=File,
dispatch_uid='cleanup_file')
def cleanup_file(sender, instance, **kw):
""" On delete of the file object from the database, unlink the file from
the file system """
if kw.get('raw') or not instance.filename:
return
# Use getattr so the paths are accessed inside the try block.
for path in ('file_path', 'guarded_file_path'):
try:
filename = getattr(instance, path, None)
except models.ObjectDoesNotExist:
return
if filename and storage.exists(filename):
log.info('Removing filename: %s for file: %s'
% (filename, instance.pk))
storage.delete(filename)
@File.on_change
def check_file(old_attr, new_attr, instance, sender, **kw):
if kw.get('raw'):
return
old, new = old_attr.get('status'), instance.status
if new == mkt.STATUS_DISABLED and old != mkt.STATUS_DISABLED:
instance.hide_disabled_file()
elif old == mkt.STATUS_DISABLED and new != mkt.STATUS_DISABLED:
instance.unhide_disabled_file()
# Log that the hash has changed.
old, new = old_attr.get('hash'), instance.hash
if old != new:
try:
addon = instance.versi
|
Mphaya/heasy
|
heasy/utility_scripts/display_top_prons.py
|
Python
|
apache-2.0
| 4,406
| 0.00749
|
#!/usr/bin/env python
#
# Copyright 2013 CSIR Meraka HLT and Multilingual Speech Technologies (MuST) North-West University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Marelie Davel"
__email__ = "mdavel@csir.co.za"
"""
Display the dictionary pronunciations of the most frequent words occurring in a speech corpus
@param in_trans_list: List of transcription filenames
@param in_dict: Pronunciation dictionary
@param top_n: Number of words to verify
@param out_name: Name of output file for results
"""
import sys, operator, codecs
#------------------------------------------------------------------------------
def display_top_prons(trans_list_name, dict_name, top_n, out_name):
"""Display the dictionary pronunciations of the most frequent words occuring in a speech corpus"""
#Read dictionary
pron_dict = {}
try:
dict_file = codecs.open(dict_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + dict_name
sys.exit(1)
for ln in dict_file:
ln = ln.strip()
parts = ln.split("\t")
        if len(parts) != 2:
            print "Error: dictionary format error line %s" % ln
            continue
        word = parts[0]
        pron = parts[1]
if pron_dict.has_key(word):
pron_dict[word].append(pron)
else:
pron_dict[word] = []
pron_dict[word].append(pron)
dict_file.close()
    #Read and count words in transcriptions
counts = {}
try:
list_file = codecs.open(trans_list_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + trans_list_name
sys.exit(1)
for trans_name in list_file:
trans_name = trans_name.strip()
try:
trans_file = codecs.open(trans_name,"r","utf8")
except IOError:
print "Error: Error reading from file " + trans_name
sys.exit(1)
for ln in trans_file:
ln = ln.strip()
parts = ln.split(" ")
for word in parts:
if counts.has_key(word):
counts[word] = counts[word]+1
else:
counts[word] = 1
trans_file.close()
list_file.close()
#Now write top pronunciations to file
try:
out_file = codecs.open(out_name,"w","utf8")
except IOError:
print "Error: Error writing to file " + out_name
sys.exit(1)
top_words = sorted(counts.items(),key=operator.itemgetter(1),reverse=True)
n = 0;
for (w,c) in top_words:
if n < top_n:
if pron_dict.has_key(w):
for var_pron in pron_dict[w]:
out_file.write("%d\t%-20s\t%s\n" % (c,w,var_pron) )
n = n+1
            else:
                print "Error: unknown word %s" % w
else:
break
out_file.close()
#------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) == 5:
        trans_list_name = str(sys.argv[1])
        dict_name = str(sys.argv[2])
top_n = int(sys.argv[3])
out_name = str(sys.argv[4])
print "Displaying the %d most frequent words" % top_n
display_top_prons(trans_list_name, dict_name, top_n, out_name)
else:
print "\nDisplay the dictionary pronunciations of the most frequent words in a speech corpus."
print "Usage: display_top_prons.py <in:trans_list> <in:dict> <n> <out:results>"
print " <in:trans_list> list of transcription filenames"
print " <in:dict> pronunciation dictionary"
print " <n> number of words to verify"
print " <out:results> name of output file for results"
#------------------------------------------------------------------------------
|
tyler-elric/misc
|
rss.py
|
Python
|
gpl-3.0
| 6,353
| 0.030222
|
import xml.etree.ElementTree as ET
from email.utils import parsedate as parsedate_
from time import mktime
import datetime
def parsedate(d):
    return datetime.datetime.fromtimestamp(mktime(parsedate_(d)))
def element_value(el, default):
    if el is None:
        return default
    return ((el.text or "") + (el.tail or "")).strip()
def date_value(dv):
return parsedate(dv) if isinstance(dv,str) else dv
def repeat_value(val):
while True: yield val
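# Illustrative example (assumed input string): parsedate('Tue, 10 Jun 2003 04:00:00 GMT')
# returns datetime.datetime(2003, 6, 10, 4, 0); the RFC 822 timestamp is parsed into a
# naive datetime and the timezone designator itself is discarded.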
class Item:
def __init__(self, title, link, description, pubdate, guid):
self.title = title
self.link = link
self.description = description
self.published = date_value(pubdate)
self.guid = guid
class Feed:
def __init__(self,title,description,link,language,copyright,editor,master,version,items):
self.title = title
self.description = description
self.link = link
self.language = language
self.copyright = copyright
self.editor = editor
self.webmaster = master
self.version = version
self.items = items
@classmethod
def create(cls,data):
tree = ET.fromstring(data)
return {
"0.91": cls.parse_91,
"0.90": cls.parse_90,
"2.0": cls.parse_20
}[tree.get("version","2.0")](tree.find("channel"))
@classmethod
def parse_91(cls,tree):
version = tree.get("version","0.91")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_90(cls,tree):
version = tree.get("version","0.90")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_20(cls,tree):
version = tree.get("version","2.0")
title = element_value(tree.find("title"),"unknown")
link = element_value(tree.find("link"),"")
description = element_value(tree.find("description"),"unknown")
language = element_value(tree.find("language"),"en-us")
copyright = element_value(tree.find("copyright"),"unknown")
editor = element_value(tree.find("managingEditor"),"unknown")
master = element_value(tree.find("webMaster"),"unknown")
items = map(cls.parse_item,tree.iter("item"),repeat_value(version))
return cls(title,description,link,language,copyright,editor,master,version,list(items))
@classmethod
def parse_item(cls,node,version="2.0"):
title = element_value(node.find("title"),"unknown")
link = element_value(node.find("link"),"")
description = element_value(node.find("description"),"unknown")
pubdate = element_value(node.find("pubDate"),"unknown")
guid = element_value(node.find("guid"),"unknown")
return Item(title,link,description,pubdate,guid)
    def updates(self, since):
        # Item stores its timestamp as `published`, not `date`.
        include = lambda x: x.published >= since
        return filter(include, self.items)
sample = """<?xml version="1.0"?>
<rss version="2.0">
<channel>
<title>Liftoff News</title>
<link>http://liftoff.msfc.nasa.gov/</link>
<description>Liftoff to Space Exploration.</description>
<language>en-us</language>
<pubDate>Tue, 10 Jun 2003 04:00:00 GMT</pubDate>
<lastBuildDate>Tue, 10 Jun 2003 09:41:01 GMT</lastBuildDate>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<generator>Weblog Editor 2.0</generator>
<managingEditor>editor@example.com</managingEditor>
<webMaster>webmaster@example.com</webMaster>
<item>
<title>Star City</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-starcity.asp</link>
<description>How do Americans get ready to work with Russians aboard the International Space Station? They take a crash course in culture, language and protocol at Russia's <a href="http://howe.iki.rssi.ru/GCTC/gctc_e.htm">Star City</a>.</description>
<pubDate>Tue, 03 Jun 2003 09:39:21 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/06/03.html#item573</guid>
</item>
<item>
<description>Sky watchers in Europe, Asia, and parts of Alaska and Canada will experience a <a href="http://science.nasa.gov/headlines/y2003/30may_solareclipse.htm">partial eclipse of the Sun</a> on Saturday, May 31st.</description>
<pubDate>Fri, 30 May 2003 11:06:42 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/05/30.html#item572</guid>
</item>
<item>
<title>The Engine That Does More</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-VASIMR.asp</link>
         <description>Before man travels to Mars, NASA hopes to design new engines that will let us fly through the Solar System more quickly. The proposed VASIMR engine would do that.</description>
<pubDate>Tue, 27 May 2003 08:37:32 GMT</pubDate>
         <guid>http://liftoff.msfc.nasa.gov/2003/05/27.html#item571</guid>
</item>
<item>
<title>Astronauts' Dirty Laundry</title>
<link>http://liftoff.msfc.nasa.gov/news/2003/news-laundry.asp</link>
<description>Compared to earlier spacecraft, the International Space Station has many luxuries, but laundry facilities are not one of them. Instead, astronauts have other options.</description>
<pubDate>Tue, 20 May 2003 08:56:02 GMT</pubDate>
<guid>http://liftoff.msfc.nasa.gov/2003/05/20.html#item570</guid>
</item>
</channel>
</rss>
"""
feed = Feed.create(sample)
print(feed.title)
print(feed.description)
for item in feed.items:
print(item.title)
print(item.description)
print(item.link)
print(item.published.day,item.published.month,item.published.year)
print()
|
trec-dd/trec-dd-simulation-harness
|
trec_dd/harness/truth_data.py
|
Python
|
mit
| 4,988
| 0.002005
|
'''Generate truth data from a truth_data_file.
A truth_data_file may be produced by a human being, so sometimes
it should be considered truth data. This file provides
utilities for turning a truth_data_file of a certain format into
truth data the harness understands.
'''
from __future__ import absolute_import
import argparse
import json
from bs4 import BeautifulSoup
import logging
import sys
from dossier.label import Label, LabelStore, CorefValue
import kvlayer
import yakonfig
logger = logging.getLogger(__name__)
def parse_passage(p):
'''Extract a line_data dict from a passage's XML data and context.
'''
line_data = {}
domain = p.parent.parent.parent
topic = p.parent.parent
subtopic = p.parent
line_data['domain_id'] = domain['id'].encode('utf-8')
line_data['domain_name'] = domain['name'].encode('utf-8')
line_data['userid'] = 'dropped'
line_data['username'] = 'dropped'
line_data['topic_id'] = topic['id'].encode('utf-8')
line_data['topic_name'] = topic['name'].encode('utf-8')
line_data['subtopic_id'] = subtopic['id'].encode('utf-8')
line_data['subtopic_name'] = subtopic['name'].encode('utf-8')
line_data['passage_id'] = p['id'].encode('utf-8')
line_data['passage_name'] = p.find('text').text.encode('utf-8')
line_data['docno'] = p.docno.text.encode('utf-8')
line_data['grade'] = p.rating.text.encode('utf-8')
return line_data
def make_full_doc_id(doc_id, offset_start, offset_end):
'''A full doc_id is of the form: doc_id#offset_start,offset_end
'''
offset_string = ','.join([offset_start, offset_end])
return '#'.join([doc_id, offset_string])
def make_offset_string(offset_start, offset_end):
'''Create an offset string from a pair of offsets.
:param offset_start: str
:param offset_end: str
'''
return ','.join([offset_start, offset_end])
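# Illustrative example (hypothetical values): make_full_doc_id('doc-1234', '10', '25')
# returns 'doc-1234#10,25' and make_offset_string('10', '25') returns '10,25',
# matching the doc_id#offset_start,offset_end form described above.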
def label_from_truth_data_file_line(line_data):
'''Create a label from a *parsed* truth_data_file line.
:param line_data: dict
'''
# document data
doc_id = line_data['docno']
if not doc_id.strip():
logger.warn('dropping invalid truth data line: '
'bad docno: %r: %r'
% (doc_id, line_data))
return None
if len(line_data['passage_name'].strip()) < 1:
logger.warn('dropping empty passage: %r', line_data)
return None
# annotation data
topic_id = line_data['topic_id']
subtopic_id = line_data['subtopic_id']
passage_id = line_data['passage_id']
annotator = line_data['userid']
# value data
value = CorefValue.Positive
try:
rating = int(line_data['grade'])
except ValueError:
logger.warn('replacing bogus grade with zero = %r',
line_data['grade'])
rating = 0
if rating < 0:
value = CorefValue.Negative
rating = 0
# meta data
meta = {'domain_name': line_data['domain_name'],
'domain_id': line_data['domain_id'],
'username': line_data['username'],
'topic_name': line_data['topic_name'],
'topic_id': line_data['topic_id'],
'subtopic_name': line_data['subtopic_name'],
'passage_text': line_data['passage_name']}
label = Label(topic_id, doc_id, annotator, value,
subtopic_id1=subtopic_id, subtopic_id2=passage_id,
rating=rating, meta=meta)
return label
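# Illustrative sketch (hypothetical values, not real truth data): a parsed line such as
#   {'docno': 'doc-1234', 'topic_id': 'DD15-1', 'subtopic_id': 'DD15-1.1',
#    'passage_id': '42', 'userid': 'assessor-1', 'username': 'assessor-1',
#    'grade': '2', 'domain_id': 'd1', 'domain_name': 'example',
#    'topic_name': 'sample topic', 'subtopic_name': 'sample subtopic',
#    'passage_name': 'some passage text'}
# yields a positive Label with rating 2; a negative grade produces CorefValue.Negative
# with the rating clamped to 0.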
def parse_truth_data(label_store, truth_data_path, batch_size=10000):
data_file = open(truth_data_path, 'r')
data = BeautifulSoup(data_file, 'xml')
labels_to_put = []
num_labels = 0
for psg in data.find_all('passage'):
line_data = parse_passage(psg)
label = label_from_truth_data_file_line(line_data)
if label is not None:
labels_to_put.append(label)
num_labels += 1
if num_labels % 1000 == 0:
logger.debug('Converted %d labels.' % num_labels)
        if len(labels_to_put) >= batch_size:
label_store.put(*labels_to_put)
labels_to_put = []
if len(labels_to_put) > 0:
label_store.put(*labels_to_put)
def main():
    parser = argparse.ArgumentParser('test tool for checking that we can load '
'the truth data as distributed by NIST for '
'TREC 2015')
parser.add_argument('truth_data_path', help='path to truth data file')
modules = [yakonfig, kvlayer]
args = yakonfig.parse_args(parser, modules)
logging.basicConfig(level=logging.DEBUG)
kvl = kvlayer.client()
label_store = LabelStore(kvl)
parse_truth_data(label_store, args.truth_data_path)
logger.debug('Done! The truth data was loaded into this kvlayer backend: %r',
json.dumps(yakonfig.get_global_config('kvlayer'), indent=4,
sort_keys=True))
if __name__ == '__main__':
main()
|
johndeu/amspy
|
amspy/examples/analytics/indexer_v2/indexer_v2.py
|
Python
|
mit
| 14,879
| 0.01566
|
"""
Copyright (c) 2016, John Deutscher
Description: Sample Python script for Azure Media Indexer V2
License: MIT (see LICENSE.txt file for details)
Documentation : https://azure.microsoft.com/en-us/documentation/articles/media-services-process-content-with-indexer2/
"""
import os
import json
import amspy
import time
import sys
#import pytz
import urllib
import logging
import datetime
from azure import *
from azure.storage.blob import BlockBlobService
from azure.storage.blob import ContentSettings
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# ALL CODE IN THIS DIRECTOY (INCLUDING THIS FILE) ARE EXAMPLE CODES THAT WILL ACT ON YOUR
# AMS ACCOUNT. IT ASSUMES THAT THE AMS ACCOUNT IS CLEAN (e.g.: BRAND NEW), WITH NO DATA OR
# PRODUCTION CODE ON IT. DO NOT, AGAIN: DO NOT RUN ANY EXAMPLE CODE AGAINST PRODUCTION AMS
# ACCOUNT! IF YOU RUN ANY EXAMPLE CODE AGAINST YOUR PRODUCTION AMS ACCOUNT, YOU CAN LOSE
# DATA, AND/OR PUT YOUR AMS SERVICES IN A DEGRADED OR UNAVAILABLE STATE. BE WARNED!
###########################################################################################
##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER ##### ##### DISCLAIMER #####
###########################################################################################
# Load Azure app defaults
try:
with open('../../config.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print_phase_message("ERROR: Expecting config.json in examples folder")
sys.exit()
account_name = configData['accountName']
account_key = configData['accountKey']
sto_account_name = configData['sto_accountName']
sto_accountKey = configData['sto_accountKey']
log_name = configData['logName']
log_level = configData['logLevel']
purge_log = configData['purgeLog']
#Initialization...
print ("\n-----------------------= AMS Py =----------------------")
print ("Azure Media Analytics - Indexer v2 Preview Sample")
print ("for details : https://azure.microsoft.com/en-us/documentation/articles/media-services-process-content-with-indexer2/ ")
print ("-------------------------------------------------------\n")
#Remove old log file if requested (default behavior)...
if (os.path.isdir('./log') != True):
os.mkdir('log')
if (purge_log.lower() == "yes"):
if (os.path.isfile(log_name)):
os.remove(log_name)
#Basic Logging...
logging.basicConfig(format='%(asctime)s - %(levelname)s:%(message)s', level=log_level, filename=log_name)
# Get the access token...
response = amspy.get_access_token(account_name, account_key)
resjson = response.json()
access_token = resjson["access_token"]
#Some global vars...
NAME = "movie"
COUNTER = 0;
ENCRYPTION = "1" # 0=None, StorageEncrypted=1, CommonEncryptionProtected=2, EnvelopeEncryptionProtected=4
ENCRYPTION_SCHEME = "StorageEncryption" # StorageEncryption or CommonEncryption.
VIDEO_NAME = "movie.mp4"
VIDEO_PATH = "../assets/movie.mp4"
ASSET_FINAL_NAME = "Python Sample-Indexer-V2"
PROCESSOR_NAME = "Azure Media Indexer 2 Preview"
INDEXER_V2_JSON_PRESET = "indexerv2.json"
# Just a simple wrapper function to print the title of each of our phases to the console...
def print_phase_header(message):
global COUNTER;
print ("\n[" + str("%02d" % int(COUNTER)) + "] >>> " + message)
COUNTER += 1;
# This wrapper function prints our messages to the console with a timestamp...
def print_phase_message(message):
time_stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print (str(time_stamp) + ": " + message)
### get ams redirected url
response = amspy.get_url(access_token)
if (response.status_code == 200):
ams_redirected_rest_endpoint = str(response.url)
else:
print_phase_message("GET Status: " + str(response.status_code) + " - Getting Redirected URL ERROR." + str(response.content))
exit(1)
######################### PHASE 1: UPLOAD #########################
### create an asset
print_phase_header("Creating a Media Asset")
response = amspy.create_media_asset(access_token, NAME)
if (response.status_code == 201):
resjson = response.json()
asset_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Asset Name........................: " + NAME)
print_phase_message("Media Asset Id..........................: " + asset_id)
else:
print_phase_message("POST Status.............................: " + str(response.status_code) + " - Media Asset: '" + NAME + "' Creation ERROR." + str(response.content))
### create an assetfile
print_phase_header("Creating a Media Assetfile (for the video file)")
response = amspy.create_media_assetfile(access_token, asset_id, VIDEO_NAME, "false", "false")
if (response.status_code == 201):
resjson = response.json()
video_assetfile_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Media Assetfile Name....................: " + str(resjson['d']['Name']))
print_phase_message("Media Assetfile Id......................: " + video_assetfile_id)
print_phase_message("Media Assetfile IsPrimary...............: " + str(resjson['d']['IsPrimary']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Media Assetfile: '" + VIDEO_NAME + "' Creation ERROR." + str(response.content))
### create an asset write access policy for uploading
print_phase_header("Creating an Asset Write Access Policy")
duration = "440"
response = amspy.create_asset_accesspolicy(access_token, "NewUploadPolicy", duration, "2")
if (response.status_code == 201):
resjson = response.json()
write_accesspolicy_id = str(resjson['d']['Id'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("Asset Access Policy Id..................: " + write_accesspolicy_id)
print_phase_message("Asset Access Policy Duration/min........: " + str(resjson['d']['DurationInMinutes']))
else:
print_phase_message("POST Status: " + str(response.status_code) + " - Asset Write Access Policy Creation ERROR." + str(response.content))
### create a sas locator
print_phase_header("Creating a write SAS Locator")
## INFO: If you need to upload your files immediately, you should set your StartTime value to five minutes before the current time.
#This is because there may be clock skew between your client machine and Media Services.
#Also, your StartTime value must be in the following DateTime format: YYYY-MM-DDTHH:mm:ssZ (for example, "2014-05-23T17:53:50Z").
# EDITED: Not providing starttime is the best approach to be able to upload a file immediatly...
#starttime = datetime.datetime.now(pytz.timezone(time_zone)).strftime("%Y-%m-%dT%H:%M:%SZ")
#response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id, starttime)
response = amspy.create_sas_locator(access_token, asset_id, write_accesspolicy_id)
if (response.status_code == 201):
resjson = response.json()
saslocator_id = str(resjson['d']['Id'])
saslocator_baseuri = str(resjson['d']['BaseUri'])
sto_asset_name = os.path.basename(os.path.normpath(saslocator_baseuri))
saslocator_cac = str(resjson['d']['ContentAccessComponent'])
print_phase_message("POST Status.............................: " + str(response.status_code))
print_phase_message("SAS URL Locator StartTime...............: " + str(resjson['d']['StartTime']))
print_phase_message("SAS URL Locator Id......................: " + saslocator_id)
print_phase_message("SAS URL Locator Base URI................: " + saslocator_baseuri)
print_phase_message("SAS URL Locator Content Access Component: " + saslocator_cac)
else:
print_phase_message("POST Status: " + str(response.status_code) + " - SAS URL Locator Creation ERROR." + str(response.content))
### Use the Azure Blob Service library from the Azure Storage SDK.
block_blob_service = BlockBlobServic
|
openstack/smaug
|
karbor/services/protection/protection_plugins/server/nova_protection_plugin.py
|
Python
|
apache-2.0
| 20,349
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import partial
from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from karbor.common import constants
from karbor import exception
from karbor.services.protection.client_factory import ClientFactory
from karbor.services.protection import protection_plugin
from karbor.services.protection.protection_plugins.server \
import server_plugin_schemas
from karbor.services.protection.protection_plugins import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
VOLUME_ATTACHMENT_RESOURCE = 'OS::Cinder::VolumeAttachment'
FLOATING_IP_ASSOCIATION = 'OS::Nova::FloatingIPAssociation'
nova_backup_opts = [
cfg.IntOpt(
'poll_interval', default=15,
help='Poll interval for Nova backup status'
),
]
class ProtectOperation(protection_plugin.Operation):
def on_main(self, checkpoint, resource, context, parameters, **kwargs):
server_id = resource.id
bank_section = checkpoint.get_resource_bank_section(server_id)
nova_client = ClientFactory.create_client("nova", context)
cinder_client = ClientFactory.create_client("cinder", context)
neutron_client = ClientFactory.create_client("neutron", context)
resource_definition = {"resource_id": server_id}
# get dependent resources
server_child_nodes = []
resources = checkpoint.resource_graph
for resource_node in resources:
resource = resource_node.value
if resource.id == server_id:
server_child_nodes = resource_node.child_nodes
LOG.info("Creating server backup, server_id: %s. ", server_id)
try:
bank_section.update_object("status",
constants.RESOURCE_STATUS_PROTECTING)
# get attach_metadata about volume
attach_metadata = {}
for server_child_node in server_child_nodes:
child_resource = server_child_node.value
if child_resource.type == constants.VOLUME_RESOURCE_TYPE:
volume = cinder_client.volumes.get(child_resource.id)
attachments = getattr(volume, "attachments")
for attachment in attachments:
if attachment["server_id"] == server_id:
attachment["bootable"] = getattr(
volume, "bootable")
attach_metadata[child_resource.id] = attachment
resource_definition["attach_metadata"] = attach_metadata
            # get metadata about AZ
server = nova_client.servers.get(server_id)
availability_zone = getattr(server, "OS-EXT-AZ:availability_zone")
# get metadata about network, flavor, key_name, security_groups
addresses = getattr(server, "addresses")
networks = []
floating_ips = []
            for network_infos in addresses.values():
                for network_info in network_infos:
addr = network_info.get("addr")
mac = network_info.get("OS-EXT-IPS-MAC:mac_addr")
network_type = network_info.get("OS-EXT-IPS:type")
if network_type == 'fixed':
port = neutron_client.list_ports(
mac_address=mac)["ports"][0]
if port["network_id"] not in networks:
networks.append(port["network_id"])
elif network_type == "floating":
floating_ips.append(addr)
flavor = getattr(server, "flavor")["id"]
key_name = getattr(server, "key_name", None)
security_groups = getattr(server, "security_groups", None)
# get metadata about boot device
boot_metadata = {}
image_info = getattr(server, "image", None)
if image_info is not None and isinstance(image_info, dict):
boot_metadata["boot_device_type"] = "image"
boot_metadata["boot_image_id"] = image_info['id']
else:
boot_metadata["boot_device_type"] = "volume"
volumes_attached = getattr(
server, "os-extended-volumes:volumes_attached", [])
for volume_attached in volumes_attached:
volume_id = volume_attached["id"]
volume_attach_metadata = attach_metadata.get(
volume_id, None)
if volume_attach_metadata is not None and (
volume_attach_metadata["bootable"] == "true"):
boot_metadata["boot_volume_id"] = volume_id
boot_metadata["boot_attach_metadata"] = (
volume_attach_metadata)
resource_definition["boot_metadata"] = boot_metadata
# save all server's metadata
server_metadata = {"availability_zone": availability_zone,
"networks": networks,
"floating_ips": floating_ips,
"flavor": flavor,
"key_name": key_name,
"security_groups": security_groups,
}
resource_definition["server_metadata"] = server_metadata
LOG.info("Creating server backup, resource_definition: %s.",
resource_definition)
bank_section.update_object("metadata", resource_definition)
# update resource_definition backup_status
bank_section.update_object("status",
constants.RESOURCE_STATUS_AVAILABLE)
LOG.info("Finish backup server, server_id: %s.", server_id)
except Exception as err:
# update resource_definition backup_status
LOG.exception("Create backup failed, server_id: %s.", server_id)
bank_section.update_object("status",
constants.RESOURCE_STATUS_ERROR)
raise exception.CreateResourceFailed(
name="Server Backup",
reason=err,
resource_id=server_id,
resource_type=constants.SERVER_RESOURCE_TYPE)
class DeleteOperation(protection_plugin.Operation):
def on_main(self, checkpoint, resource, context, parameters, **kwargs):
resource_id = resource.id
bank_section = checkpoint.get_resource_bank_section(resource_id)
LOG.info("deleting server backup, server_id: %s.", resource_id)
try:
bank_section.update_object("status",
constants.RESOURCE_STATUS_DELETING)
objects = bank_section.list_objects()
for obj in objects:
if obj == "status":
continue
bank_section.delete_object(obj)
bank_section.update_object("status",
constants.RESOURCE_STATUS_DELETED)
LOG.info("finish delete server, server_id: %s.", resource_id)
except Exception as err:
# update resource_definition backup_status
LOG.error("Delete backup failed, server_id: %s.", resource_id)
bank_section.update_object("status",
constants.RESOURCE_STATUS_ERROR)
raise exception.DeleteResourceFailed(
name="Server Backup",
|
Tocknicsu/nctuoj
|
backend/test/api/problem/guest.py
|
Python
|
mit
| 1,488
| 0.005376
|
#!/usr/bin/env python3
import sys
import requests
import json
import unittest
import datetime
from util import TestCase
import config
import common
class TestApiProblemGuest(TestCase):
url = '%s/api/groups/3/problems/'%(config.base_url)
token = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})['token']
def test_gets(self):
data = {
"token": self.token,
}
res = requests.get(self.url, data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
def test_get_visible(self):
data = {
"token": self.token,
}
res = requests.get("%s%s/"%(self.url,10006), data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
def test_get_invisible(self):
data = {
"token": self.token,
}
res = requests.get("%s%s/"%(self.url,10005), data=data)
res.connection.close()
expect_result = {
"status_code": 403,
"body": {
"msg": "Permission Denied",
}
}
self.assertEqualR(res, expect_result)
|
Open-E-WEB/django-powerpages
|
powerpages/cachekeys.py
|
Python
|
mit
| 1,418
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import hashlib
from django.utils import six
URL_LIST_CACHE = 'powerpages:url_list'
SITEMAP_CONTENT = 'powerpages:sitemap'
def get_cache_name(prefix, name):
"""
Cache name constructor. Uses the same methods as django cache system
Examples:
*) prefix=profile.cache, name=<requestuser.id>
*) prefix=template.cache.sidebar, name=<requestuser.id>
"""
return '{0}.{1}'.format(
prefix, hashlib.md5(six.text_type(name).encode('utf-8')).hexdigest()
)
def template_source(page_pk):
"""Create cache key for page template"""
    return 'powerpages:template:{0}'.format(page_pk)
def rendered_source_for_user(page_pk, user_id):
"""Create cache key for rendered page source based on current user"""
return 'powerpages:rendered_source_user:{0}:{1}'.format(page_pk, user_id)
def rendered_source_for_lang(page_pk, lang):
"""Create cache key for rendered page source based on current language"""
return 'powerpages:rendered_source_lang:{0}:{1}'.format(page_pk, lang)
def url_cache(name, *args, **kwargs):
"""
    Creates cache key for url of CMS page or standard Django URL
based on hashed serialized name with optional *args and **kwargs
"""
serialized_url = json.dumps([name, args, kwargs], sort_keys=True)
return get_cache_name('powerpages:urls', serialized_url)
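# Illustrative usage (hypothetical URL name): url_cache('blog:detail', slug='hello')
# serializes ['blog:detail', [], {'slug': 'hello'}] deterministically and returns a
# key of the form 'powerpages:urls.<md5 hexdigest>', so identical calls share an entry.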
|
open-forcefield-group/openforcefield
|
openff/toolkit/tests/test_toolkits.py
|
Python
|
mit
| 163,747
| 0.002449
|
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Tests for cheminformatics toolkit wrappers
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import logging
import os
from tempfile import NamedTemporaryFile
from typing import Dict
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from simtk import unit
from openff.toolkit.tests.test_forcefield import (
create_acetaldehyde,
create_acetate,
create_cyclohexane,
create_ethanol,
create_reversed_ethanol,
)
from openff.toolkit.tests.utils import (
requires_ambertools,
requires_openeye,
requires_rdkit,
)
from openff.toolkit.topology.molecule import Molecule
from openff.toolkit.utils import get_data_file_path
from openff.toolkit.utils.toolkits import (
GLOBAL_TOOLKIT_REGISTRY,
AmberToolsToolkitWrapper,
BuiltInToolkitWrapper,
ChargeMethodUnavailableError,
GAFFAtomTypeWarning,
IncorrectNumConformersError,
IncorrectNumConformersWarning,
InvalidIUPACNameError,
InvalidToolkitError,
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
ToolkitRegistry,
ToolkitUnavailableException,
ToolkitWrapper,
UndefinedStereochemistryError,
)
# =============================================================================================
# FIXTURES
# =============================================================================================
def get_mini_drug_bank(toolkit_class, xfail_mols=None):
"""Read the mini drug bank sdf file with t
|
he toolkit and return the molecules"""
# This is a work around a weird error where even though the test is skipped due to a missing toolkit
# we still try and read the file with the toolkit
if toolkit_class.is_available():
toolkit = toolkit_class()
molecules = Molecule.from_file(
get_data_file_path("molecules/MiniDrugBank.sdf"),
"sdf",
toolkit_registry=toolkit,
allow_undefined_stereo=True,
)
else:
molecules = []
if xfail_mols is None:
return molecules
for i, mol in enumerate(molecules):
if mol.name in xfail_mols:
marker = pytest.mark.xfail(reason=xfail_mols[mol.name])
molecules[i] = pytest.param(mol, marks=marker)
return molecules
openeye_inchi_stereochemistry_lost = [
"DrugBank_2799",
"DrugBank_5414",
"DrugBank_5415",
"DrugBank_5418",
"DrugBank_2955",
"DrugBank_2987",
"DrugBank_5555",
"DrugBank_472",
"DrugBank_5737",
"DrugBank_3332",
"DrugBank_3461",
"DrugBank_794",
"DrugBank_3502",
"DrugBank_6026",
"DrugBank_3622",
"DrugBank_977",
"DrugBank_3693",
"DrugBank_3726",
"DrugBank_3739",
"DrugBank_6222",
"DrugBank_6232",
"DrugBank_3844",
"DrugBank_6295",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_3930",
"DrugBank_6329",
"DrugBank_6353",
"DrugBank_6355",
"DrugBank_6401",
"DrugBank_4161",
"DrugBank_4162",
"DrugBank_6509",
"DrugBank_6531",
"DrugBank_1570",
"DrugBank_4249",
"DrugBank_1634",
"DrugBank_1659",
"DrugBank_6647",
"DrugBank_1700",
"DrugBank_1721",
"DrugBank_1742",
"DrugBank_1802",
"DrugBank_6775",
"DrugBank_1849",
"DrugBank_1864",
"DrugBank_6875",
"DrugBank_1897",
"DrugBank_4593",
"DrugBank_1962",
"DrugBank_4662",
"DrugBank_7049",
"DrugBank_4702",
"DrugBank_2095",
"DrugBank_4778",
"DrugBank_2141",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_4865",
"DrugBank_2208",
"DrugBank_2210",
"DrugBank_2276",
"DrugBank_4959",
"DrugBank_4964",
"DrugBank_5043",
"DrugBank_2429",
"DrugBank_5076",
"DrugBank_2465",
"DrugBank_2519",
"DrugBank_2538",
"DrugBank_5158",
"DrugBank_5176",
"DrugBank_2592",
]
openeye_inchi_isomorphic_fails = ["DrugBank_1661", "DrugBank_4346", "DrugBank_2467"]
rdkit_inchi_stereochemistry_lost = [
"DrugBank_5414",
"DrugBank_2955",
"DrugBank_5737",
"DrugBank_3332",
"DrugBank_3461",
"DrugBank_6026",
"DrugBank_3622",
"DrugBank_3726",
"DrugBank_6222",
"DrugBank_3844",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_6329",
"DrugBank_6509",
"DrugBank_6647",
"DrugBank_1897",
"DrugBank_4778",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_2538",
"DrugBank_2592",
"DrugBank_4249",
"DrugBank_5076",
"DrugBank_5418",
"DrugBank_3930",
"DrugBank_1634",
"DrugBank_1962",
"DrugBank_5043",
"DrugBank_2519",
"DrugBank_7124",
"DrugBank_6865",
]
rdkit_inchi_roundtrip_mangled = ["DrugBank_2684"]
openeye_iupac_bad_stereo = [
"DrugBank_977",
"DrugBank_1634",
"DrugBank_1700",
"DrugBank_1962",
"DrugBank_2148",
"DrugBank_2178",
"DrugBank_2186",
"DrugBank_2208",
"DrugBank_2519",
"DrugBank_2538",
"DrugBank_2592",
"DrugBank_2651",
"DrugBank_2987",
"DrugBank_3332",
"DrugBank_3502",
"DrugBank_3622",
"DrugBank_3726",
"DrugBank_3844",
"DrugBank_3930",
"DrugBank_4161",
"DrugBank_4162",
"DrugBank_4778",
"DrugBank_4593",
"DrugBank_4959",
"DrugBank_5043",
"DrugBank_5076",
"DrugBank_5176",
"DrugBank_5418",
"DrugBank_5737",
"DrugBank_5902",
"DrugBank_6295",
"DrugBank_6304",
"DrugBank_6305",
"DrugBank_6329",
"DrugBank_6355",
"DrugBank_6401",
"DrugBank_6509",
"DrugBank_6531",
"DrugBank_6647",
"DrugBank_390",
"DrugBank_810",
"DrugBank_4316",
"DrugBank_4346",
"DrugBank_7124",
"DrugBank_2799",
"DrugBank_4662",
"DrugBank_4865",
"DrugBank_2465",
]
@pytest.fixture()
def formic_acid_molecule() -> Molecule:
formic_acid = Molecule()
formic_acid.add_atom(8, 0, False) # O1
formic_acid.add_atom(6, 0, False) # C1
formic_acid.add_atom(8, 0, False) # O2
formic_acid.add_atom(1, 0, False) # H1
formic_acid.add_atom(1, 0, False) # H2
formic_acid.add_bond(0, 1, 2, False) # O1 - C1
formic_acid.add_bond(1, 2, 1, False) # C1 - O2
formic_acid.add_bond(1, 3, 1, False) # C1 - H1
formic_acid.add_bond(2, 4, 1, False) # O2 - H2
return formic_acid
@pytest.fixture()
def formic_acid_conformers() -> Dict[str, unit.Quantity]:
return {
"cis": np.array(
[
[-0.95927322, -0.91789997, 0.36333418],
[-0.34727824, 0.12828046, 0.22784603],
[0.82766682, 0.26871252, -0.42284882],
[-0.67153811, 1.10376000, 0.61921501],
[1.15035689, -0.58282924, -0.78766006],
]
)
* unit.angstrom,
"trans": np.array(
[
[-0.95927322, -0.91789997, 0.36333418],
[-0.34727824, 0.12828046, 0.22784603],
[0.82766682, 0.26871252, -0.42284882],
[-0.67153811, 1.10376000, 0.61921501],
[1.14532626, 1.19679034, -0.41266876],
]
)
* unit.angstrom,
}
# =============================================================================================
# TESTS
# =============================================================================================
@requires_openeye
class TestOpenEyeToolkitWrapper:
"""Test the OpenEyeToolkitWrapper"""
# TODO: Make separate smiles_add_H and smiles_explicit_H tests
def test_smiles(self):
"""Test OpenEyeToolkitWrapper to_smiles() and from_smiles()"""
toolkit_wrapper = OpenEyeToolkitWrapper()
# This differs from RDKit's SMILES due to different canonicalization schemes
smiles = "[H]C([H])([H])C([H])([H])[H]"
molecule = Molecule.from_smiles(smiles, toolkit_registry=toolkit_w
|
Ziftr/counterpartyd
|
test/unit_test.py
|
Python
|
mit
| 1,568
| 0.007015
|
#! /usr/bin/python3
import sys, os, time, tempfile
import pytest
import util_test
from util_test import CURR_DIR
from fixtures.vectors import UNITTEST_VECTOR
from fixtures.params import DEFAULT_PARAMS as DP
from lib import (config, util, api)
import counterpartyd
def setup_module():
counterpartyd.set_options(database_file=tempfile.gettempdir() + '/fixtures.unittest.db', testnet=True, **util_test.COUNTERPARTYD_OPTIONS)
util_test.restore_database(config.DATABASE, CURR_DIR + '/fixtures/scenarios/unittest_fixture.sql')
config.FIRST_MULTISIG_BLOCK_TESTNET = 1
# start RPC server
api_server = api.APIServer()
api_server.daemon = True
api_server.start()
for attempt in range(5000): # wait until server is ready.
if api_server.is_ready:
break
elif attempt == 4999:
raise Exception("Timeout: RPC server not ready after 5s")
else:
time.sleep(0.001)
def teardown_module(function):
util_test.remove_database_files(config.DATABASE)
@pytest.fixture
def counterpartyd_db(request):
db = util.connect_to_db()
cursor = db.cursor()
cursor.execute('''BEGIN''')
request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
return db
def test_vector(tx_name, method, inputs, outputs, error, records, counterpartyd_db):
if method == 'parse':
util_test.insert_transaction(inputs[0], counterpartyd_db)
inputs += (inputs[0]['data'][4:],) # message arg
util_test.check_ouputs(tx_name, method, inputs, outputs, error, records, counterpartyd_db)
|
ZG-Tennis/django-ckeditor
|
ckeditor/utils.py
|
Python
|
bsd-3-clause
| 8,404
| 0.004522
|
import uuid
import os
import shutil
import urlparse
import re
import hashlib
from lxml import html
from PIL import Image, ImageFile
from django.conf import settings
import views
ImageFile.MAXBLOCKS = 10000000
def match_or_none(string, rx):
"""
Tries to match a regular expression and returns an integer if it can.
Otherwise, returns None.
@param string: String to match against
@type string: basestring
@param rx: compiled regular expression
@return: number or None
@rtype: int/long or None
"""
if string is None:
return None
match = rx.search(string)
if match:
return int(match.groups()[0])
return None
width_rx = re.compile(r'width\s*:\s*(\d+)(px)?')
height_rx = re.compile(r'height\s*:\s*(\d+)(px)?')
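# Illustrative example (hypothetical style string): for
# style = 'width: 250px; height: 100px', match_or_none(style, width_rx) returns 250,
# match_or_none(style, height_rx) returns 100, and match_or_none(None, width_rx)
# returns None.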
def get_dimensions(img):
"""
Attempts to get the dimensions of an image from the img tag.
It first tries to grab it from the css styles and then falls back
to looking at the attributes.
@param img: Image tag.
@type img: etree._Element
@return: width and height of the image
@rtype: (int or None, int or None)
"""
styles = img.attrib.get('style')
width = match_or_none(styles, width_rx) or img.attrib.get('width')
if isinstance(width, basestring):
width = int(width)
height = match_or_none(styles, height_rx) or img.attrib.get('height')
if isinstance(height, basestring):
        height = int(height)
return width, height
def get_local_path(url):
"""
Converts a url to a local path
@param url: Url to convert
@type url: basestring
@return: Local path of the url
@rtype: basestring
"""
url = urlparse.unquote(url)
local_path = settings.STATIC_ROOT + os.path.normpath(url[len(settings.STATIC_URL):])
return local_path
# `buffer` is needed since hashlib apparently isn't unicode safe
hexhash = lambda s: hashlib.md5(buffer(s)).hexdigest()
def new_rendered_path(orig_path, width, height, ext=None):
"""
Builds a new rendered path based on the original path, width, and height.
It takes a hash of the original path to prevent users from accidentally
(or purposely) overwritting other's rendered thumbnails.
This isn't perfect: we are assuming that the original file's conents never
changes, which is the django default. We could make this function more
robust by hashing the file everytime we save but that has the obvious
disadvantage of having to hash the file everytime. YMMV.
@param orig_path: Path to the original image.
@type orig_path: "/path/to/file"
@param width: Desired width of the rendered image.
@type width: int or None
@param height: Desired height of the rendered image.
@type height: int or None
@param ext: Desired extension of the new image. If None, uses
the original extension.
@type ext: basestring or None
@return: Absolute path to where the rendered image should live.
@rtype: "/path/to/rendered/image"
"""
dirname = os.path.dirname(orig_path)
rendered_path = os.path.join(dirname, 'rendered')
if not os.path.exists(rendered_path):
os.mkdir(rendered_path)
hash_path = hexhash(orig_path)
if ext is None:
ext = os.path.splitext(os.path.basename(orig_path))[1]
if ext and ext[0] != u'.':
ext = u'.' + ext
name = '%s_%sx%s' % (hash_path, width, height)
return os.path.join(rendered_path, name) + ext
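# Illustrative example (hypothetical path, hash abbreviated): an original image at
# '/static/uploads/cat.png' rendered at 200x100 maps to
# '/static/uploads/rendered/<md5-of-original-path>_200x100.png'.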
def is_rendered(path, width, height):
"""
Checks whether or not an image has been rendered to the given path
with the given dimensions
@param path: path to check
@type path: u"/path/to/image"
@param width: Desired width
@type width: int
@param height: Desired height
@type height: int
@return: Whether or not the image is correct
@rtype: bool
"""
if os.path.exists(path):
old_width, old_height = Image.open(path).size
return old_width == width and old_height == height
return False
def transcode_to_jpeg(image, path, width, height):
"""
Transcodes an image to JPEG.
@param image: Opened image to transcode to jpeg.
@type image: PIL.Image
@param path: Path to the opened image.
@type path: u"/path/to/image"
@param width: Desired width of the transcoded image.
@type width: int
@param height: Desired height of the transcoded image.
@type height: int
@return: Path to the new transcoded image.
@rtype: "/path/to/image"
"""
i_width, i_height = image.size
new_width = i_width if width is None else width
new_height = i_height if height is None else height
new_path = new_rendered_path(path, width, height, ext='jpg')
if is_rendered(new_path, new_width, new_height):
return new_path
new_image = image.resize((new_width, new_height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def re_render(path, width, height):
"""
Given an original image, width, and height, creates a thumbnailed image
of the exact dimensions given. We skip animated gifs because PIL can't
resize those automatically whereas browsers can contort them easily. We
also don't stretch images at all and return the original in that case.
@param path: Path to the original image
@type path: "/path/to/image"
@param width: Desired width
@type width: int or None
@param height: Desired height
@type height: int or None
@return: Path to the 'rendered' image.
@rtype: "/path/to/image"
"""
try:
image = Image.open(path)
except IOError:
# Probably doesn't exist or isn't an image
return path
# We have to call image.load first due to a PIL 1.1.7 bug
image.load()
if image.format == 'PNG' and getattr(settings, 'CKEDITOR_PNG_TO_JPEG', False):
pixels = reduce(lambda a,b: a*b, image.size)
# check that our entire alpha channel is set to full opaque
if image.mode == 'RGB' or image.split()[-1].histogram()[-1] == pixels:
return transcode_to_jpeg(image, path, width, height)
if image.size <= (width, height):
return path
if width is None and height is None:
return path
# We can't resize animated gifs
if image.format == 'GIF':
try:
image.seek(1)
return path
except EOFError:
# Static GIFs should throw an EOF on seek
pass
new_path = new_rendered_path(path, width, height)
if is_rendered(new_path, width, height):
return new_path
# Re-render the image, optimizing for filesize
new_image = image.resize((width, height), Image.ANTIALIAS)
new_image.save(new_path, quality=80, optimize=1)
return new_path
def get_html_tree(content):
return html.fragment_fromstring(content, create_parent='div')
def render_html_tree(tree):
return html.tostring(tree)[5:-6]
def resize_images(post_content):
"""
Goes through all images, resizing those that we know to be local to the
correct image size.
@param post_content: Raw html of the content to search for images with.
@type post_content: basestring containg HTML fragments
@return: Modified contents.
@rtype: basestring
"""
# Get tree
    tree = get_html_tree(post_content)
# Get images
imgs = tree.xpath('//img[starts-with(@src, "%s")]' % settings.STATIC_URL)
for img in imgs:
orig_url = img.attrib['src']
orig_path = get_local_path(orig_url)
width, height = get_dimensions(img)
rendered_path = re_render(orig_path, width, height)
# If we haven't changed the image, move along.
if rendered_path == orig_path:
continue
# Flip to the rendered
img.attrib['data-original'] = orig_url
img.attrib['src'] = views.get_media_url(rendered_path)
# Strip of wrapping div tag
return render_html_tree(tree)
def swap_in_originals(content):
if 'data-original' not in content:
return content
|
JustinSGray/pyCycle
|
setup.py
|
Python
|
apache-2.0
| 386
| 0.005181
|
from distutils.core import setup

setup(name='pycycle',
version='3.9.9',
packages=[
'pycycle',
'pycycle/thermo',
'pycycle/thermo/cea',
'pycycle/thermo/cea/thermo_data',
'pycycle/elements',
'pycycle/maps',
'pycycle/thermo/tabular'
],
install_requires=[
'openmdao>=3.5.0',
],
)
|
cuedpc/edpcmentoring
|
edpcmentoring/projectlight/__init__.py
|
Python
|
mit
| 60
| 0
|
default_app_config = 'projectlight.apps.ProjectlightConfig'
|
Pikecillo/genna
|
external/PyXML-0.8.4/xml/xslt/TextElement.py
|
Python
|
gpl-2.0
| 2,357
| 0.003818
|
########################################################################
#
# File Name: TextElement.py
#
#
"""
Implementation of the XSLT Spec text stylesheet element.
WWW: http://4suite.com/4XSLT e-mail: support@4suite.com
Copyright (c) 1999-2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import EMPTY_NAMESPACE
import xml.dom.ext
import xml.dom.Element
from xml.xpath import CoreFunctions
from xml.xslt import XsltElement, XsltException, Error
from xml.dom import Node
class TextElement(XsltElement):
legalAttrs = ('disable-output-escaping',)
def __init__(self, doc, uri=xml.xslt.XSL_NAMESPACE, localName='text', prefix='xsl', baseUri=''):
XsltElement.__init__(self, doc, uri, localName, prefix, baseUri)
return
def setup(self):
self.__dict__['_disable_output_escaping'] = self.getAttributeNS(EMPTY_NAMESPACE, 'disable-output-escaping') == 'yes'
self.__dict__['_nss'] = xml.dom.ext.GetAllNs(self)
for child in self.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
                raise XsltException(Error.ILLEGAL_TEXT_CHILD)
self.normalize()
return
def instantiate(self, context, processor):
if not self.firstChild:
return (context,)
if context.processorNss != self._nss:
origState = context.copyNamespaces()
context.setNamespaces(self._nss)
else:
origState = None
value = self.firstChild and self.firstChild.data or ''
if self._disable_output_escaping:
processor.writers[-1].text(value, escapeOutput=0)
else:
processor.writers[-1].text(value)
origState and context.setNamespaces(origState)
return (context,)
def __getinitargs__(self):
return (None, self.namespaceURI, self.localName, self.prefix,
self.baseUri)
def __getstate__(self):
base_state = XsltElement.__getstate__(self)
new_state = (base_state, self._nss, self._disable_output_escaping)
return new_state
def __setstate__(self, state):
XsltElement.__setstate__(self, state[0])
self._nss = state[1]
self._disable_output_escaping = state[2]
return
|
timabell/gpodder
|
src/gpodder/gtkui/frmntl/portrait.py
|
Python
|
gpl-3.0
| 8,012
| 0.002122
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import dbus
import dbus.glib
import hildon
import osso
# Replace this with your own gettext() functionality
import gpodder
_ = gpodder.gettext
class FremantleRotation(object):
"""thp's screen rotation for Maemo 5
Simply instantiate an object of this class and let it auto-rotate
your StackableWindows depending on the device orientation.
If you need to relayout a window, connect to its "configure-event"
signal and measure the ratio of width/height and relayout for that.
You can set the mode for rotation to AUTOMATIC (default), NEVER or
ALWAYS with the set_mode() method.
"""
AUTOMATIC, NEVER, ALWAYS = range(3)
# Human-readable captions for the above constants
MODE_CAPTIONS = (_('Automatic'), _('Landscape'), _('Portrait'))
# Privately-used constants
_PORTRAIT, _LANDSCAPE = ('portrait', 'landscape')
_ENABLE_ACCEL = 'req_accelerometer_enable'
_DISABLE_ACCEL = 'req_accelerometer_disable'
# Defined in mce/dbus-names.h
_MCE_SERVICE = 'com.nokia.mce'
_MCE_REQUEST_PATH = '/com/nokia/mce/request'
_MCE_REQUEST_IF = 'com.nokia.mce.request'
# sysfs device name for the keyboard slider switch
KBD_SLIDER = '/sys/devices/platform/gpio-switch/slide/state'
_KBD_OPEN = 'open'
_KBD_CLOSED = 'closed'
def __init__(self, app_name, main_window=None, version='1.0', mode=0):
"""Create a new rotation manager
app_name ... The name of your application (for osso.Context)
main_window ... The root window (optional, hildon.StackableWindow)
version ... The version of your application (optional, string)
mode ... Initial mode for this manager (default: AUTOMATIC)
"""
self._orientation = None
self._main_window = main_window
self._stack = hildon.WindowStack.get_default()
self._mode = -1
self._last_dbus_orientation = None
self._keyboard_state = self._get_keyboard_state()
app_id = '-'.join((app_name, self.__class__.__name__))
self._osso_context = osso.Context(app_id, version, False)
program = hildon.Program.get_instance()
program.connect('notify::is-topmost', self._on_topmost_changed)
system_bus = dbus.Bus.get_system()
system_bus.add_signal_receiver(self._on_orientation_signal, \
signal_name='sig_device_orientation_ind', \
dbus_interface='com.nokia.mce.signal', \
path='/com/nokia/mce/signal')
system_bus.add_signal_receiver(self._on_keyboard_signal, \
signal_name='Condition', \
dbus_interface='org.freedesktop.Hal.Device', \
path='/org/freedesktop/Hal/devices/platform_slide')
self.set_mode(mode)
def get_mode(self):
"""Get the currently-set rotation mode
This will return one of three values: AUTOMATIC, ALWAYS or NEVER.
"""
return self._mode
def set_mode(self, new_mode):
"""Set the rotation mode
You can set the rotation mode to AUTOMATIC (use hardware rotation
info), ALWAYS (force portrait) and NEVER (force landscape).
"""
if new_mode not in (self.AUTOMATIC, self.ALWAYS, self.NEVER):
raise ValueError('Unknown rotation mode')
if self._mode != new_mode:
if self._mode == self.AUTOMATIC:
# Remember the current "automatic" orientation for later
self._last_dbus_orientation = self._orientation
# Tell MCE that we don't need the accelerometer anymore
self._send_mce_request(self._DISABLE_ACCEL)
if new_mode == self.NEVER:
self._orientation_changed(self._LANDSCAPE)
elif new_mode == self.ALWAYS and \
self._keyboard_state != self._KBD_OPEN:
self._orientation_changed(self._PORTRAIT)
elif new_mode == self.AUTOMATIC:
# Restore the last-known "automatic" orientation
self._orientation_changed(self._last_dbus_orientation)
# Tell MCE that we need the accelerometer again
self._send_mce_request(self._ENABLE_ACCEL)
self._mode = new_mode
def _send_mce_request(self, request):
rpc = osso.Rpc(self._osso_context)
rpc.rpc_run(self._MCE_SERVICE, \
self._MCE_REQUEST_PATH, \
self._MCE_REQUEST_IF, \
request, \
use_system_bus=True)
def _on_topmost_changed(self, program, property_spec):
# XXX: This seems to never get called on Fremantle(?)
if self._mode == self.AUTOMATIC:
if program.get_is_topmost():
self._send_mce_request(self._ENABLE_ACCEL)
else:
self._send_mce_request(self._DISABLE_ACCEL)
def _get_main_window(self):
if self._main_window:
# If we have gotten the main window as parameter, return it and
# don't try "harder" to find another window using the stack
return self._main_window
else:
# The main window is at the "bottom" of the window stack, and as
# the list we get with get_windows() is sorted "topmost first", we
# simply take the last item of the list to get our main window
windows = self._stack.get_windows()
if windows:
return windows[-1]
else:
return None
def _orientation_changed(self, orientation):
if self._orientation == orientation:
# Ignore repeated requests
return
flags = hildon.PORTRAIT_MODE_SUPPORT
if orientation == self._PORTRAIT:
flags |= hildon.PORTRAIT_MODE_REQUEST
window = self._get_main_window()
if window is not None:
hildon.hildon_gtk_window_set_portrait_flags(window, flags)
self._orientation = orientation
def _get_keyboard_state(self):
return open(self.KBD_SLIDER).read().strip()
def _keyboard_state_changed(self):
state = self._get_keyboard_state()
if state == self._KBD_OPEN:
self._orientation_changed(self._LANDSCAPE)
elif state == self._KBD_CLOSED:
if self._mode == self.AUTOMATIC:
self._orientation_changed(self._last_dbus_orientation)
elif self._mode == self.ALWAYS:
self._orientation_changed(self._PORTRAIT)
self._keyboard_state = state
def _on_keyboard_signal(self, condition, button_name):
if condition == 'ButtonPressed' and button_name == 'cover':
self._keyboard_state_changed()
def _on_orientation_signal(self, orientation, stand, face, x, y, z):
if orientation in (self._PORTRAIT, self._LANDSCAPE):
if self._mode == self.AUTOMATIC and \
self._keyboard_state != self._KBD_OPEN:
# Automatically set the rotation based on hardware orientation
                self._orientation_changed(orientation)
# Save the current orientation for "automatic" mode later on
            self._last_dbus_orientation = orientation
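# Hedged usage sketch (added illustration): the enclosing class definition is
# truncated above, so the instance name below is purely hypothetical. Based on the
# get_mode()/set_mode() docstrings, typical use looks like:
#   rotation.set_mode(rotation.ALWAYS)   # force portrait while the keyboard is closed
#   current_mode = rotation.get_mode()   # one of AUTOMATIC, ALWAYS or NEVER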
|
QuLogic/meson
|
mesonbuild/compilers/mixins/clike.py
|
Python
|
apache-2.0
| 59,114
| 0.002436
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixin classes to be shared between C and C++ compilers.
Without this we'll end up with awful diamond inheritance problems. The goal
of this is to have mixins, which are classes that are designed *not* to be
standalone; they only work through inheritance.
"""
import collections
import functools
import glob
import itertools
import os
import re
import subprocess
import typing as T
from pathlib import Path
from ... import arglist
from ... import mesonlib
from ... import mlog
from ...linkers import GnuLikeDynamicLinkerMixin, SolarisDynamicLinker, CompCertDynamicLinker
from ...mesonlib import LibType
from ...coredata import OptionKey
from .. import compilers
from ..compilers import CompileCheckMode
from .visualstudio import VisualStudioLikeCompiler
if T.TYPE_CHECKING:
from ...dependencies import Dependency
from ...environment import Environment
from ...compilers.compilers import Compiler
from ...programs import ExternalProgram
else:
# This is a bit clever, for mypy we pretend that these mixins descend from
# Compiler, so we get all of the methods and attributes defined for us, but
# for runtime we make them descend from object (which all classes normally
    # do). This gives us DRYer type checking, with no runtime impact
Compiler = object
GROUP_FLAGS = re.compile(r'''\.so (?:\.[0-9]+)? (?:\.[0-9]+)? (?:\.[0-9]+)?$ |
^(?:-Wl,)?-l |
\.a$''', re.X)
class CLikeCompilerArgs(arglist.CompilerArgs):
prepend_prefixes = ('-I', '-L')
dedup2_prefixes = ('-I', '-isystem', '-L', '-D', '-U')
# NOTE: not thorough. A list of potential corner cases can be found in
# https://github.com/mesonbuild/meson/pull/4593#pullrequestreview-182016038
dedup1_prefixes = ('-l', '-Wl,-l', '-Wl,--export-dynamic')
dedup1_suffixes = ('.lib', '.dll', '.so', '.dylib', '.a')
dedup1_args = ('-c', '-S', '-E', '-pipe', '-pthread')
def to_native(self, copy: bool = False) -> T.List[str]:
# This seems to be allowed, but could never work?
        assert isinstance(self.compiler, compilers.Compiler), 'How did you get here'
# Check if we need to add --start/end-group for circular dependencies
# between static libraries, and for recursively searching for symbols
# needed by static libraries that are provided by object files or
# shared libraries.
self.flush_pre_post()
if copy:
new = self.copy()
else:
new = self
# This covers all ld.bfd, ld.gold, ld.gold, and xild on Linux, which
# all act like (or are) gnu ld
# TODO: this could probably be added to the DynamicLinker instead
if isinstance(self.compiler.linker, (GnuLikeDynamicLinkerMixin, SolarisDynamicLinker, CompCertDynamicLinker)):
group_start = -1
group_end = -1
for i, each in enumerate(new):
if not GROUP_FLAGS.search(each):
continue
group_end = i
if group_start < 0:
# First occurrence of a library
group_start = i
if group_start >= 0:
# Last occurrence of a library
new.insert(group_end + 1, '-Wl,--end-group')
new.insert(group_start, '-Wl,--start-group')
# Remove system/default include paths added with -isystem
default_dirs = self.compiler.get_default_include_dirs()
if default_dirs:
bad_idx_list = [] # type: T.List[int]
for i, each in enumerate(new):
if not each.startswith('-isystem'):
continue
# Remove the -isystem and the path if the path is a default path
if (each == '-isystem' and
i < (len(new) - 1) and
new[i + 1] in default_dirs):
bad_idx_list += [i, i + 1]
elif each.startswith('-isystem=') and each[9:] in default_dirs:
bad_idx_list += [i]
elif each[8:] in default_dirs:
bad_idx_list += [i]
for i in reversed(bad_idx_list):
new.pop(i)
return self.compiler.unix_args_to_native(new._container)
def __repr__(self) -> str:
self.flush_pre_post()
return f'CLikeCompilerArgs({self.compiler!r}, {self._container!r})'
class CLikeCompiler(Compiler):
"""Shared bits for the C and CPP Compilers."""
if T.TYPE_CHECKING:
warn_args = {} # type: T.Dict[str, T.List[str]]
# TODO: Replace this manual cache with functools.lru_cache
find_library_cache = {} # type: T.Dict[T.Tuple[T.Tuple[str, ...], str, T.Tuple[str, ...], str, LibType], T.Optional[T.List[str]]]
find_framework_cache = {} # type: T.Dict[T.Tuple[T.Tuple[str, ...], str, T.Tuple[str, ...], bool], T.Optional[T.List[str]]]
internal_libs = arglist.UNIXY_COMPILER_INTERNAL_LIBS
def __init__(self, exe_wrapper: T.Optional['ExternalProgram'] = None):
# If a child ObjC or CPP class has already set it, don't set it ourselves
self.can_compile_suffixes.add('h')
# If the exe wrapper was not found, pretend it wasn't set so that the
# sanity check is skipped and compiler checks use fallbacks.
if not exe_wrapper or not exe_wrapper.found() or not exe_wrapper.get_command():
self.exe_wrapper = None
else:
self.exe_wrapper = exe_wrapper
def compiler_args(self, args: T.Optional[T.Iterable[str]] = None) -> CLikeCompilerArgs:
# This is correct, mypy just doesn't understand co-operative inheritance
return CLikeCompilerArgs(self, args)
def needs_static_linker(self) -> bool:
return True # When compiling static libraries, so yes.
def get_always_args(self) -> T.List[str]:
'''
Args that are always-on for all C compilers other than MSVC
'''
return self.get_largefile_args()
def get_no_stdinc_args(self) -> T.List[str]:
return ['-nostdinc']
def get_no_stdlib_link_args(self) -> T.List[str]:
return ['-nostdlib']
def get_warn_args(self, level: str) -> T.List[str]:
# TODO: this should be an enum
return self.warn_args[level]
def get_no_warn_args(self) -> T.List[str]:
# Almost every compiler uses this for disabling warnings
return ['-w']
def get_depfile_suffix(self) -> str:
return 'd'
def get_exelist(self) -> T.List[str]:
return self.exelist.copy()
def get_preprocess_only_args(self) -> T.List[str]:
return ['-E', '-P']
def get_compile_only_args(self) -> T.List[str]:
return ['-c']
def get_no_optimization_args(self) -> T.List[str]:
return ['-O0']
def get_output_args(self, target: str) -> T.List[str]:
return ['-o', target]
def get_werror_args(self) -> T.List[str]:
return ['-Werror']
def get_include_args(self, path: str, is_system: bool) -> T.List[str]:
if path == '':
path = '.'
if is_system:
return ['-isystem', path]
return ['-I' + path]
def get_compiler_dirs(self, env: 'Environment', name: str) -> T.List[str]:
'''
Get dirs from the compiler, either `libraries:` or `programs:`
'''
return []
@functools.lru_cache()
def _get_library_dirs(self, env: 'Environment',
elf_class: T.Optional[int] = None) -> T.List[str]:
# TODO:
|
codecakes/random_games
|
implement_hash_map.py
|
Python
|
mit
| 5,289
| 0.016449
|
## for learning purpose only
from collections import deque
##The making of a hash Table
def hash_string(keyword,buckets):
'''
# takes as inputs a keyword
# (string) and a number of buckets,
# and returns a number representing
# the bucket for that keyword.
'''
    return sum(map(ord, keyword))%buckets
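# Worked example (hedged, added for illustration): hash_string('Zoe', 5) sums
# ord('Z') + ord('o') + ord('e') = 90 + 111 + 101 = 302, and 302 % 5 == 2, so 'Zoe'
# lands in bucket index 2 -- consistent with the 5-bucket tables checked under __main__ below.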
##Testing Hash string distribution using hash str function
def test_hash_func(fn, keys, bucSize):
results = [0] * bucSize
keys_used = set()
for key in keys:
if key and key not in keys_used:
results[fn(key, bucSize)] += 1
keys_used.add(key)
return results
## Implementing a HashTable
## create buckets
create_table = lambda size: [[] for _ in xrange(size)]
##finding buckets
def hashtable_get_bucket(htable,keyword):
return htable[hash_string(keyword, len(htable))]
##adding to buckets
def hashtable_add(htable,key,value):
# your code here
pos = hash_string(key,len(htable))
#O(k/bsize)
for each in htable[pos]:
if each[0] == key: break
else:
htable[pos].append([key, value])
return htable
##look up value of a key
def hashtable_lookup(htable,key):
pos = hash_string(key,len(htable))
for each in htable[pos]:
if each[0] == key: return each[1]
return None
##Update a key if present else add it
def hashtable_update(htable,key,value):
bucket = hashtable_get_bucket(htable,key)
for entry in bucket:
if entry[0] == key:
entry[1] = value
break
else:
hashtable_add(htable,key,value)
return htable
class hashmap(object):
def __init__(self, bsize=0):
self.bsize = bsize or 3
self.table = create_table(self.bsize)
self.keyCount = 0
def __str__(self):
return "%s" %self.table
def __repr__(self):
return "{}".format(self.__str__())
def __len__(self): return len(self.table)
def _getBucket(self, key):
return hashtable_get_bucket(self.table, key)
def _expandTable(self):
self.bsize *= 2
newtable = create_table(self.bsize)
#print "new table %s" %newtable
q = deque(maxlen=self.bsize)
q.appendleft(self.table)
#O(nlogn)
while q:
tbl = q.pop()
ln = len(tbl)
if ln > 1:
q.appendleft(tbl[:ln//2])
q.appendleft(tbl[ln//2:])
else:
#print "_expandTable else tbl is {}".format(tbl)
for each_buck in tbl:
for each_key_list in each_buck:
if each_key_list:
#print "each key list is {}".format(each_key_list)
#print "_expandTable adding key {} val {}".format(each_key_list[0], each_key_list[1])
hashtable_add(newtable, each_key_list[0], each_key_list[1])
assert len(self.table) < len(newtable)
del self.table
self.table = newtable
return self.table
def _addKey(self, key, value):
if self.keyCount+1 > self.bsize:
self._expandTable()
bucket = self._getBucket(key)
for entry in bucket:
if entry[0] == key:
entry[1] = value
break
else:
hashtable_add(self.table, key,value)
self.keyCount += 1
def _getVal(self, key):
return hashtable_lookup(self.table, key)
def __getitem__(self, key):
return self._getVal(key)
def __setitem__(self, key, value):
self._addKey(key, value)
##Delete a key if present else ignore
def _hashtable_delete(self, key):
bucket = hashtable_get_bucket(self.table, key)
for entry in bucket:
if entry[0]==key:
bucket.remove(entry)
self.keyCount -= 1
def remove(self, key):
self._hashtable_delete(key)
if __name__ == "__main__":
table = [[['Francis', 13], ['Ellis', 11]], [], [['Bill', 17],
['Zoe', 14]], [['Coach', 4]], [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]]
assert hashtable_get_bucket(table, "Zoe") == [['Bill', 17], ['Zoe', 14]]
assert hashtable_get_bucket(table, "Brick") == []
assert hashtable_get_bucket(table, "Lilith") == [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]
table = [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
[['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
hashtable_update(table, 'Bill', 42)
hashtable_update(table, 'Rochelle', 94)
hashtable_update(table, 'Zed', 68)
assert table == [[['Ellis', 11], ['Francis', 13]], [['Zed', 68]], \
[['Bill', 42], ['Zoe', 14]], [['Coach', 4]], [['Louis', 29], \
['Nick', 2], ['Rochelle', 94]]]
#d for dict
d = hashmap(4)
d['fdfds'] = 32423324
d['fdfsfdsds'] = 32423324
d['fdfsfdsdssdfsd'] = 32423324
d['fdffsd'] = 32423324
d['ffsd'] = 32423324
d.remove('ffsd')
t = [[], [], [], [['fdfsfdsdssdfsd', 32423324]], [], [['fdffsd', 32423324]], [], [['fdfds', 32423324], ['fdfsfdsds', 32423324]]]
assert repr(d) == repr(t)
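    # Hedged illustration (added example, not part of the original checks): basic
    # hashmap usage, including the resize triggered once keyCount exceeds bsize.
    m = hashmap(2)
    m['alpha'] = 1
    m['beta'] = 2
    m['gamma'] = 3          # third key forces _expandTable() from 2 to 4 buckets
    assert m['beta'] == 2
    m.remove('alpha')
    assert m['alpha'] is None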
|
higee/project_euler
|
21-30/21.py
|
Python
|
mit
| 860
| 0.010465
|
class amicable():
def d(self, n):
if n == 1:
return 0
else:
sum_of_factors = 0
for i in range(1, int(n**0.5)+1):
if n % i == 0:
sum_of_factors += i
if n/i != n:
                        sum_of_factors += int(n/i)
return sum_of_factors
def __call__(self, n):
sum_of_amicable = 0
for i in range(1, n):
original = i, amicable.d(self, i)
inverse = amicable.d(self, amicable.d(self, i)), amicable.d(self, i)
if (original == inverse) & (amicable.d(self, i) != i):
sum_of_amicable += i
return sum_of_amicable
def main():
euler_21 = amicable()
n=10000
print(euler_21(n))
if __name__ == "__main__":
main()
|
Informatik-AG-KGN-2016/Dokumente
|
2016-11-14/input.py
|
Python
|
gpl-3.0
| 503
| 0
|
# introduction
print("Bitte füllen Sie das folgende Formular aus")
# get name
name = input("Vorname: ")
lastname = input("Nachname: ")
fullname = name + ' ' + lastname
# get birth info
birth_year = int(input("Geburtsjahr: "))
birth_place = input("Geburtsort: ")
# calculate age
age = 2016 - birth_year
print("\n")
# print generated info
print("Hallo", fullname + ",")
print("Sie sind", age, "Jahre alt und wurden in", birth_place, "geboren.")
print("Vielen Dank für Ihre Teilnahme an der Umfrage.")
|
TRex22/Sick-Beard
|
cherrypy/lib/auth.py
|
Python
|
gpl-3.0
| 3,288
| 0.007603
|
import cherrypy
from cherrypy.lib import httpauth
def check_auth(users, encrypt=None, realm=None):
"""If an authorization header contains credentials, return True, else False."""
request = cherrypy.serving.request
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
            raise cherrypy.HTTPError(400, 'Bad Request')
if not encrypt:
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
if hasattr(users, '__call__'):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
                # fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(realm, users, encrypt=None, debug=False):
"""If auth fails, raise 401 with a basic authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
encrypt: callable used to encrypt the password returned from the user-agent.
if None it defaults to a md5 encryption.
"""
if check_auth(users, encrypt):
if debug:
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
def digest_auth(realm, users, debug=False):
"""If auth fails, raise 401 with a digest authentication header.
realm: a string containing the authentication realm.
users: a dict of the form: {username: password} or a callable returning a dict.
"""
if check_auth(users, realm=realm):
if debug:
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
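# Hedged usage sketch (added illustration; how these helpers are wired into a
# CherryPy tool is an assumption, not shown in this module):
#   users = {'admin': '5f4dcc3b5aa765d61d8327deb882cf99'}   # md5 of 'password'
#   basic_auth('My Realm', users)   # raises cherrypy.HTTPError(401) when auth fails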
|
fotcorn/liveinconcert
|
manage.py
|
Python
|
mit
| 250
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "concert.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
HeavenlySword/INTINT
|
intint/intwriter.py
|
Python
|
mit
| 12,384
| 0.006137
|
import os
import json
import pyodbc
import psycopg2
import psycopg2.extras
from psycopg2.pool import ThreadedConnectionPool
import datetime
from concurrent.futures import ThreadPoolExecutor, wait
import multiprocessing
import sys
import hashlib
from utils import *
THREADNUM = 16
class IntWriter:
def __init__(self, target):
self.inttype = target['type'] #database, file, etc.
self.intconnstr = target['connstr'] #connection string: Server,Port,db/filename
self.mdmconnstr = 'Driver={ODBC Driver 13 for SQL Server}; Server=localhost; Database=MDM_PROD; UID=int_etl; PWD=ugpassword;'
self.mdmquery = 'SELECT [ID],[UID] FROM [MDM_PROD].[MODEL].[OBJECTS] where SystemID = ? and deletiondate is null'
self.goldenquery = 'SELECT [XID] as [ID],[UniqueObjectID] as [GoldenID] FROM [MDM_PROD].[MODEL].[mv_xref] where SystemID = ? and [UniqueObjectID] is not null'
self.mdmssys = target['ssys'] #source system code for UID lookup in MDM
self.intencoding = target['encoding'] #append method description (api, rest, query, etc.)
self.intname = target['name'] #name of table or procedure or whatever else
self.lookupcolumns = target['lookupcolumns']
self.pool = None
self.conn = None
self.curr = None
self.wcounter = 0
self.stream = []
self.intheader = target['header']
self.lookup_table = dict()
self.golden_table = dict()
self.ods_to_dwh_table = set()
self.cache_dict = dict()
self.convtime = datetime.timedelta()
self.connect()
self.active = True
self.executor = ThreadPoolExecutor(max_workers=THREADNUM)
self.futures = []
def golden_tracker(self):
cursor = pyodbc.connect(self.mdmconnstr).execute(self.goldenquery, (self.mdmssys,))
for row in cursor:
self.golden_table[row[0]] = row[1]
logging.info(len(self.golden_table), 'golden IDs are mapped to datasource. Memory used: ', sys.getsizeof(self.golden_table))
def ods_to_dwh_tracker(self):
cursor = pyodbc.connect(self.intconnstr).execute('select odsid from ' + self.intname)
self.ods_to_dwh_table.update([row[0] for row in cursor])
logging.info(len(self.ods_to_dwh_table), 'records already in Staging area. Memory used: ', sys.getsizeof(self.ods_to_dwh_table))
def change_tracker(self, dtype):
query = "select ddochash, dcontenthash from public.v_fact where dtype = %s"
db = psycopg2.connect(self.intconnstr)
cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query, (dtype,))
for row in cursor.fetchall():
self.cache_dict[row['ddochash'].tobytes()] = row['dcontenthash']
def connect(self):
t = datetime.datetime.today()
# TODO: move to a separate function to make program independant of MDM system
cursor = pyodbc.connect(self.mdmconnstr).execute(self.mdmquery, (self.mdmssys,))
columns = [column[0] for column in cursor.description]
for row in cursor.fetchall():
self.lookup_table[row[1]] = row[0]
# print(self.lookup_table)
self.golden_tracker()
if self.inttype == 'odsf1':
self.pool = ThreadedConnectionPool(1, THREADNUM + 1, self.intconnstr)
if self.inttype == 'staf1':
self.ods_to_dwh_tracker()
if self.intname == 'KS2': # TODO: add proper lookup of possible systems or some other logic when to look for changes (may be target system)
self.change_tracker(self.intname)
logging.info('Cache initialization took ' + str(datetime.datetime.today() - t))
return
def clear(self):
self.stream.clear()
return
def written(self):
print(self.wcounter)
return self.wcounter
def __len__(self):
return len(self.stream)
def append(self, data):
st = datetime.datetime.now()
BATCH_SIZE = 1
if self.inttype == 'apij1':
BATCH_SIZE = 1000
objectkeys = ['ExtractionDate','Migration','ActionID','SystemID','EntityID','UID','ParentUID','Verified','UXID','ValidFrom','ValidTo']
obj = {}
if 'PeriodObjects' in data:
obj['ExtractionDate'] = data['ExtractionDate']
obj['Migration'] = data['Migration']
obj['ActionID'] = data['ActionID']
obj['SystemID'] = data['SystemID']
obj['EntityID'] = data['EntityID']
obj['UID'] = data['UID']
obj['ParentUID'] = data['ParentUID']
obj['Verified'] = data['Verified']
obj['UXID'] = data['UXID']
obj['PeriodObjects'] = data['PeriodObjects']
else:
obj['PeriodObjects'] = []
obj['PeriodObjects'].append({'Attributes': []})
if 'ValidFrom' in data:
obj['PeriodObjects'][0]['ValidFrom'] = data['ValidFrom']
if 'ValidTo' in data:
obj['PeriodObjects'][0]['ValidTo'] = data['ValidTo']
for key in data.keys():
if key not in objectkeys:
if data[key] in self.lookup_table:
data[key] = self.lookup_table[data[key]]
obj['PeriodObjects'][0]['Attributes'].append({'Name': key, 'Value': str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')})
else:
obj[key] = str(data[key]).replace('00000000-0000-0000-0000-000000000000', '#NULL')
obj['ActionID'] = 3 # Force-set action as "integration"
elif self.inttype == 'odsf1':
objectkeys = ['DataType','SystemID','ActionID','ExtractionDate','DocumentUID','Ln','inttimestamp']
obj = dict()
obj['dtimestamp'] = data['inttimestamp']
obj['dextractiondate'] = data['ExtractionDate']
obj['dtype'] = data['DataType']
obj['dsystem'] = data['SystemID']
obj['ddocuid'] = data['DocumentUID']
obj['ddocln'] = data['Ln']
obj['ddochash'] = hashlib.md5((str(obj['ddocuid']) + str(obj['ddocln'])).encode('utf-8')).digest()
# filter elements where GUID lookup failed --- NO IMPORT before GUIDs are in MDM
errs = [(k,v) for (k,v) in data.items() if k in self.lookupcolumns and v not in self.lookup_table and v != '00000000-0000-0000-0000-000000000000']
if len(errs) > 0:
logging.warning('Failed to convert GUID for %s', str(errs))
self.convtime += datetime.datetime.now() - st
return 0
obj['dcontent'] = json.dumps({k:self.lookup_table[v] if v in self.lookup_table else v.replace('00000000-0000-0000-0000-000000000000', '#NULL')
for (k,v) in data.items() if k not in objectkeys}, sort_keys=True)
obj['dcontenthash'] = hashlib.md5(obj['dcontent'].encode('utf-8')).digest()
obj['delta'] = False
if obj['ddochash'] in self.cache_dict:
# This line has been already posted so we need to check if the last available record is actual
                # flag line as delta
                obj['delta'] = True
if self.cache_dict[obj['ddochash']].tobytes() == obj['dcontenthash']:
# Can update some field here with a timestamp to guaranteee that data is actual
self.convtime += datetime.datetime.now() - st
return 0
# Earlier version exists so we have to create a new record for this version
elif self.inttype == 'staf1':
obj = data.copy()
if obj['odsid'] in self.ods_to_dwh_table:
self.convtime += datetime.datetime.now() - st
return 0
# TODO: this list of fields should be another field in sources table
golden_entities = ['ProjectUID', 'ConstrObjectUID']
for key in golden_entities:
if obj[ke
|
yardsalecoin/yardsalecoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,836
| 0.038157
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9554")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9554")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
        pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
fooelisa/netmiko
|
netmiko/__init__.py
|
Python
|
mit
| 1,198
| 0.003339
|
from __future__ import unicode_literals
import logging
# Logging configuration
log = logging.getLogger(__name__) # noqa
log.addHandler(logging.NullHandler())  # noqa
from netmiko.ssh_dispatcher import ConnectHandler
from netmiko.ssh_dispatcher import ssh_dispatcher
from netmiko.ssh_dispatcher import redispatch
from netmiko.ssh_dispatcher import platforms
from netmiko.ssh_dispatcher import FileTransfer
from netmiko.scp_handler import SCPConn
from netmiko.cisco.cisco_ios import InLineTransfer
from netmiko.ssh_exception import NetMikoTimeoutException
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko.ssh_autodetect import SSHDetect
from netmiko.base_connection import BaseConnection
# Alternate naming
NetmikoTimeoutError = NetMikoTimeoutException
NetmikoAuthError = NetMikoAuthenticationException
__version__ = '2.0.1'
__all__ = ('ConnectHandler', 'ssh_dispatcher', 'platforms', 'SCPConn', 'FileTransfer',
'NetMikoTimeoutException', 'NetMikoAuthenticationException',
'NetmikoTimeoutError', 'NetmikoAuthError', 'InLineTransfer', 'redispatch',
'SSHDetect', 'BaseConnection')
# Cisco cntl-shift-six sequence
CNTL_SHIFT_6 = chr(30)
|
jakubtuchol/epi
|
test/test_greedy_algorithms.py
|
Python
|
mit
| 2,161
| 0
|
from src.greedy_algorithms import find_ample_city
from src.greedy_algorithms import find_majority_element
from src.greedy_algorithms import GasCity
from src.greedy_algorithms import three_sum
from src.greedy_algorithms import trapped_water
from src.greedy_algorithms import two_sum
class TestTwoSum(object):
"""
Testing two sum method
"""
def test_book_example(self):
in_arr = [11, 2, 5, 7, 3]
assert two_sum(14, in_arr)
assert two_sum(13, in_arr)
assert two_sum(16, in_arr)
assert not two_sum(17, in_arr)
assert not two_sum(21, in_arr)
assert not two_sum(11, in_arr)
class TestThreeSum(object):
"""
Question 18.5
"""
def test_book_example(self):
in_arr = [11, 2, 5, 7, 3]
assert three_sum(21, in_arr)
assert not three_sum(22, in_arr)
class TestFindMajorityElement(object):
"""
Question 18.6
"""
def test_book_example(self):
in_arr = [
'b', 'a',
'c', 'a',
'a', 'b',
'a', 'a',
'c', 'a',
]
assert 'a' == find_majority_element(in_arr)
def test_int_example(self):
in_arr = [
3, 3, 4,
2, 4, 4,
2, 4, 4,
]
assert 4 == find_majority_element(in_arr)
class TestFindAmpleCity(object):
"""
Question 18.7
"""
def test_book_example(self):
cities = [
            GasCity(id='A', gas=50, to_next=900),
GasCity(id='B', gas=20, to_next=600),
GasCity(id='C', gas=5, to_next=200),
GasCity(id='D', gas=30, to_next=400),
GasCity(id='E', gas=25, to_next=600),
GasCity(id='F', gas=10, to_next=200),
GasCity(id='G', gas=10, to_next=100),
]
assert 'D' == find_ample_city(cities)
class TestMaxTrappedWater(object):
"""
Question 18.8
"""
def test_book_example(self):
heights = [
1, 2, 1,
3, 4, 4,
5, 6, 2,
1, 3, 1,
3, 2, 1,
2, 4, 1,
]
assert 48 == trapped_water(heights)
|
filbranden/systemd
|
tools/make-autosuspend-rules.py
|
Python
|
gpl-2.0
| 938
| 0
|
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
# Generate autosuspend rules for devices that have been tested to work properly
# with autosuspend by the Chromium OS team. Based on
# https://chromium.googlesource.com/chromiumos/platform2/+/master/power_manager/udev/gen_autosuspend_rules.py
import chromiumos.gen_autosuspend_rules
print('# pci:v<00VENDOR>d<00DEVICE> (8 uppercase hexadecimal digits twice)')
for entry in chromiumos.gen_autosuspend_rules.PCI_IDS:
    vendor, device = entry.split(':')
vendor = int(vendor, 16)
device = int(device, 16)
print('pci:v{:08X}d{:08X}*'.format(vendor, device))
print('# usb:v<VEND>p<PROD> (4 uppercase hexadecimal digits twice')
for entry in chromiumos.gen_autosuspend_rules.USB_IDS:
vendor, product = entry.split(':')
vendor = int(vendor, 16)
product = int(product, 16)
print('usb:v{:04X}p{:04X}*'.format(vendor, product))
print(' ID_AUTOSUSPEND=1')
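# Worked example (hedged, added for illustration): a PCI_IDS entry such as
# '8086:1234' is parsed as hex and printed as 'pci:v00008086d00001234*', i.e.
# vendor and device zero-padded to eight uppercase hexadecimal digits each.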
|
h4ck3rm1k3/gcc-ontology
|
lib/ontologies/org/w3/_2000/_10/swap/pim/doc.py
|
Python
|
gpl-3.0
| 265
| 0.011321
|
import rdflib
from rdflib import Namespace
from ontology import Ontology
class DOC(Ontology, ):
def __init__(self):
Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2000/10/swap/pim/doc#'))
        prefix = 'doc'
doc = DOC()
ontology = doc
|
danalec/dotfiles
|
sublime/.config/sublime-text-3/Packages/Anaconda/anaconda_lib/workers/process.py
|
Python
|
mit
| 636
| 0
|
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .local_process import LocalProcess
from .remote_process import StubProcess
from .vagrant_process import VagrantProcess
class WorkerProcess(object):
"""Return a right processer based in the scheme
"""
_processers = {'tcp': StubProcess, 'vagrant': VagrantProcess}
def __init__(self, interpreter):
self._interpreter = interpreter
def take(self):
scheme = self._interpreter.scheme
return self._processers.get(scheme, LocalProcess)(self._interpreter)
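# Hedged usage sketch (added illustration; the interpreter object and its `scheme`
# attribute come from elsewhere in the package and are assumptions here):
#   process = WorkerProcess(interpreter).take()
#   # 'tcp' -> StubProcess, 'vagrant' -> VagrantProcess, anything else -> LocalProcess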
|
Zlash65/erpnext
|
erpnext/manufacturing/doctype/operation/test_operation.py
|
Python
|
gpl-3.0
| 258
| 0.007752
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Operation')
class TestOperation(unittest.TestCase):
pass
|
gopythongo/gopythongo
|
src/py/gopythongo/tests/debversion.py
|
Python
|
mpl-2.0
| 3,559
| 0.006185
|
# -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from unittest.case import TestCase
from gopythongo.utils.debversion import debian_substr_compare, split_version_parts, DebianVersion, \
InvalidDebianVersionString
class DebianVersionTests(TestCase):
def test_debian_substr_compare(self) -> None:
self.assertEqual(debian_substr_compare("", "a"), -1)
self.assertEqual(debian_substr_compare("09", "10"), -1)
self.assertEqual(debian_substr_compare("~~", "~"), -1)
self.assertEqual(debian_substr_compare("~~", "~~a"), -1)
self.assertEqual(debian_substr_compare("~~", "~~"), 0)
self.assertEqual(debian_substr_compare("~", ""), -1)
self.assertEqual(debian_substr_compare("30", "30"), 0)
def test_debian_version_compare(self) -> None:
self.assertTrue(DebianVersion.fromstring("2:1.0") < DebianVersion.fromstring("3:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0~1") < DebianVersion.fromstring("3:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0~bpo1") < DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("2:1.0dev") > DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("1.0dev") > DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("1.0-1") > DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("1.0-2") > DebianVersion.fromstring("1.0-1"))
self.assertTrue(DebianVersion.fromstring("1.0") == DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("0:1.0") == DebianVersion.fromstring("1.0"))
self.assertTrue(DebianVersion.fromstring("3:1.0") > DebianVersion.fromstring("2:1.0"))
self.assertTrue(DebianVersion.fromstring("1.1") > DebianVersion.fromstring("1.0"))
def test_split_version_parts(self) -> None:
self.assertListEqual(split_version_parts("a67bhgs89"), ["a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("33a67bhgs89"), ["33", "a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("~33a67bhgs89"), ["~", "33", "a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("33~a67bhgs89"), ["33", "~a", "67", "bhgs", "89"])
self.assertListEqual(split_version_parts("1"), ["1"])
self.assertListEqual(split_version_parts(""), [""])
def test_serialization(self) -> None:
v = DebianVersion.fromstring("2:1.0~bpo1")
self.assertEqual(v, v.fromstring(v.tostring()))
def test_sorting_compatibility_aptpkg(self) -> None:
version_strings = ["~~a", "~", "~~", "a1", "1.0", "1.0-1", "1.0~bpo1", "1.0-1~bpo1"]
# sorted using python-apt's apt_pkg.version_compare
aptpkg_sorting = ['~~', '~~a', '~', '1.0~bpo1', '1.0', '1.0-1~bpo1', '1.0-1', 'a1']
l = []
for x in version_strings:
l.append(DebianVersion.fromstring(x))
l.sort()
self.assertListEqual(aptpkg_sorting, [str(x) for x in l])
def test_validation(self) -> None:
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "1.0:0")
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "ö:1.0")
self.assertRaises(InvalidDebianVersionString, DebianVersion.fromstring, "1.Ö")
|
veltzer/riddling
|
instances/cover_subset/solution.py
|
Python
|
gpl-3.0
| 1,036
| 0
|
import random
import numpy
large_set_size = 1000
small_set_size = 20
print_diff = 100
stat_num = 1000
stats = [[], [], []]
for _ in range(stat_num):
lset = set()
sset = set()
stop = False
i = 0
large_set_count = None
    small_set_count = None
large_set_size_when_small_set_complete = None
while not stop:
# if i % print_diff == 0:
# print(len(lset), len(sset))
r = random.randrange(large_set_size)
if r < small_set_size:
sset.add(r)
lset.add(r)
if len(sset) == small_set_size and small_set_count is None:
            small_set_count = i
large_set_size_when_small_set_complete = len(lset)
if len(lset) == large_set_size:
large_set_count = i
stop = True
i += 1
stats[0].append(large_set_size_when_small_set_complete)
stats[1].append(large_set_count)
stats[2].append(small_set_count)
print(numpy.average(stats[0]))
print(numpy.average(stats[1]))
print(numpy.average(stats[2]))
|
bernieyangmh/spider
|
spider_y/items.py
|
Python
|
gpl-3.0
| 1,245
| 0.00241
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from scrapy.loader import ItemLoader
from w3lib.html import remove_tags
class SpiderYItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
img = Field()
name = Field()
def filter_price(value):
if value.isdigit():
return value
class Example(scrapy.Item):
name = scrapy.Field(
input_processor=MapCompose(unicode.title),
output_processor=Join(),
)
price = scrapy.Field(
input_processor=MapCompose(remove_tags, filter_price),
output_processor=TakeFirst(),
)
# eg: ItemLoader(item=Product()).add_value('price', [u'€', u'<span>1000</span>'])
# length_out = MapCompose(parse_length, unit='cm') or loader = ItemLoader(product, unit='cm')
# or ItemLoader(product).context['unit'] = 'cm' to change loader_context
# def parse_length(text, loader_context):
# unit = loader_context.get('unit', 'm')
# parsed_length = 'some parsing'
# return parsed_length
|
kdz/test
|
VEdit.py
|
Python
|
mit
| 735
| 0.005442
|
__author__ = 'kdsouza'
import traits.api as tr
import traitsui.api as trui
from traitsui.editors import *
class VEdit(tr.HasTraits):
"""
    Container class for value, editor type, and editor specifications.
"""
value = tr.Any
editor = trui.EditorFactory
kwargs = tr.Dict(key_trait=tr.Str, value_trait=tr.Any)
# item_kwargs = tr.Dict(key_trait=tr.Str, value_trait=tr.Any) # default hide label, custom style?
def __init__(self, value, editor, kwargs=dict()):
super(VEdit, self).__init__()
        self.value, self.editor, self.kwargs = value, editor, kwargs
    def __eq__(self, other):
        return isinstance(other, VEdit) and (self.value == other.value) and (self.editor == other.editor)
|
ESOedX/edx-platform
|
lms/djangoapps/program_enrollments/api/tests/test_writing.py
|
Python
|
agpl-3.0
| 574
| 0.001742
|
"""
(Future home of) Tests for program enrollment writing Python API.
Currently, we do not directly unit test the functions in api/writing.py.
This is okay for now because they are all used in
`rest_api.v1.views` and are thus tested through `rest_api.v1.tests.test_views`.
Eventually it would be good to directly test the Python API function and just use
mocks in the view tests.
This file serves as a placeholder and reminder to do that the next time there
is development on the program_enrollments writing API.
"""
from __future__ import absolute_import, unicode_literals
|
rosenvladimirov/odoo
|
anglo_saxon_dropshipping/__init__.py
|
Python
|
agpl-3.0
| 1,071
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_dropshipping
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tensorflow/tensorflow
|
tensorflow/python/framework/traceable_stack_test.py
|
Python
|
apache-2.0
| 5,145
| 0.003693
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.traceable_stack."""
from tensorflow.python.framework import test_util
from tensorflow.python.framework import traceable_stack
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect as inspect
_LOCAL_OBJECT = lambda x: x
_THIS_FILENAME = inspect.getsourcefile(_LOCAL_OBJECT)
class TraceableObjectTest(test_util.TensorFlowTestCase):
def testSetFilenameAndLineFromCallerUsesCallersStack(self):
t_obj = traceable_stack.TraceableObject(17)
# Do not separate placeholder from the set_filename_and_line_from_caller()
# call one line below it as it is used to calculate the latter's line
# number.
placeholder = lambda x: x
result = t_obj.set_filename_and_line_from_caller()
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(_THIS_FILENAME, t_obj.filename)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerRespectsOffset(self):
def call_set_filename_and_line_from_caller(t_obj):
# We expect to retrieve the line number from _our_ caller.
return t_obj.set_filename_and_line_from_caller(offset=1)
t_obj = traceable_stack.TraceableObject(None)
# Do not separate placeholder from the
# call_set_filename_and_line_from_caller() call one line below it as it is
# used to calculate the latter's line number.
placeholder = lambda x: x
result = call_set_filename_and_line_from_caller(t_obj)
expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
self.assertEqual(expected_lineno, t_obj.lineno)
self.assertEqual(t_obj.SUCCESS, result)
def testSetFilenameAndLineFromCallerHandlesRidiculousOffset(self):
t_obj = traceable_stack.TraceableObject('The quick brown fox.')
# This line shouldn't die.
result = t_obj.set_filename_and_line_from_caller(offset=300)
# We expect a heuristic to be used because we are not currently 300 frames
# down on the stack. The filename and lineno of the outermost frame are not
# predictable -- in some environments the filename is this test file, but in
# other environments it is not (e.g. due to a test runner calling this
# file). Therefore we only test that the called function knows it applied a
# heuristic for the ridiculous stack offset.
self.assertEqual(t_obj.HEURISTIC_USED, result)
class TraceableStackTest(test_util.TensorFlowTestCase):
def testPushPeekPopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
    t_stack.push_obj('hope')
expected_lifo_peek = ['hope', 42.0]
self.assertEqual(expected_lifo_peek, list(t_stack.peek_objs()))
self.assertEqual('hope', t_stack.pop_obj())
self.assertEqual(42.0, t_stack.pop_obj())
def testPushPeekTopObj(self):
t_stack = traceable_stack.TraceableStack()
t_stack.push_obj(42.0)
t_stack.push_obj('hope')
self.assertEqual('hope', t_stack.peek_top_obj())
def testPushPopPreserveLifoOrdering(self):
t_stack = traceable_stack.TraceableStack()
    t_stack.push_obj(0)
t_stack.push_obj(1)
t_stack.push_obj(2)
t_stack.push_obj(3)
obj_3 = t_stack.pop_obj()
obj_2 = t_stack.pop_obj()
obj_1 = t_stack.pop_obj()
obj_0 = t_stack.pop_obj()
self.assertEqual(3, obj_3)
self.assertEqual(2, obj_2)
self.assertEqual(1, obj_1)
self.assertEqual(0, obj_0)
def testPushObjSetsFilenameAndLineInfoForCaller(self):
t_stack = traceable_stack.TraceableStack()
# We expect that the line number recorded for the 1-object will come from
# the call to t_stack.push_obj(1). Do not separate the next two lines!
placeholder_1 = lambda x: x
t_stack.push_obj(1)
# We expect that the line number recorded for the 2-object will come from
# the call to call_push_obj() and _not_ the call to t_stack.push_obj().
def call_push_obj(obj):
t_stack.push_obj(obj, offset=1)
# Do not separate the next two lines!
placeholder_2 = lambda x: x
call_push_obj(2)
expected_lineno_1 = inspect.getsourcelines(placeholder_1)[1] + 1
expected_lineno_2 = inspect.getsourcelines(placeholder_2)[1] + 1
t_obj_2, t_obj_1 = t_stack.peek_traceable_objs()
self.assertEqual(expected_lineno_2, t_obj_2.lineno)
self.assertEqual(expected_lineno_1, t_obj_1.lineno)
if __name__ == '__main__':
googletest.main()
|
kevinlioi/GenOpt
|
examples/constrained_optimization.py
|
Python
|
mit
| 762
| 0.045932
|
import numpy
from GenOpt import GeneticOptimizer
##Booths Function but with an additional slack variable to show the constraint feature.
def BoothsFnc(x):
    return (x[:, 0] + 2*x[:, 1] - 7)**2 + (2*x[:, 0] + x[:, 1] - 5)**2
InitialSolutions = [numpy.array([numpy.random.uniform(), numpy.random.uniform(), numpy.random.uniform()]) for i in range(1000)]
InitialSolutions = numpy.vstack([10*z/sum(z) for z in InitialSolutions])
G2 = GeneticOptimizer(InitialSolutions = InitialSolutions,
Objective = BoothsFnc,
ub = [10,10,10],
lb = [0,0,0],
Sense = 'min',
MutationType = 'Sum Constraint',
Crossover = 'Weighted Average')
R = G2.GA()
|
UTAlan/PhotosByMallie
|
mallie/home/views.py
|
Python
|
gpl-2.0
| 361
| 0.019391
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from home.models import HomeImage
def index(request):
info = {}
    info['homeImages'] = HomeImage.objects.all().order_by('title').order_by('order')
return render_to_response('home/index.html', { 'info': info }, context_instance=RequestContext(request))
|
oelson/concordance
|
lib/BibleParser/xml.py
|
Python
|
gpl-2.0
| 17,962
| 0.001689
|
#-*- coding: utf-8 -*-
__all__ = ["XML_BibleParser", "reference"]
import re
import xml.etree.ElementTree as ET
from BibleParser.abstract import parser as abstract_parser, reference as abstract_reference
from BibleParser.error import *
from BibleParser.Numbers import Number
class parser(abstract_parser):
"""
Une implémentation de "BibleParser" manipulant un fichier XML organisé de
la manière suivante:
/bible : la racine
/b : une suite de livres
/c : une liste de chapitres
/v : une liste de versets
Chacun des noeuds "b", "c" ou "v" est identifié par un attribut "n".
"""
def __init__(self, xml_content):
"""
Parse le contenu du fichier XML contenant la bible et sauve la racine
sous l'attribut "bible".
"""
# TODO appeler le constructeur parent ?
abstract_parser.__init__(self)
if not isinstance(xml_content, str):
raise ValueError("expected the content of an XML file")
self.bible = ET.fromstring(xml_content)
# Crée une carte des liens parentaux entre tous les éléments du XML
self._parent_map = dict((c, p) for p in self.bible.iter() for c in p)
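    # Illustrative document shape (hedged addition; element and attribute names come
    # from the class docstring above, the sample content itself is hypothetical):
    #   <bible>
    #     <b n="Genesis">
    #       <c n="1">
    #         <v n="1">...</v>
    #       </c>
    #     </b>
    #   </bible>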
def get_element_parent(self, element):
"""
Un ajout à l'interface ElementTree : permet de sélectionner le parent de
tout nœud.
"""
return self._parent_map[element]
def get_greatest_element_index(self, root, element):
"""
Retourne le plus grand index (attribut "n") des sous-éléments du
noeud racine /root/.
Les éléments sont de type /element/.
"""
greatest = None
for child in root.iterfind("./{}".format(element)):
greatest = child
if greatest is None:
return greatest
return int(greatest.attrib["n"])
def get_book_size(self, book_element):
"""
Retourne la taille du livre passé en argument (noeud DOM).
"""
return self.get_greatest_element_index(
book_element,
"c"
)
    def get_chapter_size(self, chapter_element):
"""
Retourne la taille du chapitre passé en argument (noeud DOM).
"""
return self.get_greatest_element_index(
chapter_element,
"v"
)
def _parse_verse(self, book_element, chapter_element, verse_element):
"""
        Vérifie qu'un verset (donné par son noeud) satisfait les exigences de
la recherche par mots-clés.
Si oui, alors les correpondances sont éventuellement mises en
surbrillance.
Retourne une paire consistant en un objet de type "reference"
et son texte.
"""
if verse_element.text is None:
return
text = verse_element.text
# enlève les indications potentielles de numérotation altérée de verset
text = self._regex_match_alter_verse.sub("", text)
# barrière de concordance avec les mots-clés
if not self._verse_match_rules(text):
return
# mise en surbrillance
if self._highlight_prefix is not None:
text = self._prefix_matches(text)
return (
reference(
self,
None,
book_element.attrib["n"],
int(chapter_element.attrib["n"]),
None,
int(verse_element.attrib["n"]),
None
),
text
)
def get_book_element(self, book_name):
"""
Retourne le noeud du livre dont le nom est passé en argument.
"""
book_element = self.bible.find('./b[@n="{}"]'.format(book_name))
if book_element is None:
raise InvalidBookName(book_name)
return book_element
def get_chapter_element(self, book_element, chapter_index):
"""
Retourne le noeud du chapitre dont le numéro est passé en argument.
Le livre doit-être donné en premier argument en tant que noeud DOM.
"""
chapter_element = book_element.find('./c[@n="{}"]'.format(chapter_index))
if chapter_element is None:
raise InvalidChapterIndex(
book_element.attrib["n"],
chapter_index
)
return chapter_element
def get_verse_element(self, chapter_element, verse_index):
"""
Retourne le noeud du verset dont le numéro est passé en argument.
Le chapitre doit-être donné en premier argument en tant que noeud DOM.
"""
verse_element = chapter_element.find('./v[@n="{}"]'.format(verse_index))
if verse_element is None:
raise InvalidVerseIndex(
self.get_element_parent(chapter_element).attrib["n"],
chapter_element.attrib["n"],
verse_index
)
return verse_element
def _build_chapter_range(self, book_element, ref_obj):
"""
Construit un intervalle dense d'indices de chapitres à partir d'une
référence.
Le livre doit-être donné en premier argument en tant que noeud DOM.
"""
# Sélectionne tous les chapitres
if ref_obj.chapter_low == -1:
chapter_range = range(
1,
self.get_greatest_element_index(book_element, "c")+1
)
# Sélectionne un intervalle de chapitres
elif ref_obj.chapter_high != -1:
chapter_range = range(
ref_obj.chapter_low,
ref_obj.chapter_high+1
)
# Sélectionne un seul chapitre
else:
chapter_range = (ref_obj.chapter_low,)
return chapter_range
def _build_verse_range(self, chapter_element, ref_obj):
"""
Construit un intervalle dense d'indices de versets à partir d'une
référence.
Le chapitre doit-être donné en premier argument en tant que noeud DOM.
"""
# Sélectionne tous les versets du chapitre
if ref_obj.verse_low == -1:
verse_range = range(
1,
self.get_greatest_element_index(chapter_element, "v")+1
)
# Sélectionne un intervalle de versets
elif ref_obj.verse_high != -1:
verse_range = range(
ref_obj.verse_low,
ref_obj.verse_high+1
)
# Sélectionne un seul verset
else:
verse_range = (ref_obj.verse_low,)
return verse_range
def add_reference(self, ref_str):
"""
Ajoute une référence en l'état.
L'entrée est une chaîne, ce qui est stocké est une instance de la classe
"reference".
"""
ref_obj = reference(self, ref_str)
self.references[str(ref_obj)] = ref_obj
def add_contextual_reference(self,
ref_str,
left_lookahead,
right_lookahead):
"""
Ajoute une référence simple en l'élargissant afin d'en faire ressortir
le contexte.
"""
# TODO ne pas déborder au delà d'un chapitre dans le contexte pour les Psaumes
# TODO il faut permettre un choix entre plusieurs types de débordement (coupe exacte, au dernier point, au chapitre, au livre)
ref_obj = reference(self, ref_str)
for new_bible_reference in ref_obj.get_overflowing_references(
left_lookahead,
right_lookahead
):
# ajoute la nouvelle référence
self.references[str(new_bible_reference)] = new_bible_reference
def __iter__(self):
"""
Recherche dans la bible à partir de références et les retournes une
à une sous la forme d'objets de type "reference".
"""
# Parcours toute la bible en cas d'absence de référence
if not self.references:
for book_element in self.bible.iterfind("./b"):
for chapter_element in book_element.iterfind("./c"):
|
OpenWinCon/OpenWinNet
|
web-gui/myvenv/lib/python3.4/site-packages/setuptools/py31compat.py
|
Python
|
apache-2.0
| 1,184
| 0.005068
|
__all__ = ['get_config_vars', 'get_path']
try:
# Python 2.7 or >=3.2
from sysconfig import get_config_vars, get_path
except ImportError:
from distutils.sysconfig import get_config_vars, get_python_lib
    def get_path(name):
if name not in ('platlib', 'purelib'):
raise ValueError("Name must be purelib or platlib")
return get_python_lib(name=='platlib')
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory(object):
""""
Very simple temporary directory context manager.
Wi
|
ll try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp()
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only ones possible
pass
self.name = None
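A brief usage sketch of the TemporaryDirectory fallback above; it behaves the same whether the stdlib class or the shim is picked up, assuming it runs where the module's names are in scope. The file name is an arbitrary example.
# Usage sketch: works with either the stdlib TemporaryDirectory or the shim above.
import os.path
with TemporaryDirectory() as tmp_dir:
    marker = os.path.join(tmp_dir, "marker.txt")  # arbitrary example file
    with open(marker, "w") as fh:
        fh.write("scratch data")
# on exit the directory and its contents are removed; deletion errors are ignored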
|
gengwg/leetcode
|
226_invert_binary_tree.py
|
Python
|
apache-2.0
| 1,673
| 0.001267
|
# -*- coding: utf-8 -*-
# 226. Invert Binary Tree
#
# Invert a binary tree.
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
#
# to
#
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Trivia:
# This problem was inspired by this original tweet by Max Howell:
#
# Google: 90% of our engineers use the software you wrote (Homebrew),
# but you can’t invert a binary tree on a whiteboard so fuck off.
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# http://www.jianshu.com/p/85abb0a5f83e
# Swap the left and right subtrees of every node; the left and right children of those
|
subtrees need to be swapped as well,
# which makes a recursive solution the natural first idea.
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
# http://www.tangjikai.com/algorithms/leetcode-226-invert-binary-tree
class Solution(object):
def invertTree(self, root):
|
"""
:type root: TreeNode
:rtype: TreeNode
"""
if root:
tmp = root.left
root.left = root.right
root.right = tmp
self.invertTree(root.left)
self.invertTree(root.right)
return root
def invertTree2(self, root):
if root:
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
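A quick self-check for the solutions above, using the tree from the problem statement; TreeNode is restated from the commented-out definition so the snippet stands on its own.
# Build the example tree [4, 2, 7, 1, 3, 6, 9] and invert it with the class above.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
root = TreeNode(4)
root.left, root.right = TreeNode(2), TreeNode(7)
root.left.left, root.left.right = TreeNode(1), TreeNode(3)
root.right.left, root.right.right = TreeNode(6), TreeNode(9)
inverted = Solution().invertTree(root)
print(inverted.left.val, inverted.right.val)  # 7 2 after inversion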
|
MykeMcG/SummerRoguelike
|
src/utils.py
|
Python
|
gpl-3.0
| 2,363
| 0.003809
|
import libtcodpy as libtcod
import consts
def find_closest_target(caster, entities, range):
closest_target = None
closest_dist = range + 1
for obj
|
in entities:
if obj.fighter and obj != caster:
|
dist = caster.distance_to(obj)
if dist < closest_dist:
closest_target = obj
closest_dist = dist
return closest_target
def random_choice(chances):
dice = libtcod.random_get_int(0, 1, sum(chances))
running_sum = 0
choice = 0
for c in chances:
running_sum += c
if dice <= running_sum:
return choice
choice += 1
def random_choice_dict(chances_dict):
chances = list(chances_dict.values())
strings = list(chances_dict.keys())
return strings[random_choice(chances)]
def from_dungeon_level(table, dungeon_level):
for (value, level) in reversed(table):
if dungeon_level >= level:
return value
return 0
def build_leveled_item_list(level):
item_chances = {}
item_chances[consts.ITEM_HEALTHPOTION_NAME] = consts.ITEM_HEALTHPOTION_SPAWNRATE
item_chances[consts.ITEM_SCROLLLIGHTNING_NAME] = from_dungeon_level(consts.ITEM_SCROLLLIGHTNING_SPAWNRATE, level)
item_chances[consts.ITEM_SCROLLCONFUSE_NAME] = from_dungeon_level(consts.ITEM_SCROLLCONFUSE_SPAWNRATE, level)
item_chances[consts.ITEM_SWORDCOPPER_NAME] = from_dungeon_level(consts.ITEM_SWORDCOPPER_SPAWNRATE, level)
item_chances[consts.ITEM_BUCKLERCOPPER_NAME] = from_dungeon_level(consts.ITEM_BUCKLERCOPPER_SPAWNRATE, level)
return item_chances
def build_leveled_mob_list(level):
mob_chances = {}
mob_chances[consts.MOB_KOBOLD_NAME] = consts.MOB_KOBOLD_SPAWNRATE
mob_chances[consts.MOB_SKELETON_NAME] = consts.MOB_SKELETON_SPAWNRATE
mob_chances[consts.MOB_ORC_NAME] = from_dungeon_level(consts.MOB_ORC_SPAWNRATE, level)
return mob_chances
def get_equipped_in_slot(inventory, slot_to_check):
for obj in inventory:
if obj.equipment and obj.equipment.slot == slot_to_check \
and obj.equipment.is_equipped:
return obj.equipment
return None
def get_all_equipped(inventory):
equipped_list = []
for item in inventory:
if item.equipment and item.equipment.is_equipped:
equipped_list.append(item.equipment)
return equipped_list
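A short sketch of how the level-scaling table used by from_dungeon_level above is meant to be read; the spawn-rate numbers below are made-up placeholders, not the values from consts.
# Illustrative call of from_dungeon_level with a made-up (weight, level) table.
# Each pair means "use this weight once the dungeon level reaches this value".
orc_spawnrate = [(15, 3), (30, 5), (60, 7)]  # placeholder numbers, not from consts
for level in (1, 3, 5, 7, 10):
    print(level, from_dungeon_level(orc_spawnrate, level))
# -> 0 below level 3, then 15, 30 and 60 as the dungeon level rises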
|