text stringlengths 4 1.02M | meta dict |
|---|---|
import codecs
import sys
import typing as t
import warnings
# We do not trust traditional unixes: Linux and the BSDs commonly report a
# locale-derived (and often ASCII) filesystem encoding; the check below
# gates the UTF-8 fallback in get_filesystem_encoding() to those platforms.
has_likely_buggy_unicode_filesystem = (
    sys.platform.startswith("linux") or "bsd" in sys.platform
)
def _is_ascii_encoding(encoding: t.Optional[str]) -> bool:
"""Given an encoding this figures out if the encoding is actually ASCII (which
is something we don't actually want in most cases). This is necessary
because ASCII comes under many names such as ANSI_X3.4-1968.
"""
if encoding is None:
return False
try:
return codecs.lookup(encoding).name == "ascii"
except LookupError:
return False
# Inherits from both RuntimeWarning and UnicodeWarning so that a warning
# filter on either category catches it.
class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
    """The warning used by Werkzeug to signal a broken filesystem. Will only be
    used once per runtime."""
# Module-level guard so BrokenFilesystemWarning is emitted at most once per
# process; flipped to True by get_filesystem_encoding().
_warned_about_filesystem_encoding = False
def get_filesystem_encoding() -> str:
    """Returns the filesystem encoding that should be used. Note that this is
    different from the Python understanding of the filesystem encoding which
    might be deeply flawed. Do not use this value against Python's string APIs
    because it might be different. See :ref:`filesystem-encoding` for the exact
    behavior.

    The concept of a filesystem encoding in generally is not something you
    should rely on. As such if you ever need to use this function except for
    writing wrapper code reconsider.

    :return: the reported filesystem encoding, or ``"utf-8"`` on platforms
        with a likely-buggy (missing or ASCII) filesystem encoding.
    """
    global _warned_about_filesystem_encoding
    rv = sys.getfilesystemencoding()
    # BUGFIX: the condition must be parenthesized.  The previous form
    # ``buggy and not rv or _is_ascii_encoding(rv)`` parsed as
    # ``(buggy and not rv) or _is_ascii_encoding(rv)``, so an ASCII encoding
    # forced the UTF-8 fallback even on platforms we trust.
    if has_likely_buggy_unicode_filesystem and (not rv or _is_ascii_encoding(rv)):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                "Detected a misconfigured UNIX filesystem: Will use"
                f" UTF-8 as filesystem encoding instead of {rv!r}",
                BrokenFilesystemWarning,
            )
            _warned_about_filesystem_encoding = True
        return "utf-8"
    return rv
| {
"content_hash": "18aaac949a0f23a147b7474364a5b856",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 82,
"avg_line_length": 35.56363636363636,
"alnum_prop": 0.696319018404908,
"repo_name": "fkazimierczak/werkzeug",
"id": "36a3d12e9766571588ea34379117ccb5ee8a3b27",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/werkzeug/filesystem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6705"
},
{
"name": "HTML",
"bytes": "124"
},
{
"name": "JavaScript",
"bytes": "10524"
},
{
"name": "Python",
"bytes": "1136488"
}
],
"symlink_target": ""
} |
import time
from collections import OrderedDict
from threading import RLock
from mdstudio.util.exception import MDStudioException
class CacheDict(OrderedDict):
    """An :class:`OrderedDict` whose entries expire after ``max_age_seconds``.

    Values are stored internally as ``(value, insert_time)`` tuples.  Expired
    entries are evicted lazily by ``__getitem__``/``__contains__`` and eagerly
    by :meth:`purge`.
    """

    # NOTE(review): class-level attribute, i.e. the lock is shared by ALL
    # CacheDict instances (this matches the original behavior).
    lock = RLock()

    def __init__(self, max_age_seconds):
        """Create an empty cache.

        :param max_age_seconds: maximum entry age in seconds; must be >= 0.
        :raises MDStudioException: if ``max_age_seconds`` is negative.
        """
        # Validate before touching any state (the original assigned first
        # and only validated afterwards).
        if max_age_seconds < 0:
            raise MDStudioException()
        super(CacheDict, self).__init__()
        self.max_age = max_age_seconds

    def __getitem__(self, key):
        """Return the cached value for ``key``, evicting it first if expired.

        :raises KeyError: if ``key`` is absent or its entry has expired.
        """
        with self.lock:
            value, stored_at = OrderedDict.__getitem__(self, key)
            if time.time() - stored_at < self.max_age:
                return value
            del self[key]
            raise KeyError(key)

    def __setitem__(self, key, value, **kwargs):
        """Store ``value`` together with its insertion timestamp."""
        with self.lock:
            OrderedDict.__setitem__(self, key, (value, time.time()), **kwargs)

    def __contains__(self, key):
        """Membership test that also evicts the entry when it has expired."""
        try:
            with self.lock:
                value, stored_at = OrderedDict.__getitem__(self, key)
                if time.time() - stored_at < self.max_age:
                    return True
                del self[key]
        except KeyError:
            pass
        return False

    def purge(self):
        """Remove every expired entry from the cache.

        BUGFIX: the original looped ``for k, v in self.items(): ... del v``,
        which only unbound the loop variable (entries were never removed) and
        would also have raised ``RuntimeError`` for mutating the dict during
        iteration had it used ``del self[k]``.  Collect the expired keys
        first, then delete them.
        """
        with self.lock:
            now = time.time()
            expired = [k for k, v in self.items() if (now - v[1]) > self.max_age]
            for k in expired:
                del self[k]
| {
"content_hash": "61addcd4bf5ec52c8992d552d0f427ae",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.5,
"repo_name": "MD-Studio/MDStudio",
"id": "78c69f3166e16b52b442c645a6280e94d2fd175f",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdstudio/mdstudio/collection/cache_dict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70059"
},
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "48489"
},
{
"name": "JavaScript",
"bytes": "2621"
},
{
"name": "Makefile",
"bytes": "6901"
},
{
"name": "Python",
"bytes": "711926"
},
{
"name": "Shell",
"bytes": "6139"
},
{
"name": "TypeScript",
"bytes": "66514"
}
],
"symlink_target": ""
} |
from enum import Enum
class TextOperationStatusCodes(Enum):
    """Lifecycle states reported for an asynchronous text operation."""

    not_started = "Not Started"
    running = "Running"
    failed = "Failed"
    succeeded = "Succeeded"
class ComputerVisionErrorCodes(Enum):
    """Error codes returned by the Computer Vision service."""

    invalid_image_url = "InvalidImageUrl"
    invalid_image_format = "InvalidImageFormat"
    invalid_image_size = "InvalidImageSize"
    not_supported_visual_feature = "NotSupportedVisualFeature"
    not_supported_image = "NotSupportedImage"
    invalid_details = "InvalidDetails"
    not_supported_language = "NotSupportedLanguage"
    bad_argument = "BadArgument"
    failed_to_process = "FailedToProcess"
    timeout = "Timeout"
    internal_server_error = "InternalServerError"
    unspecified = "Unspecified"
    storage_exception = "StorageException"
class VisualFeatureTypes(Enum):
    """Visual feature categories that can be requested from image analysis."""

    image_type = "ImageType"
    faces = "Faces"
    adult = "Adult"
    categories = "Categories"
    color = "Color"
    tags = "Tags"
    description = "Description"
class OcrLanguages(Enum):
    """Language tags accepted by OCR (``unk`` lets the service auto-detect)."""

    unk = "unk"
    zh_hans = "zh-Hans"
    zh_hant = "zh-Hant"
    cs = "cs"
    da = "da"
    nl = "nl"
    en = "en"
    fi = "fi"
    fr = "fr"
    de = "de"
    el = "el"
    hu = "hu"
    it = "it"
    ja = "ja"
    ko = "ko"
    nb = "nb"
    pl = "pl"
    pt = "pt"
    ru = "ru"
    es = "es"
    sv = "sv"
    tr = "tr"
    ar = "ar"
    ro = "ro"
    sr_cyrl = "sr-Cyrl"
    sr_latn = "sr-Latn"
    sk = "sk"
class AzureRegions(Enum):
    """Azure region identifiers used to build the service endpoint."""

    westus = "westus"
    westeurope = "westeurope"
    southeastasia = "southeastasia"
    eastus2 = "eastus2"
    westcentralus = "westcentralus"
    westus2 = "westus2"
    eastus = "eastus"
    southcentralus = "southcentralus"
    northeurope = "northeurope"
    eastasia = "eastasia"
    australiaeast = "australiaeast"
    brazilsouth = "brazilsouth"
class Details(Enum):
    """Domain-specific detail models that can be requested during analysis."""

    celebrities = "Celebrities"
    landmarks = "Landmarks"
class Language1(Enum):
    """Output languages supported for analysis results.

    NOTE: the ``1`` suffix comes from the code generator disambiguating
    multiple ``Language`` parameters — do not rename.
    """

    en = "en"
    zh = "zh"
class DomainModels(Enum):
    """Domain-specific recognition models (same values as :class:`Details`)."""

    celebrities = "Celebrities"
    landmarks = "Landmarks"
| {
"content_hash": "2fab2032bd950505b8c52cee5f37b2b6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 62,
"avg_line_length": 19.95098039215686,
"alnum_prop": 0.6122850122850123,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "81f543b7151b70aa234ef0048fca255407417f12",
"size": "2509",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/computer_vision_api_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from dtools import MapReduce
import sys
"""
Join relationship in Simple Python MapReduce Framework
"""
# Shared framework instance used by mapper()/reducer() below.
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    """Emit every record keyed by its order id (field 1).

    Both ``order`` and ``line_item`` rows carry the order id at index 1,
    so grouping on it co-locates each order with its line items.
    """
    join_key = record[1]
    mr.emit_intermediate(join_key, record)
def reducer(key, list_of_values):
    """Join the single ``order`` record with all its ``line_item`` records.

    :param key: the order id the records were grouped on.
    :param list_of_values: mixed order/line-item records sharing that id.
    """
    items = []
    order = None
    for v in list_of_values:
        # Defensive: only consider records that really carry this key.
        if v[1] == key:
            if v[0] == "line_item":
                items.append(v)
            else:
                order = v
    # ROBUSTNESS FIX: the original unconditionally computed ``order + i``,
    # raising TypeError when a group contained only line items and no order.
    if order is None:
        return
    for i in items:
        mr.emit(order + i)
# Do not modify below this line
# =============================
if __name__ == '__main__':
    # Usage: python join.py <input-file>
    inputdata = open(sys.argv[1])
    mr.execute(inputdata, mapper, reducer)
| {
"content_hash": "2198c72bb35406d895fd47f1b6ef362c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 54,
"avg_line_length": 23.153846153846153,
"alnum_prop": 0.5448504983388704,
"repo_name": "houssemFat/MeeM-Dev",
"id": "ef25602cf13f2054dd94a9eabfde537a1ded414d",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teacher/apps/courses/videos/join.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54148"
},
{
"name": "HTML",
"bytes": "360877"
},
{
"name": "JavaScript",
"bytes": "1651985"
},
{
"name": "Nginx",
"bytes": "1597"
},
{
"name": "PHP",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "374180"
},
{
"name": "Smarty",
"bytes": "7600"
}
],
"symlink_target": ""
} |
import json
import requests
from django_parse_push.settings import APPLICATION_ID, REST_API_KEY
class ApiClient(object):
    """Minimal REST client for the Parse push API."""

    api_url = "https://api.parse.com"
    api_version = "1"

    def __init__(self, application_id, rest_api_key):
        """Store the Parse credentials sent with every request."""
        self.application_id = application_id
        self.rest_api_key = rest_api_key

    def request(self, method, url_path, data):
        """Serialize ``data`` as JSON and send it to ``url_path``.

        :return: the ``requests`` response object.
        """
        endpoint = "{}/{}/{}".format(self.api_url, self.api_version, url_path)
        auth_headers = {
            "X-Parse-Application-Id": self.application_id,
            "X-Parse-REST-API-Key": self.rest_api_key,
            "Content-Type": "application/json",
        }
        payload = json.dumps(data)
        return requests.request(
            method=method, url=endpoint, headers=auth_headers, data=payload
        )

    def send_notification(self, data):
        """
        Example data:
        {
            "where": {
                "channels": "Wall",
                "deviceType": "android"
            },
            "data": {
                "alert": "test message"
            }
        }
        """
        return self.request(method="post", url_path="push", data=data)
def get_api_client():
    """
    Shortcut method for get api client with required settings.
    :return: ApiClient object
    """
    # APPLICATION_ID / REST_API_KEY come from django_parse_push.settings,
    # imported at module level.
    api_client = ApiClient(application_id=APPLICATION_ID, rest_api_key=REST_API_KEY)
    return api_client
| {
"content_hash": "87cd3a82ed705a3c87876109186a77e5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 95,
"avg_line_length": 30.57777777777778,
"alnum_prop": 0.5545058139534884,
"repo_name": "gkmngrgn/django-parse-push",
"id": "13c53a6fc71557c20ca196bd74ac58d9e188dd9f",
"size": "1376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_parse_push/api_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10809"
}
],
"symlink_target": ""
} |
"""Family module for Vikidia."""
#
# (C) Pywikibot team, 2010-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
class Family(family.SubdomainFamily):
    """Family class for Vikidia."""

    name = 'vikidia'
    domain = 'vikidia.org'
    # Language subdomains treated as real content wikis.
    codes = ['ca', 'de', 'en', 'es', 'eu', 'fr', 'it', 'ru', 'scn']
    # Sites we want to edit but not count as real languages
    test_codes = ['central', 'test']

    def protocol(self, code):
        """Return https as the protocol for this family."""
        return "https"
| {
"content_hash": "0ffc11b7a609a39e2212e26ef8a07d66",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 22.75,
"alnum_prop": 0.6216640502354788,
"repo_name": "npdoty/pywikibot",
"id": "f3d4aa32b6df3287a878b42703fdaf21ad355eee",
"size": "661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pywikibot/families/vikidia_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4485564"
}
],
"symlink_target": ""
} |
import os
from speckenv import env
from speckenv_django import django_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Connection taken from the DATABASE_URL env var (parsed by speckenv_django),
# defaulting to a local PostgreSQL database.
DATABASES = {
    "default": django_database_url(
        env("DATABASE_URL", default="postgres://127.0.0.1:5432/zivinetz")
    )
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Django contrib apps plus the apps under test (testapp exercises zivinetz).
INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.admin",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.staticfiles",
    "django.contrib.messages",
    "testapp",
    "towel",
    "zivinetz",
    "feincms",
    "feincms.module.page",
    "towel_foundation",
]
# Middleware order is significant for Django request/response processing.
MIDDLEWARE = (
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "testapp.middleware.UglynessMiddleware",
)
STATIC_URL = "/static/"
BASEDIR = os.path.dirname(__file__)
# Media/static files live inside the test-app directory.
# CLEANUP: the original set ``MEDIA_ROOT = "/media/"`` first; that value was
# dead code (immediately overwritten below), so it has been removed.
MEDIA_ROOT = os.path.join(BASEDIR, "media/")
STATIC_ROOT = os.path.join(BASEDIR, "static/")
# Test-only secret key; never use in production.
SECRET_KEY = "supersikret"
LOGIN_REDIRECT_URL = "/?login=1"
ROOT_URLCONF = "testapp.urls"
LANGUAGES = (("en", "English"), ("de", "German"))
# Single Django template engine with explicit loaders (filesystem templates
# take precedence over per-app templates).
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASEDIR, "templates")],
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
            "loaders": [
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ],
        },
    }
]
| {
"content_hash": "cad25cfc69edb83a129c58789de10d31",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 29.544117647058822,
"alnum_prop": 0.6436037829766053,
"repo_name": "matthiask/zivinetz",
"id": "e97ec6ed7db5b7525196e7ff85190f76d74de5cd",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16354"
},
{
"name": "HTML",
"bytes": "63953"
},
{
"name": "JavaScript",
"bytes": "2141"
},
{
"name": "Python",
"bytes": "281404"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
"""Test class for IloPower module."""
import mock
from oslo.utils import importutils
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
# try_import returns None when proliantutils is absent; tests referencing
# ilo_client.IloError therefore assume proliantutils is installed.
ilo_client = importutils.try_import('proliantutils.ilo.ribcl')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
@mock.patch.object(ilo_common, 'get_ilo_object')
class IloPowerInternalMethodsTestCase(db_base.DbTestCase):
    """Tests for the module-private helpers in ilo_power.

    The class decorator injects ``get_ilo_object_mock`` as the last
    positional argument of every test method.
    """

    def setUp(self):
        """Create a fake_ilo node and shrink power retry/wait for speed."""
        super(IloPowerInternalMethodsTestCase, self).setUp()
        driver_info = INFO_DICT
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.node = db_utils.create_test_node(
            driver='fake_ilo',
            driver_info=driver_info,
            instance_uuid='instance_uuid_123')
        CONF.set_override('power_retry', 2, 'ilo')
        CONF.set_override('power_wait', 0, 'ilo')

    def test__get_power_state(self, get_ilo_object_mock):
        """iLO 'ON'/'OFF' map to POWER_ON/POWER_OFF; anything else is ERROR."""
        ilo_mock_object = get_ilo_object_mock.return_value
        ilo_mock_object.get_host_power_status.return_value = 'ON'
        self.assertEqual(
            states.POWER_ON, ilo_power._get_power_state(self.node))
        ilo_mock_object.get_host_power_status.return_value = 'OFF'
        self.assertEqual(
            states.POWER_OFF, ilo_power._get_power_state(self.node))
        ilo_mock_object.get_host_power_status.return_value = 'ERROR'
        self.assertEqual(states.ERROR, ilo_power._get_power_state(self.node))

    def test__get_power_state_fail(self, get_ilo_object_mock):
        """IloError from the client is surfaced as IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_client.IloError('error')
        ilo_mock_object.get_host_power_status.side_effect = exc
        self.assertRaises(exception.IloOperationError,
                          ilo_power._get_power_state,
                          self.node)
        ilo_mock_object.get_host_power_status.assert_called_once_with()

    def test__set_power_state_invalid_state(self, get_ilo_object_mock):
        """An unsupported target state raises InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              ilo_power._set_power_state,
                              task,
                              states.ERROR)

    def test__set_power_state_reboot_fail(self, get_ilo_object_mock):
        """IloError during reset_server is wrapped as IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_client.IloError('error')
        ilo_mock_object.reset_server.side_effect = exc
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.IloOperationError,
                              ilo_power._set_power_state,
                              task,
                              states.REBOOT)
        ilo_mock_object.reset_server.assert_called_once_with()

    def test__set_power_state_reboot_ok(self, get_ilo_object_mock):
        """Successful reboot issues exactly one reset_server call."""
        ilo_mock_object = get_ilo_object_mock.return_value
        # Status polled until it reaches the expected final state.
        ilo_mock_object.get_host_power_status.side_effect = ['ON', 'OFF', 'ON']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            ilo_power._set_power_state(task, states.REBOOT)
        ilo_mock_object.reset_server.assert_called_once_with()

    def test__set_power_state_off_fail(self, get_ilo_object_mock):
        """Power-off that never leaves 'ON' raises PowerStateFailure."""
        ilo_mock_object = get_ilo_object_mock.return_value
        ilo_mock_object.get_host_power_status.return_value = 'ON'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.PowerStateFailure,
                              ilo_power._set_power_state,
                              task,
                              states.POWER_OFF)
        ilo_mock_object.get_host_power_status.assert_called_with()
        ilo_mock_object.hold_pwr_btn.assert_called_once_with()

    def test__set_power_state_on_ok(self, get_ilo_object_mock):
        """Power-on sets host power to 'ON' once status confirms it."""
        ilo_mock_object = get_ilo_object_mock.return_value
        ilo_mock_object.get_host_power_status.side_effect = ['OFF', 'ON']
        target_state = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            ilo_power._set_power_state(task, target_state)
        ilo_mock_object.get_host_power_status.assert_called_with()
        ilo_mock_object.set_host_power.assert_called_once_with('ON')

    @mock.patch.object(manager_utils, 'node_set_boot_device')
    @mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
    def test__attach_boot_iso(self, setup_vmedia_mock, set_boot_device_mock,
                              get_ilo_object_mock):
        """_attach_boot_iso mounts the instance boot ISO and boots from CDROM."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
            ilo_power._attach_boot_iso(task)
            setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.CDROM)
class IloPowerTestCase(db_base.DbTestCase):
    """Tests for the public IloPower driver interface."""

    def setUp(self):
        """Create a fake_ilo node backed by the test driver info."""
        super(IloPowerTestCase, self).setUp()
        driver_info = INFO_DICT
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_ilo',
                                               driver_info=driver_info)

    def test_get_properties(self):
        """Driver properties are the union of common and deploy properties."""
        expected = ilo_common.COMMON_PROPERTIES
        expected.update(ilo_deploy.COMMON_PROPERTIES)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.get_properties())

    @mock.patch.object(ilo_common, 'parse_driver_info')
    def test_validate(self, mock_drvinfo):
        """validate() delegates to parse_driver_info on the node."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.power.validate(task)
            mock_drvinfo.assert_called_once_with(task.node)

    @mock.patch.object(ilo_common, 'parse_driver_info')
    def test_validate_fail(self, mock_drvinfo):
        """validate() propagates InvalidParameterValue from parse_driver_info."""
        side_effect = exception.InvalidParameterValue("Invalid Input")
        mock_drvinfo.side_effect = side_effect
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.validate,
                              task)

    @mock.patch.object(ilo_power, '_get_power_state')
    def test_get_power_state(self, mock_get_power):
        """get_power_state() returns the helper's result for the node."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_get_power.return_value = states.POWER_ON
            self.assertEqual(states.POWER_ON,
                             task.driver.power.get_power_state(task))
            mock_get_power.assert_called_once_with(task.node)

    @mock.patch.object(ilo_power, '_set_power_state')
    def test_set_power_state(self, mock_set_power):
        """set_power_state() forwards the target state to the helper."""
        mock_set_power.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_ON)
        mock_set_power.assert_called_once_with(task, states.POWER_ON)

    @mock.patch.object(ilo_power, '_set_power_state')
    @mock.patch.object(ilo_power, '_get_power_state')
    def test_reboot(self, mock_get_power, mock_set_power):
        """reboot() reads the current state then requests REBOOT."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            mock_get_power.return_value = states.POWER_ON
            mock_set_power.return_value = states.POWER_ON
            task.driver.power.reboot(task)
            mock_get_power.assert_called_once_with(task.node)
            mock_set_power.assert_called_once_with(task, states.REBOOT)
| {
"content_hash": "a8ec7ab9dedabe670e9e823d83ea4272",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 45.373056994818654,
"alnum_prop": 0.6133379011076853,
"repo_name": "ramineni/myironic",
"id": "3614b689f350d302ac10cdb7af4ea18e63688a46",
"size": "9414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/drivers/ilo/test_power.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1667"
},
{
"name": "Python",
"bytes": "2501292"
},
{
"name": "XML",
"bytes": "804"
}
],
"symlink_target": ""
} |
import glob
import logging
import os
import pyauto_functional # Must be imported before pyauto
import pyauto
class ThemesTest(pyauto.PyUITest):
    """TestCase for Themes."""

    def Debug(self):
        """Test method for experimentation.

        This method will not run automatically.
        """
        while True:
            raw_input('Hit <enter> to dump info.. ')
            self.pprint(self.GetThemeInfo())

    def testSetTheme(self):
        """Verify theme install."""
        self.assertFalse(self.GetThemeInfo())  # Verify there's no theme at startup
        crx_file = os.path.abspath(
            os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
        self.SetTheme(crx_file)
        # Verify "theme installed" infobar shows up
        self.assertTrue(self.WaitForInfobarCount(1))
        theme = self.GetThemeInfo()
        self.assertEqual('camo theme', theme['name'])
        self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])

    def testThemeInFullScreen(self):
        """Verify theme can be installed in FullScreen mode."""
        self.ApplyAccelerator(pyauto.IDC_FULLSCREEN)
        self.assertFalse(self.GetThemeInfo())  # Verify there's no theme at startup
        crx_file = os.path.abspath(
            os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
        self.SetTheme(crx_file)
        # Verify "theme installed" infobar shows up
        self.assertTrue(self.WaitForInfobarCount(1))
        theme = self.GetThemeInfo()
        self.assertEqual('camo theme', theme['name'])

    def testThemeReset(self):
        """Verify theme reset."""
        crx_file = os.path.abspath(
            os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
        self.SetTheme(crx_file)
        self.assertTrue(self.ResetToDefaultTheme())
        self.assertFalse(self.GetThemeInfo())

    def _ReturnCrashingThemes(self, themes, group_size, urls):
        """Install the given themes in groups of group_size and return the
        group of themes that crashes (if any).

        Note: restarts the browser at the beginning of the function.

        Args:
          themes: A list of themes to install.
          group_size: The number of themes to install at one time.
          urls: The list of urls to visit.
        """
        self.RestartBrowser()
        curr_theme = 0
        num_themes = len(themes)
        while curr_theme < num_themes:
            logging.debug('New group of %d themes.' % group_size)
            group_end = curr_theme + group_size
            this_group = themes[curr_theme:group_end]
            # Apply each theme in this group.
            for theme in this_group:
                logging.debug('Applying theme: %s' % theme)
                self.SetTheme(theme)
            for url in urls:
                self.NavigateToURL(url)

            def _LogAndReturnCrashing():
                logging.debug('Crashing themes: %s' % this_group)
                return this_group

            # Assert that there is at least 1 browser window.
            try:
                num_browser_windows = self.GetBrowserWindowCount()
            except:
                return _LogAndReturnCrashing()
            else:
                if not num_browser_windows:
                    return _LogAndReturnCrashing()
            curr_theme = group_end
        # None of the themes crashed.
        return None

    def Runner(self):
        """Apply themes; verify that theme has been applied and browser doesn't
        crash.

        This does not get run automatically. To run:
          python themes.py themes.ThemesTest.Runner

        Note: this test requires that a directory of crx files called 'themes'
        exists in the data directory.
        """
        themes_dir = os.path.join(self.DataDir(), 'themes')
        urls_file = os.path.join(self.DataDir(), 'urls.txt')
        assert os.path.exists(themes_dir), \
            'The dir "%s" must exist' % os.path.abspath(themes_dir)
        group_size = 20
        num_urls_to_visit = 100
        urls = [l.rstrip() for l in
                open(urls_file).readlines()[:num_urls_to_visit]]
        failed_themes = glob.glob(os.path.join(themes_dir, '*.crx'))
        # Bisect: halve the group size until the crashing theme is isolated
        # or the group size reaches zero.
        while failed_themes and group_size:
            failed_themes = self._ReturnCrashingThemes(failed_themes, group_size,
                                                       urls)
            group_size = group_size // 2
        self.assertFalse(failed_themes,
                         'Theme(s) in failing group: %s' % failed_themes)
if __name__ == '__main__':
    # Entry point for running this pyauto test module directly.
    pyauto_functional.Main()
| {
"content_hash": "2118581db083c29f3c6a3dcf9ce988cc",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 31.757575757575758,
"alnum_prop": 0.6479007633587787,
"repo_name": "gavinp/chromium",
"id": "2c81200c521c58d36cc01fffca5bdb03da056bff",
"size": "4381",
"binary": false,
"copies": "9",
"ref": "refs/heads/trunk",
"path": "chrome/test/functional/themes.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1178292"
},
{
"name": "C",
"bytes": "72353788"
},
{
"name": "C++",
"bytes": "117593783"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "10440"
},
{
"name": "Java",
"bytes": "24087"
},
{
"name": "JavaScript",
"bytes": "8781314"
},
{
"name": "Objective-C",
"bytes": "5340290"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918286"
},
{
"name": "Python",
"bytes": "5942009"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4149832"
},
{
"name": "Tcl",
"bytes": "255109"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``violin.hoverlabel.bordercolorsrc`` property."""

    def __init__(
        self, plotly_name="bordercolorsrc", parent_name="violin.hoverlabel", **kwargs
    ):
        super(BordercolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override these defaults through kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
| {
"content_hash": "beec9c2324a45f09a7a34ef645685234",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 34.5,
"alnum_prop": 0.6107660455486542,
"repo_name": "plotly/python-api",
"id": "1b513782c3a3937cb03e0bb6483186c91af22f9a",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/violin/hoverlabel/_bordercolorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
def create_tls_ticket_auth_obfs(method):
    """Factory registered in ``obfs_map`` for the tls1.2_ticket_auth modes."""
    return tls_ticket_auth(method)
# Maps plugin name -> (factory,) tuple; both modes share one implementation,
# which switches behavior on self.method (see decode_error_return).
obfs_map = {
    'tls1.2_ticket_auth': (create_tls_ticket_auth_obfs,),
    'tls1.2_ticket_auth_compatible': (create_tls_ticket_auth_obfs,),
}
def match_begin(str1, str2):
    """Return True when ``str1`` starts with the full prefix ``str2``."""
    if len(str1) < len(str2):
        return False
    return str1[:len(str2)] == str2
class obfs_auth_data(object):
    """Server-side state shared across connections for one obfs instance."""

    def __init__(self):
        # Cache of recent client auth data, expiring after 5 minutes.
        self.client_data = lru_cache.LRUCache(60 * 5)
        self.client_id = os.urandom(32)
        # Recorded ~30 minutes in the past, truncated to 32 bits.
        self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF
class tls_ticket_auth(plain.plain):
    def __init__(self, method):
        """Initialise per-connection handshake state for *method*."""
        self.method = method
        # Observed states in this class: 0 = fresh, 1/2/3 = mid-handshake,
        # 8 = established, -1 = plain passthrough after a decode error.
        self.handshake_status = 0
        self.send_buffer = b''
        self.recv_buffer = b''
        self.client_id = b''
        self.max_time_dif = 0  # time dif (second) setting
        self.tls_version = b'\x03\x03'  # TLS 1.2 record version
    def init_data(self):
        """Return the server-wide shared state object for this obfs plugin."""
        return obfs_auth_data()
    def sni(self, url):
        """Build a TLS server_name (SNI) extension body for *url*."""
        url = common.to_bytes(url)
        # host_name entry: type 0x00 + 2-byte length + name
        data = b"\x00" + struct.pack('>H', len(url)) + url
        # extension header: type 0x0000 + extension length + list length
        data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
        return data
    def pack_auth_data(self, client_id):
        """Return 32 auth bytes: 4-byte UTC time, 18 random bytes, 10-byte HMAC-SHA1 tag.

        The tag is keyed with ``server_info.key + client_id`` over the
        preceding 22 bytes.
        """
        utc_time = int(time.time()) & 0xFFFFFFFF
        data = struct.pack('>I', utc_time) + os.urandom(18)
        data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
        return data
    def client_encode(self, buf):
        """Client-side encode: fake a TLS 1.2 ClientHello, then wrap payload
        in application-data records once the handshake is done."""
        if self.handshake_status == -1:
            # Passthrough mode after a handshake failure.
            return buf
        if self.handshake_status == 8:
            # Established: split payload into application-data records with
            # randomized sizes (100..4195 bytes).
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        # Still handshaking: queue the payload to be flushed with Finished.
        self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
        if self.handshake_status == 0:
            # Build the forged ClientHello: auth data, session id (client_id),
            # a fixed cipher-suite list, then the extension block.
            self.handshake_status = 1
            data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
            ext = binascii.unhexlify(b"ff01000100")  # renegotiation_info
            host = self.server_info.obfs_param or self.server_info.host
            if host and host[-1] in string.digits:
                # Looks like an IP literal — omit SNI hostname.
                host = ''
            hosts = host.split(',')
            host = random.choice(hosts)
            ext += self.sni(host)
            ext += b"\x00\x17\x00\x00"  # extended_master_secret
            ext += b"\x00\x23\x00\xd0" + os.urandom(208)  # ticket
            ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
            ext += binascii.unhexlify(b"000500050100000000")
            ext += binascii.unhexlify(b"00120000")
            ext += binascii.unhexlify(b"75500000")
            ext += binascii.unhexlify(b"000b00020100")
            ext += binascii.unhexlify(b"000a0006000400170018")
            data += struct.pack('>H', len(ext)) + ext
            data = b"\x01\x00" + struct.pack('>H', len(data)) + data
            data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
            return data
        elif self.handshake_status == 1 and len(buf) == 0:
            # Second client flight: ChangeCipherSpec + Finished (HMAC-tagged),
            # then flush any payload queued during the handshake.
            data = b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
            data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished
            data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10]
            ret = data + self.send_buffer
            self.send_buffer = b''
            self.handshake_status = 8
            return ret
        return b''
    def client_decode(self, buf):
        """Client-side decode.

        Returns ``(data, need_send_back)``; ``need_send_back`` True asks the
        caller to send the next handshake flight.
        """
        if self.handshake_status == -1:
            return (buf, False)
        if self.handshake_status == 8:
            # Established: unwrap 0x17 application-data records, buffering
            # partial records across calls.
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    break
                buf = self.recv_buffer[5:size+5]
                ret += buf
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, False)
        # Handshake: verify the server's HMAC over bytes 11..33 of its hello.
        if len(buf) < 11 + 32 + 1 + 32:
            raise Exception('client_decode data error')
        verify = buf[11:33]
        if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
            raise Exception('client_decode data error')
        return (b'', True)
    def server_encode(self, buf):
        """Server-side encode: first call emits the forged ServerHello +
        ChangeCipherSpec + Finished; afterwards wraps payload in
        application-data records."""
        if self.handshake_status == -1:
            return buf
        if self.handshake_status == 8:
            # Established: randomized-size application-data records, as on
            # the client side.
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        self.handshake_status = 3
        data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100")
        data = b"\x02\x00" + struct.pack('>H', len(data)) + data #server hello
        data = b"\x16\x03\x03" + struct.pack('>H', len(data)) + data
        data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec
        data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished
        # Tag the whole flight so the client can authenticate it.
        data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10]
        return data
    def decode_error_return(self, buf):
        """Fall back after a malformed handshake.

        In strict ``tls1.2_ticket_auth`` mode, answer with 2 KiB of junk; in
        compatible mode, pass the raw buffer through unchanged.
        """
        self.handshake_status = -1
        if self.method == 'tls1.2_ticket_auth':
            return (b'E'*2048, False, False)
        return (buf, True, False)
    def server_decode(self, buf):
        """Server side: parse incoming fake-TLS client traffic.

        Returns ``(data, is_need_decrypt, is_need_to_encode_and_send_back)``
        (plus the parsed host name after a valid ClientHello).  Handshake
        states: initial -> 2 (hello seen) -> 3 (reply sent) -> 8 (stream).
        """
        if self.handshake_status == -1:
            return (buf, True, False)
        if self.handshake_status == 8:
            # Steady state: strip ApplicationData (17 03 03 len) framing.
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    # Incomplete record: wait for more data.
                    break
                ret += self.recv_buffer[5:size+5]
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, True, False)
        if self.handshake_status == 3:
            # Expect ChangeCipherSpec + Finished; the first 33 bytes are
            # authenticated by a truncated HMAC at offset 33..43.
            verify = buf
            verify_len = 43 - 10
            if len(buf) < 43:
                raise Exception('server_decode data error')
            if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): #ChangeCipherSpec
                raise Exception('server_decode data error')
            buf = buf[6:]
            if not match_begin(buf, b"\x16" + self.tls_version + b"\x00\x20"): #Finished
                raise Exception('server_decode data error')
            if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]:
                raise Exception('server_decode data error')
            if len(buf) < 37:
                raise Exception('server_decode data error')
            # Anything after the Finished record is already payload; switch
            # to the steady-state parser and reprocess the buffer.
            self.recv_buffer = buf[37:]
            self.handshake_status = 8
            return self.server_decode(b'')
        #raise Exception("handshake data = %s" % (binascii.hexlify(buf)))
        # --- first packet: parse and authenticate the ClientHello ---
        self.handshake_status = 2
        ogn_buf = buf
        if not match_begin(buf, b'\x16\x03\x01'):
            return self.decode_error_return(ogn_buf)
        buf = buf[3:]
        if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
            logging.info("tls_auth wrong tls head size")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if not match_begin(buf, b'\x01\x00'): #client hello
            logging.info("tls_auth not client hello message")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
            logging.info("tls_auth wrong message size")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if not match_begin(buf, self.tls_version):
            logging.info("tls_auth wrong tls version")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        # 32-byte "random" field: 4-byte UTC time + 18 bytes + 10-byte HMAC.
        verifyid = buf[:32]
        buf = buf[32:]
        sessionid_len = ord(buf[0])
        if sessionid_len < 32:
            logging.info("tls_auth wrong sessionid_len")
            return self.decode_error_return(ogn_buf)
        sessionid = buf[1:sessionid_len + 1]
        buf = buf[sessionid_len+1:]
        self.client_id = sessionid
        sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], hashlib.sha1).digest()[:10]
        utc_time = struct.unpack('>I', verifyid[:4])[0]
        time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
        if self.server_info.obfs_param:
            # obfs_param may override the allowed clock-skew window.
            try:
                self.max_time_dif = int(self.server_info.obfs_param)
            except:
                pass
        # Reject hellos outside the skew window, or older than (roughly)
        # the server start time, to limit replay usefulness.
        if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
                or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
            logging.info("tls_auth wrong time")
            return self.decode_error_return(ogn_buf)
        if sha1 != verifyid[22:]:
            logging.info("tls_auth wrong sha1")
            return self.decode_error_return(ogn_buf)
        # Replay protection: each 22-byte auth value may be used once.
        if self.server_info.data.client_data.get(verifyid[:22]):
            logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
            return self.decode_error_return(ogn_buf)
        self.server_info.data.client_data.sweep()
        self.server_info.data.client_data[verifyid[:22]] = sessionid
        # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
        buf = buf[48:]
        host_name = ''
        # Scan the remaining extensions blob for the marker preceding the
        # host name; the last match wins.
        # NOTE(review): `buf[:index] != ''` compares bytes to str — always
        # True on py3; presumably py2-only code. Confirm target runtime.
        for index in range(len(buf)):
            if index + 4 < len(buf):
                if buf[index:index + 4] == b"\x00\x17\x00\x00":
                    if buf[:index] != '':
                        host_name = buf[:index]
        return (b'', False, True, host_name)
| {
"content_hash": "79414beaff5cc4865c5286ecf88f5799",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 230,
"avg_line_length": 43.71102661596958,
"alnum_prop": 0.5565414057063326,
"repo_name": "82604716/shadowsicks",
"id": "b845a9ac6233b5bfcc4eceb8f93f30f2e9719bf2",
"size": "12099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shadowsocks/obfsplugin/obfs_tls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "740522"
},
{
"name": "Roff",
"bytes": "1737"
},
{
"name": "Shell",
"bytes": "14945"
}
],
"symlink_target": ""
} |
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
Loader is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. if and for blocks get
translated exactly into Python, you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the escape() function in the examples above. You can pass
functions in to your template just like any other variable::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions escape(), url_escape(), json_encode(), and squeeze()
to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `render` and `render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `Application` setting.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement.
"""
from __future__ import absolute_import, division, with_statement
import cStringIO
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from twisted.python import log
from cyclone import escape
from cyclone.util import bytes_type, ObjectDict
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
    """A compiled template.
    We compile into Python from the given template_string. You can generate
    the template from variables with generate().
    """
    def __init__(self, template_string, name="<string>", loader=None,
                 compress_whitespace=None, autoescape=_UNSET):
        # name: used in error messages and as the generated module name.
        # loader: resolves {% include %}/{% extends %} references.
        self.name = name
        # Whitespace compression defaults on for .html/.js templates only.
        if compress_whitespace is None:
            compress_whitespace = name.endswith(".html") or \
                name.endswith(".js")
        # autoescape precedence: explicit argument > loader > module default.
        if autoescape is not _UNSET:
            self.autoescape = autoescape
        elif loader:
            self.autoescape = loader.autoescape
        else:
            self.autoescape = _DEFAULT_AUTOESCAPE
        self.namespace = loader.namespace if loader else {}
        reader = _TemplateReader(name, escape.native_str(template_string))
        self.file = _File(self, _parse(reader, self))
        self.code = self._generate_python(loader, compress_whitespace)
        self.loader = loader
        try:
            # Under python2.5, the fake filename used here must match
            # the module name used in __name__ below.
            self.compiled = compile(
                escape.to_unicode(self.code),
                "%s.generated.py" % self.name.replace('.', '_'), "exec")
        except Exception:
            # On a compile error, log the generated source with line
            # numbers so the offending template construct can be found.
            formatted_code = _format_code(self.code).rstrip()
            log.msg("%s code:" % self.name)
            for line in formatted_code.split("\n"):
                log.msg(line)
            raise
    def generate(self, **kwargs):
        """Generate this template with the given arguments."""
        # Helper functions made available to every template body.
        namespace = {
            "escape": escape.xhtml_escape,
            "xhtml_escape": escape.xhtml_escape,
            "url_escape": escape.url_escape,
            "json_encode": escape.json_encode,
            "squeeze": escape.squeeze,
            "linkify": escape.linkify,
            "datetime": datetime,
            "_utf8": escape.utf8,  # for internal use
            "_string_types": (unicode, bytes_type),
            # __name__ and __loader__ allow the traceback mechanism to find
            # the generated source code.
            "__name__": self.name.replace('.', '_'),
            "__loader__": ObjectDict(get_source=lambda name: self.code),
        }
        namespace.update(self.namespace)
        namespace.update(kwargs)
        exec self.compiled in namespace
        execute = namespace["_execute"]
        # Clear the traceback module's cache of source data now that
        # we've generated a new template (mainly for this module's
        # unittests, where different tests reuse the same name).
        linecache.clearcache()
        try:
            return execute()
        except Exception:
            # Dump the generated source on runtime errors as well.
            formatted_code = _format_code(self.code).rstrip()
            log.msg("%s code:" % self.name)
            for line in formatted_code.split("\n"):
                log.msg(line)
            raise
    def _generate_python(self, loader, compress_whitespace):
        """Render the parse tree (and its ancestors) to Python source."""
        buffer = cStringIO.StringIO()
        try:
            # named_blocks maps from names to _NamedBlock objects
            named_blocks = {}
            ancestors = self._get_ancestors(loader)
            ancestors.reverse()
            # Walk base-first so derived templates overwrite block entries.
            for ancestor in ancestors:
                ancestor.find_named_blocks(loader, named_blocks)
            self.file.find_named_blocks(loader, named_blocks)
            writer = _CodeWriter(buffer, named_blocks, loader,
                                 ancestors[0].template,
                                 compress_whitespace)
            ancestors[0].generate(writer)
            return buffer.getvalue()
        finally:
            buffer.close()
    def _get_ancestors(self, loader):
        """Return [self.file] plus the _File of every {% extends %} parent."""
        ancestors = [self.file]
        for chunk in self.file.body.chunks:
            if isinstance(chunk, _ExtendsBlock):
                if not loader:
                    raise ParseError("{% extends %} block found, but no "
                                     "template loader")
                template = loader.load(chunk.name, self.name)
                ancestors.extend(template._get_ancestors(loader))
        return ancestors
class BaseLoader(object):
    """Base class for template loaders.

    Subclasses implement ``resolve_path`` and ``_create_template``; this
    base class keeps a lock-protected cache of compiled templates.
    """

    def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
        """Creates a template loader.

        autoescape must be either None or a string naming a function in
        the template namespace, such as "xhtml_escape".  namespace is a
        dict of extra names exposed to every loaded template.
        """
        self.autoescape = autoescape
        self.namespace = namespace or {}
        self.templates = {}
        # Reentrant because loading one template can trigger loading
        # another on the same thread (via `include` or `extends`).  The
        # GIL alone would keep this safe, but the lock prevents several
        # threads from wastefully compiling the same template at once.
        self.lock = threading.RLock()

    def reset(self):
        """Resets the cache of compiled templates."""
        with self.lock:
            self.templates = {}

    def resolve_path(self, name, parent_path=None):
        """Converts a possibly-relative path to absolute (used internally)."""
        raise NotImplementedError()

    def load(self, name, parent_path=None):
        """Loads (and caches) the named template."""
        name = self.resolve_path(name, parent_path=parent_path)
        with self.lock:
            try:
                return self.templates[name]
            except KeyError:
                template = self._create_template(name)
                self.templates[name] = template
                return template

    def _create_template(self, name):
        raise NotImplementedError()
class Loader(BaseLoader):
    """A template loader that loads from a single root directory.

    You must use a template loader to use template constructs like
    {% extends %} and {% include %}. Loader caches all templates after
    they are loaded the first time.
    """
    def __init__(self, root_directory, **kwargs):
        super(Loader, self).__init__(**kwargs)
        self.root = os.path.abspath(root_directory)

    def resolve_path(self, name, parent_path=None):
        """Resolve ``name`` relative to ``parent_path`` within the root.

        Relative references are honored only when neither path is absolute
        and the parent is a real file name (not e.g. "<string>"); resolved
        names are re-expressed relative to the root directory.
        """
        if parent_path and not parent_path.startswith("<") and \
            not parent_path.startswith("/") and \
                not name.startswith("/"):
            current_path = os.path.join(self.root, parent_path)
            file_dir = os.path.dirname(os.path.abspath(current_path))
            relative_path = os.path.abspath(os.path.join(file_dir, name))
            if relative_path.startswith(self.root):
                name = relative_path[len(self.root) + 1:]
        return name

    def _create_template(self, name):
        """Read the template file and compile it.

        Uses a context manager so the file handle is released even when
        Template() raises (the previous version leaked it on error).
        """
        path = os.path.join(self.root, name)
        with open(path, "r") as f:
            return Template(f.read(), name=name, loader=self)
class DictLoader(BaseLoader):
    """A template loader that serves templates out of an in-memory dict."""

    def __init__(self, dict, **kwargs):
        super(DictLoader, self).__init__(**kwargs)
        self.dict = dict

    def resolve_path(self, name, parent_path=None):
        # Only resolve relative names when neither path is absolute and
        # the parent is a real template name (not e.g. "<string>").
        is_relative = (parent_path and
                       not parent_path.startswith("<") and
                       not parent_path.startswith("/") and
                       not name.startswith("/"))
        if is_relative:
            base_dir = posixpath.dirname(parent_path)
            name = posixpath.normpath(posixpath.join(base_dir, name))
        return name

    def _create_template(self, name):
        return Template(self.dict[name], name=name, loader=self)
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
    """Root parse node; emits the module-level ``_execute`` function."""
    def __init__(self, template, body):
        self.template = template
        self.body = body
        self.line = 0
    def generate(self, writer):
        writer.write_line("def _execute():", self.line)
        with writer.indent():
            # Output is accumulated as utf-8 chunks and joined once.
            writer.write_line("_buffer = []", self.line)
            writer.write_line("_append = _buffer.append", self.line)
            self.body.generate(writer)
            writer.write_line("return _utf8('').join(_buffer)", self.line)
    def each_child(self):
        return (self.body,)
class _ChunkList(_Node):
    """A sequence of sibling nodes generated one after another."""

    def __init__(self, chunks):
        self.chunks = chunks

    def each_child(self):
        return self.chunks

    def generate(self, writer):
        for node in self.chunks:
            node.generate(writer)
class _NamedBlock(_Node):
    """A ``{% block name %}`` region that child templates may override."""

    def __init__(self, name, body, template, line):
        self.name = name
        self.body = body
        self.template = template
        self.line = line

    def each_child(self):
        return (self.body,)

    def generate(self, writer):
        # Emit the most-derived block registered under this name, in the
        # context of the template that defined it.
        winner = writer.named_blocks[self.name]
        with writer.include(winner.template, self.line):
            winner.body.generate(writer)

    def find_named_blocks(self, loader, named_blocks):
        # Later (more-derived) registrations overwrite earlier ones.
        named_blocks[self.name] = self
        _Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
    """Marker node for ``{% extends "file" %}``; consumed by _get_ancestors."""

    def __init__(self, name):
        self.name = name
class _IncludeBlock(_Node):
    """Inlines another template here, sharing the local namespace."""

    def __init__(self, name, reader, line):
        self.name = name
        # Remember which template contained the directive so relative
        # include paths resolve against it.
        self.template_name = reader.name
        self.line = line

    def find_named_blocks(self, loader, named_blocks):
        loader.load(self.name, self.template_name).file.find_named_blocks(
            loader, named_blocks)

    def generate(self, writer):
        sub_template = writer.loader.load(self.name, self.template_name)
        with writer.include(sub_template, self.line):
            sub_template.file.body.generate(writer)
class _ApplyBlock(_Node):
    """``{% apply fn %}...{% end %}``: emits the body as a nested no-arg
    function and appends the result of passing its output through ``fn``."""
    def __init__(self, method, line, body=None):
        self.method = method
        self.line = line
        self.body = body
    def each_child(self):
        return (self.body,)
    def generate(self, writer):
        # Each apply gets a uniquely-numbered helper so nesting works.
        method_name = "apply%d" % writer.apply_counter
        writer.apply_counter += 1
        writer.write_line("def %s():" % method_name, self.line)
        with writer.indent():
            writer.write_line("_buffer = []", self.line)
            writer.write_line("_append = _buffer.append", self.line)
            self.body.generate(writer)
            writer.write_line("return _utf8('').join(_buffer)", self.line)
        writer.write_line("_append(%s(%s()))" % (
            self.method, method_name), self.line)
class _ControlBlock(_Node):
    """A python control statement ({% if %}, {% for %}, ...) plus its body."""

    def __init__(self, statement, line, body=None):
        self.statement = statement
        self.line = line
        self.body = body

    def generate(self, writer):
        # Emit e.g. "for x in items:" followed by the indented body.
        writer.write_line("%s:" % self.statement, self.line)
        with writer.indent():
            self.body.generate(writer)

    def each_child(self):
        return (self.body,)
class _IntermediateControlBlock(_Node):
    """An ``else``/``elif``/``except``/``finally`` clause inside a block."""

    def __init__(self, statement, line):
        self.statement = statement
        self.line = line

    def generate(self, writer):
        # Dedent one level so the clause lines up with its opening statement.
        writer.write_line("%s:" % self.statement, self.line,
                          writer.indent_size() - 1)
class _Statement(_Node):
    """A python statement emitted verbatim ({% set %}, {% import %}, ...)."""

    def __init__(self, statement, line):
        self.statement = statement
        self.line = line

    def generate(self, writer):
        writer.write_line(self.statement, self.line)
class _Expression(_Node):
    """A ``{{ expr }}`` substitution, autoescaped unless ``raw`` is set."""
    def __init__(self, expression, line, raw=False):
        self.expression = expression
        self.line = line
        self.raw = raw
    def generate(self, writer):
        writer.write_line("_tmp = %s" % self.expression, self.line)
        # Coerce the value to utf-8 bytes before appending to the buffer.
        writer.write_line("if isinstance(_tmp, _string_types):"
                          " _tmp = _utf8(_tmp)", self.line)
        writer.write_line("else: _tmp = _utf8(str(_tmp))", self.line)
        if not self.raw and writer.current_template.autoescape is not None:
            # In python3 functions like xhtml_escape return unicode,
            # so we have to convert to utf8 again.
            writer.write_line("_tmp = _utf8(%s(_tmp))" %
                              writer.current_template.autoescape, self.line)
        writer.write_line("_append(_tmp)", self.line)
class _Module(_Expression):
    """A ``{% module expr %}`` call; output is inserted without escaping."""

    def __init__(self, expression, line):
        super(_Module, self).__init__("_modules." + expression, line,
                                      raw=True)
class _Text(_Node):
    """A literal run of template text between directives."""

    def __init__(self, value, line):
        self.value = value
        self.line = line

    def generate(self, writer):
        text = self.value
        # Optionally collapse whitespace: runs of tabs/spaces become one
        # space, and whitespace spanning a newline becomes a single "\n"
        # (so line breaks survive).  <pre> content is left untouched.
        if writer.compress_whitespace and "<pre>" not in text:
            text = re.sub(r"([\t ]+)", " ", text)
            text = re.sub(r"(\s*\n\s*)", "\n", text)
        if text:
            writer.write_line('_append(%r)' % escape.utf8(text), self.line)
class ParseError(Exception):
    """Raised for template syntax errors encountered by ``_parse``."""
    pass
class _CodeWriter(object):
    """Indentation-aware writer for the generated Python source.

    Tracks the template currently being generated (for autoescape and
    error context) across {% include %} boundaries, and annotates each
    emitted line with its originating template:line for debuggability.
    """
    def __init__(self, file, named_blocks, loader, current_template,
                 compress_whitespace):
        self.file = file
        self.named_blocks = named_blocks
        self.loader = loader
        self.current_template = current_template
        self.compress_whitespace = compress_whitespace
        # Counter used to give each {% apply %} helper a unique name.
        self.apply_counter = 0
        self.include_stack = []
        self._indent = 0
    def indent_size(self):
        return self._indent
    def indent(self):
        # Context manager that bumps the indent level for a `with` body.
        class Indenter(object):
            def __enter__(_):
                self._indent += 1
                return self
            def __exit__(_, *args):
                assert self._indent > 0
                self._indent -= 1
        return Indenter()
    def include(self, template, line):
        # Context manager that switches current_template while generating
        # an included template's body, restoring it afterwards.
        self.include_stack.append((self.current_template, line))
        self.current_template = template
        class IncludeTemplate(object):
            def __enter__(_):
                return self
            def __exit__(_, *args):
                self.current_template = self.include_stack.pop()[0]
        return IncludeTemplate()
    def write_line(self, line, line_number, indent=None):
        if indent == None:
            indent = self._indent
        # Trailing comment records where this generated line came from,
        # including the chain of includes when applicable.
        line_comment = ' # %s:%d' % (self.current_template.name, line_number)
        if self.include_stack:
            ancestors = ["%s:%d" % (tmpl.name, lineno)
                         for (tmpl, lineno) in self.include_stack]
            line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
        print >> self.file, " " * indent + line + line_comment
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None):
    """Parse template source into a _ChunkList of parse-tree nodes.

    ``in_block`` is the operator name of the enclosing {% %} block ("if",
    "for", ...) when parsing a nested body, or None at top level; it
    determines which intermediate clauses and {% end %} tags are legal.
    """
    body = _ChunkList([])
    while True:
        # Find next template directive
        curly = 0
        while True:
            curly = reader.find("{", curly)
            if curly == -1 or curly + 1 == reader.remaining():
                # EOF
                if in_block:
                    raise ParseError("Missing {%% end %%} block for %s" %
                                     in_block)
                body.chunks.append(_Text(reader.consume(), reader.line))
                return body
            # If the first curly brace is not the start of a special token,
            # start searching from the character after it
            if reader[curly + 1] not in ("{", "%", "#"):
                curly += 1
                continue
            # When there are more than 2 curlies in a row, use the
            # innermost ones. This is useful when generating languages
            # like latex where curlies are also meaningful
            if (curly + 2 < reader.remaining() and
                reader[curly + 1] == '{' and reader[curly + 2] == '{'):
                curly += 1
                continue
            break
        # Append any text before the special token
        if curly > 0:
            cons = reader.consume(curly)
            body.chunks.append(_Text(cons, reader.line))
        start_brace = reader.consume(2)
        line = reader.line
        # Template directives may be escaped as "{{!" or "{%!".
        # In this case output the braces and consume the "!".
        # This is especially useful in conjunction with jquery templates,
        # which also use double braces.
        if reader.remaining() and reader[0] == "!":
            reader.consume(1)
            body.chunks.append(_Text(start_brace, line))
            continue
        # Comment: {# ... #} is discarded entirely.
        if start_brace == "{#":
            end = reader.find("#}")
            if end == -1:
                raise ParseError("Missing end expression #} on line %d" % line)
            contents = reader.consume(end).strip()
            reader.consume(2)
            continue
        # Expression: {{ ... }} becomes an (auto-escaped) substitution.
        if start_brace == "{{":
            end = reader.find("}}")
            if end == -1:
                raise ParseError("Missing end expression }} on line %d" % line)
            contents = reader.consume(end).strip()
            reader.consume(2)
            if not contents:
                raise ParseError("Empty expression on line %d" % line)
            body.chunks.append(_Expression(contents, line))
            continue
        # Block: {% operator suffix %}
        assert start_brace == "{%", start_brace
        end = reader.find("%}")
        if end == -1:
            raise ParseError("Missing end block %%} on line %d" % line)
        contents = reader.consume(end).strip()
        reader.consume(2)
        if not contents:
            raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
        operator, space, suffix = contents.partition(" ")
        suffix = suffix.strip()
        # Intermediate ("else", "elif", etc) blocks: legal only inside
        # the parent operators listed for each of them.
        intermediate_blocks = {
            "else": set(["if", "for", "while", "try"]),
            "elif": set(["if"]),
            "except": set(["try"]),
            "finally": set(["try"]),
        }
        allowed_parents = intermediate_blocks.get(operator)
        if allowed_parents is not None:
            if not in_block:
                raise ParseError("%s outside %s block" %
                                 (operator, allowed_parents))
            if in_block not in allowed_parents:
                raise ParseError("%s block cannot be attached to %s block" % \
                    (operator, in_block))
            body.chunks.append(_IntermediateControlBlock(contents, line))
            continue
        # End tag: closes the recursive call that parsed this body.
        elif operator == "end":
            if not in_block:
                raise ParseError("Extra {%% end %%} block on line %d" % line)
            return body
        elif operator in ("extends", "include", "set", "import", "from",
                          "comment", "autoescape", "raw", "module"):
            # Self-contained directives (no {% end %} needed).
            if operator == "comment":
                continue
            if operator == "extends":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("extends missing file path on line %d" % \
                        line)
                block = _ExtendsBlock(suffix)
            elif operator in ("import", "from"):
                if not suffix:
                    raise ParseError("import missing statement on line %d" % \
                        line)
                block = _Statement(contents, line)
            elif operator == "include":
                suffix = suffix.strip('"').strip("'")
                if not suffix:
                    raise ParseError("include missing file path on line %d" % \
                        line)
                block = _IncludeBlock(suffix, reader, line)
            elif operator == "set":
                if not suffix:
                    raise ParseError("set missing statement on line %d" % line)
                block = _Statement(suffix, line)
            elif operator == "autoescape":
                # Affects the whole current template, not just what follows.
                fn = suffix.strip()
                if fn == "None":
                    fn = None
                template.autoescape = fn
                continue
            elif operator == "raw":
                block = _Expression(suffix, line, raw=True)
            elif operator == "module":
                block = _Module(suffix, line)
            body.chunks.append(block)
            continue
        elif operator in ("apply", "block", "try", "if", "for", "while"):
            # parse inner body recursively
            block_body = _parse(reader, template, operator)
            if operator == "apply":
                if not suffix:
                    raise ParseError("apply missing method name on line %d" % \
                        line)
                block = _ApplyBlock(suffix, line, block_body)
            elif operator == "block":
                if not suffix:
                    raise ParseError("block missing name on line %d" % line)
                block = _NamedBlock(suffix, block_body, template, line)
            else:
                block = _ControlBlock(contents, line, block_body)
            body.chunks.append(block)
            continue
        else:
            raise ParseError("unknown operator: %r" % operator)
| {
"content_hash": "6f40a23ad251ce96fac2c2bd8c2500ff",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 79,
"avg_line_length": 35.04710144927536,
"alnum_prop": 0.564630069954168,
"repo_name": "shirk3y/cyclone",
"id": "8d8bc9852e38732b937c8f5ac5f21416acacd80e",
"size": "29661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyclone/template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "3863"
},
{
"name": "JavaScript",
"bytes": "6002"
},
{
"name": "Python",
"bytes": "402770"
},
{
"name": "Shell",
"bytes": "529"
}
],
"symlink_target": ""
} |
"""
Download API.
"""
# Standard library imports
from collections import deque
import json
import os
import sys
# Third party imports
from qtpy.QtCore import QByteArray, QObject, QTimer, QThread, QUrl, Signal
from qtpy.QtNetwork import QNetworkAccessManager, QNetworkRequest
import requests
# Local imports
from conda_manager.api.conda_api import CondaAPI
from conda_manager.utils.logs import logger
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
def to_binary_string(obj, encoding=None):
    """Convert `obj` to binary string (bytes in Python 3, str in Python 2)."""
    if not PY2:
        # Python 3: encode with the given codec, defaulting to utf-8.
        return bytes(obj, 'utf-8' if encoding is None else encoding)
    # Python 2
    if encoding is None:
        return str(obj)
    return obj.encode(encoding)
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string."""
    if PY2:
        # Python 2
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # Already text; decoding again would raise.
        return obj
    return str(obj, encoding)
def handle_qbytearray(obj, encoding):
    """Qt/Python3 compatibility helper: coerce a QByteArray to text."""
    if isinstance(obj, QByteArray):
        # Unwrap to the raw byte payload before decoding.
        obj = obj.data()
    return to_text_string(obj, encoding=encoding)
class DownloadWorker(QObject):
    """Bookkeeping object for one queued download and its Qt signals."""

    # url, path
    sig_download_finished = Signal(str, str)
    # url, path, progress_size, total_size
    sig_download_progress = Signal(str, str, int, int)
    sig_finished = Signal(object, object, object)

    def __init__(self, url, path):
        super(DownloadWorker, self).__init__()
        self.url = url
        self.path = path
        self.finished = False

    def is_finished(self):
        """Return True once the download completed (or was skipped)."""
        return self.finished
class _DownloadAPI(QObject):
"""
Download API based on QNetworkAccessManager
"""
def __init__(self, chunk_size=1024):
super(_DownloadAPI, self).__init__()
self._chunk_size = chunk_size
self._head_requests = {}
self._get_requests = {}
self._paths = {}
self._workers = {}
self._manager = QNetworkAccessManager(self)
self._timer = QTimer()
# Setup
self._timer.setInterval(1000)
self._timer.timeout.connect(self._clean)
# Signals
self._manager.finished.connect(self._request_finished)
self._manager.sslErrors.connect(self._handle_ssl_errors)
def _handle_ssl_errors(self, reply, errors):
logger.error(str(('SSL Errors', errors)))
def _clean(self):
"""
Periodically check for inactive workers and remove their references.
"""
if self._workers:
for url in self._workers.copy():
w = self._workers[url]
if w.is_finished():
self._workers.pop(url)
self._paths.pop(url)
if url in self._get_requests:
self._get_requests.pop(url)
else:
self._timer.stop()
def _request_finished(self, reply):
url = to_text_string(reply.url().toEncoded(), encoding='utf-8')
if url in self._paths:
path = self._paths[url]
if url in self._workers:
worker = self._workers[url]
if url in self._head_requests:
self._head_requests.pop(url)
start_download = True
header_pairs = reply.rawHeaderPairs()
headers = {}
for hp in header_pairs:
headers[to_text_string(hp[0]).lower()] = to_text_string(hp[1])
total_size = int(headers.get('content-length', 0))
# Check if file exists
if os.path.isfile(path):
file_size = os.path.getsize(path)
# Check if existing file matches size of requested file
start_download = file_size != total_size
if start_download:
# File sizes dont match, hence download file
qurl = QUrl(url)
request = QNetworkRequest(qurl)
self._get_requests[url] = request
reply = self._manager.get(request)
error = reply.error()
if error:
logger.error(str(('Reply Error:', error)))
reply.downloadProgress.connect(
lambda r, t, w=worker: self._progress(r, t, w))
else:
# File sizes match, dont download file
worker.finished = True
worker.sig_download_finished.emit(url, path)
worker.sig_finished.emit(worker, path, None)
elif url in self._get_requests:
data = reply.readAll()
self._save(url, path, data)
def _save(self, url, path, data):
"""
"""
worker = self._workers[url]
path = self._paths[url]
if len(data):
with open(path, 'wb') as f:
f.write(data)
# Clean up
worker.finished = True
worker.sig_download_finished.emit(url, path)
worker.sig_finished.emit(worker, path, None)
self._get_requests.pop(url)
self._workers.pop(url)
self._paths.pop(url)
def _progress(self, bytes_received, bytes_total, worker):
"""
"""
worker.sig_download_progress.emit(
worker.url, worker.path, bytes_received, bytes_total)
def download(self, url, path):
"""
"""
# original_url = url
qurl = QUrl(url)
url = to_text_string(qurl.toEncoded(), encoding='utf-8')
logger.debug(str((url, path)))
if url in self._workers:
while not self._workers[url].finished:
return self._workers[url]
worker = DownloadWorker(url, path)
# Check download folder exists
folder = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(folder):
os.makedirs(folder)
request = QNetworkRequest(qurl)
self._head_requests[url] = request
self._paths[url] = path
self._workers[url] = worker
self._manager.head(request)
self._timer.start()
return worker
    def terminate(self):
        # Intentional no-op: kept so this API exposes the same interface as
        # _RequestsDownloadAPI.terminate().
        pass
class RequestsDownloadWorker(QObject):
    """Run a single callable on a background thread and report the result.

    Signals:
        sig_finished(worker, output, error) -- emitted once the callable ran.
        sig_download_finished(url, path) -- relayed download completion.
        sig_download_progress(url, path, received, total) -- relayed progress.
    """
    sig_finished = Signal(object, object, object)
    sig_download_finished = Signal(str, str)
    sig_download_progress = Signal(str, str, int, int)

    def __init__(self, method, args, kwargs):
        super(RequestsDownloadWorker, self).__init__()
        self.method = method
        self.args = args
        self.kwargs = kwargs
        self._is_finished = False

    def is_finished(self):
        """Return True once the wrapped callable has been executed."""
        return self._is_finished

    def start(self):
        """Invoke the wrapped callable and emit ``sig_finished``."""
        output, error = None, None
        try:
            output = self.method(*self.args, **self.kwargs)
        except Exception as err:
            error = err
        logger.debug(str((self.method.__name__,
                          self.method.__module__,
                          error)))
        self.sig_finished.emit(self, output, error)
        self._is_finished = True
class _RequestsDownloadAPI(QObject):
    """Download/validation API based on ``requests``, run via QThreads.

    Each public call wraps a private method in a
    :class:`RequestsDownloadWorker` on its own ``QThread``.  A one-second
    timer prunes finished workers and threads.
    """
    _sig_download_finished = Signal(str, str)
    _sig_download_progress = Signal(str, str, int, int)

    def __init__(self):
        # Bug fix: the original called ``super(QObject, self).__init__()``,
        # which starts the MRO lookup *after* QObject and therefore skips
        # QObject's own initializer.
        super(_RequestsDownloadAPI, self).__init__()
        self._conda_api = CondaAPI()
        self._queue = deque()
        self._threads = []
        self._workers = []
        self._timer = QTimer()
        self._chunk_size = 1024
        self._timer.setInterval(1000)
        self._timer.timeout.connect(self._clean)

    def _clean(self):
        """
        Periodically check for inactive workers and remove their references.
        """
        if self._workers:
            # Bug fix: the original removed items from the lists while
            # iterating over them, which skips elements; rebuild instead.
            self._workers = [w for w in self._workers if not w.is_finished()]
        if self._threads:
            self._threads = [t for t in self._threads if not t.isFinished()]
        else:
            self._timer.stop()

    def _start(self):
        """Start the next queued thread when exactly one is waiting."""
        if len(self._queue) == 1:
            thread = self._queue.popleft()
            thread.start()
            self._timer.start()

    def _create_worker(self, method, *args, **kwargs):
        """Wrap *method* in a worker on its own thread, queue it, return it."""
        # FIXME: this might be heavy...
        thread = QThread()
        worker = RequestsDownloadWorker(method, args, kwargs)
        worker.moveToThread(thread)
        worker.sig_finished.connect(self._start)
        self._sig_download_finished.connect(worker.sig_download_finished)
        self._sig_download_progress.connect(worker.sig_download_progress)
        worker.sig_finished.connect(thread.quit)
        thread.started.connect(worker.start)
        self._queue.append(thread)
        self._threads.append(thread)
        self._workers.append(worker)
        self._start()
        return worker

    def _download(self, url, path=None, force=False):
        """Download *url* to *path* (URL basename by default).

        Skips the transfer when a file of identical size already exists,
        unless *force* is True.  Returns the local path, or None when the
        HTTP request itself fails.
        """
        if path is None:
            path = url.split('/')[-1]

        # Make dir if non existent
        folder = os.path.dirname(os.path.abspath(path))
        if not os.path.isdir(folder):
            os.makedirs(folder)

        # Start actual download
        try:
            r = requests.get(url, stream=True)
        except Exception as error:
            logger.error(str(error))
            # Bug fix: the original fell through with ``r`` unbound and
            # crashed with NameError; bail out instead.
            return None

        total_size = int(r.headers.get('Content-Length', 0))

        # Check if file exists
        if os.path.isfile(path) and not force:
            file_size = os.path.getsize(path)
            # Check if existing file matches size of requested file
            if file_size == total_size:
                self._sig_download_finished.emit(url, path)
                return path

        # File not found or file size did not match. Download file.
        progress_size = 0
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=self._chunk_size):
                if chunk:
                    f.write(chunk)
                    progress_size += len(chunk)
                    self._sig_download_progress.emit(url, path,
                                                     progress_size,
                                                     total_size)
        self._sig_download_finished.emit(url, path)
        return path

    def _is_valid_url(self, url):
        """Return True when a HEAD request to *url* answers 200."""
        try:
            r = requests.head(url)
            value = r.status_code in [200]
        except Exception as error:
            logger.error(str(error))
            value = False
        return value

    def _is_valid_channel(self, channel,
                          conda_url='https://conda.anaconda.org'):
        """Return True when *channel* serves repodata.json for this platform."""
        if channel.startswith('https://') or channel.startswith('http://'):
            url = channel
        else:
            url = "{0}/{1}".format(conda_url, channel)

        if url[-1] == '/':
            url = url[:-1]

        plat = self._conda_api.get_platform()
        repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')

        try:
            r = requests.head(repodata_url)
            value = r.status_code in [200]
        except Exception as error:
            logger.error(str(error))
            value = False
        return value

    def _is_valid_api_url(self, url):
        """Return True when *url* responds with JSON containing ``ok: 1``."""
        # Check response is a JSON with ok: 1
        data = {}
        try:
            r = requests.get(url)
            content = to_text_string(r.content, encoding='utf-8')
            data = json.loads(content)
        except Exception as error:
            logger.error(str(error))
        return data.get('ok', 0) == 1

    def download(self, url, path=None, force=False):
        """Queue an asynchronous download; return the tracking worker."""
        logger.debug(str((url, path, force)))
        method = self._download
        return self._create_worker(method, url, path=path, force=force)

    def terminate(self):
        """Ask all threads to quit and drop every worker/thread reference."""
        for t in self._threads:
            t.quit()
        # Bug fix: the original assigned ``self._thread`` (a typo), leaving
        # the real thread list untouched.
        self._threads = []
        self._workers = []

    def is_valid_url(self, url, non_blocking=True):
        """Check URL reachability; async worker unless non_blocking is False."""
        logger.debug(str((url)))
        if non_blocking:
            method = self._is_valid_url
            return self._create_worker(method, url)
        else:
            return self._is_valid_url(url)

    def is_valid_api_url(self, url, non_blocking=True):
        """Check for an Anaconda-style API; async unless non_blocking is False."""
        logger.debug(str((url)))
        if non_blocking:
            method = self._is_valid_api_url
            return self._create_worker(method, url)
        else:
            return self._is_valid_api_url(url=url)

    def is_valid_channel(self, channel,
                         conda_url='https://conda.anaconda.org',
                         non_blocking=True):
        """Check a conda channel; async unless non_blocking is False."""
        logger.debug(str((channel, conda_url)))
        if non_blocking:
            method = self._is_valid_channel
            return self._create_worker(method, channel, conda_url)
        else:
            return self._is_valid_channel(channel, conda_url=conda_url)
DOWNLOAD_API = None
REQUESTS_DOWNLOAD_API = None
def DownloadAPI():
    # Lazily create and return the process-wide Qt-based download API
    # singleton.
    global DOWNLOAD_API
    if DOWNLOAD_API is None:
        DOWNLOAD_API = _DownloadAPI()
    return DOWNLOAD_API
def RequestsDownloadAPI():
    # Lazily create and return the process-wide requests-based download API
    # singleton.
    global REQUESTS_DOWNLOAD_API
    if REQUESTS_DOWNLOAD_API is None:
        REQUESTS_DOWNLOAD_API = _RequestsDownloadAPI()
    return REQUESTS_DOWNLOAD_API
def ready_print(worker, output, error):
    """Debug callback: print which wrapped method finished and its result."""
    name = worker.method.__name__
    print(name, output, error)
def test():
    # Manual smoke test: downloads a few repodata files with both the Qt
    # and the requests-based APIs, then validates some URLs and channels.
    # Requires a running display and network access.
    from conda_manager.utils.qthelpers import qapplication
    urls = ['http://repo.continuum.io/pkgs/free/linux-64/repodata.json.bz2',
            'https://conda.anaconda.org/anaconda/linux-64/repodata.json.bz2',
            'https://conda.anaconda.org/asmeurer/linux-64/repodata.json.bz2',
            ]
    path = os.sep.join([os.path.expanduser('~'), 'testing-download'])
    app = qapplication()
    api = DownloadAPI()
    for i, url in enumerate(urls):
        filepath = os.path.join(path, str(i) + '.json.bz2')
        api.download(url, filepath)
        print('Downloading', url, filepath)
    path = os.sep.join([os.path.expanduser('~'), 'testing-download-requests'])
    api = RequestsDownloadAPI()
    urls += ['asdasdasdad']  # deliberately invalid URL
    for i, url in enumerate(urls):
        worker = api.is_valid_url(url)
        worker.url = url
        worker.sig_finished.connect(ready_print)
        filepath = os.path.join(path, str(i) + '.json.bz2')
        worker = api.download(url, path=filepath, force=True)
        worker.sig_finished.connect(ready_print)
    api = RequestsDownloadAPI()
    print(api._is_valid_api_url('https://api.anaconda.org'))
    print(api._is_valid_api_url('https://conda.anaconda.org'))
    print(api._is_valid_channel('https://google.com'))
    print(api._is_valid_channel('https://conda.anaconda.org/continuumcrew'))
    app.exec_()
# Allow running this module directly as a manual smoke test.
if __name__ == '__main__':
    test()
| {
"content_hash": "bd5f44b10be542e2064bffb8fe513907",
"timestamp": "",
"source": "github",
"line_count": 520,
"max_line_length": 78,
"avg_line_length": 29.573076923076922,
"alnum_prop": 0.5518923136948888,
"repo_name": "martindurant/conda-manager",
"id": "0c75f5a17429bb06c1bc0f632f22c01e17e81560",
"size": "15403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_manager/api/download_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "289236"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from random import randrange
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type
import numpy as np
def h2o_H2OFrame_ifelse():
    """
    Python API test: h2o.frame.H2OFrame.ifelse(yes, no)

    Copied from pyunit_ifelse.py
    """
    python_lists = np.random.uniform(-1, 1, (5, 5))
    h2oframe = h2o.H2OFrame(python_obj=python_lists)
    newFrame = (h2oframe > 0).ifelse(1, -1)
    # Bug fix: check the type of the frame *returned by ifelse()*; the
    # original asserted on the input frame, which is trivially true.
    assert_is_type(newFrame, H2OFrame)  # check return type
    # randomly check one entry
    rowInd = randrange(0, h2oframe.nrow)
    colInd = randrange(0, h2oframe.ncol)
    assert newFrame[rowInd, colInd] == np.sign(h2oframe[rowInd, colInd]), \
        "h2o.H2OFrame.ifelse() command is not working."
pyunit_utils.standalone_test(h2o_H2OFrame_ifelse)
| {
"content_hash": "9858081bb5ca35f3db88060a28e2c552",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 119,
"avg_line_length": 30.17241379310345,
"alnum_prop": 0.7097142857142857,
"repo_name": "h2oai/h2o-3",
"id": "d4510289dbd462a0d4fba726a8927f61f4e80514",
"size": "875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_ifelse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
} |
import pytest
from copy import deepcopy
from minette import Topic, Priority
def test_init():
    """A freshly constructed Topic carries only default values."""
    topic = Topic()
    assert (topic.name, topic.status) == ("", "")
    assert topic.is_new is False
    assert topic.keep_on is False
    assert topic.previous is None
    assert topic.priority == Priority.Normal
def test_is_changed():
    """is_changed compares the current name against the snapshot in previous."""
    topic = Topic()
    topic.name = "favorite_fruit"
    topic.previous = deepcopy(topic)
    # Same name as the snapshot -> not changed.
    topic.name = "favorite_fruit"
    assert topic.is_changed is False
    # Different name -> changed.
    topic.name = "favorite_sushi"
    assert topic.is_changed is True
| {
"content_hash": "84fc1c952215042365aa7440c29b16e6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 44,
"avg_line_length": 22.607142857142858,
"alnum_prop": 0.669826224328594,
"repo_name": "uezo/minette-python",
"id": "5f92c03290ddee12b4c71fcbf8e53653a07276f1",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/test_topic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "229734"
}
],
"symlink_target": ""
} |
import enum
import logging
from .base_option import BaseOption, BaseContainerOption
from .float_option import FloatOption
from .int_option import IntOption
from .string_option import StringOption
from nativeconfig.exceptions import DeserializationError, ValidationError
LOG = logging.getLogger('nativeconfig')
class EnumOption(BaseOption):
    """
    EnumOption represents Python Enum in config.
    """

    def __init__(self, name, enum_type, value_option=None, **kwargs):
        """
        Accepts all the arguments of BaseConfig except choices.

        If value_option is provided, it's used to serialize values. Otherwise Enum names are used.

        @param enum_type: Type that subclasses from enum.Enum that will be used to instantiate python value.
        @type enum_type: enum.EnumMeta

        @param value_option: An instance of BaseOption subclass that provides serialization and validation.
            For known Enum types, value_option will be picked automatically unless set explicitly.
        @type value_option: BaseOption

        @raise ValueError: enum_type is not an Enum, or value_option is a container option.
        """
        if issubclass(enum_type, enum.Enum):
            self._enum_type = enum_type
        else:
            raise ValueError("'enum_type' must be a subclass of enum.Enum")

        if value_option:
            if isinstance(value_option, BaseOption) and not isinstance(value_option, BaseContainerOption):
                self._value_option = value_option
            else:
                raise ValueError("'value_option' cannot be a container option")
        elif issubclass(enum_type, int):
            # Mixed-in int/float/str enums get a matching value option so raw
            # values round-trip with the right type.
            self._value_option = IntOption('IntOption')
        elif issubclass(enum_type, float):
            self._value_option = FloatOption('FloatOption')
        elif issubclass(enum_type, str):
            self._value_option = StringOption('StringOption')
        else:
            self._value_option = None

        # Default the choices to every member of the enum.
        choices = kwargs.pop('choices', tuple(enum_type.__members__.values()))
        super().__init__(name, choices=choices, **kwargs)

    def serialize(self, python_value):
        """Serialize an enum member: by value via value_option, else by name."""
        if self._value_option:
            return self._value_option.serialize(python_value.value)
        else:
            return python_value.name

    def deserialize(self, raw_value):
        """
        1. If value_option is set, will try to instantiate enum by value
        2. Otherwise will try to find an appropriate value by comparing string.

        @raise DeserializationError: raw_value matches neither a member value nor a member name.
        """
        if self._value_option:
            try:
                return self._enum_type(self._value_option.deserialize(raw_value))
            except (ValueError, DeserializationError):
                pass

        # Bug fix: logging uses lazy %-style formatting; the original passed
        # a '{}' placeholder with an argument, which never interpolated and
        # raised a formatting error at log time.
        LOG.info('Unable to instantiate "%s" directly.', self._enum_type)

        # Fall back to a case-insensitive match against member str() or name.
        raw_value_lower = raw_value.lower()
        for name, value in self._enum_type.__members__.items():
            if str(value).lower() == raw_value_lower or name.lower() == raw_value_lower:
                return value

        raise DeserializationError("unable to deserialize '{}' into {}".format(raw_value, self._enum_type), raw_value, self.name)

    def serialize_json(self, python_value):
        """Serialize to JSON: None passes through; otherwise value or name."""
        if python_value is None:
            return super().serialize_json(python_value)
        elif self._value_option:
            return self._value_option.serialize_json(python_value.value)
        else:
            return super().serialize_json(python_value.name)

    def deserialize_json(self, json_value):
        """Deserialize JSON into an enum member, trying value then name."""
        if json_value == 'null':
            return None
        elif self._value_option:
            try:
                enum_value = self._value_option.deserialize_json(json_value)
            except DeserializationError:
                enum_value = super().deserialize_json(json_value)
        else:
            enum_value = super().deserialize_json(json_value)

        try:
            return self._enum_type(enum_value)
        except ValueError:
            pass

        try:
            return self._enum_type[enum_value]
        except KeyError:
            pass

        # Last resort: reuse the string-based deserialization path.
        raw_value = str(enum_value)
        return self.deserialize(raw_value)

    def validate(self, python_value):
        """
        @raise ValidationError: python_value is not a member of the configured enum type.
        """
        super().validate(python_value)

        if not isinstance(python_value, self._enum_type):
            raise ValidationError("'{}' must be in {}".format(python_value, self._enum_type), python_value, self.name)
| {
"content_hash": "cf340eb8f81a24dd9f95cece4a345ede",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 129,
"avg_line_length": 36.73728813559322,
"alnum_prop": 0.6235294117647059,
"repo_name": "GreatFruitOmsk/nativeconfig",
"id": "f67f9c7367dcb09e5f81a9fe73698ee591fdc820",
"size": "4336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nativeconfig/options/enum_option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149753"
}
],
"symlink_target": ""
} |
from .. import Analysis, AnalysesHub
class Decompiler(Analysis):
    """Run the full decompilation pipeline on a single function."""

    def __init__(self, func, cfg=None, optimization_passes=None, sp_tracker_track_memory=True):
        self.func = func
        self._cfg = cfg
        self._optimization_passes = optimization_passes
        self._sp_tracker_track_memory = sp_tracker_track_memory

        self.clinic = None  # mostly for debugging purposes
        self.codegen = None

        self._decompile()

    def _decompile(self):
        # SimProcedures have no real code to decompile.
        if self.func.is_simprocedure:
            return

        # Lift the function's blocks to AIL.
        clinic = self.project.analyses.Clinic(
            self.func,
            kb=self.kb,
            optimization_passes=self._optimization_passes,
            sp_tracker_track_memory=self._sp_tracker_track_memory,
        )
        # Recover control-flow regions, structure them, then simplify.
        regions = self.project.analyses.RegionIdentifier(self.func, graph=clinic.graph, kb=self.kb)
        structured = self.project.analyses.RecursiveStructurer(regions.region, kb=self.kb)
        simplified = self.project.analyses.RegionSimplifier(structured.result, kb=self.kb)
        # Emit structured pseudo-code.
        codegen = self.project.analyses.StructuredCodeGenerator(self.func, simplified.result, cfg=self._cfg, kb=self.kb)

        self.clinic = clinic
        self.codegen = codegen
AnalysesHub.register_default('Decompiler', Decompiler)
| {
"content_hash": "9744a74d256294ecbf2863b5c0045e99",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 111,
"avg_line_length": 34.357142857142854,
"alnum_prop": 0.6077616077616078,
"repo_name": "schieb/angr",
"id": "042f22b48d137a316f74d8197777b4660b8cc3cc",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/decompiler/decompiler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39522"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "739"
},
{
"name": "Python",
"bytes": "4987778"
}
],
"symlink_target": ""
} |
from smart.interfaces.gtk.packageview import GtkPackageView
from smart.interfaces.gtk import getPixbuf
from smart.util.strtools import sizeToStr
from smart.report import Report
from smart import *
import gobject, gtk
class GtkChanges(gtk.Window):
    """Modal GTK window summarizing the changes of a transaction.

    Groups packages by operation (install/upgrade/remove/...), shows the
    disk-space impact, and offers OK/Cancel (confirmation mode) or Close.
    """
    def __init__(self):
        gtk.Window.__init__(self)
        self.__gobject_init__()
        self.set_icon(getPixbuf("smart"))
        self.set_title(_("Change Summary"))
        self.set_modal(True)
        self.set_position(gtk.WIN_POS_CENTER)
        self.set_geometry_hints(min_width=600, min_height=400)
        # Closing the window just leaves the nested main loop (same as
        # Cancel/Close); self._result stays False.
        def delete(widget, event):
            gtk.main_quit()
            return True
        self.connect("delete-event", delete)
        self._vbox = gtk.VBox()
        self._vbox.set_border_width(10)
        self._vbox.set_spacing(10)
        self._vbox.show()
        self.add(self._vbox)
        # Optional caption above the package tree.
        self._label = gtk.Label()
        self._vbox.pack_start(self._label, expand=False)
        self._pv = GtkPackageView()
        self._pv.getTreeView().set_headers_visible(False)
        self._pv.setExpandPackage(True)
        self._pv.show()
        self._vbox.pack_start(self._pv)
        # Label describing download size and space used/freed.
        self._sizelabel = gtk.Label()
        self._vbox.pack_start(self._sizelabel, expand=False)
        # OK/Cancel button box, shown only in confirmation mode.
        self._confirmbbox = gtk.HButtonBox()
        self._confirmbbox.set_spacing(10)
        self._confirmbbox.set_layout(gtk.BUTTONBOX_END)
        self._vbox.pack_start(self._confirmbbox, expand=False)
        self._cancelbutton = gtk.Button(stock="gtk-cancel")
        self._cancelbutton.show()
        self._cancelbutton.connect("clicked", lambda x: gtk.main_quit())
        self._confirmbbox.pack_start(self._cancelbutton)
        self._okbutton = gtk.Button(stock="gtk-ok")
        self._okbutton.show()
        # OK records acceptance before leaving the nested main loop.
        def clicked(x):
            self._result = True
            gtk.main_quit()
        self._okbutton.connect("clicked", clicked)
        self._confirmbbox.pack_start(self._okbutton)
        # Close-only button box, shown in informational mode.
        self._closebbox = gtk.HButtonBox()
        self._closebbox.set_spacing(10)
        self._closebbox.set_layout(gtk.BUTTONBOX_END)
        self._vbox.pack_start(self._closebbox, expand=False)
        self._closebutton = gtk.Button(stock="gtk-close")
        self._closebutton.show()
        self._closebutton.connect("clicked", lambda x: gtk.main_quit())
        self._closebbox.pack_start(self._closebutton)
    def showChangeSet(self, changeset, keep=None, confirm=False, label=None):
        """Display *changeset* and run a nested GTK main loop.

        @param changeset: the change set to summarize (fed to Report).
        @param keep: optional mapping of packages shown under "Keep".
        @param confirm: when True show OK/Cancel and return the user's
            choice; otherwise show only Close.
        @param label: optional caption shown above the package tree.
        @return: True when confirmed via OK, False otherwise.
        """
        report = Report(changeset)
        report.compute()

        # str subclass whose ordering follows ORDER, so the top-level group
        # labels sort as Remove, Downgrade, Reinstall, Install, Upgrade.
        class Sorter(str):
            ORDER = [_("Remove"), _("Downgrade"), _("Reinstall"),
                     _("Install"), _("Upgrade")]
            def _index(self, s):
                # NOTE(review): loop variable shadows the os module inside
                # this method; harmless here since os is not used.
                i = 0
                for os in self.ORDER:
                    if os.startswith(s):
                        return i
                    i += 1
                return i
            def __cmp__(self, other):
                return cmp(self._index(str(self)), self._index(str(other)))
            def __lt__(self, other):
                return cmp(self, other) < 0
        packages = {}
        if report.install:
            install = {}
            reinstall = {}
            upgrade = {}
            downgrade = {}
            lst = report.install.keys()
            lst.sort()
            for pkg in lst:
                # Per-package relation groups shown under the package node.
                package = {}
                done = {}
                if pkg in report.upgrading:
                    for upgpkg in report.upgrading[pkg]:
                        package.setdefault(_("Upgrades"), []).append(upgpkg)
                        done[upgpkg] = True
                if pkg in report.downgrading:
                    for dwnpkg in report.downgrading[pkg]:
                        package.setdefault(_("Downgrades"), []).append(dwnpkg)
                        done[dwnpkg] = True
                if pkg in report.requires:
                    for reqpkg in report.requires[pkg]:
                        package.setdefault(_("Requires"), []).append(reqpkg)
                if pkg in report.requiredby:
                    for reqpkg in report.requiredby[pkg]:
                        package.setdefault(_("Required By"), []).append(reqpkg)
                if pkg in report.conflicts:
                    for cnfpkg in report.conflicts[pkg]:
                        if cnfpkg in done:
                            continue
                        package.setdefault(_("Conflicts"), []).append(cnfpkg)
                if pkg.installed:
                    reinstall[pkg] = package
                elif pkg in report.upgrading:
                    upgrade[pkg] = package
                elif pkg in report.downgrading:
                    downgrade[pkg] = package
                else:
                    install[pkg] = package
            if reinstall:
                packages[Sorter(_("Reinstall (%d)") % len(reinstall))] = \
                    reinstall
            if install:
                packages[Sorter(_("Install (%d)") % len(install))] = install
            if upgrade:
                packages[Sorter(_("Upgrade (%d)") % len(upgrade))] = upgrade
            if downgrade:
                packages[Sorter(_("Downgrade (%d)") % len(downgrade))] = \
                    downgrade
        if report.removed:
            remove = {}
            lst = report.removed.keys()
            lst.sort()
            for pkg in lst:
                package = {}
                done = {}
                if pkg in report.requires:
                    for reqpkg in report.requires[pkg]:
                        package.setdefault(_("Requires"), []).append(reqpkg)
                if pkg in report.requiredby:
                    for reqpkg in report.requiredby[pkg]:
                        package.setdefault(_("Required By"), []).append(reqpkg)
                if pkg in report.conflicts:
                    for cnfpkg in report.conflicts[pkg]:
                        if cnfpkg in done:
                            continue
                        package.setdefault(_("Conflicts"), []).append(cnfpkg)
                remove[pkg] = package
            if remove:
                packages[Sorter(_("Remove (%d)") % len(report.removed))] = \
                    remove
        if keep:
            packages[Sorter(_("Keep (%d)") % len(keep))] = keep
        # Compute the disk-impact summary line.
        dsize = report.getDownloadSize() - report.getCachedSize()
        size = report.getInstallSize() - report.getRemoveSize()
        sizestr = ""
        if dsize:
            sizestr += _("%s of package files are needed. ") % sizeToStr(dsize)
        if size > 0:
            sizestr += _("%s will be used.") % sizeToStr(size)
        elif size < 0:
            size *= -1
            sizestr += _("%s will be freed.") % sizeToStr(size)
        if dsize or size:
            self._sizelabel.set_text(sizestr)
            self._sizelabel.show()
        else:
            self._sizelabel.hide()
        # Choose which button box matches the requested mode.
        if confirm:
            self._confirmbbox.show()
            self._closebbox.hide()
        else:
            self._closebbox.show()
            self._confirmbbox.hide()
        if label:
            self._label.set_text(label)
            self._label.show()
        else:
            self._label.hide()
        self._pv.setPackages(packages, changeset)
        # Expand first level
        self._pv.setExpanded([(x,) for x in packages])
        # Run a nested main loop until a button or delete-event quits it.
        self._result = False
        self.show()
        gtk.main()
        self.hide()
        return self._result
# vim:ts=4:sw=4:et
| {
"content_hash": "48a6553423a1ad5f759929ff2b77bc5f",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 79,
"avg_line_length": 36.8743961352657,
"alnum_prop": 0.4974453032883532,
"repo_name": "blackPantherOS/packagemanagement",
"id": "16c6151823bce0e770e6b2278979834d2b47aa2d",
"size": "8508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smartpm/smart/interfaces/gtk/changes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "605504"
},
{
"name": "C++",
"bytes": "65879"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "HTML",
"bytes": "17187"
},
{
"name": "M4",
"bytes": "170666"
},
{
"name": "Makefile",
"bytes": "5031"
},
{
"name": "Perl",
"bytes": "311801"
},
{
"name": "Prolog",
"bytes": "5458"
},
{
"name": "Python",
"bytes": "2250512"
},
{
"name": "Roff",
"bytes": "1805"
},
{
"name": "Shell",
"bytes": "283804"
},
{
"name": "XSLT",
"bytes": "312"
}
],
"symlink_target": ""
} |
class LoaderResult:
    """Outcome of a loader call: the media buffer plus status metadata."""

    # Error codes reported via ``error`` when ``successful`` is False.
    ERROR_NOT_FOUND = "not_found"
    ERROR_UPSTREAM = "upstream"
    ERROR_TIMEOUT = "timeout"

    def __init__(self, buffer=None, successful=True, error=None, metadata=None):
        """
        :param buffer: The media buffer
        :param successful: True when the media has been loaded.
        :type successful: bool
        :param error: Error code
        :type error: str
        :param metadata: Dictionary of metadata about the buffer
        :type metadata: dict
        """
        self.buffer = buffer
        self.successful = successful
        self.error = error
        # A fresh dict per instance avoids the shared-mutable-default trap.
        self.metadata = {} if metadata is None else metadata
| {
"content_hash": "4de7431d9d73af2ceb8f572c95e45594",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.5933908045977011,
"repo_name": "scorphus/thumbor",
"id": "d2eed95cb893aeff8203882fc9ea83b0d88afa62",
"size": "949",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thumbor/loaders/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58654"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11518"
},
{
"name": "Python",
"bytes": "604965"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
import os
import json
''' dotfile is a class for managing the local dotfile storage
saves a file called, '.fu' to your home directory
the file format is JSON
{
result : Last Search Result
last : Last Copied command
history : History of used commands
}
the entire dot file is rewritten on every write
'''
class dotfile:
    """Manage the local '~/.fu' JSON dotfile.

    Layout:
        result  : last search result
        last    : last copied command
        history : previously used commands, most recent first

    The entire dotfile is rewritten on every save().
    """

    def __init__(self):
        """Locate and load the dotfile, creating it when missing."""
        self.path = os.path.join(os.getenv("HOME"), '.fu')
        self.__load()
        self.history_size = 30

    def history(self):
        """Return the history of all copied commands (most recent first)."""
        return self._history

    def last(self):
        """Return the command that was last copied."""
        return self._last

    def result(self):
        """Return the last search result as a string."""
        return str(self._result)

    def save_result(self, string):
        """Remember the latest search result (persisted by save())."""
        self._result = string

    def save_copy(self, string):
        """Record *string* as the last copied command and add it to history."""
        self._last = string
        self.__record(string)

    def __record(self, string):
        """Prepend *string* to the history, trimming to history_size entries.

        Bug fix: the original referenced an undefined ``used_size`` and
        sliced the bound method ``self.history`` instead of the list, so it
        crashed as soon as the history reached capacity.
        """
        if len(self._history) >= self.history_size:
            # Drop the oldest entries so the prepend keeps us at capacity.
            self._history = self._history[:self.history_size - 1]
        # Prepend to the history
        self._history.insert(0, string)

    def __load(self):
        """Read the dotfile from disk into the in-memory fields."""
        # If the file doesn't exist make it
        if not os.path.isfile(self.path):
            self.__make()
        with open(self.path, 'r') as fid:
            raw = fid.read()
        # Fall back to sane defaults for any key missing from the file.
        self._storage = json.loads(raw)
        if 'result' in self._storage:
            self._result = str(self._storage['result'])
        else:
            self._result = ""
        self._last = self._storage.get('last', "")
        self._history = self._storage.get('history', [])

    def __make(self):
        """Create an empty JSON dotfile."""
        with open(self.path, 'w') as fid:
            fid.write("{}")

    def save(self):
        """Serialize result/history/last back to the dotfile."""
        payload = json.dumps({'result': self._result,
                              'history': self._history,
                              'last': self._last})
        with open(self.path, 'w') as fid:
            fid.write(payload)
| {
"content_hash": "14f8bd06234547e22b02b56e47e162b2",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 106,
"avg_line_length": 24.690721649484537,
"alnum_prop": 0.6004175365344467,
"repo_name": "samirahmed/fu",
"id": "92b8fe75861c379793855b8e921a16db21a652a2",
"size": "2395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fu_core/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111417"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_ds
version_added: '2.8'
short_description: Configure FlashBlade Directory Service
description:
- Create or erase directory services configurations. There is no facility
to SSL certificates at this time. Use the FlashBlade GUI for this
additional configuration work.
- To modify an existing directory service configuration you must first delete
  an existing configuration and then recreate with new settings.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description:
- Create or delete directory service configuration
default: present
type: str
choices: [ absent, present ]
dstype:
description:
- The type of directory service to work on
choices: [ management, nfs, smb ]
type: str
enable:
description:
- Whether to enable or disable directory service support.
default: false
type: bool
uri:
description:
- A list of up to 30 URIs of the directory servers. Each URI must include
the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a
domain name or IP address. For example, ldap://ad.company.com configures
the directory service with the hostname "ad" in the domain "company.com"
while specifying the unencrypted LDAP protocol.
type: list
base_dn:
description:
- Sets the base of the Distinguished Name (DN) of the directory service
groups. The base should consist of only Domain Components (DCs). The
base_dn will populate with a default value when a URI is entered by
parsing domain components from the URI. The base DN should specify DC=
for each domain component and multiple DCs should be separated by commas.
required: true
type: str
bind_password:
description:
- Sets the password of the bind_user user name account.
type: str
bind_user:
description:
- Sets the user name that can be used to bind to and query the directory.
- For Active Directory, enter the username - often referred to as
sAMAccountName or User Logon Name - of the account that is used to
perform directory lookups.
- For OpenLDAP, enter the full DN of the user.
type: str
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: Delete existing management directory service
purefb_ds:
dstype: management
state: absent
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Create NFS directory service (disabled)
purefb_ds:
dstype: nfs
uri: "ldap://lab.purestorage.com"
base_dn: "DC=lab,DC=purestorage,DC=com"
bind_user: Administrator
bind_password: password
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Enable existing SMB directory service
purefb_ds:
    dstype: smb
enable: true
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Disable existing management directory service
purefb_ds:
dstype: management
enable: false
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Create NFS directory service (enabled)
purefb_ds:
dstype: nfs
enable: true
uri: "ldap://lab.purestorage.com"
base_dn: "DC=lab,DC=purestorage,DC=com"
bind_user: Administrator
bind_password: password
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
HAS_PURITY_FB = True
try:
from purity_fb import DirectoryService
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
def update_ds(module, blade):
    """Update Directory Service (placeholder: always reports no change)."""
    module.exit_json(changed=False)
def enable_ds(module, blade):
    """Enable Directory Service"""
    changed = False
    ds_name = module.params['dstype']
    try:
        blade.directory_services.update_directory_services(
            names=[ds_name],
            directory_service=DirectoryService(enabled=True))
        changed = True
    except Exception:
        module.fail_json(msg='Enable {0} Directory Service failed: Check Configuration'.format(ds_name))
    module.exit_json(changed=changed)
def disable_ds(module, blade):
    """Disable Directory Service"""
    changed = False
    ds_name = module.params['dstype']
    try:
        blade.directory_services.update_directory_services(
            names=[ds_name],
            directory_service=DirectoryService(enabled=False))
        changed = True
    except Exception:
        module.fail_json(msg='Disable {0} Directory Service failed'.format(ds_name))
    module.exit_json(changed=changed)
def delete_ds(module, blade):
    """Delete Directory Service"""
    changed = False
    ds_name = module.params['dstype']
    try:
        # "Deletion" means overwriting the configuration with blank values.
        blank_config = DirectoryService(uris=[''],
                                        base_dn="",
                                        bind_user="",
                                        bind_password="",
                                        enabled=False)
        blade.directory_services.update_directory_services(
            names=[ds_name],
            directory_service=blank_config)
        changed = True
    except Exception:
        module.fail_json(msg='Delete {0} Directory Service failed'.format(ds_name))
    module.exit_json(changed=changed)
def create_ds(module, blade):
    """Create (configure) a Directory Service from the module parameters."""
    changed = False
    try:
        service = DirectoryService(uris=module.params['uri'],
                                   base_dn=module.params['base_dn'],
                                   bind_user=module.params['bind_user'],
                                   bind_password=module.params['bind_password'],
                                   enabled=module.params['enable'])
        blade.directory_services.update_directory_services(
            names=[module.params['dstype']],
            directory_service=service)
        changed = True
    except Exception:
        module.fail_json(msg='Create {0} Directory Service failed: Check configuration'.format(module.params['dstype']))
    module.exit_json(changed=changed)
def main():
    """Entry point: parse parameters, inspect the current directory-service
    state on the FlashBlade, and dispatch to the matching action."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        uri=dict(type='list'),
        dstype=dict(required=True, type='str', choices=['management', 'nfs', 'smb']),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        enable=dict(type='bool', default=False),
        bind_password=dict(type='str', no_log=True),
        bind_user=dict(type='str'),
        base_dn=dict(type='str'),
    ))
    # The LDAP connection settings only make sense as a complete set.
    required_together = [['uri', 'bind_password', 'bind_user', 'base_dn']]
    module = AnsibleModule(argument_spec,
                           required_together=required_together,
                           supports_check_mode=False)
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')
    state = module.params['state']
    blade = get_blade(module)
    # A service counts as "configured" when it has a base_dn set.
    ds_configured = False
    dirserv = blade.directory_services.list_directory_services(names=[module.params['dstype']])
    ds_enabled = dirserv.items[0].enabled
    if dirserv.items[0].base_dn is not None:
        ds_configured = True
    # Dispatch on desired state vs. current configured/enabled state.
    if state == 'absent' and ds_configured:
        delete_ds(module, blade)
    elif ds_configured and module.params['enable'] and ds_enabled:
        update_ds(module, blade)
    elif ds_configured and not module.params['enable'] and ds_enabled:
        disable_ds(module, blade)
    elif ds_configured and module.params['enable'] and not ds_enabled:
        enable_ds(module, blade)
    elif not ds_configured and state == 'present':
        create_ds(module, blade)
    else:
        # Nothing to do (e.g. absent + unconfigured, or already disabled).
        module.exit_json(changed=False)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "a612a139ed4d67c4b1516d6b9ee6093b",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 120,
"avg_line_length": 35.037974683544306,
"alnum_prop": 0.6382466281310212,
"repo_name": "SergeyCherepanov/ansible",
"id": "797eee70d9fa30c18adf5d0ce52bee595e0c0d73",
"size": "8491",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/storage/purestorage/purefb_ds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
from . import core, utils
import cgt
import ctypes, os.path as osp, hashlib, numpy as np, sys, subprocess, string, os, time, traceback, cPickle
from collections import defaultdict, namedtuple
from StringIO import StringIO
import logging
def function(inputs, outputs, dbg=None, updates=None, givens=None):
    """Compile a callable from symbolic Argument nodes `inputs` to `outputs`.

    `outputs` may be a single Node (the callable returns one value) or a
    list of Nodes (the callable returns a list).
    """
    assert isinstance(inputs, list), "Inputs must be a list"
    assert all(el.is_argument() for el in inputs), "Invalid input: should be a list of Argument nodes"
    if isinstance(outputs, core.Node):
        # Single output: compile the list version and unwrap on each call.
        listfunc = _function_listout(inputs, [outputs], dbg, updates, givens)
        def _single_out(*args):
            return listfunc(*args)[0]
        return _single_out
    if isinstance(outputs, list):
        assert all(isinstance(el, core.Node) for el in outputs), "Invalid output: should all be symbolic variables"
        return _function_listout(inputs, outputs, dbg, updates, givens)
    raise ValueError("Expected `outputs` to be a Node or a list of Nodes. Got an object of type %s"%type(outputs))
def _function_listout(inputs, outputs, dbg = None, updates=None, givens=None):
    """Compile lists of input/output nodes into an interpreter callable.

    updates: list of (before, after) node pairs applied on every call.
    givens: substitutions cloned into the graph before compilation.
    """
    if updates is None: updates = []
    else: assert (isinstance(updates, list) and
                all(isinstance(a,tuple) and len(a)==2
                    and isinstance(a[0], core.Node) and isinstance(a[1], core.Node)
                    for a in updates)), "updates should be a list of pairs (before, after)"
    if givens is None: givens = []
    # NOTE(review): this branch validates `updates` (not `givens`) and only
    # runs when givens are supplied -- looks misplaced; confirm intent.
    else: assert all(before.is_data() for (before,_) in updates), "lhs of updates must be Data instances"
    if dbg: raise core.Todo("debug functionality is broken")
    # Plain python tuples among the outputs become symbolic tuples.
    outputs = [cgt.make_tuple(*x) if isinstance(x, tuple) else x for x in outputs]
    interp = run_compilation_pipeline(inputs, outputs, updates, givens)
    return interp
# ================================================================
# Execution
# ================================================================
def python_only():
    """True when the compiled `cycgt` extension is unavailable, i.e. only
    the pure-python backend can run."""
    has_native = hasattr(cgt, "cycgt")
    return not has_native
def determine_devices(nodes_sorted, updatetarg2src):
    """Assign a Device to every node in `nodes_sorted`.

    Update targets inherit their source's device; data nodes keep the
    device stored on their op; arguments live on the home (cpu:0) device;
    other results go to gpu only when a native gpu impl exists and either
    the node requested gpu or it has no native cpu impl.
    """
    # Op definitions (available impls, inplace-ness, etc) define constraints
    # on possible devices for a node
    if python_only():
        # NOTE(review): `Device` is unqualified here while core.Device is
        # used everywhere else -- looks like it would NameError on this
        # path; confirm before relying on python-only mode.
        return {node:Device() for node in nodes_sorted}
    # (1) Get available devices for nodes, determined by which impls are available and node types
    compile_info = get_compile_info()
    cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
    node2dev = {}
    home_device = core.Device(devtype="cpu", idx=0)
    for node in nodes_sorted:
        default_device = node.props.get("default_device", home_device)
        if node in updatetarg2src:
            # Update target shares the device of the node it overwrites.
            device = node2dev[updatetarg2src[node]]
        elif node.is_data():
            device = node.op.device
        elif node.is_argument():
            device = home_device
        else:
            if "native_gpu" in node.op.available_impls and (default_device.devtype == "gpu" or "native_cpu" not in node.op.available_impls):
                assert cuda_enabled, "trying to put op on gpu but cuda is disabled"
                device = core.Device("gpu", default_device.idx)
            else:
                device = core.Device(devtype="cpu", idx=default_device.idx)
        node2dev[node] = device
    return node2dev
def is_tensor(x):
    """True when node `x` carries an array (TensorType) value."""
    node_type = x.typ
    return isinstance(node_type, core.TensorType)
def is_tuple(x):
    """True when node `x` carries a tuple (TupleType) value."""
    node_type = x.typ
    return isinstance(node_type, core.TupleType)
def create_interpreter(inputs, outputs, eg, node2memloc):
    """Instantiate the interpreter that will run ExecutionGraph `eg`.

    Chooses between the pure-python SequentialInterpreter and the native
    C++ interpreter based on the configured backend; `parallel` selects the
    threaded native interpreter (num_threads) vs. single-threaded (0).
    """
    assert isinstance(eg, ExecutionGraph)
    input_types = [node.typ for node in inputs]
    output_locs = [node2memloc[node] for node in outputs]
    config = cgt.get_config()
    backend = config["backend"]
    parallel = config["parallel"]
    if backend == "python":
        if parallel:
            raise NotImplementedError("For parallel=True, set backend=native")
        return SequentialInterpreter(eg, output_locs, input_types)
    if backend == "native":
        nthreads = config["num_threads"] if parallel else 0
        return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, nthreads)
    raise NotImplementedError("invalid backend %s"%backend)
def topsorted_shapes_first(outputs, node2shape):
    """Topologically sort the graph under `outputs`, treating each byref
    node's shape-element nodes (and each tuple member's shapes) as extra
    dependencies so they are emitted before the node itself.

    Almost identical to the plain topsorted(...) function otherwise.
    """
    # marks: absent/0 = unvisited, 1 = on the current DFS path, 2 = done.
    marks = {}
    out = []
    # Stack of (node, index of next dependency to visit) -- iterative DFS.
    stack = []
    for x in outputs:
        stack.append((x,0))
    while stack:
        (i,jidx) = stack.pop()
        if jidx == 0:
            m = marks.get(i,0)
            if m == 0:
                marks[i] = 1
            elif m == 1:
                # Re-entering a node already on the path means a cycle.
                raise ValueError("not a dag")
            else:
                continue
        ps = i.parents
        ###### Changed part ######
        # In-place (byref) arrays additionally depend on the nodes that
        # compute their shape; tuples on the shapes of every member.
        if i.ndim > 0 and not i.is_input() and i.op.return_type=="byref":
            if i in node2shape:
                shpels = node2shape[i]
            else:
                raise core.Unreachable
            ps = ps + shpels
        elif is_tuple(i):
            for arrshp in node2shape[i]:
                ps = ps + arrshp
        ##########################
        if jidx == len(ps):
            # All dependencies visited: emit the node.
            marks[i] = 2
            out.append(i)
        else:
            # Revisit self after the next dependency is processed.
            stack.append((i,jidx+1))
            j = ps[jidx]
            stack.append((j,0))
    return out
def determine_memowner(nodes_sorted, updates, node2dev):
    """Decide, for each node, which node's buffer it writes into.

    Returns a dict node -> "memory owner" node.  A node owns its own memory
    unless it writes into one of its inputs (writes_to_input), is the
    target of an update (shares the update source's storage), or can reuse
    a dying parent's buffer (in-place optimization).
    """
    # First determine how many "child" nodes each node has
    node2child = defaultdict(list)
    for node in nodes_sorted:
        for parent in node.parents:
            node2child[parent].append(node)
    # Now traverse graph again and see where we can use the same memory
    node2memowner = {} # mapping node x -> the node that owns its memory
    # For updates, memlocation(RHS) = memlocation(LHS)
    after2before = {after:before for (before,after) in updates}
    enable_inplace_opt = core.get_config()["enable_inplace_opt"]
    for node in nodes_sorted:
        base = node # by default, a node owns its own storage
        if node.is_argument():
            pass
        elif node.op.writes_to_input >= 0:
            # Op mutates one of its inputs: share that input's owner.
            base = node2memowner[node.parents[node.op.writes_to_input]]
        elif node in after2before:
            base = after2before[node]
        elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
            nodeshape = node.op.shp_apply(node.parents)
            # Reuse a parent's buffer only when this node is its sole
            # consumer and shape/dtype match.
            for parent in node.parents:
                if (len(node2child[parent])==1
                    and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
                    and node.dtype == parent.dtype
                    and _is_data_mutable(parent)):
                    base = parent
                    break
        # TODO: add optimization for in-place incrementing
        node2memowner[node] = base
    return node2memowner
class MemCounter(object):
    """
    Hands out `MemLocation`s with consecutive indices 0,1,...
    The `count` attribute is the number allocated so far.
    """
    def __init__(self):
        self.count = 0
    def new_memloc(self, devtype):
        """Allocate the next MemLocation on device type `devtype`."""
        loc = MemLocation(self.count, devtype)
        self.count += 1
        return loc
def create_execution_graph(inputs, nodes_sorted, node2shape, node2memowner, node2dev):
    """Lower the sorted expression graph to a flat instruction list.

    Returns (ExecutionGraph, dict node -> MemLocation).  Byref nodes that
    own their memory get explicit Alloc (and BuildTup, for tuples)
    instructions; nodes sharing a buffer reuse the owner's MemLocation.
    """
    instrs = []
    counter = MemCounter()
    node2memloc = {}
    for node in nodes_sorted:
        # Fallback device: cpu, with the same index as the first parent.
        if node not in node2dev: node2dev[node] = core.Device(devtype="cpu",idx=node2dev[node.parents[0]].idx if len(node.parents)>0 else 0)
        if node.is_argument():
            write_loc = counter.new_memloc(node2dev[node].devtype)
            node2memloc[node] = write_loc
            i = inputs.index(node)
            instrs.append(LoadArgument(i, write_loc))
        else:
            read_locs = [node2memloc[parent] for parent in node.parents]
            if node.op.return_type == "byref":
                if node2memowner[node] is node:
                    if is_tensor(node): # just make one memory location for output
                        nodeshape = node2shape[node] if node.ndim > 0 else []
                        shape_locs = [node2memloc[shpel] for shpel in nodeshape]
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(Alloc(node.dtype, shape_locs, write_loc))
                    else: # if it's a tuple, we need to allocate all of the components, then build tuple
                        nodeshape = node2shape[node]
                        assert isinstance(nodeshape, tuple)
                        arr_locs = []
                        for (arrshp, arrtyp) in utils.safezip(nodeshape, node.typ):
                            arr_loc = counter.new_memloc(node2dev[node].devtype)
                            shape_locs = [node2memloc[shpel] for shpel in arrshp]
                            instrs.append(Alloc(arrtyp.dtype, shape_locs, arr_loc))
                            arr_locs.append(arr_loc)
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(BuildTup(node.typ, arr_locs, write_loc))
                else:
                    # If this node writes to another node's memory, the devices must be the same
                    # this should have been enforced in determine_devices()
                    assert node2dev[node] == node2dev[node2memowner[node]]
                    write_loc = node2memloc[node2memowner[node]]
                instrs.append(ReturnByRef(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
            else:
                assert node.op.return_type == "byval"
                write_loc = counter.new_memloc(node2dev[node].devtype)
                instrs.append(ReturnByVal(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
            node2memloc[node] = write_loc
    return ExecutionGraph(instrs, len(inputs), counter.count), node2memloc
def get_callable(op, input_types, devtype, prefer_python=False):
    """Select an implementation (python or native; cpu or gpu) for `op`.

    Resolution depends on the configured backend and op.available_impls;
    the native-cpu path falls back to the python impl when no native cpu
    implementation exists.
    """
    assert op.available_impls, "need to set op.available_impls"
    config = core.get_config()
    if (prefer_python or config["force_python_impl"]) and "python" in op.available_impls:
        return op.get_py_callable(input_types)
    elif config["backend"] == "python":
        if "python" in op.available_impls:
            return op.get_py_callable(input_types)
        else:
            assert devtype=="cpu", "can't use devtype=gpu with python backend"
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                raise RuntimeError("Can't find an implementation of %s suitable for python backend. Just have available_impls=%s"%(op,op.available_impls))
    else: # backend = native
        if devtype == "cpu":
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                # No native cpu impl: warn and use the python one.
                print "using python impl for",op
                return op.get_py_callable(input_types)
        else:
            if "native_gpu" in op.available_impls:
                return get_native_callable(op, input_types, "gpu")
            else:
                raise RuntimeError("Tried to put Op %s on the GPU but I only have a python impl :("%op)
def get_native_callable(op, input_types, devtype):
    """Compile `op` natively for `devtype` and wrap it as a callable.

    Fills in the bookkeeping fields on the op's NativeCompileInfo before
    handing it to nci2callable.
    """
    info = op.get_native_compile_info(input_types, devtype)
    info.op_str = str(op)
    info.return_type = op.return_type
    info.n_in = len(input_types)
    return nci2callable(info)
def add_transports(nodelist, node2dev, node2shape):
    """Insert Transport nodes wherever a child is on a different device
    than its parent, rewiring children to read the transported copy.

    Mutates node2dev and node2shape with entries for the new copy nodes.
    """
    node2child = defaultdict(list)
    for node in nodelist:
        for par in node.parents:
            node2child[par].append(node)
    # XXX look at native compilation info, gpu deref mask
    for node in nodelist:
        dev = node2dev[node]
        # One transported copy per target device, shared by all children
        # on that device.
        dev2copy = {}
        for child in node2child[node]:
            childdev = node2dev[child]
            if not childdev == dev:
                if childdev not in dev2copy:
                    nodecopy = core.Result(core.Transport(childdev), [node])
                    node2dev[nodecopy] = childdev
                    dev2copy[childdev] = nodecopy
                    node2shape[nodecopy] = node2shape[node]
                replace_parents(child, node, dev2copy[childdev])
def replace_parents(node, before, after):
    """Rewire `node`: every entry of node.parents that *is* `before`
    (identity comparison) becomes `after`, modifying the list in place."""
    node.parents[:] = [after if p is before else p for p in node.parents]
def run_compilation_pipeline(inputs, outputs, updates, givens):
    """
    Compiles the expression graph into an execution graph.
    """
    config = core.get_config()
    # Phase 1: simplification and analysis of expression graph
    # ------------------------------------------------------
    # Append update targets to the outputs so they survive simplification.
    logging.info("Simplification")
    outputs_updatetargs = outputs + [after for (_before, after) in updates]
    if givens: outputs_updatetargs = core.clone(outputs_updatetargs, dict(givens))
    # Do simplification + analysis pass on expression graph
    outputs_updatetargs_simple, analysis, _ = \
        core.simplify_and_analyze(outputs_updatetargs) if config["enable_simplification"] \
        else (outputs_updatetargs, core.analyze(outputs_updatetargs), {})
    # Phase 2: device targeting
    # ------------------------------------------------------
    logging.info("Device targeting")
    outputs_updatetargs_simple = cgt.core.clone(outputs_updatetargs_simple)
    analysis = core.analyze(outputs_updatetargs_simple)
    # XXX inefficient to just copy the graph and redo analysis
    nodelist = core.topsorted(outputs_updatetargs_simple)
    updatesrcs = [before for (before, _) in updates]
    # Update targets are the tail of the combined output list.
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2dev = determine_devices(nodelist, {targ:src for (src,targ) in zip(updatesrcs, updatetargs_simple)})
    add_transports(nodelist, node2dev, analysis["node2shape"])
    # Phase 3: build execution graph
    # ------------------------------------------------------
    # Sort nodes so that shape elements appear before a given node
    logging.info("Build execution graph")
    nodes_sorted = topsorted_shapes_first(outputs_updatetargs_simple, analysis["node2shape"])
    # For each node, figure out if its output should be written to a previous node's memory
    # (memowner : "memory owner")
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2memowner = determine_memowner(nodes_sorted, zip(updatesrcs, updatetargs_simple), node2dev)
    # Find the outputs we want to return
    outputs_simple = outputs_updatetargs_simple[:len(outputs)] # get rid
    # Generate execution graph
    eg, node2memloc = create_execution_graph(
        inputs, nodes_sorted, analysis["node2shape"], node2memowner, node2dev)
    # print execution graph
    if config["verbose"]:
        print 'begin'
        print '\n'.join(str(i)+'.) \t'+repr(instr) for (i,instr) in enumerate(eg.instrs))
        print 'end'
    # Phase 4: create C or Python interpreter for graph
    # ------------------------------------------------------
    interp = create_interpreter(inputs, outputs_simple, eg, node2memloc)
    # Done!
    return interp
# ================================================================
# Simple numeric eval via traversal
# ================================================================
def numeric_eval(output, arg2val):
    """Evaluate symbolic variable(s) directly, without compilation.

    output: a Node, or a list of Nodes, to evaluate by traversing the
        computation graph.
    arg2val: dict mapping each Argument node that `output` depends on to a
        numeric value.

    Returns the numeric value, or a list of values matching `output`.
    """
    if isinstance(output, core.Node):
        return _numeric_eval_listout([output], arg2val)[0]
    if isinstance(output, list):
        assert all(isinstance(x, core.Node) for x in output), "expected a list of Nodes"
        return _numeric_eval_listout(output, arg2val)
    raise ValueError("expected `output` to be a Node or a list of Nodes. Got an object of type %s"%type(output))
def _numeric_eval_listout(outputs, arg2val):
    """
    Evaluate `outputs` (a list of nodes) numerically by topological
    traversal; arg2val maps argument nodes to their numeric values.
    """
    assert isinstance(outputs, list)
    assert isinstance(arg2val, dict)
    node2val = {}
    for node in core.topsorted(outputs):
        if node.is_argument():
            val = arg2val[node]
        elif node.is_data():
            val = node.op.get_value()
        else:
            # Apply the node's op to its parents' already-computed values.
            parentvals = [node2val[par] for par in node.parents]
            val = core.py_numeric_apply(node, parentvals)
        node2val[node] = val
    return [node2val[node] for node in outputs]
################################################################
### Execution graph
################################################################
# MemInfo pairs a storage location with how an instruction accesses it.
MemInfo = namedtuple("MemInfo",["loc","access"])
# Access modes: an instruction either overwrites the location outright or
# accumulates (increments) into it.
MEM_OVERWRITE = "overwrite"
MEM_INCREMENT = "increment"
class ExecutionGraph(object):
    """Flat, ordered list of instructions plus sizing metadata."""
    def __init__(self, instrs, n_args, n_locs):
        # instrs: ordered Instr list; n_args: number of caller arguments;
        # n_locs: total number of memory locations the program uses.
        self.instrs = instrs
        self.n_args = n_args
        self.n_locs = n_locs
class MemLocation(object):
    """A slot in interpreter storage, tagged with its device type."""
    def __init__(self, idx, devtype):
        assert isinstance(idx, int)
        assert devtype in ("cpu", "gpu")
        self.index = idx
        self.devtype = devtype
        # TODO: dtype
    def __repr__(self):
        return "%%%i/%s" % (self.index, self.devtype)
# ================================================================
# Instructions
# ================================================================
class Instr(object):
    """Abstract instruction in an ExecutionGraph."""
    def fire(self, interp):
        """Execute this instruction against interpreter `interp`."""
        raise NotImplementedError
class LoadArgument(Instr):
    """Copy the `ind`-th caller argument into `write_loc`."""
    def __init__(self, ind, write_loc):
        self.ind = ind
        self.write_loc = write_loc
    def fire(self, interp):
        # Fetch the positional argument and stash it in our output slot.
        arg = interp.getarg(self.ind)
        interp.set(self.write_loc, arg)
    def __repr__(self):
        return "%s = LoadArg ind:%i" % (self.write_loc, self.ind)
class Alloc(Instr):
    """Ensure `write_loc` holds an array with the requested shape/dtype.

    The shape is read element-wise from the memlocs in `read_locs`; the
    buffer is reused across calls when the shape has not changed.
    """
    def __init__(self, dtype, read_locs, write_loc):
        self.dtype = dtype
        self.read_locs = read_locs
        self.write_loc = write_loc
    def fire(self, interp):
        shp = tuple(interp.get(loc) for loc in self.read_locs)
        current = interp.get(self.write_loc)
        # Reallocate only when there is no buffer yet or the shape differs.
        if current is None or current.shape != shp:
            interp.set(self.write_loc, np.ones(shp, self.dtype))
    def __repr__(self):
        return "%s = Alloc shp:%s dtype:%s" % (self.write_loc, str(self.read_locs), self.dtype)
class BuildTup(Instr):
    """Gather the values at `read_locs` into a python tuple at `write_loc`."""
    def __init__(self, typ, read_locs, write_loc):
        self.typ = typ
        self.read_locs = read_locs
        self.write_loc = write_loc
    def fire(self, interp):
        members = tuple(interp.get(loc) for loc in self.read_locs)
        interp.set(self.write_loc, members)
    def __repr__(self):
        return "%s = BuildTup args:%s" % (self.write_loc, str(self.read_locs))
class ReturnByRef(Instr):
    """Run an op that writes its result into a preallocated output buffer."""
    def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
        self.op = op
        self.input_types = input_types
        self.read_locs = read_locs
        self.write_loc = write_loc
        self._callable = None  # compiled lazily on first fire()
        self.node_props = node_props
    def get_callable(self):
        return get_callable(self.op, self.input_types, self.write_loc.devtype)
    def fire(self, interp):
        if self._callable is None:
            self._callable = self.get_callable()
        inputs = [interp.get(loc) for loc in self.read_locs]
        # Output buffer is passed in; the callable mutates it in place.
        self._callable.call(inputs, interp.get(self.write_loc))
    def __repr__(self):
        return "%s = ReturnByRef op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
class ReturnByVal(Instr):
    """Run an op that returns a fresh value; store the result at `write_loc`."""
    def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
        self.op = op
        self.input_types = input_types
        self.read_locs = read_locs
        self.write_loc = write_loc
        self._callable = None  # compiled lazily on first fire()
        self.node_props = node_props
    def get_callable(self):
        return get_callable(self.op, self.input_types, self.write_loc.devtype)
    def fire(self, interp):
        if self._callable is None:
            self._callable = self.get_callable()
        inputs = [interp.get(loc) for loc in self.read_locs]
        interp.set(self.write_loc, self._callable.call(inputs))
    def __repr__(self):
        return "%s = ReturnByVal op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
# ================================================================
# Compiling native code
# ================================================================
def nci2callable(nci):
    """Turn a NativeCompileInfo into a NativeCallable.

    Renders the templated source (substituting unique function/closure
    names derived from a content hash), compiles it into a cached shared
    library if not already present, loads the entry points, and builds the
    closure struct.
    """
    template_code = gen_templated_code(nci.includes, nci.closure_triples, nci.func_code)
    compile_info = get_compile_info()
    # Hash source + compiler flags so the cache key changes whenever the
    # produced binary would.
    prefix = utils.hash_seq1(template_code, compile_info["CPP_FLAGS"], *(src.code for src in nci.extra_srcs))
    d = dict(function=_funcname(prefix), closure=_closurename(prefix),setup=_setupname(prefix),teardown=_teardownname(prefix))
    fn_srcfile = core.SrcFile("c++",string.Template(template_code).substitute(d))
    srcfiles = [fn_srcfile]
    srcfiles.extend(core.SrcFile(sf.lang, string.Template(sf.code).substitute(d)) for sf in nci.extra_srcs)
    CACHE_ROOT = compile_info["CACHE_ROOT"]
    libpath = osp.join(CACHE_ROOT, prefix+".so")
    # Compile only on cache miss.
    if not osp.exists(libpath):
        tu = TranslationUnit(srcfiles, nci.link_flags)
        tu.compile(prefix, libpath)
    lib = get_or_load_lib(libpath)
    fptr = getattr(lib, _funcname(prefix))
    setup_fptr = getattr(lib, _setupname(prefix)) if nci.setup else None
    teardown_fptr = getattr(lib, _teardownname(prefix)) if nci.teardown else None
    cldata = _build_closure(nci.closure_triples)
    return core.NativeCallable(nci.n_in, nci.return_type, nci.op_str, fptr, cldata=cldata, setup_fptr=setup_fptr, teardown_fptr=teardown_fptr,
        store_objects=nci.store_objects)
def _funcname(prefix):
return "call_"+prefix
def _setupname(prefix):
return "setup_"+prefix
def _teardownname(prefix):
return "teardown_"+prefix
def _closurename(prefix):
return "closure_"+prefix
def gen_templated_code(includes, closure_info, func_code):
    """Assemble the full templated C++ source: #includes, then the closure
    struct typedef (if any), then the op's function code."""
    buf = StringIO()
    for header in ["cgt_common.h"] + includes:
        buf.write('#include "%s"\n' % header)
    gen_struct_code(closure_info, buf)
    buf.write(func_code)
    return buf.getvalue()
def gen_struct_code(triples, outstream):
    """Emit a C typedef for the `$closure` struct holding the op's closure
    fields; writes nothing when `triples` is None."""
    if triples is None:
        return
    outstream.write("typedef struct $closure {\n")
    for (fieldname, fieldtype, _val) in triples:
        outstream.write("%s %s;\n" % (_ctypes2str[fieldtype], fieldname))
    outstream.write("} $closure;\n")
# libpath -> loaded ctypes CDLL, so each shared library is loaded only once.
_LIBRARIES = {}
def get_or_load_lib(libname):
    """Load a shared library via ctypes, caching by path."""
    if libname not in _LIBRARIES:
        _LIBRARIES[libname] = ctypes.cdll.LoadLibrary(libname)
    return _LIBRARIES[libname]
class TranslationUnit(object):
    """All the input that goes into building a native binary for one or more ops"""
    def __init__(self, srcfiles, link_flags):
        # srcfiles: list of SrcFile(lang, code); link_flags: extra linker args.
        self.srcfiles = srcfiles
        self.link_flags = link_flags
    def compile(self, prefix, libpath):
        """
        Compiles all of the files, places them in the cache directory
        Then links them creating prefix.so
        """
        CACHE_ROOT = get_compile_info()["CACHE_ROOT"]
        cmds = ["cd %s"%CACHE_ROOT]
        objs = []
        # Write each source into the cache dir and queue its compile command.
        for (i,(lang,code)) in enumerate(self.srcfiles):
            if lang=="c++":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cpp"%i)
                cmds.append(_make_cpp_compile_cmd(srcpath))
            elif lang=="cuda":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cu"%i)
                cmds.append(_make_cuda_compile_cmd(srcpath))
            else:
                raise NotImplementedError
            with open(srcpath,"w") as fh: fh.write(code)
            objs.append(srcpath+".o")
        cmds.append(_make_link_cmd(objs, self.link_flags, libpath))
        # Chain with && so the first failing step aborts the whole build.
        bigcmd = " && ".join(cmds)
        call_and_print(bigcmd)
_COMPILE_CONFIG = None
def get_compile_info():
global _COMPILE_CONFIG
if _COMPILE_CONFIG is None:
config = core.get_config()
CGT_BUILD_ROOT = cgt.cycgt.cgt_build_root() #pylint: disable=E1101
cmake_info = {}
with open(osp.join(CGT_BUILD_ROOT,"build_info.txt")) as fh:
lines = fh.readlines()
for line in lines:
if ":=" not in line: print "skipping",line
lhs,rhs = line.split(":=")
lhs = lhs.strip()
rhs = rhs.strip()
cmake_info[lhs] = rhs
CUDA_ROOT = cmake_info["CUDA_ROOT"]
CGT_ENABLE_CUDA = cmake_info["CGT_ENABLE_CUDA"] in ["1","ON"]
CGT_ENABLE_CUDNN = cmake_info["CGT_ENABLE_CUDNN"] in ["1","ON"]
DEFINITIONS = "-DENABLE_CUDA" if CGT_ENABLE_CUDA else ""
CUDNN_ROOT = cmake_info["CUDNN_ROOT"]
_COMPILE_CONFIG = dict(
OPENBLAS_INCLUDE_DIR = osp.join(CGT_BUILD_ROOT,"OpenBLAS"),
CGT_INCLUDE_DIR = cmake_info["CGT_INCLUDE_DIR"],
CGT_LIBRARY_DIR = osp.join(CGT_BUILD_ROOT,"lib"),
CUDA_LIBRARY_DIR = osp.join(CUDA_ROOT,"lib"),
CUDA_INCLUDE_DIR = osp.join(CUDA_ROOT,"include"),
CUDA_LIBRARIES = cmake_info["CUDA_LIBRARIES"],
DEFINITIONS = DEFINITIONS,
CUDA_ROOT = CUDA_ROOT,
CUDNN_ROOT = CUDNN_ROOT,
CACHE_ROOT = osp.expanduser(config["cache_dir"]),
CGT_ENABLE_CUDA = CGT_ENABLE_CUDA,
CGT_ENABLE_CUDNN = CGT_ENABLE_CUDNN,
# CGT_LIBRARY = cmake_info["CGT_LIBRARY"],
)
includes = "-I"+_COMPILE_CONFIG["CGT_INCLUDE_DIR"]
includes += " -I"+_COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"]
link_flags = ""
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: includes += " -I"+_COMPILE_CONFIG["CUDA_INCLUDE_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]: includes += " -I"+_COMPILE_CONFIG["CUDNN_ROOT"]
_COMPILE_CONFIG["INCLUDES"] = includes
link_flags = "-lcgt -L"+_COMPILE_CONFIG["CGT_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: link_flags += " -L"+_COMPILE_CONFIG["CUDA_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]:
link_flags += " -L"+_COMPILE_CONFIG["CUDNN_ROOT"]
link_flags += " -Wl,-rpath,"+_COMPILE_CONFIG["CUDNN_ROOT"]
if sys.platform == "darwin":
link_flags += " -dynamiclib -Wl,-headerpad_max_install_names"
else:
link_flags += " -shared -rdynamic"
_COMPILE_CONFIG["LINK_FLAGS"] = link_flags
cpp_flags = "-fvisibility=hidden -std=c++11 -fPIC" + (" -O0 -g" if config["debug_cpp"] else " -O3 -DNDEBUG")
if sys.platform == "darwin": cpp_flags += " -stdlib=libc++"
_COMPILE_CONFIG["CPP_FLAGS"] = cpp_flags
CACHE_ROOT = _COMPILE_CONFIG["CACHE_ROOT"]
if not osp.exists(CACHE_ROOT):
os.makedirs(CACHE_ROOT)
return _COMPILE_CONFIG
def _make_cpp_compile_cmd(srcpath):
    """Shell command compiling one C++ source file into `srcpath`.o."""
    info = get_compile_info()
    return "c++ %(cpp_flags)s %(srcpath)s -c -o %(srcpath)s.o %(includes)s %(definitions)s"%dict(
        srcpath=srcpath, includes=info["INCLUDES"], definitions=info["DEFINITIONS"],
        cpp_flags=info["CPP_FLAGS"], cacheroot=info["CACHE_ROOT"])
def _make_cuda_compile_cmd(srcpath):
    """Shell command compiling one CUDA source file into `srcpath`.o via nvcc."""
    info = get_compile_info()
    return "nvcc %(srcpath)s -c -o %(srcpath)s.o -ccbin cc -m64 -Xcompiler -fPIC -Xcompiler -O3 -Xcompiler -arch -Xcompiler x86_64 %(includes)s %(definitions)s"%dict(
        srcpath=srcpath, includes=info["INCLUDES"], definitions=info["DEFINITIONS"])
def _make_link_cmd(objs, extra_link_flags, libpath):
    """Shell command linking object files into the shared library `libpath`."""
    info = get_compile_info()
    # On macOS, bake the install name into the produced dylib.
    iname = "-install_name %s"%osp.basename(libpath) if sys.platform=="darwin" else ""
    return r"c++ %(cpp_flags)s %(objnames)s %(link_flags)s %(iname)s -o %(libpath)s"%dict(
        objnames=" ".join(objs), includes=info["INCLUDES"], cpp_flags=info["CPP_FLAGS"], libpath=libpath,
        link_flags=info["LINK_FLAGS"]+" "+extra_link_flags, cacheroot=info["CACHE_ROOT"], iname=iname)
def call_and_print(cmd):
    """Echo `cmd` in green, then run it through the shell; raises
    CalledProcessError on a nonzero exit status."""
    print "\x1b[32m%s\x1b[0m"%cmd
    subprocess.check_call(cmd,shell=True)
# Maps ctypes scalar types to the C type names written into generated
# closure structs (see gen_struct_code).
# NOTE(review): ctypes.c_byte is a *signed* byte but is rendered as
# "uint8_t" -- presumably only used for non-negative flag fields; confirm.
_ctypes2str = {
    ctypes.c_byte : "uint8_t",
    ctypes.c_bool : "bool",
    ctypes.c_char : "char",
    ctypes.c_int : "int",
    ctypes.c_long : "long",
    ctypes.c_void_p : "void*",
    ctypes.c_double : "double",
    ctypes.c_float : "float"
}
_struct_cache = {} # because creating ctypes.Structure class is slow for some reason
def _build_closure(triples):
    """Pack (fieldname, ctype, value) triples into a ctypes Structure.

    Returns a NULL void* when there is no closure.  Structure classes are
    cached in _struct_cache, keyed by the pickled field list.
    """
    if triples is None:
        return ctypes.c_void_p(0)
    vals = []
    fields = []
    for (fieldname,fieldtype,val) in triples:
        vals.append(val)
        fields.append((fieldname,fieldtype))
    try:
        # Reuse a previously created Structure class for this field layout.
        key = cPickle.dumps(fields)
        S = _struct_cache[key]
    except KeyError:
        class S(ctypes.Structure):
            _fields_ = fields
        _struct_cache[key] = S
    closure = S(*vals)
    return closure
################################################################
### Interpreters
################################################################
class Interpreter(object):
    """Abstract base for objects that execute an ExecutionGraph."""
    def __call__(self, args):
        """Run the graph on `args` and return the outputs."""
        raise NotImplementedError
    def get(self, mem):
        """Read the value stored at MemLocation `mem`."""
        raise NotImplementedError
    def set(self, mem, val):
        """Store `val` at MemLocation `mem`."""
        raise NotImplementedError
    def getarg(self, i):
        """Fetch the i-th argument of the current call."""
        raise NotImplementedError
class SequentialInterpreter(Interpreter):
    """
    Runs an execution graph
    """
    def __init__(self, eg, output_locs, input_types, copy_outputs=True):
        self.eg = eg
        self.input_types = input_types
        self.output_locs = output_locs
        # One storage slot per memory location in the graph.
        self.storage = [None for _ in xrange(self.eg.n_locs)]
        self.args = None
        self.copy_outputs = copy_outputs
    def __call__(self, *args):
        assert len(args) == len(self.input_types), "Wrong number of inputs provided"
        # Coerce each argument to a valid array of the declared type.
        self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
        for instr in self.eg.instrs:
            if profiler.on: tstart = time.time()
            try:
                instr.fire(self)
            except Exception as e:
                # Print diagnostics, then re-raise the original exception.
                traceback.print_exc()
                if isinstance(instr, (ReturnByRef,ReturnByVal)):
                    if core.get_config()["debug"]:
                        # In debug mode the node carries the creation stack.
                        assert "stack" in instr.node_props
                        utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
                        print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
                        traceback.print_list(instr.node_props["stack"])
                        print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
                        raise e
                    else:
                        utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
                        raise e
                else:
                    utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
                    raise e
            if profiler.on: profiler.update(instr, time.time()-tstart)
        outputs = [self.get(loc) for loc in self.output_locs]
        # Copy because otherwise we might mess up the data when we call
        # the function again.  TODO: add option that prevents this behavior.
        if self.copy_outputs: outputs = map(_copy, outputs)
        return outputs
    def get(self, mem):
        return self.storage[mem.index]
    def set(self, mem, val):
        self.storage[mem.index] = val
    def getarg(self, i):
        return self.args[i]
# ================================================================
# Profiler
# ================================================================
class _Profiler(object):
    """
    Profiler for Python backend, i.e. Interpreter
    """
    def __init__(self):
        self.instr2stats = {}  # instr -> (fire count, total seconds)
        self.on = False        # sampling happens only while True
        self.t_total = 0.0     # total profiled wall time
    def start(self): self.on = True
    def stop(self): self.on = False
    def update(self, instr, elapsed):
        # Accumulate per-instruction count and elapsed wall time.
        (prevcount, prevtime) = self.instr2stats.get(instr, (0,0.0))
        self.instr2stats[instr] = (prevcount+1, prevtime+elapsed)
        self.t_total += elapsed
    def print_stats(self):
        """Print a table of time per Op, sorted by total time."""
        op2stats = {}
        # Collapse by Op, rather than instruction
        for (instr,(count,t)) in self.instr2stats.iteritems():
            if isinstance(instr, (ReturnByRef, ReturnByVal)):
                opkey = str(instr.op)
            elif isinstance(instr, Alloc):
                opkey = "Alloc{dtype=%s,ndim=%i}"%(instr.dtype, len(instr.read_locs))
            else:
                opkey = instr.__class__.__name__
            (prevcount, prevtime) = op2stats.get(opkey, (0, 0.0))
            op2stats[opkey] = (prevcount+count, prevtime+t)
        print "Total time elapsed: %.3g seconds"%self.t_total
        _print_heading("By Op")
        _print_stats(op2stats, self.t_total)
    def clear_stats(self):
        self.instr2stats = {}
        self.t_total = 0.0
# Module-level singleton consulted by SequentialInterpreter.__call__.
profiler = _Profiler()
def _print_heading(heading):
    """Print `heading` centered inside a 60-column asterisk banner."""
    heading = " " + heading + " "
    width = 60
    assert len(heading) < width-10
    print
    print "*"*width
    # Center the heading; extra padding goes on the right when uneven.
    padleft = (width-len(heading))//2
    padright = width-len(heading)-padleft
    print "*"*padleft + heading + "*"*padright
    print "*"*width
def _print_stats(key2stats, t_total):
    """Tabulate {key: (count, time)} sorted by time descending, with the
    per-key fraction of `t_total` and its cumulative sum."""
    rows = []
    for (key, (count,t)) in key2stats.iteritems():
        rows.append([str(key), count, t, t/t_total])
    rows = sorted(rows, key=lambda row: row[2], reverse=True)
    cumsum = 0
    for row in rows:
        cumsum += row[3]
        row.append(cumsum)
    # Deferred import: tabulate is only needed when stats are printed.
    from thirdparty.tabulate import tabulate
    print tabulate(rows, headers=["Instruction","Count","Time","Frac","Frac cumsum"])
def _copy(x):
if isinstance(x, np.ndarray): return x.copy()
elif isinstance(x, tuple): return tuple(el.copy() for el in x)
elif np.isscalar(x): return x # xxx is this case ok?
else: raise NotImplementedError
def typecheck_args(numargs, types):
    """Assert that the numeric arguments match their declared types.

    Checks the argument count first, then dtype/ndim agreement for every
    tensor-typed parameter.
    """
    n_got = len(numargs)
    n_expected = len(types)
    assert n_got == n_expected, "wrong number of arguments. got %i, expected %i"%(n_got, n_expected)
    for numarg, typ in zip(numargs, types):
        if isinstance(typ, core.TensorType):
            # Tensor arguments must agree in both dtype and rank.
            assert numarg.dtype == typ.dtype and numarg.ndim == typ.ndim
# ================================================================
# Utils
# ================================================================
def _list_to_json(xs):
return [x.to_json() for x in xs]
def _is_data_mutable(node):
return not node.is_input() and not isinstance(node.op, core.Constant)
| {
"content_hash": "42f0b3cde4633a93d03120f231182686",
"timestamp": "",
"source": "github",
"line_count": 896,
"max_line_length": 167,
"avg_line_length": 40.130580357142854,
"alnum_prop": 0.5845315237644965,
"repo_name": "codeAshu/cgt",
"id": "0e2f3ebc654ac99fb22bd58ed01c19aba66bed8a",
"size": "35957",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cgt/compilation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10057"
},
{
"name": "C++",
"bytes": "39559"
},
{
"name": "CMake",
"bytes": "21519"
},
{
"name": "Cuda",
"bytes": "5004"
},
{
"name": "Python",
"bytes": "265609"
}
],
"symlink_target": ""
} |
import re
import sys
import time
# On Python 2 substitute the lazy ``xrange`` for ``range`` so iterating
# large line counts does not materialize a list.
if sys.version_info < (3,):
    range = xrange
class Tailer(object):
    """\
    Implements tailing and heading functionality like GNU tail and head
    commands.

    The wrapped file object must be seekable; positions are manipulated
    directly via ``seek``/``tell``.
    """
    # Recognized line endings; '\r\n' is listed first so the split regex
    # prefers the two-character terminator.
    line_terminators = ('\r\n', '\n', '\r')
    def __init__(self, file, read_size=1024, end=False):
        # read_size: buffer size used when scanning for line terminators.
        self.read_size = read_size
        self.file = file
        self.start_pos = self.file.tell()
        if end:
            # Start at the end of the file (used when following).
            self.seek_end()
    def splitlines(self, data):
        """Split *data* on any of the recognized line terminators."""
        return re.split('|'.join(self.line_terminators), data)
    def seek_end(self):
        """Seek to the end of the file."""
        self.seek(0, 2)
    def seek(self, pos, whence=0):
        """Seek the underlying file to *pos* relative to *whence*."""
        self.file.seek(pos, whence)
    def read(self, read_size=None):
        """Read *read_size* units (or the rest of the file) and return
        ``(length_read, data)``."""
        if read_size:
            read_str = self.file.read(read_size)
        else:
            read_str = self.file.read()
        return len(read_str), read_str
    def seek_line_forward(self):
        """\
        Searches forward from the current file position for a line terminator
        and seeks to the character after it.

        Returns the new position, or None if no terminator was found.
        """
        pos = start_pos = self.file.tell()
        bytes_read, read_str = self.read(self.read_size)
        start = 0
        if bytes_read and read_str[0] in self.line_terminators:
            # The first character is a line terminator, don't count this one
            start += 1
        while bytes_read > 0:
            # Scan forwards, counting the newlines in this buffer-full
            i = start
            while i < bytes_read:
                if read_str[i] in self.line_terminators:
                    self.seek(pos + i + 1)
                    return self.file.tell()
                i += 1
            pos += self.read_size
            self.seek(pos)
            bytes_read, read_str = self.read(self.read_size)
        return None
    def seek_line(self):
        """\
        Searches backwards from the current file position for a line terminator
        and seeks to the character after it.

        Returns the new position, or None if the start of file is reached
        first (in which case the position is left at 0).
        """
        pos = end_pos = self.file.tell()
        read_size = self.read_size
        if pos > read_size:
            pos -= read_size
        else:
            # Near the start of the file: read everything before ``pos``.
            pos = 0
            read_size = end_pos
        self.seek(pos)
        bytes_read, read_str = self.read(read_size)
        if bytes_read and read_str[-1] in self.line_terminators:
            # The last character is a line terminator, don't count this one
            bytes_read -= 1
            if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
                # found crlf
                bytes_read -= 1
        while bytes_read > 0:
            # Scan backward, counting the newlines in this buffer-full
            i = bytes_read - 1
            while i >= 0:
                if read_str[i] in self.line_terminators:
                    self.seek(pos + i + 1)
                    return self.file.tell()
                i -= 1
            if pos == 0 or pos - self.read_size < 0:
                # Not enough lines in the buffer, send the whole file
                self.seek(0)
                return None
            pos -= self.read_size
            self.seek(pos)
            bytes_read, read_str = self.read(self.read_size)
        return None
    def tail(self, lines=10):
        """\
        Return the last lines of the file.
        """
        self.seek_end()
        end_pos = self.file.tell()
        for i in range(lines):
            if not self.seek_line():
                break
        # The trailing -1 drops the final line terminator — assumes the
        # file ends with one (TODO confirm for terminator-less last lines).
        data = self.file.read(end_pos - self.file.tell() - 1)
        if data:
            return self.splitlines(data)
        else:
            return []
    def head(self, lines=10):
        """\
        Return the top lines of the file.
        """
        self.seek(0)
        for i in range(lines):
            if not self.seek_line_forward():
                break
        end_pos = self.file.tell()
        self.seek(0)
        data = self.file.read(end_pos - 1)
        if data:
            return self.splitlines(data)
        else:
            return []
    def follow(self, delay=1.0):
        """\
        Iterator generator that returns lines as data is added to the file.

        Polls every *delay* seconds once the end of file is reached.
        Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
        """
        trailing = True
        while 1:
            where = self.file.tell()
            line = self.file.readline()
            if line:
                if trailing and line in self.line_terminators:
                    # This is just the line terminator added to the end of the file
                    # before a new line, ignore.
                    trailing = False
                    continue
                if line[-1] in self.line_terminators:
                    line = line[:-1]
                    # NOTE(review): ``line[-1:]`` is a single character and can
                    # never equal the two-character '\r\n'; this CRLF branch
                    # looks dead — confirm intent (likely meant to strip '\r').
                    if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
                        # found crlf
                        line = line[:-1]
                trailing = False
                yield line
            else:
                # Nothing new: rewind past the partial read and wait.
                trailing = True
                self.seek(where)
                time.sleep(delay)
    def __iter__(self):
        """Iterating a Tailer follows the file with the default delay."""
        return self.follow()
    def close(self):
        """Close the underlying file object."""
        self.file.close()
def tail(file, lines=10):
    """\
    Return the last lines of the file.

    :param file: a seekable file-like object opened for reading.
    :param lines: number of trailing lines to return (default 10).

    >>> try:
    ...     from StringIO import StringIO
    ... except ImportError:
    ...     from io import StringIO
    >>> f = StringIO()
    >>> for i in range(11):
    ...     _ = f.write('Line %d\\n' % (i + 1))
    >>> tail(f, 3)
    ['Line 9', 'Line 10', 'Line 11']
    """
    return Tailer(file).tail(lines)
def head(file, lines=10):
    """\
    Return the top lines of the file.

    :param file: a seekable file-like object opened for reading.
    :param lines: number of leading lines to return (default 10).

    >>> try:
    ...     from StringIO import StringIO
    ... except ImportError:
    ...     from io import StringIO
    >>> f = StringIO()
    >>> for i in range(11):
    ...     _ = f.write('Line %d\\n' % (i + 1))
    >>> head(f, 3)
    ['Line 1', 'Line 2', 'Line 3']
    """
    return Tailer(file).head(lines)
def follow(file, delay=1.0):
    """\
    Iterator generator that returns lines as data is added to the file.

    :param file: a seekable file-like object; following starts from its end.
    :param delay: seconds to sleep between polls once at end of file.

    >>> import os
    >>> f = open('test_follow.txt', 'w')
    >>> fo = open('test_follow.txt', 'r')
    >>> generator = follow(fo)
    >>> _ = f.write('Line 1\\n')
    >>> f.flush()
    >>> next(generator)
    'Line 1'
    >>> _ = f.write('Line 2\\n')
    >>> f.flush()
    >>> next(generator)
    'Line 2'
    >>> f.close()
    >>> fo.close()
    >>> os.remove('test_follow.txt')
    """
    return Tailer(file, end=True).follow(delay)
def _test():
import doctest
doctest.testmod()
def _main(filepath, options):
    """Command-line driver: print the head or tail of *filepath* per
    *options*, optionally following the file for appended data.

    Note: ``--lines`` and ``--follow`` combine — the tail is printed
    first and following continues from the current position.
    """
    tailer = Tailer(open(filepath, 'rb'))
    try:
        try:
            if options.lines > 0:
                if options.head:
                    # Heading and following are mutually exclusive.
                    if options.follow:
                        sys.stderr.write('Cannot follow from top of file.\n')
                        sys.exit(1)
                    lines = tailer.head(options.lines)
                else:
                    lines = tailer.tail(options.lines)
                for line in lines:
                    print(line)
            elif options.follow:
                # Seek to the end so we can follow
                tailer.seek_end()
            if options.follow:
                for line in tailer.follow(delay=options.sleep):
                    print(line)
        except KeyboardInterrupt:
            # Escape silently
            pass
    finally:
        tailer.close()
def main():
    """Parse command-line options and dispatch to ``_test`` or ``_main``."""
    from optparse import OptionParser
    import sys
    parser = OptionParser(usage='usage: %prog [options] filename')
    parser.add_option('-f', '--follow', dest='follow', default=False, action='store_true',
                      help='output appended data as the file grows')
    parser.add_option('-n', '--lines', dest='lines', default=10, type='int',
                      help='output the last N lines, instead of the last 10')
    parser.add_option('-t', '--top', dest='head', default=False, action='store_true',
                      help='output lines from the top instead of the bottom. Does not work with follow')
    parser.add_option('-s', '--sleep-interval', dest='sleep', default=1.0, metavar='S', type='float',
                      help='with -f, sleep for approximately S seconds between iterations')
    parser.add_option('', '--test', dest='test', default=False, action='store_true',
                      help='Run some basic tests')
    (options, args) = parser.parse_args()
    if options.test:
        _test()
    elif not len(args) == 1:
        # Exactly one filename is required.
        parser.print_help()
        sys.exit(1)
    else:
        _main(args[0], options)
# Allow the module to be executed directly as a command-line tool.
if __name__ == '__main__':
    main()
| {
"content_hash": "223249da1e27c94660682f05a92b72c0",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 104,
"avg_line_length": 28.18831168831169,
"alnum_prop": 0.5065653075328266,
"repo_name": "pybuilder/pybuilder",
"id": "1f8c1e162638ac5991fc63d8e36a62eddf1cc0d0",
"size": "8682",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/_vendor/tailer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1567"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "Perl",
"bytes": "4025"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "2699121"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
} |
"""
@copyright: 2016
@author: Steve Malenfant http://github.com/smalenfant
@author: Hank Beatty http://github.com/hbeatty
@organization: Cox Communications Inc. - Advanced Network Platforms
@license: Apache-2.0
"""
# -*- coding: utf-8 -*-
#
# This file is part of traffic-control-python.
#
# Copyright 2016 Cox Communications Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
#usage: traffic-ops.py [--list] [--host HOST]
from __future__ import print_function
import os
import sys
import argparse
import string
try:
import json
except:
import simplejson as json
try:
from traffic_control import TrafficOps
except ImportError:
print("Error: Traffic Control library must be installed: pip install traffic-control.",
file=sys.stderr)
sys.exit(1)
class TrafficOpsInventory(object):
    """Ansible dynamic-inventory script backed by the Traffic Ops API.

    Supports the two invocations Ansible expects of an inventory script:

    * ``--list``        -- print the whole inventory as JSON
    * ``--host <name>`` -- print the variables for a single host
    """
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--host')
        parser.add_argument('--list', action='store_true')
        options = parser.parse_args()
        try:
            self.to = TrafficOps('https://cms.kabletown.net', user='user', password='pass')
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.
            print("Error: Could not connect to Traffic Ops API", file=sys.stderr)
        if options.host:
            data = self.get_host(options.host)
            #print(json.dumps(data, indent=3))
        elif options.list:
            data = self.get_list()
            print(json.dumps(data, indent=3))
        else:
            print("usage: --list | --host <hostname>",
                  file=sys.stderr)
            sys.exit(1)
    def get_host(self, name):
        """Return the variable dict for *name*, or {} if Traffic Ops does
        not know the server."""
        host = self.to.get_server(name)
        data = {}
        if not host:
            return data
        if 'cachegroup' in host:
            # Also expose the cachegroup under the key 'group'.
            data['group'] = host['cachegroup']
        # ``items()`` works on both Python 2 and 3; the original
        # ``iteritems()`` is Python-2-only.
        for k, v in host.items():
            data[k] = v
        return data
    def get_list(self):
        """Return the full inventory: groups by server type and by
        cachegroup, plus per-host variables under ``_meta.hostvars``."""
        data = {
            '_meta': {
                'hostvars': {},
            },
        }
        hosts = self.to.get_servers()
        if not hosts:
            return data
        for host in hosts['response']:
            fqdn = host['hostName'] + "." + host['domainName']
            # check to see if the type (edge, mid, chs, crs, etc) exists
            if host['type'] not in data:
                data[host['type']] = {}
                data[host['type']]['children'] = []
            # check to see if the cachegroup is already in the type
            if host['cachegroup'] not in data[host['type']]['children']:
                data[host['type']]['children'].append(host['cachegroup'])
            # check to see if the cachegroup (a.k.a. Ansible group) exists
            if host['cachegroup'] not in data:
                data[host['cachegroup']] = {}
                data[host['cachegroup']]['hosts'] = []
            # add this host to the cachegroup (a.k.a. Ansible group)
            if fqdn not in data[host['cachegroup']]['hosts']:
                data[host['cachegroup']]['hosts'].append(fqdn)
            # add this host's vars to _meta
            data['_meta']['hostvars'][fqdn] = host
        return data
# Running the script performs the inventory lookup as a side effect of
# construction (argument parsing happens in __init__).
if __name__ == '__main__':
    TrafficOpsInventory()
| {
"content_hash": "aa8d25e66857226ca11ec294566fe51d",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 110,
"avg_line_length": 31.651315789473685,
"alnum_prop": 0.5512367491166078,
"repo_name": "hbeatty/traffic-control-python",
"id": "c967f4a2ea554a2e8ab80bd478131a337d4d2db4",
"size": "4833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/examples/traffic-ops.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18382"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from rest_framework.response import Response
from rest_framework import serializers, status
from django.conf import settings
from django.utils import timezone
from django.core.cache import cache as default_cache
from sentry.utils import json
from sentry.models import Relay
from sentry.api.base import Endpoint
from sentry.api.serializers import serialize
from sentry.relay.utils import get_header_relay_id, get_header_relay_signature
from semaphore import create_register_challenge, validate_register_response, \
get_register_response_relay_id, PublicKey
class RelayIdSerializer(serializers.Serializer):
    """Validates the ``relay_id`` field (lowercase UUID4 hex string)
    shared by all relay registration payloads."""
    relay_id = serializers.RegexField(
        r'^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$',
        required=True)
class RelayRegisterChallengeSerializer(RelayIdSerializer):
    """Payload of the initial registration request: relay id + public key."""
    public_key = serializers.CharField(max_length=64, required=True)
class RelayRegisterResponseSerializer(RelayIdSerializer):
    """Payload of the challenge response: relay id + challenge token."""
    token = serializers.CharField(required=True)
class RelayRegisterChallengeEndpoint(Endpoint):
    # Registration happens before any credentials exist, so the endpoint is
    # deliberately unauthenticated; trust comes from the signature header.
    authentication_classes = ()
    permission_classes = ()
    def check_allowed_relay(self, request, data):
        """
        Return True if the relay may register: the server runs in debug
        mode, the request comes from an internal IP, or the relay's public
        key is whitelisted.  (Does not raise.)
        """
        if (settings.DEBUG or
            request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS or
                data.get('public_key', None) in settings.SENTRY_RELAY_WHITELIST_PK):
            return True
        return False
    def post(self, request):
        """
        Requests to Register a Relay
        ````````````````````````````
        Registers the relay with the sentry installation. If a relay boots
        it will always attempt to invoke this endpoint.
        """
        try:
            json_data = json.loads(request.body)
        except ValueError:
            return Response({
                'detail': 'No valid json body',
            }, status=status.HTTP_400_BAD_REQUEST)
        serializer = RelayRegisterChallengeSerializer(data=json_data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        if not self.check_allowed_relay(request, json_data):
            return Response({
                'detail': 'Relay is not allowed to register',
            }, status=status.HTTP_401_UNAUTHORIZED)
        # The raw body plus its signature header form the challenge input.
        sig = get_header_relay_signature(request)
        if not sig:
            return Response({
                'detail': 'Missing relay signature',
            }, status=status.HTTP_400_BAD_REQUEST)
        try:
            challenge = create_register_challenge(request.body, sig)
        except Exception as exc:
            # Surface only the first line of the underlying error.
            return Response({
                'detail': str(exc).splitlines()[0],
            }, status=status.HTTP_400_BAD_REQUEST)
        relay_id = six.text_type(challenge['relay_id'])
        if relay_id != get_header_relay_id(request):
            return Response({
                'detail': 'relay_id in payload did not match header',
            }, status=status.HTTP_400_BAD_REQUEST)
        try:
            relay = Relay.objects.get(relay_id=relay_id)
        except Relay.DoesNotExist:
            pass
        else:
            if relay.public_key != six.text_type(challenge['public_key']):
                # This happens if we have an ID collision or someone copies an existing id
                return Response({
                    'detail': 'Attempted to register agent with a different public key',
                }, status=status.HTTP_400_BAD_REQUEST)
        # Stash the expected token and key for 60s; the response endpoint
        # validates the relay's answer against this cache entry.
        default_cache.set('relay-auth:%s' % relay_id, {
            'token': challenge['token'],
            'public_key': six.text_type(challenge['public_key']),
        }, 60)
        return Response(serialize({
            'relay_id': six.text_type(challenge['relay_id']),
            'token': challenge['token'],
        }))
class RelayRegisterResponseEndpoint(Endpoint):
    # Unauthenticated like the challenge endpoint; validated via signature.
    authentication_classes = ()
    permission_classes = ()
    def post(self, request):
        """
        Registers a Relay
        `````````````````
        Registers the relay with the sentry installation. If a relay boots
        it will always attempt to invoke this endpoint.
        """
        try:
            json_data = json.loads(request.body)
        except ValueError:
            return Response({
                'detail': 'No valid json body',
            }, status=status.HTTP_400_BAD_REQUEST)
        serializer = RelayRegisterResponseSerializer(data=json_data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        sig = get_header_relay_signature(request)
        if not sig:
            return Response({
                'detail': 'Missing relay signature',
            }, status=status.HTTP_400_BAD_REQUEST)
        relay_id = six.text_type(get_register_response_relay_id(request.body))
        if relay_id != get_header_relay_id(request):
            return Response({
                'detail': 'relay_id in payload did not match header',
            }, status=status.HTTP_400_BAD_REQUEST)
        # The challenge endpoint cached the expected key/token for 60s.
        params = default_cache.get('relay-auth:%s' % relay_id)
        if params is None:
            return Response({
                'detail': 'Challenge expired'
            }, status=status.HTTP_401_UNAUTHORIZED)
        key = PublicKey.parse(params['public_key'])
        try:
            validate_register_response(key, request.body, sig)
        except Exception as exc:
            # Surface only the first line of the underlying error.
            return Response({
                'detail': str(exc).splitlines()[0],
            }, status=status.HTTP_400_BAD_REQUEST)
        try:
            relay = Relay.objects.get(relay_id=relay_id)
        except Relay.DoesNotExist:
            # First successful registration for this relay id.
            relay = Relay.objects.create(
                relay_id=relay_id,
                public_key=params['public_key'],
            )
        else:
            relay.last_seen = timezone.now()
            relay.save()
        # One-shot challenge: drop the cache entry once consumed.
        default_cache.delete('relay-auth:%s' % relay_id)
        return Response(serialize({
            'relay_id': relay.relay_id,
        }))
| {
"content_hash": "5527f8b0e08d89bf300da5062e9a14d6",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 90,
"avg_line_length": 34.05494505494506,
"alnum_prop": 0.5992255566311714,
"repo_name": "looker/sentry",
"id": "c6ec437a131858ab685f0ac7f156f83b60e7e7db",
"size": "6198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/relay_register.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
import unittest
from unittest import TestCase
from app.dojo import *
from app.rooms import *
class TestPerson(TestCase):
    """Unit tests for the person types used by the room allocator."""
    def test_person_type_is_fellow(self):
        # A Fellow must report its person_type as 'fellow'.
        fellow1 = Fellow(2, 'ray atuhe')
        self.assertEqual(fellow1.person_type, 'fellow')
    def test_person_type_is_staff(self):
        # A Staff must report its person_type as 'staff'.
        staff1 = Staff(3, 'Lydia Ashaba')
        self.assertEqual(staff1.person_type, 'staff') | {
"content_hash": "9817b8a7c359b072cf53420accc498db",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.6884422110552764,
"repo_name": "atuhe/RoomAllocator",
"id": "c10d1ab6882474b2e85072375d54be212786a9ca",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/test_person.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15231"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from Cython.Build import cythonize
import numpy
# Build the Cython extension; numpy's include dir is needed because the
# .pyx module compiles against the numpy C API.
setup(ext_modules=cythonize("geometry_cy.pyx"),
      include_dirs=[numpy.get_include()])
# hack to copy build extension into this directory so we can use it without
# setting paths.
import glob
import shutil
for src in glob.glob("build/lib*/*"):
    shutil.copy(src, ".")
| {
"content_hash": "016e6a1dade62f6f7dcbe11ebdb7d7cd",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 27.384615384615383,
"alnum_prop": 0.7387640449438202,
"repo_name": "thehackerwithin/berkeley",
"id": "00a9020404c121204a1977541cd9ba342044cb2e",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_examples/cython_spring16/setup_cy.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9196"
},
{
"name": "C++",
"bytes": "9944"
},
{
"name": "Dockerfile",
"bytes": "1068"
},
{
"name": "Fortran",
"bytes": "434"
},
{
"name": "Gnuplot",
"bytes": "240"
},
{
"name": "HTML",
"bytes": "1901059"
},
{
"name": "Jupyter Notebook",
"bytes": "23122238"
},
{
"name": "Makefile",
"bytes": "1416"
},
{
"name": "PostScript",
"bytes": "287518"
},
{
"name": "Python",
"bytes": "62059"
},
{
"name": "R",
"bytes": "5431"
},
{
"name": "Shell",
"bytes": "1493"
},
{
"name": "TeX",
"bytes": "51016"
}
],
"symlink_target": ""
} |
from .message import Message
from .connection import Connection
class Mail(object):
    """Flask extension that sends mail through the system sendmail binary."""
    def __init__(self, app=None):
        # Support both direct construction and the app-factory pattern.
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """
        Initializes your mail settings from app.config
        Can be used to set up Mail at configuration time
        :param app: Flask application instance
        """
        cfg = app.config
        self.debug = cfg.get('MAIL_DEBUG', app.debug)
        self.mailer = cfg.get('MAIL_MAILER', '/usr/sbin/sendmail')
        self.mailer_flags = cfg.get('MAIL_MAILER_FLAGS', '-t')
        self.fail_silently = cfg.get('MAIL_FAIL_SILENTLY', True)
        self.max_emails = None
        # Sending is suppressed when configured off or when the app is in
        # testing mode.
        self.suppress = cfg.get('MAIL_SUPPRESS_SEND', False) or app.testing
        self.app = app
        # Register this extension instance on the application.
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['sendmail'] = self
    def send(self, message):
        """
        Sends message through system's sendmail client.
        :param message: Mail Message instance
        """
        connection = self.connect()
        message.send(connection)
    def send_message(self, *args, **kwargs):
        """
        Shortcut for send(msg).
        Takes same arguments as Message constructor.
        """
        msg = Message(*args, **kwargs)
        self.send(msg)
    def connect(self, max_emails=None):
        """
        Opens a connection to the system's sendmail client.
        """
        return Connection(self, max_emails)
| {
"content_hash": "6f00920778a0e7ab36d31e23b4f19f7a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 73,
"avg_line_length": 27.678571428571427,
"alnum_prop": 0.5980645161290322,
"repo_name": "plaes/flask-sendmail",
"id": "2bed78ddac889bd63d1448e72c50e0c1e43efaa3",
"size": "1550",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_sendmail/mail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12195"
}
],
"symlink_target": ""
} |
import time
import hashlib
def checksign(paras, appSecret):
    """Verify the MD5 signature of a request parameter dict.

    The expected signature is the MD5 hex digest of every parameter value
    (excluding 'sign') concatenated in key-sorted order, with the shared
    secret contributing under the 'appSecret' key.

    Args:
        paras: dict of str -> str request parameters; must contain 'appKey'
            and 'sign'.  NOTE: mutated — 'appSecret' is inserted when absent
            (side effect preserved from the original implementation).
        appSecret: shared secret used when 'appSecret' is not already set.

    Returns:
        True if the computed digest matches ``paras['sign']``.

    Raises:
        Exception: if 'appKey' is missing.
    """
    if 'appKey' not in paras:
        raise Exception('No appKey.')
    if 'appSecret' not in paras:
        paras['appSecret'] = appSecret
    origin = ''.join(
        value for key, value in sorted(paras.items()) if key != 'sign'
    )
    # Encode explicitly: hashlib requires bytes on Python 3 (the original
    # passed a str, which raised TypeError there).
    sign = hashlib.md5(origin.encode('utf-8')).hexdigest()
    return sign == paras['sign']
# Importable utility module only; no command-line behavior.
if __name__ == '__main__':
    pass
| {
"content_hash": "120d8cec98972f34b92622a352890044",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 57,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.5591836734693878,
"repo_name": "listen-lavender/pholcus",
"id": "26fdafd79de6d2baebe8dec6ecf992549049d644",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gds/util/validate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "631052"
},
{
"name": "HTML",
"bytes": "11797"
},
{
"name": "JavaScript",
"bytes": "803731"
},
{
"name": "Python",
"bytes": "167244"
},
{
"name": "Vue",
"bytes": "52822"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from base64 import b64encode
import tinify
from tinify import Client, AccountError, ClientError, ConnectionError, ServerError
import requests
import pytest
from helper import *
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# Shrink the client's delay between retries so the retry tests run quickly.
Client.RETRY_DELAY = 10
class TinifyClientRequestWhenValid(TestHelper):
    """Requests against a healthy endpoint: auth header, body and
    content-type handling, user agent, and compression-count updates."""
    def setUp(self):
        # NOTE(review): super(type(self), self) would recurse if this class
        # were subclassed further; acceptable for leaf test classes.
        super(type(self), self).setUp()
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', **{
            'compression-count': 12
        })
    def test_should_issue_request(self):
        Client('key').request('GET', '/')
        self.assertEqual(self.request.headers['authorization'], 'Basic {0}'.format(
            b64encode(b'api:key').decode('ascii')))
    def test_should_issue_request_without_body_when_options_are_empty(self):
        Client('key').request('GET', '/', {})
        self.assertEqual(self.request.body, b'')
    def test_should_issue_request_without_content_type_when_options_are_empty(self):
        Client('key').request('GET', '/', {})
        self.assertIsNone(self.request.headers.get('content-type'))
    def test_should_issue_request_with_json_body(self):
        Client('key').request('GET', '/', {'hello': 'world'})
        self.assertEqual(self.request.headers['content-type'], 'application/json')
        self.assertEqual(self.request.body, b'{"hello":"world"}')
    def test_should_issue_request_with_user_agent(self):
        Client('key').request('GET', '/')
        self.assertEqual(self.request.headers['user-agent'], Client.USER_AGENT)
    def test_should_update_compression_count(self):
        Client('key').request('GET', '/')
        self.assertEqual(tinify.compression_count, 12)
class TinifyClientRequestWhenValidWithAppId(TestHelper):
    """A client constructed with an app identifier appends it to the user agent."""
    def setUp(self):
        super(type(self), self).setUp()
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', **{
            'compression-count': 12
        })
    def test_should_issue_request_with_user_agent(self):
        Client('key', 'TestApp/0.2').request('GET', '/')
        self.assertEqual(self.request.headers['user-agent'], Client.USER_AGENT + ' TestApp/0.2')
class TinifyClientRequestWhenValidWithProxy(TestHelper):
    """Requests routed through an authenticated HTTP proxy."""
    def setUp(self):
        super(type(self), self).setUp()
        httpretty.register_uri(httpretty.CONNECT, 'http://localhost:8080', **{
            'compression-count': 12
        })
    @pytest.mark.skip(reason="https://github.com/gabrielfalcao/HTTPretty/issues/122")
    def test_should_issue_request_with_proxy_authorization(self):
        Client('key', None, 'http://user:pass@localhost:8080').request('GET', '/')
        self.assertEqual(self.request.headers['proxy-authorization'], 'Basic dXNlcjpwYXNz')
class TinifyClientRequestWithTimeoutRepeatedly(TestHelper):
    """Persistent timeouts exhaust the retries and surface as ConnectionError."""
    @patch('requests.sessions.Session.request', RaiseException(requests.exceptions.Timeout))
    def test_should_raise_connection_error(self):
        with self.assertRaises(ConnectionError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Timeout while connecting', str(context.exception))
    @patch('requests.sessions.Session.request', RaiseException(requests.exceptions.Timeout))
    def test_should_raise_connection_error_with_cause(self):
        with self.assertRaises(ConnectionError) as context:
            Client('key').request('GET', '/')
        self.assertIsInstance(context.exception.__cause__, requests.exceptions.Timeout)
class TinifyClientRequestWithTimeoutOnce(TestHelper):
    """A single timeout is retried and the retry succeeds."""
    @patch('requests.sessions.Session.request')
    def test_should_issue_request(self, mock):
        mock.side_effect = RaiseException(requests.exceptions.Timeout, num=1)
        mock.return_value = requests.Response()
        mock.return_value.status_code = 201
        self.assertIsInstance(Client('key').request('GET', '/', {}), requests.Response)
class TinifyClientRequestWithConnectionErrorRepeatedly(TestHelper):
    """Persistent connection failures surface as ConnectionError with cause."""
    @patch('requests.sessions.Session.request', RaiseException(requests.exceptions.ConnectionError('connection error')))
    def test_should_raise_connection_error(self):
        with self.assertRaises(ConnectionError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Error while connecting: connection error', str(context.exception))
    @patch('requests.sessions.Session.request', RaiseException(requests.exceptions.ConnectionError('connection error')))
    def test_should_raise_connection_error_with_cause(self):
        with self.assertRaises(ConnectionError) as context:
            Client('key').request('GET', '/')
        self.assertIsInstance(context.exception.__cause__, requests.exceptions.ConnectionError)
class TinifyClientRequestWithConnectionErrorOnce(TestHelper):
    """A single connection failure is retried and the retry succeeds."""
    @patch('requests.sessions.Session.request')
    def test_should_issue_request(self, mock):
        mock.side_effect = RaiseException(requests.exceptions.ConnectionError, num=1)
        mock.return_value = requests.Response()
        mock.return_value.status_code = 201
        self.assertIsInstance(Client('key').request('GET', '/', {}), requests.Response)
class TinifyClientRequestWithSomeErrorRepeatedly(TestHelper):
    """Arbitrary repeated errors are wrapped in ConnectionError."""
    @patch('requests.sessions.Session.request', RaiseException(RuntimeError('some error')))
    def test_should_raise_connection_error(self):
        with self.assertRaises(ConnectionError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Error while connecting: some error', str(context.exception))
class TinifyClientRequestWithSomeErrorOnce(TestHelper):
    """A single arbitrary error is retried and the retry succeeds."""
    @patch('requests.sessions.Session.request')
    def test_should_issue_request(self, mock):
        mock.side_effect = RaiseException(RuntimeError('some error'), num=1)
        mock.return_value = requests.Response()
        mock.return_value.status_code = 201
        self.assertIsInstance(Client('key').request('GET', '/', {}), requests.Response)
class TinifyClientRequestWithServerErrorRepeatedly(TestHelper):
    """A persistent 5xx response surfaces as ServerError."""
    def test_should_raise_server_error(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', status=584,
            body='{"error":"InternalServerError","message":"Oops!"}')
        with self.assertRaises(ServerError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Oops! (HTTP 584/InternalServerError)', str(context.exception))
class TinifyClientRequestWithServerErrorOnce(TestHelper):
    """A single 5xx response is retried and the retry succeeds."""
    def test_should_issue_request(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/',
            responses=[
                httpretty.Response(body='{"error":"InternalServerError","message":"Oops!"}', status=584),
                httpretty.Response(body='all good', status=201),
            ])
        response = Client('key').request('GET', '/')
        self.assertEqual('201', str(response.status_code))
class TinifyClientRequestWithBadServerResponseRepeatedly(TestHelper):
    """A persistent unparseable 5xx body surfaces as a ParseError ServerError."""
    def test_should_raise_server_error(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', status=543,
            body='<!-- this is not json -->')
        with self.assertRaises(ServerError) as context:
            Client('key').request('GET', '/')
        msg = r'Error while parsing response: .* \(HTTP 543/ParseError\)'
        self.assertRegex(str(context.exception), msg)
class TinifyClientRequestWithBadServerResponseOnce(TestHelper):
    """A single unparseable 5xx body is retried and the retry succeeds."""
    def test_should_issue_request(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/',
            responses=[
                httpretty.Response(body='<!-- this is not json -->', status=543),
                httpretty.Response(body='all good', status=201),
            ])
        response = Client('key').request('GET', '/')
        self.assertEqual('201', str(response.status_code))
class TinifyClientRequestWithClientError(TestHelper):
    """A 4xx response surfaces as ClientError (no retry assertion here)."""
    def test_should_raise_client_error(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', status=492,
            body='{"error":"BadRequest","message":"Oops!"}')
        with self.assertRaises(ClientError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Oops! (HTTP 492/BadRequest)', str(context.exception))
class TinifyClientRequestWithBadCredentialsResponse(TestHelper):
    """A 401 response surfaces as AccountError."""
    def test_should_raise_account_error(self):
        httpretty.register_uri(httpretty.GET, 'https://api.tinify.com/', status=401,
            body='{"error":"Unauthorized","message":"Oops!"}')
        with self.assertRaises(AccountError) as context:
            Client('key').request('GET', '/')
        self.assertEqual('Oops! (HTTP 401/Unauthorized)', str(context.exception))
| {
"content_hash": "62b9c766ba757fe9c3acfa6ce5840ec5",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 120,
"avg_line_length": 44.85279187817259,
"alnum_prop": 0.6806247170665459,
"repo_name": "tinify/tinify-python",
"id": "ec291657e502f7b25e0385bc61031676650b45ad",
"size": "8860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/tinify_client_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43039"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
} |
import os
from flask.helpers import send_from_directory
from trytond.model import ModelSQL, fields
from trytond.pool import Pool, PoolMeta
from nereid import current_app, route, render_template, request, jsonify, \
template_filter
from flask_babel import Locale, get_locale, format_currency
from trytond.pyson import Eval, Not
__metaclass__ = PoolMeta
__all__ = [
'Company', 'WebShop', 'BannerCategory', 'Banner', 'Article',
'Website', 'ArticleCategory', 'MenuItem'
]
#: Get the static folder. The static folder also
#: goes into the site packages
#: (resolved relative to this module's own location so the bundled
#: assets are found even after installation).
STATIC_FOLDER = os.path.join(
    os.path.abspath(
        os.path.dirname(__file__)
    ), 'static'
)
class Company:
    """Extend ``company.company`` with a logo stored as a nereid static file."""
    __name__ = "company.company"
    # Many2One to a nereid static file; select=True adds a DB index.
    logo = fields.Many2One("nereid.static.file", "Logo", select=True)
class WebShop(ModelSQL):
    "website"
    __name__ = "nereid.webshop"
    @classmethod
    @route("/static-webshop/<path:filename>", methods=["GET"])
    def send_static_file(cls, filename):
        """Serve a file from the webshop's bundled static folder.

        :param filename: path of the requested file relative to
            ``STATIC_FOLDER``.
        :return: a Flask response streaming the file.
        """
        # Honour the application-level max-age so browsers cache assets.
        # (First parameter renamed ``self`` -> ``cls``: this is a
        # classmethod, so the first argument is the class.)
        cache_timeout = current_app.get_send_file_max_age(filename)
        return send_from_directory(
            STATIC_FOLDER, filename,
            cache_timeout=cache_timeout
        )
class BannerCategory:
    """Collection of related Banners"""
    __name__ = 'nereid.cms.banner.category'
    @staticmethod
    def check_xml_record(records, values):
        # Always allow editing: Tryton normally forbids modifying records
        # that were created from XML data files.
        return True
class Banner:
    """Banner for CMS"""
    __name__ = 'nereid.cms.banner'
    @staticmethod
    def check_xml_record(records, values):
        # Always allow editing: Tryton normally forbids modifying records
        # that were created from XML data files.
        return True
class Article:
    "CMS Articles"
    __name__ = 'nereid.cms.article'
    @staticmethod
    def check_xml_record(records, values):
        """The webshop module creates a bunch of commonly used articles on
        webshops. Since tryton does not allow records created via XML to be
        edited, this method explicitly allows users to modify the articles
        created by the module.

        :param records: records being checked (unused here).
        :param values: values being written (unused here).
        :return: always True, i.e. editing is always permitted.
        """
        return True
class ArticleCategory:
    "CMS Article Category"
    __name__ = 'nereid.cms.article.category'
    @staticmethod
    def check_xml_record(records, values):
        """The webshop module creates a bunch of commonly used article category on
        webshops. Since tryton does not allow records created via XML to be
        edited, this method explicitly allows users to modify the article
        category created by the module.

        :param records: records being checked (unused here).
        :param values: values being written (unused here).
        :return: always True, i.e. editing is always permitted.
        """
        return True
class Website:
    "Nereid Website"
    __name__ = 'nereid.website'
    # Root of the main CMS navigation menu tree.
    cms_root_menu = fields.Many2One(
        'nereid.cms.menuitem', "CMS root menu", ondelete='RESTRICT',
        select=True,
    )
    show_site_message = fields.Boolean('Show Site Message')
    # Editable and required only while show_site_message is checked.
    site_message = fields.Char(
        'Site Message',
        states={
            'readonly': Not(Eval('show_site_message', False)),
            'required': Eval('show_site_message', False)
        },
        depends=['show_site_message']
    )
    copyright_year_range = fields.Char('Copyright Year Range')
    # Root of the footer menu tree.
    cms_root_footer = fields.Many2One(
        'nereid.cms.menuitem', "CMS root Footer", ondelete='RESTRICT',
        select=True,
    )
    homepage_menu = fields.Many2One(
        'nereid.cms.menuitem', "Homepage Menu", ondelete='RESTRICT',
        select=True,
    )
    @classmethod
    @route('/sitemap', methods=["GET"])
    def render_sitemap(cls):
        """
        Return the sitemap.

        Renders ``sitemap.jinja`` with all root product tree nodes
        (nodes with no parent), ordered by their sequence.
        """
        Node = Pool().get('product.tree_node')
        # Search for nodes, sort by sequence.
        nodes = Node.search([
            ('parent', '=', None),
        ], order=[
            ('sequence', 'ASC'),
        ])
        return render_template('sitemap.jinja', nodes=nodes)
    @classmethod
    def auto_complete(cls, phrase):
        """
        Customizable method which returns a list of dictionaries
        according to the search query. The search service used can
        be modified in downstream modules.
        The front-end expects a jsonified list of dictionaries. For example,
        a downstream implementation of this method could return -:
        [
            ...
            {
                "value": "<suggestion string>"
            }, {
                "value": "Nexus 6"
            }
            ...
        ]

        The base implementation returns no suggestions.
        """
        return []
    @classmethod
    @route('/search-auto-complete')
    def search_auto_complete(cls):
        """
        Handler for auto-completing search.

        Reads the ``q`` query parameter and returns the suggestions of
        :meth:`auto_complete` as JSON under the key ``results``.
        """
        return jsonify(results=cls.auto_complete(
            request.args.get('q', '')
        ))
    @classmethod
    @route('/search')
    def quick_search(cls):
        """
        Downstream implementation of quick_search().
        TODO:
            * Add article search.
        """
        return super(Website, cls).quick_search()
    @staticmethod
    def default_cms_root_footer():
        """
        Get default record from xml

        Resolves the database id of the ``cms_root_footer`` record shipped
        in this module's XML data.
        """
        ModelData = Pool().get('ir.model.data')
        menu_item_id = ModelData.get_id("nereid_webshop", "cms_root_footer")
        return menu_item_id
    @classmethod
    @template_filter('fullcurrencyformat')
    def fullcurrencyformat(cls, amount, code, format=None):
        """
        Format currency to include symbol and abbreviation.
        ex- u'$ 199.00 USD'
        """
        locale = get_locale()
        locale = Locale.parse(locale)
        if not format:
            # NOTE(review): relies on babel exposing a default pattern under
            # the None key of currency_formats — verify against the pinned
            # babel version (newer babel uses the 'standard' key instead).
            format = locale.currency_formats.get(format)
        # u'\xa4' is the CLDR currency placeholder; a doubled placeholder
        # renders the ISO currency code (e.g. "USD") after the amount.
        if u'\xa4\xa4' not in format.pattern:
            format.pattern += u' \xa4\xa4'
        return format_currency(amount, code, format.pattern)
class MenuItem:
    """Extend ``nereid.cms.menuitem`` so menu items may link to catalog models."""
    __name__ = 'nereid.cms.menuitem'
    @staticmethod
    def check_xml_record(records, values):
        # Permit editing of menu items created from XML data files.
        return True
    @classmethod
    def allowed_models(cls):
        """Add product tree nodes and products to the models a menu item
        may point to, without duplicating entries already present."""
        res = super(MenuItem, cls).allowed_models()
        for model_entry in (
                ('product.tree_node', 'Tree Node'),
                ('product.product', 'Product')):
            if model_entry not in res:
                res.append(model_entry)
        return res
| {
"content_hash": "4de653acad5c863340fefa1cafc16299",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 82,
"avg_line_length": 27.345132743362832,
"alnum_prop": 0.5982200647249191,
"repo_name": "sharoonthomas/nereid-webshop",
"id": "9c901c7b742e5c9ffd26a7d840527b50e182583c",
"size": "6204",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "webshop.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61685"
},
{
"name": "HTML",
"bytes": "221213"
},
{
"name": "JavaScript",
"bytes": "29359"
},
{
"name": "Python",
"bytes": "121308"
}
],
"symlink_target": ""
} |
from subprocess import Popen, PIPE
from plyer.facades import UniqueID
from plyer.utils import whereis_exe
from os import environ
class OSXUniqueID(UniqueID):
    """UniqueID provider for OS X based on the IOPlatformSerialNumber."""
    def _get_uid(self):
        """Return the hardware serial number parsed from ``ioreg -l``
        output, or None if it cannot be found.
        """
        # Force the C locale so the ioreg output format is predictable.
        old_lang = environ.get('LANG')
        environ['LANG'] = 'C'
        try:
            ioreg_process = Popen(["ioreg", "-l"], stdout=PIPE)
            grep_process = Popen(["grep", "IOPlatformSerialNumber"],
                                 stdin=ioreg_process.stdout, stdout=PIPE)
            # Close our copy of the pipe so ioreg gets SIGPIPE if grep exits.
            ioreg_process.stdout.close()
            output = grep_process.communicate()[0]
        finally:
            # Restore the previous locale.  The original code crashed here
            # with a TypeError when LANG was not set at all (old_lang None),
            # and never restored LANG if the subprocess calls raised.
            if old_lang is None:
                environ.pop('LANG', None)
            else:
                environ['LANG'] = old_lang
        if output:
            # The matched line looks like:
            #   "IOPlatformSerialNumber" = "C02XXXXXXXXX"
            # so the 4th whitespace token is the quoted serial number.
            return output.split()[3][1:-1]
        else:
            return None
def instance():
    """Return an OSXUniqueID if the ``ioreg`` tool is available,
    otherwise warn on stderr and fall back to the generic facade."""
    import sys
    if not whereis_exe('ioreg'):
        sys.stderr.write("ioreg not found.")
        return UniqueID()
    return OSXUniqueID()
| {
"content_hash": "af8cc068b84168bed28863ae002c463d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 64,
"avg_line_length": 25.25,
"alnum_prop": 0.6101485148514851,
"repo_name": "cleett/plyer",
"id": "51ba169cdc17319e0808bf6e511b987fd590332a",
"size": "808",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plyer/platforms/macosx/uniqueid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "868"
},
{
"name": "Python",
"bytes": "178546"
}
],
"symlink_target": ""
} |
# Import a crawled JSON file into the local MongoDB 'newspapers' database.
# Usage: python import-to-mongodb.py <collection> <json_file>
# NOTE: Python 2 script (print statements, `except Exception, e` syntax).
import sys
import pymongo
import json
print "import crawled json file into mongodb 'newspapers' database."
if len(sys.argv) < 3:
    print "input as [collection] [json_file]"
    exit(1)
# pymongo.Connection is the legacy (pre-MongoClient) connection class.
connection = pymongo.Connection("localhost", 27017)
news_database = connection.newspapers
news_collection = news_database[sys.argv[1]]
json_file_name = sys.argv[2]
try:
    with open(json_file_name, mode='r') as json_file:
        # The file is expected to contain one JSON array of item objects.
        items = json.loads(json_file.read())
        # NOTE(review): this close() is redundant — the `with` block
        # already closes the file on exit.
        json_file.close()
except Exception, e:
    # NOTE(review): `raise e` loses the original traceback in Python 2;
    # a bare `raise` would preserve it.
    raise e
for item in items:
    news_collection.save(item)
print len(items), " items saved to mongodb."
| {
"content_hash": "0d54e44575fd2b2caa3b5b3e945a92d1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 23.653846153846153,
"alnum_prop": 0.697560975609756,
"repo_name": "ShiZhan/newspapers",
"id": "aecaf9c70f27dece7ad8643f148116c774aa8ab5",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/import-to-mongodb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10107"
}
],
"symlink_target": ""
} |
import galena
@galena.Component
class Health(object):
    """Component with a single attribute and no required components."""
    # default value of the component's only attribute
    health = 1
@galena.Component
class Velocity(object):
    """Component that declares a dependency on Health."""
    required_components = (Health,)
    speed = 10
    # presumably a heading in degrees — TODO confirm against consumers
    direction = 180
@galena.Component
class Shield(object):
    """Component that declares dependencies on both Health and Velocity."""
    required_components = (Health, Velocity)
    value = 10
| {
"content_hash": "aff4b3506fddaa0df0e787bedcb82a1a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 44,
"avg_line_length": 13.80952380952381,
"alnum_prop": 0.6862068965517242,
"repo_name": "Remolten/galena",
"id": "1efa3ebbc330d79d684826b87f49dc11f57f1fb5",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/component_classes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23867"
}
],
"symlink_target": ""
} |
from app.card import Card
class TestSingleCard:
    """A playing card renders as exactly two characters: rank then suit."""
    def test_ace_of_spades(self):
        ace_of_spades = Card("A", "s")
        assert str(ace_of_spades) == "As"
    def test_four_of_hearts(self):
        four_of_hearts = Card("4", "h")
        assert str(four_of_hearts) == "4h"
    def test_two_of_diamonds(self):
        two_of_diamonds = Card("2", "d")
        assert str(two_of_diamonds) == "2d"
    def test_queen_of_clubs(self):
        # A fresh card is built for every comparison on purpose:
        # correctness must not depend on caching of earlier cards.
        assert str(Card("Q", "c")) == "Qc"  # rank and suit both correct
        assert str(Card("Q", "c")) != "Qd"  # suit wrong
        assert str(Card("Q", "c")) != "Jc"  # rank wrong
        assert str(Card("Q", "c")) != "5h"  # both wrong
        assert str(Card("Q", "c")) != "2d"  # both wrong; equals an earlier test's card
    def test_card_length(self):
        assert len(str(Card("T", "d"))) == 2
        assert len(str(Card("10", "d"))) != 2
class TestRank:
    """The 13 ranks determine the high card, pairs, two pair, trips,
    straights, full houses, quads, and 'half' of a straight flush."""
    def test_card1(self):
        ace = Card("A", "s")
        assert ace.get_rank() == "A"
        assert ace.get_rank() != "s"
    def test_card2(self):
        six = Card("6", "d")
        assert six.get_rank() == "6"
        assert six.get_rank() != "d"
    def test_card3(self):
        five = Card("5", "s")
        assert five.get_rank() == "5"
        assert five.get_rank() != "T"
        assert five.get_rank() != "6"
        assert five.get_rank() != "s"
        assert five.get_rank() != "c"
    def test_number_of_ranks(self):
        assert len(Card.RANKS) == 13
    def test_all_ranks_there(self):
        """Default string sorting of ranks is NOT the poker value order."""
        alphabetical = ['2', '3', '4', '5', '6', '7', '8', '9', 'A', 'J', 'K', 'Q', 'T']
        by_value = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
        assert sorted(Card.RANKS) == alphabetical
        assert sorted(Card.RANKS) != by_value
class TestSuit:
    """The 4 suits determine flushes and the other 'half' of a straight
    flush."""
    def test_card1(self):
        jack = Card("J", "c")
        assert jack.get_suit() == "c"
        assert jack.get_suit() != "J"
    def test_card2(self):
        nine = Card("9", "h")
        assert nine.get_suit() == "h"
        assert nine.get_suit() != "9"
    def test_card3(self):
        three = Card("3", "c")
        assert three.get_suit() == "c"
        assert three.get_suit() != "K"
        assert three.get_suit() != "9"
        assert three.get_suit() != "3"
        assert three.get_suit() != "d"
        assert three.get_suit() != "h"
        assert three.get_suit() != "s"
    def test_number_of_suits(self):
        assert len(Card.SUITS) == 4
    def test_all_suits_there(self):
        assert sorted(Card.SUITS) == ["c", "d", "h", "s"]
| {
"content_hash": "ed2072bcdc7792047776370ef83e44ae",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 178,
"avg_line_length": 36.52439024390244,
"alnum_prop": 0.5385642737896494,
"repo_name": "jdcald13/Winning_Texas_Holdem_Strategy",
"id": "e3599418706ad17b7bb1f7b113f55f23a270b3f6",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66921"
}
],
"symlink_target": ""
} |
# Packaging script for jitpy; runs only when executed directly.
if __name__ == '__main__':
    # NOTE(review): Feature and Extension are imported but never used below.
    from setuptools import setup, Feature, Extension
    setup(
        name='jitpy',
        description='A library to embed PyPy in CPython.',
        long_description="""
jitpy
=====

A library that let's you embed PyPy into CPython.

Please see the `Documentation <http://jitpy.readthedocs.org/>`_.

Contact
-------

`Mailing list <https://groups.google.com/forum/#!forum/jitpy>`_

""",
        version='0.1.3',
        packages=['jitpy'],
        zip_safe=False,
        url='http://jitpy.readthedocs.org',
        author='Maciej Fijalkowski',
        author_email='jitpy@googlegroups.com',
        license='MIT',
        features={
        },
        install_requires=[
            'cffi', 'pytest', 'py', 'numpy'
        ]
    )
| {
"content_hash": "be27461010251dc4c905913baca04455",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 64,
"avg_line_length": 20.676470588235293,
"alnum_prop": 0.6102418207681366,
"repo_name": "tempbottle/jitpy",
"id": "a0a5fa92ea9506964503215ebb1d03ea08599e19",
"size": "703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16246"
}
],
"symlink_target": ""
} |
from credentials import QiNiuACCESS_KEY, QiNiuSECRET_KEY
import qiniu.conf
import qiniu.io
import sys
import qiniu.rs
import qiniu.io
import qiniu.rsf
def uploadToQiNiu(bucketName,fileName):
'''
this function support override of existing file
'''
qiniu.conf.ACCESS_KEY = QiNiuACCESS_KEY
qiniu.conf.SECRET_KEY = QiNiuSECRET_KEY
policy = qiniu.rs.PutPolicy(bucketName)
policy.scope=bucketName+':'+unicode(fileName, "utf-8")
uptoken = policy.token()
extra = qiniu.io.PutExtra()
# extra.mime_type = "image/jpeg"
f=open(fileName,'r')
# localfile = "%s" % f.read()
ret, err = qiniu.io.put(uptoken, fileName, f)
f.close()
print ret;
if err is not None:
sys.stderr.write('error: %s ' % err)
def list_all(bucketName, rs=None, prefix=None, limit=None):
    '''
    Return every entry of a bucket, following pagination markers.

    Based on the official API sample code.  The original version returned
    ``ret['items']`` after the first page, leaving the pagination loop and
    the EOF/error handling as unreachable dead code; this version walks
    every page and accumulates the items.

    :param bucketName: bucket to list.
    :param rs: optional ``qiniu.rsf.Client``; created when None.
    :param prefix: optional key prefix filter.
    :param limit: optional page size.
    :return: list of item dicts from all pages.
    '''
    qiniu.conf.ACCESS_KEY = QiNiuACCESS_KEY
    qiniu.conf.SECRET_KEY = QiNiuSECRET_KEY
    if rs is None:
        rs = qiniu.rsf.Client()
    items = []
    marker = None
    err = None
    while err is None:
        ret, err = rs.list_prefix(bucketName, prefix=prefix, limit=limit,
                                  marker=marker)
        marker = ret.get('marker', None)
        items.extend(ret['items'])
    # err is qiniu.rsf.EOF when the listing completed normally; any other
    # value means the last page request failed part-way through.
    return items
"content_hash": "88e0bb62d992aba2ea7a1df506758392",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 82,
"avg_line_length": 22.763636363636362,
"alnum_prop": 0.7028753993610224,
"repo_name": "paulshi/qiniu",
"id": "e2c9f3ce0e098d95c6983d6f7f3a9a2f08056cd6",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiniulib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1429"
}
],
"symlink_target": ""
} |
"""
SDA: A Simulated Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['sda']
BIG_VALUE = 1e16
class VisitingDistribution(object):
    """
    Class used to generate new coordinates based on the distorted
    Cauchy-Lorentz distribution. Depending on the steps within the Markov
    chain, the class implements the strategy for generating new location
    changes.
    """
    # Visit values beyond this magnitude are clipped (scaled by a random
    # sample) to keep jumps finite.
    tail_limit = 1.e8
    # Coordinates landing exactly on the lower bound are nudged by this much.
    min_visit_bound = 1.e-10
    def __init__(self, lb, ub, qv, rs):
        # qv: visiting-distribution shape parameter; rs: RandomState.
        self.qv = qv
        self.rs = rs
        self.lower = lb
        self.upper = ub
        self.b_range = ub - lb
        # State shared between the two phases of the polar Gaussian sampler
        # in gaussian_fn (axis=1 draws, axis=0 reuses the last draw).
        self.x_gauss = None
        self.s_gauss = 0
        self.root_gauss = None
    def visiting(self, x, step, temperature):
        """Return a new candidate location derived from ``x``.

        For the first ``dim`` steps of the Markov chain all coordinates are
        perturbed at once; afterwards only the single coordinate indexed by
        ``step - dim`` is changed.  Results are wrapped back into the
        [lower, upper] box with fmod arithmetic.
        """
        dim = x.size
        if step < dim:
            # Changing all coordinates with a new visiting value
            visits = np.array([self.visit_fn(
                temperature) for _ in range(dim)])
            upper_sample = self.rs.random_sample()
            lower_sample = self.rs.random_sample()
            # Clip extreme tail draws to a randomly scaled tail_limit.
            visits[visits > self.tail_limit] = self.tail_limit * upper_sample
            visits[visits < -self.tail_limit] = -self.tail_limit * lower_sample
            x_visit = visits + x
            # Wrap out-of-bounds coordinates back into the search box.
            a = x_visit - self.lower
            b = np.fmod(a, self.b_range) + self.b_range
            x_visit = np.fmod(b, self.b_range) + self.lower
            # Avoid landing exactly on the lower bound.
            x_visit[np.fabs(
                x_visit - self.lower) < self.min_visit_bound] += 1.e-10
        else:
            # Changing only one coordinate at a time based on Markov chain step
            x_visit = np.copy(x)
            visit = self.visit_fn(temperature)
            if visit > self.tail_limit:
                visit = self.tail_limit * self.rs.random_sample()
            elif visit < -self.tail_limit:
                visit = -self.tail_limit * self.rs.random_sample()
            index = step - dim
            x_visit[index] = visit + x[index]
            a = x_visit[index] - self.lower[index]
            b = np.fmod(a, self.b_range[index]) + self.b_range[index]
            x_visit[index] = np.fmod(b, self.b_range[
                index]) + self.lower[index]
            if np.fabs(x_visit[index] - self.lower[
                    index]) < self.min_visit_bound:
                x_visit[index] += self.min_visit_bound
        return x_visit
    def visit_fn(self, temperature):
        """Draw one sample of the temperature-dependent visiting
        distribution (distorted Cauchy-Lorentz, shape set by ``qv``)."""
        factor1 = np.exp(np.log(temperature) / (self.qv - 1.0))
        factor2 = np.exp((4.0 - self.qv) * np.log(self.qv - 1.0))
        factor3 = np.exp((2.0 - self.qv) * np.log(2.0) / (self.qv - 1.0))
        factor4 = np.sqrt(np.pi) * factor1 * factor2 / (factor3 * (
            3.0 - self.qv))
        factor5 = 1.0 / (self.qv - 1.0) - 0.5
        d1 = 2.0 - factor5
        factor6 = np.pi * (1.0 - factor5) / np.sin(
            np.pi * (1.0 - factor5)) / np.exp(gammaln(d1))
        # sigmax scales the Gaussian numerator of the ratio-of-Gaussians
        # construction of the visiting distribution.
        sigmax = np.exp(-(self.qv - 1.0) * np.log(
            factor6 / factor4) / (3.0 - self.qv))
        x = sigmax * self.gaussian_fn(1)
        y = self.gaussian_fn(0)
        den = np.exp(
            (self.qv - 1.0) * np.log((np.fabs(y))) / (3.0 - self.qv))
        return x / den
    def gaussian_fn(self, axis):
        """Return a standard Gaussian sample via the polar (Marsaglia)
        method.  ``axis == 1`` draws a fresh pair and returns one
        component; ``axis == 0`` returns the companion component of the
        most recent draw.
        """
        if axis == 1:
            enter = True
            # Rejection loop: keep sampling until the point falls strictly
            # inside the unit circle (0 < s < 1).
            while enter or (self.s_gauss <= 0 or self.s_gauss >= 1):
                enter = False
                sample1 = self.rs.random_sample()
                self.x_gauss = sample1 * 2.0 - 1.0
                sample2 = self.rs.random_sample()
                y_gauss = sample2 * 2.0 - 1.0
                self.s_gauss = self.x_gauss ** 2 + y_gauss ** 2
            self.root_gauss = np.sqrt(-2.0 / self.s_gauss * np.log(
                self.s_gauss))
            return self.root_gauss * y_gauss
        else:
            return self.root_gauss * self.x_gauss
class EnergyState():
    """
    Class used to record the energy state. At any time, it knows what is the
    currently used coordinates and the most recent best location
    """
    # Maximum number of trials for generating a valid starting point
    MAX_REINIT_COUNT = 1000
    def __init__(self, lower, upper):
        # Best energy value found so far and its location.
        self.ebest = None
        self.xbest = None
        # Energy and location of the annealer's current state.
        self.current_energy = None
        self.current_location = None
        self.lower = lower
        self.upper = upper
    def __str__(self):
        # Fix: the original referenced the non-existent attribute
        # ``self._ebest``, raising AttributeError on every call.
        return 'Current: {0}@{1} Best: {2}@{3}'.format(
            self.current_energy, self.current_location, self.ebest,
            self.xbest,
        )
    def __repr__(self):
        # Fix: the original returned the bound method object itself
        # instead of calling it.
        return self.__str__()
    def reset(self, owf, rs, x0=None):
        """(Re)set the current location (to ``x0`` or a random in-bounds
        point) and recompute the current energy, retrying with new random
        coordinates while the objective yields NaN or +/- infinity, up to
        MAX_REINIT_COUNT times.

        :param owf: ObjectiveFunWrapper exposing ``func``.
        :param rs: numpy RandomState used for random restarts.
        :param x0: optional explicit starting coordinates.
        """
        if x0 is None:
            self.current_location = self.lower + rs.random_sample(
                len(self.lower)) * (self.upper - self.lower)
        else:
            self.current_location = np.copy(x0)
        init_error = True
        reinit_counter = 0
        while init_error:
            self.current_energy = owf.func(self.current_location)
            if self.current_energy is None:
                raise ValueError('Objective function is returning None')
            if (self.current_energy >= BIG_VALUE or
                    np.isnan(self.current_energy)):
                if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
                    init_error = False
                    message = (
                        'Stopping algorithm because the objective function '
                        'returned NaN or (+/-) infinity even after trying '
                        'new random parameters'
                    )
                    raise ValueError(message)
                self.current_location = self.lower + rs.random_sample(
                    self.lower.size) * (self.upper - self.lower)
                reinit_counter += 1
            else:
                init_error = False
        # If first time reset, initialize ebest and xbest
        if self.ebest is None and self.xbest is None:
            self.ebest = self.current_energy
            self.xbest = np.copy(self.current_location)
        # Otherwise, keep them in case of reannealing reset
class MarkovChain(object):
    """
    Class used for the Markov chain and related strategy for local search
    decision
    """
    def __init__(self, qa, vd, ofw, rs, state):
        # Local markov chain minimum energy and location
        self.emin = state.current_energy
        self.xmin = np.array(state.current_location)
        # Global optimizer state
        self.state = state
        # Acceptance parameter
        self.qa = qa
        # Visiting distribution instance
        self.vd = vd
        # Wrapper to objective function and related local minimizer
        self.ofw = ofw
        # Counter of chain steps without any global improvement, and the
        # threshold that forces a local search when exceeded.
        self.not_improved_idx = 0
        self.not_improved_max_idx = 1000
        self._rs = rs
        self.temperature_step = 0
        # Scale constant used in the local-search probability (local_search).
        self.K = 100 * len(state.current_location)
    def run(self, step, temperature):
        """Run one Markov chain at the given annealing temperature.

        Performs ``2 * dim`` visits: the first ``dim`` perturb all
        coordinates at once, the rest perturb one coordinate each (see
        VisitingDistribution.visiting).  Better energies are always
        accepted; worse ones are accepted with the generalized acceptance
        probability controlled by ``qa``.
        """
        self.temperature_step = temperature / float(step + 1)
        self.not_improved_idx += 1
        for j in range(self.state.current_location.size * 2):
            if j == 0:
                self.state_improved = False
            if step == 0 and j == 0:
                # First chain of the run: treat the start as an improvement
                # so a local search is attempted right away.
                self.state_improved = True
            x_visit = self.vd.visiting(
                self.state.current_location, j, temperature)
            # Calling the objective function
            e = self.ofw.func_wrapper(x_visit)
            if e < self.state.current_energy:
                # We have got a better energy value
                self.state.current_energy = e
                self.state.current_location = np.copy(x_visit)
                if e < self.state.ebest:
                    self.state.ebest = e
                    self.state.xbest = np.copy(x_visit)
                    self.state_improved = True
                    self.not_improved_idx = 0
            else:
                # We have not improved but do we accept the new location?
                r = self._rs.random_sample()
                # Generalized (Tsallis) acceptance probability; negative
                # base means zero acceptance probability.
                pqa_temp = (self.qa - 1.0) * (
                    e - self.state.current_energy) / self.temperature_step + 1.
                if pqa_temp < 0.:
                    pqa = 0.
                else:
                    pqa = np.exp(np.log(pqa_temp) / (1. - self.qa))
                if r <= pqa:
                    # We accept the new location and update state
                    self.state.current_energy = e
                    self.state.current_location = np.copy(x_visit)
                    self.xmin = np.copy(self.state.current_location)
            # No improvement since long time
            if self.not_improved_idx >= self.not_improved_max_idx:
                if j == 0 or self.state.current_energy < self.emin:
                    self.emin = self.state.current_energy
                    self.xmin = np.copy(self.state.current_location)
        # End of MarkovChain loop
    def local_search(self):
        # Decision making for performing a local search
        # based on Markov chain results
        # If energy has been improved or no improvement since too long,
        # performing a local search with the best Markov chain location
        if self.state_improved:
            # Global energy has improved, let's see if LS improved further
            e, x = self.ofw.local_search(self.state.xbest)
            if e < self.state.ebest:
                self.not_improved_idx = 0
                self.state.ebest = e
                self.state.xbest = np.copy(x)
                self.state.current_energy = e
                self.state.current_location = np.copy(x)
                return
        # Check probability of a need to perform a LS even if no improvment
        # (Dual annealing principle)
        do_ls = False
        if self.K < 90 * len(self.state.current_location):
            pls = np.exp(self.K * (self.state.ebest - self.state.current_energy
                                   ) / self.temperature_step)
            if pls >= self._rs.random_sample():
                do_ls = True
        # Global energy not improved, let's see what LS gives
        # on the best Markov chain location
        if self.not_improved_idx >= self.not_improved_max_idx:
            do_ls = True
        if do_ls:
            e, x = self.ofw.local_search(self.xmin)
            # NOTE(review): ofw.local_search returns (BIG_VALUE, None) on
            # minimizer failure; np.copy(None) below would then produce an
            # object array — confirm the failure path is intended.
            self.xmin = np.copy(x)
            self.emin = e
            self.not_improved_idx = 0
            self.not_improved_max_idx = self.state.current_location.size
            if e < self.state.ebest:
                self.state.ebest = self.emin
                self.state.xbest = np.copy(self.xmin)
                self.state.current_energy = e
                self.state.current_location = np.copy(x)
class ObjectiveFunWrapper(object):
    """
    Class used to wrap around the objective function in order to apply local
    search and default gradient computation.
    Default local minimizer is L-BFGS-B
    """
    def __init__(self, bounds, func, **kwargs):
        self.func = func
        # Total number of objective evaluations (used as stop criterion).
        self.nb_fun_call = 0
        # Keyword arguments forwarded verbatim to scipy.optimize.minimize.
        self.kwargs = kwargs
        self.minimizer = minimize
        self.fun_args = None
        # Split [(lo, hi), ...] bounds into separate arrays.
        lu = list(zip(*bounds))
        self.lower = np.array(lu[0])
        self.upper = np.array(lu[1])
        # Local search iteration budget scaled by dimension, clamped to
        # the range [100, 1000].
        self.ls_max_iter = self.lower.size * 6
        if self.ls_max_iter < 100:
            self.ls_max_iter = 100
        if self.ls_max_iter > 1000:
            self.ls_max_iter = 1000
        # By default, scipy L-BFGS-B is used with a custom 3 points gradient
        # computation
        if not self.kwargs or 'method' not in self.kwargs:
            self.kwargs['method'] = 'L-BFGS-B'
            self.kwargs['options'] = {
                'disp': None, 'maxls': 100, 'iprint': -1, 'gtol': 1e-06,
                'eps': 1e-06,
                'maxiter': self.ls_max_iter,
                'maxcor': 10, 'maxfun': 15000
            }
        if 'jac' not in self.kwargs:
            self.kwargs['jac'] = self.gradient
        if 'bounds' not in self.kwargs:
            self.kwargs['bounds'] = bounds
        # reps: step size for the finite-difference gradient below.
        if 'eps' in self.kwargs:
            self.reps = self.kwargs.get('eps')
        else:
            self.reps = 1.e-6
        if 'args' in self.kwargs:
            self.fun_args = self.kwargs.get('args')
        else:
            self.fun_args = ()
    def func_wrapper(self, x):
        """Evaluate the objective at ``x`` while counting the call."""
        self.nb_fun_call += 1
        return self.func(x, *self.fun_args)
    def gradient(self, x):
        """Central finite-difference gradient of the objective at ``x``,
        with steps clipped to stay inside the bounds.  Non-finite
        components are replaced by the sentinel value 101.0."""
        g = np.zeros(x.size, np.float64)
        for i in range(x.size):
            x1 = np.array(x)
            x2 = np.array(x)
            respl = self.reps
            respr = self.reps
            x1[i] = x[i] + respr
            if x1[i] > self.upper[i]:
                x1[i] = self.upper[i]
                respr = x1[i] - x[i]
            x2[i] = x[i] - respl
            if x2[i] < self.lower[i]:
                x2[i] = self.lower[i]
                respl = x[i] - x2[i]
            f1 = self.func_wrapper(x1)
            f2 = self.func_wrapper(x2)
            g[i] = ((f1 - f2)) / (respl + respr)
        idx = np.logical_or(np.isnan(g), np.isinf(g))
        g[idx] = 101.0
        return g
    def local_search(self, x, maxlsiter=None):
        """Run the configured local minimizer from ``x``.

        :return: ``(fun, x)`` of the minimizer result on success, or
            ``(BIG_VALUE, None)`` when the minimizer reports failure.
            ``maxlsiter`` is accepted for interface compatibility but
            is currently unused.
        """
        mres = self.minimizer(self.func_wrapper, x, **self.kwargs)
        if not mres.success:
            return BIG_VALUE, None
        return (mres.fun, mres.x)
class SDARunner(object):
    """Driver tying together the energy state, visiting distribution and
    Markov chain to perform the simulated dual annealing search."""
    MAX_REINIT_COUNT = 1000
    def __init__(self, fun, x0, bounds, seed=None, minimizer_kwargs=None,
                 temperature_start=5230, qv=2.62, qa=-5.0,
                 maxfun=1e7, maxsteps=500, pure_sa=False):
        if x0 is not None and not len(x0) == len(bounds):
            raise ValueError('Bounds size does not match x0')
        lu = list(zip(*bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        # Checking that bounds are consistent
        # NOTE(review): the message below has a typo ("note" for "not");
        # left unchanged here since string literals are program behavior.
        if not np.all(lower < upper):
            raise ValueError('Bounds are note consistent min < max')
        # Wrapper for the objective function and minimizer
        if minimizer_kwargs is None:
            minimizer_kwargs = dict()
        self.owf = ObjectiveFunWrapper(bounds, fun, **minimizer_kwargs)
        # Initialization of RandomState for reproducible runs if seed provided
        self.rs = check_random_state(seed)
        # Initialization of the energy state
        self.es = EnergyState(lower, upper)
        self.es.reset(self.owf, self.rs, x0)
        # Maximum number of function call that can be used a stopping criterion
        self.maxfun = maxfun
        # Maximum number of step (main iteration) that can be used as
        # stopping criterion
        self.maxsteps = maxsteps
        # Minimum value of annealing temperature reached to perform
        # re-annealing
        self.temperature_start = temperature_start
        self.temperature_restart = 0.1
        # VisitingDistribution instance
        vd = VisitingDistribution(lower, upper, qv, self.rs)
        # Markov chain instance
        self.mc = MarkovChain(qa, vd, self.owf, self.rs, self.es)
        self.qv = qv
        self.pure_sa = pure_sa
    def search(self):
        """Run the annealing loop until ``maxsteps`` iterations are done,
        re-annealing (resetting the energy state) whenever the temperature
        drops below ``temperature_restart``.
        """
        max_steps_reached = False
        self._iter = 0
        # t1 is the numerator constant of the qv-temperature schedule.
        t1 = np.exp((self.qv - 1) * np.log(2.0)) - 1.0
        while(not max_steps_reached):
            for i in range(self.maxsteps):
                # Compute temperature for this step
                s = float(i) + 2.0
                t2 = np.exp((self.qv - 1) * np.log(s)) - 1.0
                temperature = self.temperature_start * t1 / t2
                self._iter += 1
                if self._iter == self.maxsteps:
                    max_steps_reached = True
                    break
                # Need a re-annealing process?
                if temperature < self.temperature_restart:
                    self.es.reset(self.owf, self.rs)
                    break
                # starting Markov chain
                self.mc.run(i, temperature)
                if self.owf.nb_fun_call >= self.maxfun:
                    # NOTE(review): this break leaves max_steps_reached
                    # False, so the outer while restarts the for loop —
                    # confirm maxfun actually terminates the search.
                    break
                if not self.pure_sa:
                    self.mc.local_search()
                    if self.owf.nb_fun_call >= self.maxfun:
                        break
    @property
    def result(self):
        """ The OptimizeResult

        Returns the best location (``x``), its energy (``fun``), the
        number of iterations (``nit``) and objective calls (``ncall``).
        """
        res = OptimizeResult()
        res.x = self.es.xbest
        res.fun = self.es.ebest
        res.nit = self._iter
        res.ncall = self.owf.nb_fun_call
        return res
def sda(func, x0, bounds, maxiter=1000, minimizer_kwargs=None,
initial_temp=5230., visit=2.62, accept=-5.0, maxfun=1e7, seed=None,
pure_sa=False):
"""
Find the global minimum of a function using the Simulated Dual Annealing
algorithm
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
x0 : ndarray
The starting coordinates. If ``None`` is provided, initial
coordinates are automatically generated.
bounds : sequence
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining the lower and upper bounds for the optimizing argument of
`func`. It is required to have ``len(bounds) == len(x)``.
``len(bounds)`` is used to determine the number of parameters in ``x``.
maxiter : int, optional
The maximum number of sda iterations. Increase this value if the
objective function is very complicated with high dimensions.
minimizer_kwargs : dict, optional
Extra keyword arguments to be passed to the local minimizer
``scipy.optimize.minimize()`` Some important options could be:
method : str
The minimization method (e.g. ``"L-BFGS-B"``)
args : tuple
Extra arguments passed to the objective function (``func``) and
its derivatives (Jacobian, Hessian).
initial_temp : float, optional
The initial temperature, use higher values to facilitates a wider
search of the energy landscape, allowing sda to escape local minima
that it is trapped in.
visit : float, optional
Parameter for visiting distribution. Higher values give the visiting
distribution a heavier tail, this makes the algorithm jump to a more
distant region. The value range is (0, 3]
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. It has to be any negative value.
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
exceeded, the algorithm will stop just after the local search is
done.
seed : int or `np.random.RandomState`, optional
If `seed` is not specified the `np.RandomState` singleton is used.
If `seed` is an int, a new `np.random.RandomState` instance is used,
seeded with seed.
If `seed` is already a `np.random.RandomState instance`, then that
`np.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution
function and new coordinates generation.
pure_sa: boolean, optional
If `pure_sa` is set to `True`, a traditional Generalized Simulated
Annealing will be performed with no local search strategy applied.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
SDA is an implementation of the Simulated Dual Annealing. This stochastic
approach generalizes CSA [2]_ (Classical Simulated Annealing) and FSA (Fast
Simulated Annealing) to find the neighborhood of minima and introduces an
additional annealing process for the best solution found.
This algorithm uses a distorted Cauchy-Lorentz visiting distribution, with
its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \min{\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 0.19.0
References
----------
.. [1] Tsallis C (1988). "Possible generalization of Boltzmann-Gibbs
statistics." Journal of Statistical Physics, 52, 479-487.
.. [2] Tsallis C, Stariolo DA (1996). "Generalized Simulated Annealing."
Physica A, 233, 395-406.
.. [3] Xiang Y, Sun DY, Fan W, Gong XG (1997). "Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model."
Physics Letters A, 233, 216-220.
.. [4] Xiang Y, Gong XG (2000a). "Efficiency of Generalized Simulated
Annealing." PHYSICAL REVIEW E, 62, 4473.
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng (2013). "Generalized Simulated
Annealing for Efficient Global Optimization: the GenSA Package for
R". The R Journal, Volume 5/1, June 2013.
http://journal.r-project.org/.
.. [6] Mullen, K. (2014). Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45.
http://dx.doi.org/10.18637/jss.v060.i06
Examples
--------
The following example is a 10-dimensional problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
>>> import numpy as np
>>> from sdaopt import sda
>>> func = lambda x: np.sum(x * x - 10 * np.cos(
... 2 * np.pi * x)) + 10 * np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = sda(func, None, bounds=list(zip(lw, up)))
>>> print("global minimum: xmin = {0}, f(xmin) = {1}".format(
... ret.x, ret.fun))
"""
gr = SDARunner(func, x0, bounds, seed, minimizer_kwargs,
temperature_start=initial_temp, qv=visit, qa=accept,
maxfun=maxfun, maxsteps=maxiter, pure_sa=pure_sa)
gr.search()
return gr.result
| {
"content_hash": "09ecae134d757731552535c05fb70f83",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 79,
"avg_line_length": 41.02051282051282,
"alnum_prop": 0.562820352544068,
"repo_name": "sgubianpm/pygensa",
"id": "55a8534d55ec633155e893a449786cc0b716f0c1",
"size": "24171",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdaopt/_sda.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "395378"
},
{
"name": "R",
"bytes": "11648"
}
],
"symlink_target": ""
} |
"""Benchmark for KPL implementation of weighted embedding column with varying-length inputs."""
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()

# Repeat count: scales both the size of the generated dataset
# (batch_size * NUM_REPEATS rows) and the timing loops below.
NUM_REPEATS = 10
# Batch sizes at which the benchmark is exercised.
BATCH_SIZES = [32, 256]


### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
  """Benchmark a variable-length weighted embedding.

  Builds the same weighted-embedding computation twice -- once with Keras
  preprocessing layers and once with feature columns -- and times both on
  identical ragged input data.

  Args:
    batch_size: Number of examples per benchmark batch.
    max_length: Maximum ragged row length of the generated inputs.

  Returns:
    A (keras_avg_time, fc_avg_time) tuple of per-batch timings.
  """
  # Shared inputs: ragged integer ids plus an all-ones weight per id.
  vocab_size = 32768
  ragged_ids = fc_bm.create_data(
      max_length, batch_size * NUM_REPEATS, vocab_size - 1, dtype=int)
  ragged_weights = array_ops.ones_like_v2(ragged_ids, dtype=dt.float32)

  # Keras model: embed, scale each embedding by its weight, sum over the row.
  id_input = keras.Input(
      shape=(None,), ragged=True, name="data", dtype=dt.int64)
  weight_input = keras.Input(
      shape=(None,), ragged=True, name="weight", dtype=dt.float32)
  embedded = keras.layers.Embedding(vocab_size, 256)(id_input)
  weighted = math_ops.multiply(
      embedded, array_ops.expand_dims(weight_input, -1))
  summed = math_ops.reduce_sum(weighted, axis=1)
  model = keras.Model([id_input, weight_input], summed)

  # Equivalent feature-column pipeline.
  fc = fcv2.embedding_column(
      fcv2.weighted_categorical_column(
          fcv2.categorical_column_with_identity(
              "data", num_buckets=vocab_size - 1),
          weight_feature_key="weight"),
      dimension=256)

  # Wrap the FC implementation in a tf.function for a fair comparison
  @tf_function()
  def fc_fn(tensors):
    fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)

  # Time both implementations over the same underlying data.
  keras_data = {"data": ragged_ids, "weight": ragged_weights}
  k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
  fc_data = {
      "data": ragged_ids.to_sparse(),
      "weight": ragged_weights.to_sparse()
  }
  fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
  return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
  """Benchmark the layer forward pass."""

  def benchmark_layer(self):
    # Compare the KPL and FC implementations at every batch size.
    for batch_size in BATCH_SIZES:
      report_name = "weighted_embedding|varlen|batch_%s" % batch_size
      keras_time, fc_time = embedding_varlen(
          batch_size=batch_size, max_length=256)
      self.report(report_name, keras_time, fc_time, NUM_REPEATS)
# Run the benchmarks via the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  tf_test.main()
| {
"content_hash": "988c9e16519fca611af7d41b52ec5727",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 101,
"avg_line_length": 37.67567567567568,
"alnum_prop": 0.71987087517934,
"repo_name": "frreiss/tensorflow-fred",
"id": "0dd4415273eca230ebe818137717e6d1fe5d1277",
"size": "3477",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/preprocessing/benchmarks/weighted_embedding_varlen_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import factory
from poznaj.points.tests.factories import PointFactory
from poznaj.stories.tests.factories import StoryFactory
class ImageFactory(factory.django.DjangoModelFactory):
    """Factory producing ``images.Image`` instances for tests."""

    class Meta:
        model = 'images.Image'

    # Unique, human-readable title for every generated instance.
    title = factory.Sequence(lambda counter: 'image-{}'.format(counter))
    # Generated in-memory image, so no fixture file is needed.
    image_file = factory.django.ImageField()
    # Default license string, computed lazily per object.
    copyright = factory.LazyAttribute(lambda obj: 'CC0')
    # Related objects are built through their own factories.
    story = factory.SubFactory(StoryFactory)
    point = factory.SubFactory(PointFactory)
| {
"content_hash": "1ecf443127fda0a9a16f4b8db3fa7e58",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.7489626556016598,
"repo_name": "KlubJagiellonski/poznaj-app-backend",
"id": "2cd1fcfc6e2a0d9f0303f3d3a359ef3ad98f637a",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poznaj/images/tests/factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "Dockerfile",
"bytes": "533"
},
{
"name": "HTML",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "41829"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
from django.db import models
class Post(models.Model):
    """A blog post with a title, body text, and a per-post comment switch."""

    body = models.TextField()
    title = models.CharField(max_length=50)
    # Stamped automatically when the post is first saved, never updated.
    publication_date = models.DateTimeField(auto_now_add=True)
    # Lets individual posts opt out of commenting.
    enable_comments = models.BooleanField(default=True)

    def __unicode__(self):
        # Python 2-style readable representation (shown in the admin/shell).
        return self.title
class Comment(models.Model):
    """A reader comment attached to a single Post."""

    text = models.TextField()
    author = models.CharField(max_length=100)
    # Stamped automatically when the comment is first saved.
    date = models.DateTimeField(auto_now_add=True)
    # NOTE(review): pre-Django-1.9 style ForeignKey with no explicit
    # on_delete; deleting the Post presumably cascades -- confirm the
    # Django version in use.
    post = models.ForeignKey(Post)

    def __unicode__(self):
        # e.g. u"alice's comment on My Post"
        return self.author + u"'s comment on " + self.post.__unicode__()
| {
"content_hash": "2d97d9606442ea075b8566c6139600a7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 29.85,
"alnum_prop": 0.6800670016750419,
"repo_name": "Lukasa/minimalog",
"id": "74ccff28e7d44c13c7bfa04bc0a01e700aa1f763",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "posts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27970"
}
],
"symlink_target": ""
} |
"""Unit test cases for CSR1000V platform."""
from COT.platforms.cisco_csr1000v import CSR1000V
from COT.data_validation import (
ValueUnsupportedError, ValueTooLowError, ValueTooHighError
)
from COT.platforms.tests import PlatformTests
class TestCSR1000V(PlatformTests.PlatformTest):
    """Validation tests for the Cisco CSR 1000V platform definition."""

    cls = CSR1000V
    product_string = "com.cisco.csr1000v"

    def _check_validator(self, validator, cases):
        """Run ``validator`` over ``(value, expected_error)`` pairs in order.

        A ``None`` expected error means the value must validate cleanly.
        """
        for value, expected_error in cases:
            if expected_error is None:
                validator(value)
            else:
                self.assertRaises(expected_error, validator, value)

    def test_controller_type_for_device(self):
        """Device-to-controller mapping, including the parent fallthrough."""
        device_to_controller = [
            ('harddisk', 'scsi'),
            ('cdrom', 'ide'),
            # 'dvd' is not CSR1000V-specific; falls through to parent class.
            ('dvd', 'ide'),
        ]
        for device, controller in device_to_controller:
            self.assertEqual(self.ins.controller_type_for_device(device),
                             controller)

    def test_nic_name(self):
        """NIC names count up from GigabitEthernet1."""
        expected_names = ["GigabitEthernet1", "GigabitEthernet2",
                          "GigabitEthernet3", "GigabitEthernet4"]
        for nic_number, nic_name in enumerate(expected_names, start=1):
            self.assertEqual(self.ins.guess_nic_name(nic_number), nic_name)

    def test_cpu_count(self):
        """Only 1, 2, 4, or 8 CPUs are supported."""
        self._check_validator(self.ins.validate_cpu_count, [
            (0, ValueTooLowError),
            (1, None),
            (2, None),
            (3, ValueUnsupportedError),
            (4, None),
            (5, ValueUnsupportedError),
            (6, ValueUnsupportedError),
            (7, ValueUnsupportedError),
            (8, None),
            (9, ValueTooHighError),
        ])

    def test_memory_amount(self):
        """RAM must lie between 2560 and 8192 MiB inclusive."""
        self._check_validator(self.ins.validate_memory_amount, [
            (2559, ValueTooLowError),
            (2560, None),
            (8192, None),
            (8193, ValueTooHighError),
        ])

    def test_nic_count(self):
        """NIC count must lie between 3 and 26 inclusive."""
        self._check_validator(self.ins.validate_nic_count, [
            (2, ValueTooLowError),
            (3, None),
            (26, None),
            (27, ValueTooHighError),
        ])

    def test_nic_type(self):
        """E1000, virtio, and VMXNET3 are accepted; others rejected."""
        self._check_validator(self.ins.validate_nic_type, [
            ("E1000e", ValueUnsupportedError),
            ("E1000", None),
            ("PCNet32", ValueUnsupportedError),
            ("virtio", None),
            ("VMXNET3", None),
        ])

    def test_serial_count(self):
        """Serial port count must lie between 0 and 2 inclusive."""
        self._check_validator(self.ins.validate_serial_count, [
            (-1, ValueTooLowError),
            (0, None),
            (2, None),
            (3, ValueTooHighError),
        ])
| {
"content_hash": "222f79fee16d1fbb9cb355085ceab214",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 42.32941176470588,
"alnum_prop": 0.6206225680933852,
"repo_name": "glennmatthews/cot",
"id": "8c82a7c40f410326e819f13b888a19f9d670ad09",
"size": "4316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COT/platforms/tests/test_cisco_csr1000v.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293158"
},
{
"name": "Roff",
"bytes": "37442"
},
{
"name": "Shell",
"bytes": "3840"
}
],
"symlink_target": ""
} |
import unittest
from trello_plugin.trello import TrelloAPI
from maskgen.maskgen_loader import MaskGenLoader
class TestToolSet(unittest.TestCase):
    """Integration tests for the Trello status plugin.

    NOTE(review): these tests appear to talk to the live Trello API using
    credentials from the maskgen configuration -- confirm they require
    network access and valid credentials to pass.
    """

    def test_aproject(self):
        # Pushes a status card onto the 'testlist' list of the 'JournalQA'
        # board, creating it if needed.  Presumably named with 'a' so that
        # unittest's alphabetical ordering runs it before test_check.
        api = TrelloAPI(MaskGenLoader())
        api.update_status_to_card('JournalQA','testlist','123','test 123\n test 4567', ['to be removed','image'], create=True)

    def test_check(self):
        # Verifies the API connection reports a healthy status.
        api = TrelloAPI(MaskGenLoader())
        self.assertTrue(api.check_status())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "58646eb625022745ed335385829a0171",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 126,
"avg_line_length": 29.58823529411765,
"alnum_prop": 0.68389662027833,
"repo_name": "rwgdrummer/maskgen",
"id": "6759a63ad9ff38e1c011493c36c56fcbf0dde987",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notify_plugins/trello_plugin/tests/test_trello.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "544"
},
{
"name": "Dockerfile",
"bytes": "4825"
},
{
"name": "NSIS",
"bytes": "4907"
},
{
"name": "Python",
"bytes": "2768871"
},
{
"name": "Shell",
"bytes": "8086"
}
],
"symlink_target": ""
} |
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
from .quantity_helper.function_helpers import (
SUBCLASS_SAFE_FUNCTIONS, FUNCTION_HELPERS, DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ["Quantity", "SpecificTypeQuantity",
           "QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]


# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']

# Placeholder text used where a Quantity's unit was never initialised.
_UNIT_NOT_INITIALISED = "(Unit not initialised)"

# NOTE(review): presumably the ufuncs whose domain-edge RuntimeWarnings get
# filtered elsewhere in this module -- usage is not visible in this chunk.
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for Quantity
    """
    # Above this many elements, the IPython LaTeX repr of an array Quantity
    # is abbreviated with "..."; a negative value defers to numpy's
    # get_printoptions instead.
    latex_array_threshold = _config.ConfigItem(100,
        'The maximum size an array Quantity can be before its LaTeX '
        'representation for IPython gets "summarized" (meaning only the first '
        'and last few elements are shown with "..." between). Setting this to a '
        'negative number means that the value will instead be whatever numpy '
        'gets from get_printoptions.')


# Module-level singleton holding the active configuration.
conf = Conf()
class QuantityIterator:
    """Flat (1-D) iterator over the elements of a Quantity.

    Returned by ``q.flat`` for any Quantity ``q``.  Mirrors
    `numpy.ndarray.flat`, but yields `Quantity` scalars carrying the
    parent's unit and converts assigned values to that unit.

    Iteration is done in C-contiguous style, with the last index varying
    the fastest; basic and advanced indexing are supported.

    See Also
    --------
    Quantity.flatten : Returns a flattened copy of an array.

    Notes
    -----
    Modelled on `~numpy.ma.core.MaskedIterator`.  Not exported by the
    `~astropy.units` module; obtain instances through `Quantity.flat`
    rather than constructing them directly.
    """

    def __init__(self, q):
        self._quantity = q
        self._dataiter = q.view(np.ndarray).flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        out = self._dataiter.__getitem__(indx)
        # ndarray.flat.__getitem__ hands back bare scalars for single
        # elements; those need re-wrapping as a Quantity view.  Slices
        # already come back as the right class.
        if not isinstance(out, type(self._quantity)):
            out = self._quantity._new_view(out)
        return out

    def __setitem__(self, index, value):
        # Convert to the parent's own unit before storing.
        self._dataiter[index] = self._quantity._to_own_unit(value)

    def __next__(self):
        """Return the next value, or raise StopIteration."""
        # The underlying flat iterator yields plain scalars; view them
        # as Quantity before handing them out.
        return self._quantity._new_view(next(self._dataiter))

    next = __next__

    #### properties and methods to match `numpy.ndarray.flatiter` ####

    @property
    def base(self):
        """A reference to the array that is iterated over."""
        return self._quantity

    @property
    def coords(self):
        """An N-dimensional tuple of current coordinates."""
        return self._dataiter.coords

    @property
    def index(self):
        """Current flat index into the array."""
        return self._dataiter.index

    def copy(self):
        """Get a copy of the iterator as a 1-D array."""
        return self._quantity.flatten()
class QuantityInfoBase(ParentDtypeInfo):
    # This is on a base class rather than QuantityInfo directly, so that
    # it can be used for EarthLocationInfo yet make clear that that class
    # should not be considered a typical Quantity subclass by Table.
    attrs_from_parent = {'dtype', 'unit'}  # dtype and unit taken from parent
    _supports_indexing = True

    @staticmethod
    def default_format(val):
        # Equivalent to f'{val.value}': format with an empty format spec.
        return format(val.value)

    @staticmethod
    def possible_string_format_functions(format_):
        """Iterate through possible string-derived format functions.

        A string can either be a format specifier for the format built-in,
        a new-style format string, or an old-style format string.

        This method is overridden in order to suppress printing the unit
        in each row since it is already at the top in the column header.
        """
        def _format_spec(format_, val):
            return format(val.value, format_)

        def _format_new_style(format_, val):
            return format_.format(val.value)

        def _format_old_style(format_, val):
            return format_ % val.value

        yield _format_spec
        yield _format_new_style
        yield _format_old_style
class QuantityInfo(QuantityInfoBase):
    """
    Container for meta information like name, description, format. This is
    required when the object is used as a mixin column within a table, but can
    be used as a general way to store meta information.
    """
    _represent_as_dict_attrs = ('value', 'unit')
    _construct_from_dict_args = ['value']
    _represent_as_dict_primary_data = 'value'

    def new_like(self, cols, length, metadata_conflicts='warn', name=None):
        """
        Return a new Quantity instance which is consistent with the
        input ``cols`` and has ``length`` rows.

        This is intended for creating an empty column object whose elements can
        be set in-place for table operations like join or vstack.

        Parameters
        ----------
        cols : list
            List of input columns
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name

        Returns
        -------
        col : `~astropy.units.Quantity` (or subclass)
            Empty instance of this class consistent with ``cols``
        """
        # Get merged info attributes like shape, dtype, format, description, etc.
        attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
                                           ('meta', 'format', 'description'))

        # Make an empty quantity using the unit of the last one.
        shape = (length,) + attrs.pop('shape')
        dtype = attrs.pop('dtype')
        # Use zeros so we do not get problems for Quantity subclasses such
        # as Longitude and Latitude, which cannot take arbitrary values.
        data = np.zeros(shape=shape, dtype=dtype)
        # Get arguments needed to reconstruct class.  (Renamed from ``map``
        # to avoid shadowing the builtin of the same name.)
        attr_map = {key: (data if key == 'value' else getattr(cols[-1], key))
                    for key in self._represent_as_dict_attrs}
        attr_map['copy'] = False
        out = self._construct_from_dict(attr_map)

        # Set remaining info attributes
        for attr, value in attrs.items():
            setattr(out.info, attr, value)

        return out

    def get_sortable_arrays(self):
        """
        Return a list of arrays which can be lexically sorted to represent
        the order of the parent column.

        For Quantity this is just the quantity itself.

        Returns
        -------
        arrays : list of ndarray
        """
        return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `~astropy.units.Quantity` (sequence), or str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : unit-like
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
Unless the ``dtype`` argument is explicitly specified, integer
or (non-Quantity) object inputs are converted to `float` by default.
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
    def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
                subok=False, ndmin=0):
        """Construct a Quantity, dispatching on the type of ``value``.

        Four input families are handled below: an existing Quantity
        (fast path; converted/viewed as needed), a string such as
        ``"1.2 m"``, a sequence of Quantity (converted to a common unit),
        and any other array-like, which may itself carry a ``unit``
        attribute (e.g. table Columns).
        """
        if unit is not None:
            # convert unit first, to avoid multiple string->unit conversions
            unit = Unit(unit)

        # optimize speed for Quantity with no dtype given, copy=False
        if isinstance(value, Quantity):
            if unit is not None and unit is not value.unit:
                value = value.to(unit)
                # the above already makes a copy (with float dtype)
                copy = False

            if type(value) is not cls and not (subok and
                                               isinstance(value, cls)):
                value = value.view(cls)

            if dtype is None and value.dtype.kind in 'iu':
                # default float conversion for integer input (see below)
                dtype = float

            return np.array(value, dtype=dtype, copy=copy, order=order,
                            subok=True, ndmin=ndmin)

        # Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
        # To ensure array remains fast, we short-circuit it.
        value_unit = None
        if not isinstance(value, np.ndarray):
            if isinstance(value, str):
                # The first part of the regex string matches any integer/float;
                # the second parts adds possible trailing .+-, which will break
                # the float function below and ensure things like 1.2.3deg
                # will not work.
                pattern = (r'\s*[+-]?'
                           r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
                           r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
                           r'([eE][+-]?\d+)?'
                           r'[.+-]?')

                v = re.match(pattern, value)
                unit_string = None
                try:
                    value = float(v.group())

                except Exception:
                    # also covers v being None (no match at all)
                    raise TypeError('Cannot parse "{}" as a {}. It does not '
                                    'start with a number.'
                                    .format(value, cls.__name__))

                unit_string = v.string[v.end():].strip()
                if unit_string:
                    value_unit = Unit(unit_string)
                    if unit is None:
                        unit = value_unit  # signal no conversion needed below.

            elif (isiterable(value) and len(value) > 0 and
                  all(isinstance(v, Quantity) for v in value)):
                # Convert all quantities to the same unit.
                if unit is None:
                    unit = value[0].unit
                value = [q.to_value(unit) for q in value]
                value_unit = unit  # signal below that conversion has been done

        if value_unit is None:
            # If the value has a `unit` attribute and if not None
            # (for Columns with uninitialized unit), treat it like a quantity.
            value_unit = getattr(value, 'unit', None)
            if value_unit is None:
                # Default to dimensionless for no (initialized) unit attribute.
                if unit is None:
                    unit = cls._default_unit
                value_unit = unit  # signal below that no conversion is needed
            else:
                try:
                    value_unit = Unit(value_unit)
                except Exception as exc:
                    raise TypeError("The unit attribute {!r} of the input could "
                                    "not be parsed as an astropy Unit, raising "
                                    "the following exception:\n{}"
                                    .format(value.unit, exc))

                if unit is None:
                    unit = value_unit
                elif unit is not value_unit:
                    copy = False  # copy will be made in conversion at end

        value = np.array(value, dtype=dtype, copy=copy, order=order,
                         subok=True, ndmin=ndmin)

        # check that array contains numbers or long int objects
        if (value.dtype.kind in 'OSU' and
            not (value.dtype.kind == 'O' and
                 isinstance(value.item(0), numbers.Number))):
            raise TypeError("The value must be a valid Python or "
                            "Numpy numeric type.")

        # by default, cast any integer, boolean, etc., to float
        if dtype is None and value.dtype.kind in 'iuO':
            value = value.astype(float)

        # if we allow subclasses, allow a class from the unit.
        if subok:
            qcls = getattr(unit, '_quantity_class', cls)
            if issubclass(qcls, cls):
                cls = qcls

        value = value.view(cls)
        value._set_unit(value_unit)
        if unit is value_unit:
            return value
        else:
            # here we had non-Quantity input that had a "unit" attribute
            # with a unit different from the desired one. So, convert.
            return value.to(unit)
    def __array_finalize__(self, obj):
        """Finalize a new instance: propagate unit and ``info`` from ``obj``.

        Called by numpy for every new instance, whether from explicit
        construction, view casting, or new-from-template.
        """
        # Check whether super().__array_finalize should be called
        # (sadly, ndarray.__array_finalize__ is None; we cannot be sure
        # what is above us).
        super_array_finalize = super().__array_finalize__
        if super_array_finalize is not None:
            super_array_finalize(obj)

        # If we're a new object or viewing an ndarray, nothing has to be done.
        if obj is None or obj.__class__ is np.ndarray:
            return

        # If our unit is not set and obj has a valid one, use it.
        if self._unit is None:
            unit = getattr(obj, '_unit', None)
            if unit is not None:
                self._set_unit(unit)

        # Copy info if the original had `info` defined.  Because of the way the
        # DataInfo works, `'info' in obj.__dict__` is False until the
        # `info` attribute is accessed or set.
        if 'info' in obj.__dict__:
            self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more since all use '
'should go through array_function. '
'Please raise an issue on '
'https://github.com/astropy/astropy')
    def __array_ufunc__(self, function, method, *inputs, **kwargs):
        """Wrap numpy ufuncs, taking care of units.

        Parameters
        ----------
        function : callable
            ufunc to wrap.
        method : str
            Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
        inputs : tuple
            Input arrays.
        kwargs : keyword arguments
            As passed on, with ``out`` containing possible quantity output.

        Returns
        -------
        result : `~astropy.units.Quantity`
            Results of the ufunc, with the unit set properly.
        """
        # Determine required conversion functions -- to bring the unit of the
        # input to that expected (e.g., radian for np.sin), or to get
        # consistent units between two inputs (e.g., in np.add) --
        # and the unit of the result (or tuple of units for nout > 1).
        converters, unit = converters_and_unit(function, method, *inputs)

        out = kwargs.get('out', None)
        # Avoid loop back by turning any Quantity output into array views.
        if out is not None:
            # If pre-allocated output is used, check it is suitable.
            # This also returns array view, to ensure we don't loop back.
            if function.nout == 1:
                out = out[0]
            out_array = check_output(out, unit, inputs, function=function)
            # Ensure output argument remains a tuple.
            kwargs['out'] = (out_array,) if function.nout == 1 else out_array

        # Same for inputs, but here also convert if necessary.
        # `converters` is aligned with `inputs`: each entry is either a
        # conversion callable or None when no conversion is needed.
        arrays = []
        for input_, converter in zip(inputs, converters):
            input_ = getattr(input_, 'value', input_)
            arrays.append(converter(input_) if converter else input_)

        # Call our superclass's __array_ufunc__
        result = super().__array_ufunc__(function, method, *arrays, **kwargs)

        # If unit is None, a plain array is expected (e.g., comparisons), which
        # means we're done.
        # We're also done if the result was None (for method 'at') or
        # NotImplemented, which can happen if other inputs/outputs override
        # __array_ufunc__; hopefully, they can then deal with us.
        if unit is None or result is None or result is NotImplemented:
            return result

        return self._result_as_quantity(result, unit, out)
    def _result_as_quantity(self, result, unit, out):
        """Turn result into a quantity with the given unit.
        If no output is given, it will take a view of the array as a quantity,
        and set the unit. If output is given, those should be quantity views
        of the result arrays, and the function will just set the unit.
        Parameters
        ----------
        result : ndarray or tuple thereof
            Array(s) which need to be turned into quantity.
        unit : `~astropy.units.Unit`
            Unit for the quantities to be returned (or `None` if the result
            should not be a quantity). Should be tuple if result is a tuple.
        out : `~astropy.units.Quantity` or None
            Possible output quantity. Should be `None` or a tuple if result
            is a tuple.
        Returns
        -------
        out : `~astropy.units.Quantity`
            With units set.
        """
        if isinstance(result, (tuple, list)):
            if out is None:
                out = (None,) * len(result)
            # Multiple results (nout > 1): recurse on each element, keeping
            # the container class of `result` (tuple or list).
            return result.__class__(
                self._result_as_quantity(result_, unit_, out_)
                for (result_, unit_, out_) in
                zip(result, unit, out))
        if out is None:
            # View the result array as a Quantity with the proper unit.
            return result if unit is None else self._new_view(result, unit)
        # For given output, just set the unit. We know the unit is not None and
        # the output is of the correct Quantity subclass, as it was passed
        # through check_output.
        out._set_unit(unit)
        return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `~astropy.units.Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
    def _new_view(self, obj=None, unit=None):
        """
        Create a Quantity view of some array-like input, and set the unit
        By default, return a view of ``obj`` of the same class as ``self`` and
        with the same unit. Subclasses can override the type of class for a
        given unit using ``__quantity_subclass__``, and can ensure properties
        other than the unit are copied using ``__array_finalize__``.
        If the given unit defines a ``_quantity_class`` of which ``self``
        is not an instance, a view using this class is taken.
        Parameters
        ----------
        obj : ndarray or scalar, optional
            The array to create a view of. If obj is a numpy or python scalar,
            it will be converted to an array scalar. By default, ``self``
            is converted.
        unit : unit-like, optional
            The unit of the resulting object. It is used to select a
            subclass, and explicitly assigned to the view if given.
            If not given, the subclass and unit will be that of ``self``.
        Returns
        -------
        view : `~astropy.units.Quantity` subclass
        """
        # Determine the unit and quantity subclass that we need for the view.
        if unit is None:
            unit = self.unit
            quantity_subclass = self.__class__
        elif unit is self.unit and self.__class__ is Quantity:
            # The second part is because we should not presume what other
            # classes want to do for the same unit. E.g., Constant will
            # always want to fall back to Quantity, and relies on going
            # through `__quantity_subclass__`.
            quantity_subclass = Quantity
        else:
            unit = Unit(unit)
            quantity_subclass = getattr(unit, '_quantity_class', Quantity)
            if isinstance(self, quantity_subclass):
                quantity_subclass, subok = self.__quantity_subclass__(unit)
                if subok:
                    quantity_subclass = self.__class__
        # We only want to propagate information from ``self`` to our new view,
        # so obj should be a regular array. By using ``np.array``, we also
        # convert python and numpy scalars, which cannot be viewed as arrays
        # and thus not as Quantity either, to zero-dimensional arrays.
        # (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call is cheap (about
        # the cost of checking ``obj.__class__ is np.ndarray``), so this is
        # not worth special-casing.
        if obj is None:
            obj = self.view(np.ndarray)
        else:
            obj = np.array(obj, copy=False, subok=True)
        # Take the view, set the unit, and update possible other properties
        # such as ``info``, ``wrap_angle`` in `Longitude`, etc.
        view = obj.view(quantity_subclass)
        view._set_unit(unit)
        view.__array_finalize__(self)
        return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initilizer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{} instances require {} units, not {} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[], copy=True):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy
will only be made if necessary.
See also
--------
to_value : get the numerical value in a given unit.
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
if copy:
# Avoid using to_value to ensure that we make a copy. We also
# don't want to slow down this method (esp. the scalar case).
value = self._to_value(unit, equivalencies)
else:
# to_value only copies if necessary
value = self.to_value(unit, equivalencies)
return self._new_view(value, unit)
    def to_value(self, unit=None, equivalencies=[]):
        """
        The numerical value, possibly in a different unit.
        Parameters
        ----------
        unit : unit-like, optional
            The unit in which the value should be given. If not given or `None`,
            use the current unit.
        equivalencies : list of tuple, optional
            A list of equivalence pairs to try if the units are not directly
            convertible (see :ref:`astropy:unit_equivalencies`). If not provided
            or ``[]``, class default equivalencies will be used (none for
            `~astropy.units.Quantity`, but may be set for subclasses).
            If `None`, no equivalencies will be applied at all, not even any
            set globally or within a context.
        Returns
        -------
        value : ndarray or scalar
            The value in the units specified. For arrays, this will be a view
            of the data if no unit conversion was necessary.
        See also
        --------
        to : Get a new instance in a different unit.
        """
        if unit is None or unit is self.unit:
            # No conversion needed: just a plain view of the data.
            value = self.view(np.ndarray)
        else:
            unit = Unit(unit)
            # We want a view if the unit does not change. One could check
            # with "==", but that calculates the scale that we need anyway.
            # TODO: would be better for `unit.to` to have an in-place flag.
            try:
                scale = self.unit._to(unit)
            except Exception:
                # Short-cut failed; try default (maybe equivalencies help).
                value = self._to_value(unit, equivalencies)
            else:
                value = self.view(np.ndarray)
                if not is_effectively_unity(scale):
                    # not in-place!
                    value = value * scale
        # Index with empty tuple to decay array scalars in to numpy scalars.
        return value[()]
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
    def __getattr__(self, attr):
        """
        Quantities are able to directly convert to other units that
        have the same physical type.
        """
        if not self._include_easy_conversion_members:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no '{attr}' member")
        def get_virtual_unit_attribute():
            # Look up `attr` as a unit name in the active unit registry and,
            # if found, try converting to it using our equivalencies.
            registry = get_current_unit_registry().registry
            to_unit = registry.get(attr, None)
            if to_unit is None:
                return None
            try:
                return self.unit.to(
                    to_unit, self.value, equivalencies=self.equivalencies)
            except UnitsError:
                return None
        value = get_virtual_unit_attribute()
        if value is None:
            raise AttributeError(
                f"{self.__class__.__name__} instance has no attribute '{attr}'")
        else:
            return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting. On the other
# hand, for structured arrays, the ufunc does not work, so we do use
# __eq__ and live with the warnings.
def __eq__(self, other):
try:
if self.dtype.kind == 'V':
return super().__eq__(other)
else:
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
if self.dtype.kind == 'V':
return super().__ne__(other)
else:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
    def __ilshift__(self, other):
        """In-place unit conversion: ``q <<= unit`` converts the data."""
        try:
            other = Unit(other, parse_strict='silent')
        except UnitTypeError:
            return NotImplemented
        try:
            factor = self.unit._to(other)
        except UnitConversionError:
            # Maybe via equivalencies? Now we do make a temporary copy.
            try:
                value = self._to_value(other)
            except UnitConversionError:
                return NotImplemented
            self.view(np.ndarray)[...] = value
        else:
            # Direct scale factor: rescale the data in place.
            self.view(np.ndarray)[...] *= factor
        self._set_unit(other)
        return self
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
    # Give warning for other >> self, since probably other << self was meant.
    def __rrshift__(self, other):
        warnings.warn(">> is not implemented. Did you mean to convert "
                      "something to this quantity as a unit using '<<'?",
                      AstropyWarning)
        return NotImplemented
    # Also define __rshift__ and __irshift__ so we override default ndarray
    # behaviour, but instead of emitting a warning here, let it be done by
    # other (which likely is a unit if this was a mistake).
    def __rshift__(self, other):
        # Deliberately unsupported; see comment above.
        return NotImplemented
    def __irshift__(self, other):
        # Deliberately unsupported; see comment above.
        return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
    # Python 2-era aliases kept for backwards compatibility; they simply
    # delegate to the true-division implementations above.
    def __div__(self, other):
        """ Division between `Quantity` objects. """
        return self.__truediv__(other)
    def __idiv__(self, other):
        """ Division between `Quantity` objects. """
        return self.__itruediv__(other)
    def __rdiv__(self, other):
        """ Division between `Quantity` objects. """
        return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
    def __getitem__(self, key):
        """Index like ndarray, but wrap scalar results back into Quantity."""
        try:
            out = super().__getitem__(key)
        except IndexError:
            # We want zero-dimensional Quantity objects to behave like scalars,
            # so they should raise a TypeError rather than an IndexError.
            if self.isscalar:
                raise TypeError(
                    "'{cls}' object with a scalar value does not support "
                    "indexing".format(cls=self.__class__.__name__))
            else:
                raise
        # For single elements, ndarray.__getitem__ returns scalars; these
        # need a new view as a Quantity.
        if not isinstance(out, np.ndarray):
            out = self._new_view(out)
        return out
    def __setitem__(self, i, value):
        """Set item(s), converting ``value`` to this quantity's own unit."""
        # update indices in info if the info property has been accessed
        # (in which case 'info' in self.__dict__ is True; this is guaranteed
        # to be the case if we're part of a table).
        if not self.isscalar and 'info' in self.__dict__:
            self.info.adjust_indices(i, value, len(self))
        self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
    # __contains__ is OK
    def __bool__(self):
        """Quantities should always be treated as non-False; there is too much
        potential for ambiguity otherwise.
        """
        # Deprecated behavior: always truthy, with a warning for now.
        warnings.warn('The truth value of a Quantity is ambiguous. '
                      'In the future this will raise a ValueError.',
                      AstropyDeprecationWarning)
        return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
    # Numerical types
    def __float__(self):
        """Float conversion; only for dimensionless scalar quantities."""
        try:
            return float(self.to_value(dimensionless_unscaled))
        except (UnitsError, TypeError):
            raise TypeError('only dimensionless scalar quantities can be '
                            'converted to Python scalars')
    def __int__(self):
        """Int conversion; only for dimensionless scalar quantities."""
        try:
            return int(self.to_value(dimensionless_unscaled))
        except (UnitsError, TypeError):
            raise TypeError('only dimensionless scalar quantities can be '
                            'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
    def to_string(self, unit=None, precision=None, format=None, subfmt=None):
        """
        Generate a string representation of the quantity and its unit.
        The behavior of this function can be altered via the
        `numpy.set_printoptions` function and its various keywords. The
        exception to this is the ``threshold`` keyword, which is controlled via
        the ``[units.quantity]`` configuration item ``latex_array_threshold``.
        This is treated separately because the numpy default of 1000 is too big
        for most browsers to handle.
        Parameters
        ----------
        unit : unit-like, optional
            Specifies the unit. If not provided,
            the unit used to initialize the quantity will be used.
        precision : number, optional
            The level of decimal precision. If `None`, or not provided,
            it will be determined from NumPy print options.
        format : str, optional
            The format of the result. If not provided, an unadorned
            string is returned. Supported values are:
            - 'latex': Return a LaTeX-formatted string
        subfmt : str, optional
            Subformat of the result. For the moment,
            only used for format="latex". Supported values are:
            - 'inline': Use ``$ ... $`` as delimiters.
            - 'display': Use ``$\\displaystyle ... $`` as delimiters.
        Returns
        -------
        str
            A string with the contents of this Quantity
        """
        if unit is not None and unit != self.unit:
            # Convert first, then format in the requested unit.
            return self.to(unit).to_string(
                unit=None, precision=precision, format=format, subfmt=subfmt)
        # Delimiters for each supported (format, subfmt) combination.
        formats = {
            None: None,
            "latex": {
                None: ("$", "$"),
                "inline": ("$", "$"),
                "display": (r"$\displaystyle ", r"$"),
            },
        }
        if format not in formats:
            raise ValueError(f"Unknown format '{format}'")
        elif format is None:
            if precision is None:
                # Use default formatting settings
                return f'{self.value}{self._unitstr:s}'
            else:
                # np.array2string properly formats arrays as well as scalars
                return np.array2string(self.value, precision=precision, floatmode="fixed") + self._unitstr
        # else, for the moment we assume format="latex"
        # need to do try/finally because "threshold" cannot be overridden
        # with array2string
        # Set the precision if set, otherwise use numpy default
        pops = np.get_printoptions()
        format_spec = f".{precision if precision is not None else pops['precision']}g"
        def float_formatter(value):
            return Latex.format_exponential_notation(value,
                                                     format_spec=format_spec)
        def complex_formatter(value):
            return '({}{}i)'.format(
                Latex.format_exponential_notation(value.real,
                                                  format_spec=format_spec),
                Latex.format_exponential_notation(value.imag,
                                                  format_spec='+' + format_spec))
        try:
            formatter = {'float_kind': float_formatter,
                         'complex_kind': complex_formatter}
            # NOTE(review): when latex_array_threshold <= -1 the custom
            # formatter is never installed -- confirm that is intended.
            if conf.latex_array_threshold > -1:
                np.set_printoptions(threshold=conf.latex_array_threshold,
                                    formatter=formatter)
            # the view is needed for the scalar case - value might be float
            latex_value = np.array2string(
                self.view(np.ndarray),
                max_line_width=np.inf, separator=',~')
            latex_value = latex_value.replace('...', r'\dots')
        finally:
            np.set_printoptions(**pops)
        # Format unit
        # [1:-1] strips the '$' on either side needed for math mode
        latex_unit = (self.unit._repr_latex_()[1:-1]  # note this is unicode
                      if self.unit is not None
                      else _UNIT_NOT_INITIALISED)
        delimiter_left, delimiter_right = formats[format][subfmt]
        return r'{left}{0} \; {1}{right}'.format(latex_value, latex_unit,
                                                 left=delimiter_left,
                                                 right=delimiter_right)
    def __str__(self):
        """Plain string form: value followed by the unit string."""
        return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=prefixstr)
return f'{prefixstr}{arrstr}{self._unitstr:s}>'
    def _repr_latex_(self):
        """
        Generate a latex representation of the quantity and its unit.
        Returns
        -------
        str
            A LaTeX string with the contents of this Quantity
        """
        # NOTE: This should change to display format in a future release
        return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format(f"{value}{self._unitstr:s}",
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of `~astropy.units.UnitBase`, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raises a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
This will raises a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
"""Copy an element of an array to a scalar Quantity and return it.
Like :meth:`~numpy.ndarray.item` except that it always
returns a `Quantity`, not a Python scalar.
"""
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.tolist()")
    def _to_own_unit(self, value, check_precision=True):
        """Convert ``value`` to a plain value in this quantity's own unit.
        Used by item assignment (`__setitem__`, `fill`, `put`, `itemset`).
        ``value`` may be a Quantity, anything convertible to one, or --
        if entirely 0/inf/nan -- treated as having arbitrary unit.  With
        ``check_precision`` and an integer dtype, raise if the cast would
        lose precision.
        """
        try:
            _value = value.to_value(self.unit)
        except AttributeError:
            # We're not a Quantity.
            # First remove two special cases (with a fast test):
            # 1) Maybe masked printing? MaskedArray with quantities does not
            # work very well, but no reason to break even repr and str.
            # 2) np.ma.masked? useful if we're a MaskedQuantity.
            if (value is np.ma.masked
                    or (value is np.ma.masked_print_option
                        and self.dtype.kind == 'O')):
                return value
            # Now, let's try a more general conversion.
            # Plain arrays will be converted to dimensionless in the process,
            # but anything with a unit attribute will use that.
            try:
                as_quantity = Quantity(value)
                _value = as_quantity.to_value(self.unit)
            except UnitsError:
                # last chance: if this was not something with a unit
                # and is all 0, inf, or nan, we treat it as arbitrary unit.
                if (not hasattr(value, 'unit') and
                        can_have_arbitrary_unit(as_quantity.value)):
                    _value = as_quantity.value
                else:
                    raise
        if self.dtype.kind == 'i' and check_precision:
            # If, e.g., we are casting float to int, we want to fail if
            # precision is lost, but let things pass if it works.
            _value = np.array(_value, copy=False, subok=True)
            if not np.can_cast(_value.dtype, self.dtype):
                self_dtype_array = np.array(_value, self.dtype, subok=True)
                # Round-trip comparison; NaN entries are exempted.
                if not np.all(np.logical_or(self_dtype_array == _value,
                                            np.isnan(_value))):
                    raise TypeError("cannot convert value type to array type "
                                    "without precision loss")
        return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tobytes(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tobytes(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
    def __array_function__(self, function, types, args, kwargs):
        """Wrap numpy functions, taking care of units.
        Parameters
        ----------
        function : callable
            Numpy function to wrap
        types : iterable of classes
            Classes that provide an ``__array_function__`` override. Can
            in principle be used to interact with other classes. Below,
            mostly passed on to `~numpy.ndarray`, which can only interact
            with subclasses.
        args : tuple
            Positional arguments provided in the function call.
        kwargs : dict
            Keyword arguments provided in the function call.
        Returns
        -------
        result: `~astropy.units.Quantity`, `~numpy.ndarray`
            As appropriate for the function. If the function is not
            supported, `NotImplemented` is returned, which will lead to
            a `TypeError` unless another argument overrode the function.
        Raises
        ------
        ~astropy.units.UnitsError
            If operands have incompatible units.
        """
        # A function should be in one of the following sets or dicts:
        # 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
        #    supports Quantity; we pass on to ndarray.__array_function__.
        # 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
        #    after converting quantities to arrays with suitable units,
        #    and possibly setting units on the result.
        # 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
        #    requires a Quantity-specific implementation.
        # 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
        # For now, since we may not yet have complete coverage, if a
        # function is in none of the above, we simply call the numpy
        # implementation.
        if function in SUBCLASS_SAFE_FUNCTIONS:
            return super().__array_function__(function, types, args, kwargs)
        elif function in FUNCTION_HELPERS:
            function_helper = FUNCTION_HELPERS[function]
            try:
                args, kwargs, unit, out = function_helper(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)
            result = super().__array_function__(function, types, args, kwargs)
            # Fall through to return section
        elif function in DISPATCHED_FUNCTIONS:
            dispatched_function = DISPATCHED_FUNCTIONS[function]
            try:
                result, unit, out = dispatched_function(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)
            # Fall through to return section
        elif function in UNSUPPORTED_FUNCTIONS:
            return NotImplemented
        else:
            # Unknown function: best-effort pass-through, with a warning.
            warnings.warn("function '{}' is not known to astropy's Quantity. "
                          "Will run it anyway, hoping it will treat ndarray "
                          "subclasses correctly. Please raise an issue at "
                          "https://github.com/astropy/astropy/issues. "
                          .format(function.__name__), AstropyWarning)
            return super().__array_function__(function, types, args, kwargs)
        # If unit is None, a plain array is expected (e.g., boolean), which
        # means we're done.
        # We're also done if the result was NotImplemented, which can happen
        # if other inputs/outputs override __array_function__;
        # hopefully, they can then deal with us.
        if unit is None or result is NotImplemented:
            return result
        return self._result_as_quantity(result, unit, out=out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Quantity. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Quantity subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
for t in types):
raise TypeError("the Quantity implementation cannot handle {} "
"with the given arguments."
.format(function)) from None
else:
return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, keepdims=keepdims,
unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof,
keepdims=keepdims)
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.mean, axis, dtype, out=out,
keepdims=keepdims)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise TypeError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
    """Superclass for Quantities of specific physical type.

    Subclasses of these work just like :class:`~astropy.units.Quantity`, except
    that they are for specific physical types (and may have methods that are
    only appropriate for that type). Astropy examples are
    :class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`

    At a minimum, subclasses should set ``_equivalent_unit`` to the unit
    associated with the physical type.
    """
    # The unit for the specific physical type.  Instances can only be created
    # with units that are equivalent to this.
    _equivalent_unit = None

    # The default unit used for views.  Even with `None`, views of arrays
    # without units are possible, but will have an uninitalized unit.
    _unit = None

    # Default unit for initialization through the constructor.
    _default_unit = None

    # Ensure that we get precedence over our superclass.
    __array_priority__ = Quantity.__array_priority__ + 10

    def __quantity_subclass__(self, unit):
        # Keep this specific class while the unit stays equivalent to the
        # physical type; otherwise, defer to whatever Quantity would pick.
        if not unit.is_equivalent(self._equivalent_unit):
            return super().__quantity_subclass__(unit)[0], False
        return type(self), True

    def _set_unit(self, unit):
        # Only units equivalent to the physical type are acceptable.
        if unit is not None and unit.is_equivalent(self._equivalent_unit):
            super()._set_unit(unit)
            return
        if unit is None:
            detail = ", but no unit was given."
        else:
            detail = f", so cannot set it to '{unit}'."
        raise UnitTypeError(
            "{} instances require units equivalent to '{}'".format(
                type(self).__name__, self._equivalent_unit) + detail)
def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs):
    """
    Return a boolean array where two arrays are element-wise equal
    within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``.  If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison.  The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other.  If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaN’s as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaN’s in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.isclose`.  However, this differs from the `numpy` function in
    that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
        or if ``rtol`` is not dimensionless.

    See also
    --------
    allclose
    """
    # Strip units (after consistency checks) and defer to numpy.
    stripped = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.isclose(*stripped, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool:
    """
    Whether two arrays are element-wise equal within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``.  If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison.  The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other.  If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaN’s as equal.  If `True`, NaNs in ``a`` will
        be considered equal to NaN’s in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.allclose`.  However, this differs from the `numpy` function in
    that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible,
        or if ``rtol`` is not dimensionless.

    See also
    --------
    isclose
    """
    # Strip units (after consistency checks) and defer to numpy.
    stripped = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.allclose(*stripped, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
    """Convert the arguments of ``isclose``/``allclose`` to plain values.

    ``desired`` and ``atol`` are converted to the units of ``actual``;
    ``rtol`` must be dimensionless.  ``atol=None`` defaults to zero in the
    units of ``actual``.

    Returns
    -------
    tuple
        ``(actual, desired, rtol, atol)`` as unitless array values, suitable
        for passing to `numpy.isclose` / `numpy.allclose`.

    Raises
    ------
    UnitsError
        If ``desired`` or ``atol`` is not convertible to the units of
        ``actual``, or if ``rtol`` is not dimensionless.
    """
    actual = Quantity(actual, subok=True, copy=False)

    desired = Quantity(desired, subok=True, copy=False)
    try:
        desired = desired.to(actual.unit)
    except UnitsError as err:
        # Chain the original error so the underlying conversion failure
        # stays visible in the traceback.
        raise UnitsError(
            f"Units for 'desired' ({desired.unit}) and 'actual' "
            f"({actual.unit}) are not convertible"
        ) from err

    if atol is None:
        # By default, we assume an absolute tolerance of zero in the
        # appropriate units.  The default value of None for atol is
        # needed because the units of atol must be consistent with the
        # units for a and b.
        atol = Quantity(0)
    else:
        atol = Quantity(atol, subok=True, copy=False)
        try:
            atol = atol.to(actual.unit)
        except UnitsError as err:
            raise UnitsError(
                f"Units for 'atol' ({atol.unit}) and 'actual' "
                f"({actual.unit}) are not convertible"
            ) from err

    rtol = Quantity(rtol, subok=True, copy=False)
    try:
        rtol = rtol.to(dimensionless_unscaled)
    except Exception as err:
        raise UnitsError("'rtol' should be dimensionless") from err

    return actual.value, desired.value, rtol.value, atol.value
| {
"content_hash": "e445ccd10793a3109580eb616f4559af",
"timestamp": "",
"source": "github",
"line_count": 1906,
"max_line_length": 106,
"avg_line_length": 39.09443861490031,
"alnum_prop": 0.5834608261534745,
"repo_name": "aleksandr-bakanov/astropy",
"id": "e2f3158db442d1139094c08e35bb4da155f0145b",
"size": "74610",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/units/quantity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898093"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import nibabel as nb
import numpy as np
from nipype.pipeline import engine as pe
from fmriprep.interfaces.maths import Clip
def test_Clip(tmp_path):
    """Exercise the Clip interface over minimum/maximum combinations."""
    in_file = str(tmp_path / "input.nii")
    arr = np.array([[[-1., 1.], [-2., 2.]]])
    nb.Nifti1Image(arr, np.eye(4)).to_filename(in_file)

    # Clip from below at zero: negatives are zeroed, output is a new file.
    node = pe.Node(Clip(in_file=in_file, minimum=0),
                   name="threshold", base_dir=tmp_path)
    res = node.run()
    assert res.outputs.out_file == str(tmp_path / "threshold/input_clipped.nii")
    assert np.allclose(nb.load(res.outputs.out_file).get_fdata(),
                       [[[0., 1.], [0., 2.]]])

    # Minimum below the data range: nothing to clip, input is passed through.
    node = pe.Node(Clip(in_file=in_file, minimum=-3),
                   name="threshold2", base_dir=tmp_path)
    res = node.run()
    assert res.outputs.out_file == in_file
    assert np.allclose(nb.load(res.outputs.out_file).get_fdata(),
                       [[[-1., 1.], [-2., 2.]]])

    # Clip on both sides.
    node = pe.Node(Clip(in_file=in_file, minimum=-1, maximum=1),
                   name="clip", base_dir=tmp_path)
    res = node.run()
    assert res.outputs.out_file == str(tmp_path / "clip/input_clipped.nii")
    assert np.allclose(nb.load(res.outputs.out_file).get_fdata(),
                       [[[-1., 1.], [-1., 1.]]])

    # Clip from above at zero: positives are zeroed.
    node = pe.Node(Clip(in_file=in_file, maximum=0),
                   name="nonpositive", base_dir=tmp_path)
    res = node.run()
    assert res.outputs.out_file == str(tmp_path / "nonpositive/input_clipped.nii")
    assert np.allclose(nb.load(res.outputs.out_file).get_fdata(),
                       [[[-1., 0.], [-2., 0.]]])
| {
"content_hash": "4de694ab1c426f5542e4b3d70f46e713",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 94,
"avg_line_length": 31.58823529411765,
"alnum_prop": 0.6045934202358784,
"repo_name": "oesteban/fmriprep",
"id": "d0ca703b2f3971e58e275104157a346b4133c342",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmriprep/interfaces/tests/test_maths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "7855"
},
{
"name": "HTML",
"bytes": "3051357"
},
{
"name": "Python",
"bytes": "489605"
},
{
"name": "Shell",
"bytes": "1827"
},
{
"name": "TeX",
"bytes": "16245"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext
from setuptools import Extension
import sys
class CMakeBuild(build_ext):
    """
    Runs cmake and make install instead of a traditional C/C++ extension build.
    """
    def run(self):
        """Configure with cmake, build/install with make, then stage the lib."""
        build_dir = os.path.dirname(os.path.realpath(__file__))
        steps = [("cmake", ""), ("make -j -C", "install")]
        for cmd, target in steps:
            status = os.system("{} {} {}".format(cmd, build_dir, target))
            if status != 0:
                print("ERROR: Failed to run {}".format(cmd))
                sys.exit(1)
        # copy to cuda lib dir/ the cuda lib dir will be in path, so cudanet
        # should automatically work
        os.system(
            "cp {}/cudanet/libcconv2_cudanet.so /usr/local/cuda/lib64".format(
                build_dir))
# Placeholder extension: sources are empty because CMakeBuild performs the
# actual compilation; runtime_library_dirs lets the loader find the built
# shared library under the 'cudanet' package directory.
cudanet = Extension('cudanet.libcudanet', sources = [],
                    runtime_library_dirs=['cudanet'])
install_requires = ['numpy', ]
test_requires = ['nose', ]
# cmdclass hooks CMakeBuild in so that `python setup.py build_ext` (and
# install) drives cmake/make instead of the default compiler invocation.
setup(name="cudanet",
      version="0.2.8",
      description="Provides a set of cudamat like functions using cuda-convnet2 kernels",
      ext_modules = [cudanet],
      packages=['cudanet','cudanet.layers'],
      author="Alex Khrizevsky, Nervanasys, and Tim Dettmers",
      author_email="tim.dettmers@gmail.com",
      url="https://code.google.com/p/cuda-convnet2/",
      install_requires=install_requires,
      tests_require=test_requires,
      cmdclass={'build_ext': CMakeBuild},
      )
| {
"content_hash": "e1e11a37ef1b7e3306806fbe2bc38e93",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 101,
"avg_line_length": 37.89473684210526,
"alnum_prop": 0.6368055555555555,
"repo_name": "TimDettmers/dlearndb",
"id": "887ae57fbd6eb2e7a1b4bc12cdeaecc7572bc455",
"size": "1681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "55013"
},
{
"name": "CMake",
"bytes": "122886"
},
{
"name": "Cuda",
"bytes": "1354492"
},
{
"name": "Python",
"bytes": "358960"
},
{
"name": "Shell",
"bytes": "600"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017-2022, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
"""
from atom.api import (
Bool,
Enum,
Float,
ForwardTyped,
Int,
Str,
Typed,
Property,
set_default,
)
from enaml.core.declarative import d_, observe
from .fragment import Fragment, ProxyFragment
from .view_group import ProxyViewGroup, ViewGroup
class ProxyViewPager(ProxyViewGroup):
    """The abstract definition of a proxy ViewPager object."""
    #: A reference to the ViewPager declaration.
    declaration = ForwardTyped(lambda: ViewPager)

    def set_current_index(self, index: int):
        """Select the page at ``index`` on the native widget."""
        raise NotImplementedError

    def set_offscreen_page_limit(self, limit: int):
        """Set how many pages are retained on either side when idle."""
        raise NotImplementedError

    def set_page_margin(self, margin: int):
        """Set the margin between pages."""
        raise NotImplementedError

    def set_paging_enabled(self, enabled: bool):
        """Enable or disable paging by swiping."""
        raise NotImplementedError

    def set_transition(self, transition: str):
        """Set the page-change transition by name."""
        raise NotImplementedError
class ProxyPagerTitleStrip(ProxyViewGroup):
    """The abstract definition of a proxy PagerTitleStrip object."""
    #: A reference to the declaration.
    declaration = ForwardTyped(lambda: PagerTitleStrip)

    def set_titles(self, titles: list[str]):
        """Set the list of page titles displayed by the strip."""
        raise NotImplementedError

    def set_inactive_alpha(self, alpha: float):
        """Set the alpha used for non-primary page titles."""
        raise NotImplementedError

    def set_text_color(self, color: str):
        """Set the base color for all displayed page titles."""
        raise NotImplementedError

    def set_text_size(self, size: int):
        """Set the default text size."""
        raise NotImplementedError

    def set_text_spacing(self, spacing: int):
        """Set the spacing (in pixels) between titles."""
        raise NotImplementedError
class ProxyPagerTabStrip(ProxyPagerTitleStrip):
    """The abstract definition of a proxy PagerTabStrip object."""
    #: A reference to the declaration.
    declaration = ForwardTyped(lambda: PagerTabStrip)

    def set_tab_indicator_color(self, color: str):
        """Set the color of the tab indicator bar."""
        raise NotImplementedError

    def set_tab_full_underline(self, enabled: bool):
        """Set whether a full-width underline is drawn in the indicator color."""
        raise NotImplementedError
class ProxyPagerFragment(ProxyFragment):
    """The abstract definition of a proxy ProxyPagerFragment object."""
    #: A reference to the declaration.
    declaration = ForwardTyped(lambda: PagerFragment)

    def set_title(self, title: str):
        """Set the page title shown in title/tab strips."""
        raise NotImplementedError

    def set_icon(self, icon: str):
        """Set the icon or drawable resource shown in title/tab strips."""
        raise NotImplementedError
class ViewPager(ViewGroup):
    """Layout manager that allows the user to flip left and right through
    pages of data.
    """
    #: Set the currently selected page.
    current_index = d_(Int())

    #: Set the number of pages that should be retained to either side
    #: of the current page in the view hierarchy in an idle state.
    offscreen_page_limit = d_(Int())

    #: Enable or disable paging by swiping
    paging_enabled = d_(Bool(True))

    #: Set the margin between pages.
    page_margin = d_(Int(-1))

    #: Read only list of pages
    pages = Property()

    def _get_pages(self):
        # Getter for ``pages``: the Fragment children form the pager's pages.
        # NOTE(review): Property() here appears uncached, so this list is
        # presumably rebuilt on every access -- confirm if caching is wanted.
        return [c for c in self._children if isinstance(c, Fragment)]

    #: Transition: the page-change animation.  The first entry ("default")
    #: is the Enum's initial value.
    transition = d_(
        Enum(
            "default",
            "accordion",
            "bg_to_fg",
            "fg_to_bg",
            "cube_in",
            "cube_out",
            "draw_from_back",
            "flip_horizontal",
            "flip_vertical",
            "depth_page",
            "parallax_page",
            "rotate_down",
            "rotate_up",
            "stack",
            "tablet",
            "zoom_in",
            "zoom_out",
            "zoom_out_slide",
        )
    )

    #: A reference to the ProxyViewPager object.
    proxy = Typed(ProxyViewPager)

    # -------------------------------------------------------------------------
    # Observers
    # -------------------------------------------------------------------------
    @observe(
        "current_index",
        "offscreen_page_limit",
        "page_margin",
        "paging_enabled",
        "transition",
    )
    def _update_proxy(self, change):
        # Forward member changes to the proxy toolkit object.
        super()._update_proxy(change)
class PagerTitleStrip(ViewGroup):
    """A strip of page titles, intended to be placed inside a ViewPager."""
    #: Top by default
    gravity = set_default("top")

    #: Set the alpha value used for non-primary page titles.
    inactive_alpha = d_(Float())

    #: Set the color value used as the base color for all displayed page titles.
    text_color = d_(Str())

    #: Set the default text size to a given unit and value. Forced to DP
    text_size = d_(Int())

    #: Spacing pixels
    text_spacing = d_(Int())

    # -------------------------------------------------------------------------
    # Observers
    # -------------------------------------------------------------------------
    # NOTE: "inactive_alpha" was missing from this list even though the proxy
    # defines ``set_inactive_alpha``, so changes never reached the proxy.
    @observe("inactive_alpha", "text_color", "text_size", "text_spacing")
    def _update_proxy(self, change):
        # Forward member changes to the proxy toolkit object.
        super()._update_proxy(change)
class PagerTabStrip(PagerTitleStrip):
    """A PagerTitleStrip variant that draws a tab indicator for the
    current page.
    """
    #: Set the color of the tab indicator bar.
    tab_indicator_color = d_(Str())

    #: Set whether this tab strip should draw a full-width underline
    #: in the current tab indicator color.
    tab_full_underline = d_(Bool())

    # -------------------------------------------------------------------------
    # Observers
    # -------------------------------------------------------------------------
    @observe("tab_indicator_color", "tab_full_underline")
    def _update_proxy(self, change):
        # Forward member changes to the proxy toolkit object.
        super()._update_proxy(change)
class PagerFragment(Fragment):
    """A Fragment that sets page content and provides a title for tabs
    and title sliders.
    """
    #: Set the title for the title or tab pager
    title = d_(Str())

    #: Set the icon or drawable resource for the title or tab pager
    icon = d_(Str())

    # -------------------------------------------------------------------------
    # Observers
    # -------------------------------------------------------------------------
    @observe("title", "icon")
    def _update_proxy(self, change):
        # Forward member changes to the proxy toolkit object.
        super()._update_proxy(change)
| {
"content_hash": "514d76431f02f30fbe34f05aa9ef8e4c",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 26.734513274336283,
"alnum_prop": 0.56835484938762,
"repo_name": "codelv/enaml-native",
"id": "50391d849e1df7a7ee65145fd5ad5c425b830b5b",
"size": "6042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/enamlnative/widgets/view_pager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12016"
},
{
"name": "Cython",
"bytes": "37131"
},
{
"name": "Java",
"bytes": "129792"
},
{
"name": "Makefile",
"bytes": "1341"
},
{
"name": "Objective-C",
"bytes": "31920"
},
{
"name": "Python",
"bytes": "669324"
},
{
"name": "Shell",
"bytes": "2048"
}
],
"symlink_target": ""
} |
from findex_gui.web import app
from furl import furl
from werkzeug.routing import BaseConverter
from findex_common.static_variables import SearchParameters
class SearchUrlConverter(BaseConverter):
    """
    The URL Converter for parsing search arguments.
    Example: /search/die%20hard&cats=[movies]&type=[files]&size=0-1823718372
    """
    def to_python(self, value):
        """Parse the raw URL segment into a dict of search parameters.

        Bracketed values (``[a,b]``) become lists; numeric-looking values are
        converted to ``int``; known parameter names are translated to their
        ids via :class:`SearchParameters`.
        """
        lookup = SearchParameters()
        if value.startswith('key='):
            value = value[4:]
        data = {}
        # Parse the segment as a query string; the leading 'key=' carries
        # the free-text search term.
        for k, v in furl('/?key=' + value).args.items():
            # Translate a human-readable parameter name to its id, if known.
            kk = lookup.id_by_name(k)
            if kk:
                k = kk
            if v.startswith('[') and v.endswith(']'):
                # Bracketed values are comma-separated lists.
                v = v[1:-1]
                v = v.split(',')
                vv = []
                for elem in v:
                    try:
                        vv.append(int(elem))
                    except ValueError:
                        vv.append(elem)
                data[k] = vv
            else:
                try:
                    data[k] = int(v)
                except ValueError:
                    data[k] = v
        return data

    def to_url(self, values):
        """Build the URL segment for a sequence of values.

        BUG FIX: the original called ``BaseConverter.to_url(value)``, passing
        ``value`` as ``self`` (an unbound call missing the instance), which
        raises TypeError at runtime.  The instance must be passed explicitly.
        """
        return '+'.join(BaseConverter.to_url(self, value)
                        for value in values)
# Register the converter under the name 'search' so route rules can declare
# parameters like ``/search/<search:args>``.
app.url_map.converters['search'] = SearchUrlConverter
| {
"content_hash": "ca555307c76f9fe27097047c4303ff36",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 26.68,
"alnum_prop": 0.49025487256371814,
"repo_name": "skftn/findex-gui",
"id": "b36cf96bf5bd3430028bd40d163dc65fdb18f78f",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findex_gui/controllers/search/converters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "344290"
},
{
"name": "HTML",
"bytes": "256465"
},
{
"name": "JavaScript",
"bytes": "690059"
},
{
"name": "Python",
"bytes": "249228"
}
],
"symlink_target": ""
} |
"""Helper functions and test harnesses for source implementations.
This module contains helper functions and test harnesses for checking
correctness of source (a subclass of ``iobase.BoundedSource``) and range
tracker (a subclass of``iobase.RangeTracker``) implementations.
Contains a few lightweight utilities (e.g. reading items from a source such as
``readFromSource()``, as well as heavyweight property testing and stress
testing harnesses that help getting a large amount of test coverage with few
code.
Most notable ones are:
* ``assertSourcesEqualReferenceSource()`` helps testing that the data read by
the union of sources produced by ``BoundedSource.split()`` is the same as data
read by the original source.
* If your source implements dynamic work rebalancing, use the
``assertSplitAtFraction()`` family of functions - they test behavior of
``RangeTracker.try_split()``, in particular, that various consistency
properties are respected and the total set of data read by the source is
preserved when splits happen. Use ``assertSplitAtFractionBehavior()`` to test
individual cases of ``RangeTracker.try_split()`` and use
``assertSplitAtFractionExhaustive()`` as a heavy-weight stress test including
concurrency. We strongly recommend to use both.
For example usages, see the unit tests of modules such as
* apache_beam.io.source_test_utils_test.py
* apache_beam.io.avroio_test.py
"""
from __future__ import absolute_import
from __future__ import division
import logging
import threading
import weakref
from builtins import next
from builtins import object
from builtins import range
from collections import namedtuple
from multiprocessing.pool import ThreadPool
from apache_beam.io import iobase
from apache_beam.testing.util import equal_to
__all__ = ['read_from_source',
'assert_sources_equal_reference_source',
'assert_reentrant_reads_succeed',
'assert_split_at_fraction_behavior',
'assert_split_at_fraction_binary',
'assert_split_at_fraction_exhaustive',
'assert_split_at_fraction_fails',
'assert_split_at_fraction_succeeds_and_consistent']
_LOGGER = logging.getLogger(__name__)
class ExpectedSplitOutcome(object):
  """Enumerates expected outcomes when splitting a source at a fraction."""
  # The split must succeed, and primary/residual must be consistent.
  MUST_SUCCEED_AND_BE_CONSISTENT = 1
  # The split must fail.
  MUST_FAIL = 2
  # The split may fail; if it succeeds, the result must be consistent.
  MUST_BE_CONSISTENT_IF_SUCCEEDS = 3
# Element counts observed when reading the primary and residual ranges
# produced by a split.
SplitAtFractionResult = namedtuple(
    'SplitAtFractionResult', 'num_primary_items num_residual_items')

# Aggregated results of exhaustive split testing: presumably the fractions
# that split successfully and those yielding a non-trivial split -- the
# consuming code is outside this view, so confirm against
# assert_split_at_fraction_exhaustive().
SplitFractionStatistics = namedtuple(
    'SplitFractionStatistics',
    'successful_fractions non_trivial_fractions')
def read_from_source(source, start_position=None, stop_position=None):
  """Reads elements from the given ```BoundedSource```.

  Only reads elements within the given position range.

  Args:
    source (~apache_beam.io.iobase.BoundedSource):
      :class:`~apache_beam.io.iobase.BoundedSource` implementation.
    start_position (int): start position for reading.
    stop_position (int): stop position for reading.

  Returns:
    List[str]: the set of values read from the sources.
  """
  range_tracker = source.get_range_tracker(start_position, stop_position)
  assert isinstance(range_tracker, iobase.RangeTracker)
  # Materialize the reader's iterator into a list.
  return list(source.read(range_tracker))
def _ThreadPool(threads):
# ThreadPool crashes in old versions of Python (< 2.7.5) if created from a
# child thread. (http://bugs.python.org/issue10015)
if not hasattr(threading.current_thread(), '_children'):
threading.current_thread()._children = weakref.WeakKeyDictionary()
return ThreadPool(threads)
def assert_sources_equal_reference_source(reference_source_info, sources_info):
  """Tests if a reference source is equal to a given set of sources.

  Given a reference source (a :class:`~apache_beam.io.iobase.BoundedSource`
  and a position range) and a list of sources, assert that the union of the
  records read from the list of sources is equal to the records read from the
  reference source.

  Args:
    reference_source_info\
        (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
      a three-tuple that gives the reference
      :class:`~apache_beam.io.iobase.BoundedSource`, position to start
      reading at, and position to stop reading at.
    sources_info\
        (Iterable[Tuple[~apache_beam.io.iobase.BoundedSource, int, int]]):
      a set of sources. Each source is a three-tuple that is of the same
      format described above.

  Raises:
    ~exceptions.ValueError: if the set of data produced by the reference source
      and the given set of sources are not equivalent.
  """
  # Validate the shape of the reference tuple before reading from it.
  if not (isinstance(reference_source_info, tuple) and
          len(reference_source_info) == 3 and
          isinstance(reference_source_info[0], iobase.BoundedSource)):
    raise ValueError('reference_source_info must a three-tuple where first'
                     'item of the tuple gives a '
                     'iobase.BoundedSource. Received: %r'
                     % reference_source_info)
  reference_records = read_from_source(
      *reference_source_info)

  # Accumulate the union of records read from every given source.
  source_records = []
  for source_info in sources_info:
    assert isinstance(source_info, tuple)
    assert len(source_info) == 3
    if not (isinstance(source_info, tuple) and
            len(source_info) == 3 and
            isinstance(source_info[0], iobase.BoundedSource)):
      raise ValueError('source_info must a three tuple where first'
                       'item of the tuple gives a '
                       'iobase.BoundedSource. Received: %r'
                       % source_info)
    # All sources must share the reference's coder type so that records are
    # comparable.
    if (type(reference_source_info[0].default_output_coder()) !=
        type(source_info[0].default_output_coder())):
      raise ValueError(
          'Reference source %r and the source %r must use the same coder. '
          'They are using %r and %r respectively instead.'
          % (reference_source_info[0], source_info[0],
             type(reference_source_info[0].default_output_coder()),
             type(source_info[0].default_output_coder())))
    source_records.extend(read_from_source(*source_info))

  # Cheap length check first, with a clearer error message.
  if len(reference_records) != len(source_records):
    raise ValueError(
        'Reference source must produce the same number of records as the '
        'list of sources. Number of records were %d and %d instead.'
        % (len(reference_records), len(source_records)))

  # NOTE(review): ``equal_to(expected)`` in apache_beam.testing.util returns
  # a matcher that raises BeamAssertException on mismatch rather than
  # returning a truthy value, so this ValueError branch may be unreachable;
  # mismatches would surface as BeamAssertException instead -- confirm.
  if equal_to(reference_records)(source_records):
    raise ValueError(
        'Reference source and provided list of sources must produce the '
        'same set of records.')
def assert_reentrant_reads_succeed(source_info):
  """Tests if a given source can be read in a reentrant manner.

  Assume that given source produces the set of values ``{v1, v2, v3, ... vn}``.
  For ``i`` in range ``[1, n-1]`` this method performs a reentrant read after
  reading ``i`` elements and verifies that both the original and reentrant read
  produce the expected set of values.

  Args:
    source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
      a three-tuple that gives the reference
      :class:`~apache_beam.io.iobase.BoundedSource`, position to start reading
      at, and a position to stop reading at.

  Raises:
    ~exceptions.ValueError: if source is too trivial or reentrant read result
      in an incorrect read.
  """
  source, start_position, stop_position = source_info
  assert isinstance(source, iobase.BoundedSource)

  # Full read used as the expected result for all partial/reentrant reads.
  expected_values = [val for val in source.read(source.get_range_tracker(
      start_position, stop_position))]
  if len(expected_values) < 2:
    raise ValueError('Source is too trivial since it produces only %d '
                     'values. Please give a source that reads at least 2 '
                     'values.' % len(expected_values))

  for i in range(1, len(expected_values) - 1):
    read_iter = source.read(source.get_range_tracker(
        start_position, stop_position))
    original_read = []
    for _ in range(i):
      original_read.append(next(read_iter))

    # Reentrant read: start a second, full read while the first is paused.
    reentrant_read = [val for val in source.read(
        source.get_range_tracker(start_position, stop_position))]

    # Continuing original read.
    for val in read_iter:
      original_read.append(val)

    # NOTE(review): ``equal_to(expected)`` in apache_beam.testing.util returns
    # a matcher that raises BeamAssertException on mismatch rather than
    # returning a truthy value, so these ValueError branches may be
    # unreachable; mismatches would surface as BeamAssertException -- confirm.
    if equal_to(original_read)(expected_values):
      raise ValueError('Source did not produce expected values when '
                       'performing a reentrant read after reading %d values. '
                       'Expected %r received %r.'
                       % (i, expected_values, original_read))

    if equal_to(reentrant_read)(expected_values):
      raise ValueError('A reentrant read of source after reading %d values '
                       'did not produce expected values. Expected %r '
                       'received %r.'
                       % (i, expected_values, reentrant_read))
def assert_split_at_fraction_behavior(source, num_items_to_read_before_split,
                                      split_fraction, expected_outcome):
  """Verifies the behaviour of splitting a source at a given fraction.

  Asserts that splitting a :class:`~apache_beam.io.iobase.BoundedSource` either
  fails after reading **num_items_to_read_before_split** items, or succeeds in
  a way that is consistent according to
  :func:`assert_split_at_fraction_succeeds_and_consistent()`.

  Args:
    source (~apache_beam.io.iobase.BoundedSource): the source to perform
      dynamic splitting on.
    num_items_to_read_before_split (int): number of items to read before
      splitting.
    split_fraction (float): fraction to split at.
    expected_outcome (int): a value from
      :class:`~apache_beam.io.source_test_utils.ExpectedSplitOutcome`.

  Returns:
    Tuple[int, int]: a tuple that gives the number of items produced by reading
    the two ranges produced after dynamic splitting. If splitting did not
    occur, the first value of the tuple will represent the full set of records
    read by the source while the second value of the tuple will be ``-1``.
  """
  assert isinstance(source, iobase.BoundedSource)
  # Read the complete unsplit output first; the helper compares against it.
  full_read = read_from_source(source, None, None)
  return _assert_split_at_fraction_behavior(
      source, full_read, num_items_to_read_before_split,
      split_fraction, expected_outcome)
def _assert_split_at_fraction_behavior(
    source, expected_items, num_items_to_read_before_split,
    split_fraction, expected_outcome, start_position=None, stop_position=None):
  """Verifies a source's behavior when dynamically split at a fraction.

  Reads ``num_items_to_read_before_split`` items, attempts a dynamic split at
  ``split_fraction``, validates the outcome against ``expected_outcome``, and
  finally checks that the primary and residual ranges together reproduce
  ``expected_items``.

  Args:
    source: the ``BoundedSource`` under test.
    expected_items: the full list of items the unsplit source produces.
    num_items_to_read_before_split: number of items to consume before
      attempting the split.
    split_fraction: fraction to split at.
    expected_outcome: a value from ``ExpectedSplitOutcome``.
    start_position: optional start position for the range tracker.
    stop_position: optional stop position for the range tracker.

  Returns:
    A tuple ``(primary item count, residual item count)``; the second value
    is ``-1`` if the split did not occur.

  Raises:
    ValueError: if the observed split behavior contradicts
      ``expected_outcome`` or the split result is malformed.
  """
  range_tracker = source.get_range_tracker(start_position, stop_position)
  assert isinstance(range_tracker, iobase.RangeTracker)
  current_items = []
  reader = source.read(range_tracker)
  # Reading 'num_items_to_read_before_split' items.
  reader_iter = iter(reader)
  for _ in range(num_items_to_read_before_split):
    current_items.append(next(reader_iter))
  suggested_split_position = range_tracker.position_at_fraction(
      split_fraction)
  stop_position_before_split = range_tracker.stop_position()
  split_result = range_tracker.try_split(suggested_split_position)
  if split_result is not None:
    if len(split_result) != 2:
      raise ValueError('Split result must be a tuple that contains split '
                       'position and split fraction. Received: %r' %
                       (split_result,))
    if range_tracker.stop_position() != split_result[0]:
      # Bug fix: the two observed values were previously joined with '%'
      # instead of ',', which raised a TypeError while formatting the message.
      raise ValueError('After a successful split, the stop position of the '
                       'RangeTracker must be the same as the returned split '
                       'position. Observed %r and %r which are different.'
                       % (range_tracker.stop_position(), split_result[0]))
    # Bug fix: validate the fraction returned by the split (split_result[1]),
    # which is what the error message reports, rather than the
    # caller-provided split_fraction.
    if split_result[1] < 0 or split_result[1] > 1:
      raise ValueError('Split fraction must be within the range [0,1]',
                       'Observed split fraction was %r.' % (split_result[1],))
  stop_position_after_split = range_tracker.stop_position()
  if split_result and stop_position_after_split == stop_position_before_split:
    raise ValueError('Stop position %r did not change after a successful '
                     'split of source %r at fraction %r.' %
                     (stop_position_before_split, source, split_fraction))
  if expected_outcome == ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT:
    if not split_result:
      raise ValueError('Expected split of source %r at fraction %r to be '
                       'successful after reading %d elements. But '
                       'the split failed.' %
                       (source, split_fraction, num_items_to_read_before_split))
  elif expected_outcome == ExpectedSplitOutcome.MUST_FAIL:
    if split_result:
      raise ValueError('Expected split of source %r at fraction %r after '
                       'reading %d elements to fail. But splitting '
                       'succeeded with result %r.' %
                       (source, split_fraction, num_items_to_read_before_split,
                        split_result))
  elif (expected_outcome !=
        ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS):
    raise ValueError('Unknown type of expected outcome: %r' %
                     expected_outcome)
  # Drain whatever is left of the (possibly truncated) original read.
  current_items.extend([value for value in reader_iter])
  residual_range = (
      split_result[0], stop_position_before_split) if split_result else None
  return _verify_single_split_fraction_result(
      source, expected_items, current_items,
      split_result,
      (range_tracker.start_position(), range_tracker.stop_position()),
      residual_range, split_fraction)
def _range_to_str(start, stop):
return '[' + (str(start) + ',' + str(stop) + ')')
def _verify_single_split_fraction_result(
    source, expected_items, current_items, split_successful, primary_range,
    residual_range, split_fraction):
  """Checks that a single split attempt produced a consistent result.

  Re-reads the primary range (and, when the split succeeded, the residual
  range) and verifies that the items match both what was already read and
  the full expected item list.

  Returns:
    A tuple ``(primary item count, residual item count)``; the second value
    is ``-1`` when the split did not succeed.
  """
  # A primary range must exist whether or not the split succeeded.
  assert primary_range
  primary_items = read_from_source(source, *primary_range)
  if split_successful:
    residual_items = read_from_source(source, *residual_range)
  else:
    # For unsuccessful splits, residual_range should be None.
    assert not residual_range
    residual_items = []
  total_items = primary_items + residual_items
  if current_items != primary_items:
    raise ValueError('Current source %r and a source created using the '
                     'range of the primary source %r determined '
                     'by performing dynamic work rebalancing at fraction '
                     '%r produced different values. Expected '
                     'these sources to produce the same list of values.'
                     % (source, _range_to_str(*primary_range), split_fraction))
  if expected_items != total_items:
    raise ValueError('Items obtained by reading the source %r for primary '
                     'and residual ranges %s and %s did not produce the '
                     'expected list of values.'
                     % (source, _range_to_str(*primary_range),
                        _range_to_str(*residual_range)))
  return (len(primary_items),
          len(residual_items) if split_successful else -1)
def assert_split_at_fraction_succeeds_and_consistent(
    source, num_items_to_read_before_split, split_fraction):
  """Verifies some consistency properties of dynamic work rebalancing.

  Equivalent to the following pseudocode:::

    original_range_tracker = source.getRangeTracker(None, None)
    original_reader = source.read(original_range_tracker)
    items_before_split = read N items from original_reader
    suggested_split_position = original_range_tracker.position_for_fraction(
        split_fraction)
    original_stop_position = original_range_tracker.stop_position()
    split_result = original_range_tracker.try_split(suggested_split_position)
    split_position, split_fraction = split_result
    primary_range_tracker = source.get_range_tracker(
        original_range_tracker.start_position(), split_position)
    residual_range_tracker = source.get_range_tracker(split_position,
                                                      original_stop_position)

    assert that: items when reading source.read(primary_range_tracker) ==
      items_before_split + items from continuing to read 'original_reader'

    assert that: items when reading source.read(original_range_tracker) ==
      items when reading source.read(primary_range_tracker) + items when
      reading source.read(residual_range_tracker)

  Args:
    source: source to perform dynamic work rebalancing on.
    num_items_to_read_before_split: number of items to read before splitting.
    split_fraction: fraction to split at.

  Raises:
    ValueError: if the source does not split successfully and consistently.
  """
  assert_split_at_fraction_behavior(
      source, num_items_to_read_before_split, split_fraction,
      ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
def assert_split_at_fraction_fails(source, num_items_to_read_before_split,
                                   split_fraction):
  """Asserts that dynamic work rebalancing at a given fraction fails.

  Reads 'num_items_to_read_before_split' items from the source, then checks
  that an attempt to split at 'split_fraction' is rejected.

  Args:
    source: source to perform dynamic splitting on.
    num_items_to_read_before_split: number of items to read before splitting.
    split_fraction: fraction to split at.
  """
  expected = ExpectedSplitOutcome.MUST_FAIL
  assert_split_at_fraction_behavior(
      source, num_items_to_read_before_split, split_fraction, expected)
def assert_split_at_fraction_binary(
    source, expected_items, num_items_to_read_before_split, left_fraction,
    left_result, right_fraction, right_result, stats, start_position=None,
    stop_position=None):
  """Performs dynamic work rebalancing for fractions within a given range.

  Asserts that given a start position, a source can be split at every
  interesting fraction (halfway between two fractions that differ by at
  least one item) and the results are consistent if a split succeeds.

  Args:
    source: source to perform dynamic splitting on.
    expected_items: total set of items expected when reading the source.
    num_items_to_read_before_split: number of items to read before splitting.
    left_fraction: left fraction for binary splitting.
    left_result: result received by splitting at left fraction.
    right_fraction: right fraction for binary splitting.
    right_result: result received by splitting at right fraction.
    stats: a ``SplitFractionStatistics`` for storing results.
    start_position: accepted for signature compatibility; not used here.
    stop_position: accepted for signature compatibility; not used here.
  """
  assert right_fraction > left_fraction
  if right_fraction - left_fraction < 0.001:
    # This prevents infinite recursion.
    return
  middle_fraction = (left_fraction + right_fraction) / 2
  # Results are computed lazily: None means "not yet measured at this
  # fraction"; the recursive calls below pass measured results down so each
  # fraction is only exercised once.
  if left_result is None:
    left_result = _assert_split_at_fraction_behavior(
        source, expected_items, num_items_to_read_before_split, left_fraction,
        ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
  if right_result is None:
    right_result = _assert_split_at_fraction_behavior(
        source, expected_items, num_items_to_read_before_split,
        right_fraction, ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
  middle_result = _assert_split_at_fraction_behavior(
      source, expected_items, num_items_to_read_before_split, middle_fraction,
      ExpectedSplitOutcome.MUST_BE_CONSISTENT_IF_SUCCEEDS)
  # A residual count of -1 means the split at middle_fraction failed.
  if middle_result[1] != -1:
    stats.successful_fractions.append(middle_fraction)
  if middle_result[1] > 0:
    stats.non_trivial_fractions.append(middle_fraction)
  # Two split results are equivalent if primary and residual ranges of them
  # produce the same number of records (simply checking the size of primary
  # enough since the total number of records is constant).
  if left_result[0] != middle_result[0]:
    assert_split_at_fraction_binary(
        source, expected_items, num_items_to_read_before_split, left_fraction,
        left_result, middle_fraction, middle_result, stats)
  # We special case right_fraction=1.0 since that could fail due to being out
  # of range. (even if a dynamic split fails at 'middle_fraction' and at
  # fraction 1.0, there might be fractions in range ('middle_fraction', 1.0)
  # where dynamic splitting succeeds).
  if right_fraction == 1.0 or middle_result[0] != right_result[0]:
    assert_split_at_fraction_binary(
        source, expected_items, num_items_to_read_before_split,
        middle_fraction, middle_result, right_fraction, right_result, stats)
# Cap on repeated concurrent-splitting attempts for a single item before the
# exhaustive test gives up on that item.
MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM = 100
# Overall cap on concurrent-splitting attempts across all items of a source.
MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL = 1000
def assert_split_at_fraction_exhaustive(
    source, start_position=None, stop_position=None,
    perform_multi_threaded_test=True):
  """Performs and tests dynamic work rebalancing exhaustively.

  Asserts that for each possible start position, a source can be split at
  every interesting fraction (halfway between two fractions that differ by at
  least one item) and the results are consistent if a split succeeds.
  Verifies multi threaded splitting as well.

  Args:
    source (~apache_beam.io.iobase.BoundedSource): the source to perform
      dynamic splitting on.
    start_position: start position to read the source from.
    stop_position: stop position to read the source until.
    perform_multi_threaded_test (bool): if :data:`True` performs a
      multi-threaded test, otherwise this test is skipped.

  Raises:
    ~exceptions.ValueError: if the exhaustive splitting test fails.
  """
  expected_items = read_from_source(source, start_position, stop_position)
  if not expected_items:
    raise ValueError('Source %r is empty.' % source)
  if len(expected_items) == 1:
    raise ValueError('Source %r only reads a single item.' % source)
  all_non_trivial_fractions = []
  any_successful_fractions = False
  any_non_trivial_fractions = False
  # Single-threaded phase: binary-search the fraction space once per item
  # position, collecting per-item split statistics.
  for i in range(len(expected_items)):
    stats = SplitFractionStatistics([], [])
    assert_split_at_fraction_binary(
        source, expected_items, i, 0.0, None, 1.0, None, stats)
    if stats.successful_fractions:
      any_successful_fractions = True
    if stats.non_trivial_fractions:
      any_non_trivial_fractions = True
    all_non_trivial_fractions.append(stats.non_trivial_fractions)
  if not any_successful_fractions:
    raise ValueError('SplitAtFraction test completed vacuously: no '
                     'successful split fractions found')
  if not any_non_trivial_fractions:
    raise ValueError(
        'SplitAtFraction test completed vacuously: no non-trivial split '
        'fractions found')
  if not perform_multi_threaded_test:
    return
  # Multi-threaded phase: for each item, race a read against a split attempt
  # until both a successful and a failed split have been observed, or a
  # trial cap is reached.
  num_total_trials = 0
  for i in range(len(expected_items)):
    non_trivial_fractions = [2.0]  # 2.0 is larger than any valid fraction.
    non_trivial_fractions.extend(all_non_trivial_fractions[i])
    min_non_trivial_fraction = min(non_trivial_fractions)
    if min_non_trivial_fraction == 2.0:
      # This will not happen all the time. Otherwise previous test will fail
      # due to vacuousness.
      continue
    num_trials = 0
    have_success = False
    have_failure = False
    thread_pool = _ThreadPool(2)
    try:
      while True:
        num_trials += 1
        if (num_trials >
            MAX_CONCURRENT_SPLITTING_TRIALS_PER_ITEM):
          _LOGGER.warning(
              'After %d concurrent splitting trials at item #%d, observed '
              'only %s, giving up on this item',
              num_trials,
              i,
              'success' if have_success else 'failure'
          )
          break
        if _assert_split_at_fraction_concurrent(
            source, expected_items, i, min_non_trivial_fraction, thread_pool):
          have_success = True
        else:
          have_failure = True
        if have_success and have_failure:
          _LOGGER.info('%d trials to observe both success and failure of '
                       'concurrent splitting at item #%d', num_trials, i)
          break
    finally:
      thread_pool.close()
    num_total_trials += num_trials
    if num_total_trials > MAX_CONCURRENT_SPLITTING_TRIALS_TOTAL:
      _LOGGER.warning('After %d total concurrent splitting trials, considered '
                      'only %d items, giving up.', num_total_trials, i)
      break
  _LOGGER.info('%d total concurrent splitting trials for %d items',
               num_total_trials, len(expected_items))
def _assert_split_at_fraction_concurrent(
    source, expected_items, num_items_to_read_before_splitting,
    split_fraction, thread_pool=None):
  """Races finishing a read against a dynamic split attempt.

  Reads 'num_items_to_read_before_splitting' items, then on two threads
  concurrently (a) drains the remaining items and (b) attempts a split at
  'split_fraction', and verifies the combined result is consistent.

  Returns:
    True if the split succeeded non-trivially (the residual range produced
    at least one item), False otherwise.
  """
  range_tracker = source.get_range_tracker(None, None)
  stop_position_before_split = range_tracker.stop_position()
  reader = source.read(range_tracker)
  reader_iter = iter(reader)
  current_items = []
  for _ in range(num_items_to_read_before_splitting):
    current_items.append(next(reader_iter))
  def read_or_split(test_params):
    # Thread-pool task: test_params[0] selects between draining the reader
    # (True) and attempting a split on the range tracker (False).
    if test_params[0]:
      return [val for val in test_params[1]]
    else:
      position = test_params[1].position_at_fraction(test_params[2])
      result = test_params[1].try_split(position)
      return result
  inputs = []
  pool = thread_pool if thread_pool else _ThreadPool(2)
  try:
    inputs.append([True, reader_iter])
    inputs.append([False, range_tracker, split_fraction])
    # Run the remaining read and the split attempt concurrently.
    results = pool.map(read_or_split, inputs)
  finally:
    # Only close a pool created here; a caller-supplied pool is reused.
    if not thread_pool:
      pool.close()
  current_items.extend(results[0])
  primary_range = (
      range_tracker.start_position(), range_tracker.stop_position())
  split_result = results[1]
  residual_range = (
      split_result[0], stop_position_before_split) if split_result else None
  res = _verify_single_split_fraction_result(
      source, expected_items, current_items, split_result,
      primary_range, residual_range, split_fraction)
  return res[1] > 0
| {
"content_hash": "a3b78117e3b0da040620cd3d18ecd3e8",
"timestamp": "",
"source": "github",
"line_count": 654,
"max_line_length": 80,
"avg_line_length": 39.11467889908257,
"alnum_prop": 0.683319651303702,
"repo_name": "RyanSkraba/beam",
"id": "72917860d3c75af17e6b932bf4c43b90cdd91449",
"size": "26366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/source_test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
import algorithms
import metrics
import itertools
class RectNode(object):
    """A grid node with four-way (von Neumann) connectivity for use with
    the pathfinding algorithms in this package."""

    __slots__ = ('walkable', 'neighbor_gen', '_move_cost', 'pos',
                 'default_walkable', '_heuristic',
                 '_came_from', '_h', '_g')  # set by algorithms.astar

    def __init__(self, pos,
                 move_cost=1, walkable=None, default_walkable=True,
                 neighbor_gen=None, heuristic=metrics.manhattan):
        """Create a RectNode at position `pos`.

        Neighbor nodes are produced by calling `neighbor_gen` (this
        class by default) with similar arguments.

        `move_cost` is a constant cost for moving directly from one
        node to the next.

        `walkable` maps position -> walkable for any tile position; a
        position missing from `walkable` is assumed to be
        `default_walkable` (True by default)."""
        self.pos = pos
        self.walkable = {} if walkable is None else walkable
        self.neighbor_gen = type(self) if neighbor_gen is None else neighbor_gen
        self._move_cost = move_cost
        self.default_walkable = default_walkable
        self._heuristic = heuristic

    def __hash__(self):
        return hash(self.pos)

    def __eq__(self, other):
        return self.pos == other.pos

    @property
    def x(self):
        return self.pos[0]

    @property
    def y(self):
        return self.pos[1]

    def get_neighbors(self):
        """Yield all traversable neighbor nodes, built via `neighbor_gen`."""
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            neighbor_pos = (self.x + dx, self.y + dy)
            if self.walkable.get(neighbor_pos, self.default_walkable):
                yield self.neighbor_gen(neighbor_pos, walkable=self.walkable,
                                        default_walkable=self.default_walkable,
                                        neighbor_gen=self.neighbor_gen,
                                        heuristic=self._heuristic)

    def heuristic(self, node):
        """Estimate the distance to `node` using the configured heuristic.

        The heuristic may not always be used, depending on the
        pathfinding algorithm in play."""
        return self._heuristic(self.pos, node.pos)

    def move_cost(self, node):
        """Return the cost of moving from this node to `node`.

        Defaults to the constant cost provided at initialization."""
        return self._move_cost
| {
"content_hash": "fcaa29455292f8a7119f3fb425851542",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 37.95454545454545,
"alnum_prop": 0.5836327345309381,
"repo_name": "mattiasbergstrom/python-pathfinding",
"id": "e7268925cad58a7c0dcd6d13e4511668e404cdba",
"size": "2539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pathfinding/nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22254"
}
],
"symlink_target": ""
} |
from test_schematic_base import TestSchematicBase
from unittest.mock import patch
from werkzeug.datastructures import OrderedMultiDict
import os
import pytest
class TestSchematicDownload(TestSchematicBase):
  """Tests for creating schematic download links and downloading files."""

  def setup(self):
    TestSchematicBase.setup(self)
    self.downloads_dir = self.app.config["SCHEMATIC_DOWNLOADS_DIR"]
    self.clean_schematic_downloads_dir()

  def teardown(self):
    TestSchematicBase.teardown(self)
    self.clean_schematic_downloads_dir()

  # Tests

  @patch("mrt_file_server.utils.log_utils.log_adapter")
  @pytest.mark.parametrize("filename", [
    ("mrt_v5_final_elevated_centre_station.schem"),
    ("mrt_v5_final_elevated_centre_station.schematic")
  ])
  def test_create_download_link_should_be_successful(self, mock_logger, filename):
    message_key = "SCHEMATIC_DOWNLOAD_LINK_CREATION_SUCCESS"
    self.copy_test_data_file(filename, self.downloads_dir)

    resp = self.create_download_link(self.create_request_data(filename))

    assert resp.status_code == 200
    assert resp.mimetype == "text/html"
    self.verify_flash_message_by_key(message_key, resp.data, filename)
    mock_logger.info.assert_called_with(
      self.get_log_message(message_key), filename)

  @patch("mrt_file_server.utils.log_utils.log_adapter")
  @pytest.mark.parametrize("filename, message_key", [
    ("", "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_EMPTY"),
    ("this file has spaces.schematic", "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILENAME_WHITESPACE"),
    ("mrt_v5_final_elevated_centre_station.schematic", "SCHEMATIC_DOWNLOAD_LINK_CREATION_FILE_NOT_FOUND"),
    ("mrt_v5_final_elevated_centre_station.txt", "SCHEMATIC_DOWNLOAD_LINK_CREATION_INVALID_EXTENSION")
  ])
  def test_create_download_link_should_fail(self, mock_logger, filename, message_key):
    resp = self.create_download_link(self.create_request_data(filename))

    assert resp.status_code == 200
    assert resp.mimetype == "text/html"

    expected_log = self.get_log_message(message_key)
    if filename:
      self.verify_flash_message_by_key(message_key, resp.data, filename)
      mock_logger.warn.assert_called_with(expected_log, filename)
    else:
      # An empty filename is flashed and logged without a filename argument.
      self.verify_flash_message_by_key(message_key, resp.data)
      mock_logger.warn.assert_called_with(expected_log)

  @patch("mrt_file_server.utils.log_utils.log_adapter")
  @pytest.mark.parametrize("filename", [
    ("mrt_v5_final_elevated_centre_station.schem"),
    ("mrt_v5_final_elevated_centre_station.schematic")
  ])
  def test_download_should_be_successful(self, mock_logger, filename):
    expected_content = self.load_test_data_file(filename)
    self.copy_test_data_file(filename, self.downloads_dir)

    resp = self.start_download(filename)

    assert resp.status_code == 200
    assert resp.mimetype == "application/octet-stream"
    expected_disposition = "attachment; filename={}".format(filename)
    assert resp.headers.get("Content-Disposition") == expected_disposition
    assert int(resp.headers.get("Content-Length")) == len(expected_content)
    mock_logger.info.assert_called_with(
      self.get_log_message("SCHEMATIC_DOWNLOAD_SUCCESS"), filename)

  # Helper Functions

  def clean_schematic_downloads_dir(self):
    # Remove both legacy .schematic and newer .schem files.
    for extension in ("schematic", "schem"):
      self.remove_files(self.downloads_dir, extension)

  def create_download_link(self, data):
    return self.client.post("/schematic/download", data = data)

  def start_download(self, filename):
    return self.client.get("/schematic/download/{}".format(filename))

  def create_request_data(self, filename):
    file_root, extension = os.path.splitext(filename)
    form = OrderedMultiDict()
    form.add("fileRoot", file_root)
    form.add("fileExtension", extension[1:])  # drop the leading dot
    return form
"content_hash": "4ceb7e862189d75660a105756ae0adf4",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 111,
"avg_line_length": 38.19,
"alnum_prop": 0.7161560617962818,
"repo_name": "Frumple/mrt-file-server",
"id": "f819a26f0bd7c1031e7b6a0b206f62c9d7e9e42a",
"size": "3819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_schematic_download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "860"
},
{
"name": "HTML",
"bytes": "29179"
},
{
"name": "JavaScript",
"bytes": "1231"
},
{
"name": "Python",
"bytes": "69360"
}
],
"symlink_target": ""
} |
import os
import sys
import django
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from django.contrib.auth.models import User # noqa: E402
from museum_site.models import * # noqa: E402
def main():
    """Generate Upload objects for File records that lack one.

    Prompts for confirmation, finds every File id with no corresponding
    Upload row, and creates an autogenerated Upload for each, copying the
    uploader IP and upload date from the File when available.

    Returns:
        True on completion.
    """
    # Bug fix: the original message used a backslash line continuation inside
    # the string literal, embedding a run of indentation spaces in the output.
    print(
        "This script will generate Upload objects for Files which did not "
        "have one created at time of archival."
    )
    input("Press Enter to begin.")

    # All File ids, in id order.
    ids = [f.id for f in File.objects.all().order_by("id")]

    # Ids of Files that already have an Upload; keep the raw list for the
    # summary count (matching previous output) and a set for O(1) membership
    # tests instead of the former O(n^2) list scan.
    upload_file_ids = [u.file_id for u in Upload.objects.all().order_by("id")]
    files_with_uploads = set(upload_file_ids)

    # Bug fix: the original condition also required "(f is not None)", which
    # referenced a loop variable leaked from the previous loop -- always true
    # when any File existed, and a NameError on an empty table.
    files_without_uploads = [i for i in ids if i not in files_with_uploads]

    print("FILES WITH UPLOAD OBJECTS:", len(upload_file_ids))
    print("FILES WITHOUT UPLOAD OBJECTS:", len(files_without_uploads))

    for i in files_without_uploads:
        f = File.objects.get(pk=i)
        u = Upload()
        u.generate_edit_token()
        if f.uploader_ip:
            u.ip = f.uploader_ip
        # Fall back to the Unix epoch when the File has no upload date.
        if f.upload_date:
            u.date = f.upload_date
        else:
            u.date = "1970-01-01 00:00:00"
        u.notes = "Autogenerated Upload Object. Dec. 10, 2021"
        u.file_id = f.id
        u.save()
        print(u)
    return True
# Allow running this one-off migration as a standalone script.
if __name__ == '__main__':
    main()
| {
"content_hash": "916e47bff04ce7dd842e69fd768b2147",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 24.40625,
"alnum_prop": 0.5973111395646606,
"repo_name": "DrDos0016/z2",
"id": "abed96f5488cea689200371b8a03d4acd5b4b6d6",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/one-offs/generate-upload-objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46276"
},
{
"name": "HTML",
"bytes": "198985"
},
{
"name": "JavaScript",
"bytes": "120902"
},
{
"name": "Python",
"bytes": "297554"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
import sys
if sys.version >= '3':
xrange = range
from pyspark import SparkContext
from pyspark.sql import DataFrame
from pyspark.ml import Estimator, Transformer, Model
from pyspark.ml.param import Params
from pyspark.ml.util import _jvm
from pyspark.ml.common import inherit_doc, _java2py, _py2java
class JavaWrapper(object):
    """
    Wrapper class for a Java companion object
    """

    def __init__(self, java_obj=None):
        super(JavaWrapper, self).__init__()
        self._java_obj = java_obj

    @classmethod
    def _create_from_java_class(cls, java_class, *args):
        """
        Construct this object from given Java classname and arguments
        """
        return cls(JavaWrapper._new_java_obj(java_class, *args))

    def _call_java(self, name, *args):
        sc = SparkContext._active_spark_context
        java_method = getattr(self._java_obj, name)
        # Convert Python arguments to their Java equivalents before the call,
        # and the Java result back to Python afterwards.
        converted_args = [_py2java(sc, a) for a in args]
        return _java2py(sc, java_method(*converted_args))

    @staticmethod
    def _new_java_obj(java_class, *args):
        """
        Returns a new Java object.
        """
        sc = SparkContext._active_spark_context
        # Walk the dotted class name down from the JVM root.
        java_obj = _jvm()
        for name_part in java_class.split("."):
            java_obj = getattr(java_obj, name_part)
        converted_args = [_py2java(sc, a) for a in args]
        return java_obj(*converted_args)

    @staticmethod
    def _new_java_array(pylist, java_class):
        """
        Create a Java array of given java_class type. Useful for
        calling a method with a Scala Array from Python with Py4J.

        :param pylist:
          Python list to convert to a Java Array.
        :param java_class:
          Java class to specify the type of Array. Should be in the
          form of sc._gateway.jvm.* (sc is a valid Spark Context).
        :return:
          Java Array of converted pylist.

        Example primitive Java classes:
          - basestring -> sc._gateway.jvm.java.lang.String
          - int -> sc._gateway.jvm.java.lang.Integer
          - float -> sc._gateway.jvm.java.lang.Double
          - bool -> sc._gateway.jvm.java.lang.Boolean
        """
        sc = SparkContext._active_spark_context
        java_array = sc._gateway.new_array(java_class, len(pylist))
        for index, element in enumerate(pylist):
            java_array[index] = element
        return java_array
@inherit_doc
class JavaParams(JavaWrapper, Params):
    """
    Utility class to help create wrapper classes from Java/Scala
    implementations of pipeline components.
    """
    #: The param values in the Java object should be
    #: synced with the Python wrapper in fit/transform/evaluate/copy.
    __metaclass__ = ABCMeta
    def __del__(self):
        # Detach the Py4J proxy so the companion JVM object can be collected.
        if SparkContext._active_spark_context:
            SparkContext._active_spark_context._gateway.detach(self._java_obj)
    def _make_java_param_pair(self, param, value):
        """
        Makes a Java param pair.
        """
        sc = SparkContext._active_spark_context
        param = self._resolveParam(param)
        java_param = self._java_obj.getParam(param.name)
        java_value = _py2java(sc, value)
        return java_param.w(java_value)
    def _transfer_params_to_java(self):
        """
        Transforms the embedded params to the companion Java object.
        """
        paramMap = self.extractParamMap()
        for param in self.params:
            if param in paramMap:
                pair = self._make_java_param_pair(param, paramMap[param])
                self._java_obj.set(pair)
    def _transfer_param_map_to_java(self, pyParamMap):
        """
        Transforms a Python ParamMap into a Java ParamMap.
        """
        paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap")
        for param in self.params:
            if param in pyParamMap:
                pair = self._make_java_param_pair(param, pyParamMap[param])
                paramMap.put([pair])
        return paramMap
    def _create_params_from_java(self):
        """
        SPARK-10931: Temporary fix to create params that are defined in the Java obj but not here
        """
        java_params = list(self._java_obj.params())
        from pyspark.ml.param import Param
        for java_param in java_params:
            java_param_name = java_param.name()
            if not hasattr(self, java_param_name):
                param = Param(self, java_param_name, java_param.doc())
                # Tag the param as dynamically created from the Java side.
                setattr(param, "created_from_java_param", True)
                setattr(self, java_param_name, param)
        self._params = None  # need to reset so self.params will discover new params
    def _transfer_params_from_java(self):
        """
        Transforms the embedded params from the companion Java object.
        """
        sc = SparkContext._active_spark_context
        for param in self.params:
            if self._java_obj.hasParam(param.name):
                java_param = self._java_obj.getParam(param.name)
                # SPARK-14931: Only check set params back to avoid default params mismatch.
                if self._java_obj.isSet(java_param):
                    value = _java2py(sc, self._java_obj.getOrDefault(java_param))
                    self._set(**{param.name: value})
                # SPARK-10931: Temporary fix for params that have a default in Java
                if self._java_obj.hasDefault(java_param) and not self.isDefined(param):
                    value = _java2py(sc, self._java_obj.getDefault(java_param)).get()
                    self._setDefault(**{param.name: value})
    def _transfer_param_map_from_java(self, javaParamMap):
        """
        Transforms a Java ParamMap into a Python ParamMap.
        """
        sc = SparkContext._active_spark_context
        paramMap = dict()
        for pair in javaParamMap.toList():
            param = pair.param()
            # Only copy params this Python wrapper actually declares.
            if self.hasParam(str(param.name())):
                paramMap[self.getParam(param.name())] = _java2py(sc, pair.value())
        return paramMap
    @staticmethod
    def _empty_java_param_map():
        """
        Returns an empty Java ParamMap reference.
        """
        return _jvm().org.apache.spark.ml.param.ParamMap()
    def _to_java(self):
        """
        Transfer this instance's Params to the wrapped Java object, and return the Java object.
        Used for ML persistence.
        Meta-algorithms such as Pipeline should override this method.
        :return: Java object equivalent to this instance.
        """
        self._transfer_params_to_java()
        return self._java_obj
    @staticmethod
    def _from_java(java_stage):
        """
        Given a Java object, create and return a Python wrapper of it.
        Used for ML persistence.
        Meta-algorithms such as Pipeline should override this method as a classmethod.
        """
        def __get_class(clazz):
            """
            Loads Python class from its name.
            """
            parts = clazz.split('.')
            module = ".".join(parts[:-1])
            m = __import__(module)
            for comp in parts[1:]:
                m = getattr(m, comp)
            return m
        # Map e.g. "org.apache.spark.ml.X" to the "pyspark.ml.X" counterpart.
        stage_name = java_stage.getClass().getName().replace("org.apache.spark", "pyspark")
        # Generate a default new instance from the stage_name class.
        py_type = __get_class(stage_name)
        if issubclass(py_type, JavaParams):
            # Load information from java_stage to the instance.
            py_stage = py_type()
            py_stage._java_obj = java_stage
            # SPARK-10931: Temporary fix so that persisted models would own params from Estimator
            if issubclass(py_type, JavaModel):
                py_stage._create_params_from_java()
            py_stage._resetUid(java_stage.uid())
            py_stage._transfer_params_from_java()
        elif hasattr(py_type, "_from_java"):
            py_stage = py_type._from_java(java_stage)
        else:
            raise NotImplementedError("This Java stage cannot be loaded into Python currently: %r"
                                      % stage_name)
        return py_stage
    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. This implementation first calls Params.copy and
        then make a copy of the companion Java pipeline component with
        extra params. So both the Python wrapper and the Java pipeline
        component get copied.
        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = super(JavaParams, self).copy(extra)
        if self._java_obj is not None:
            that._java_obj = self._java_obj.copy(self._empty_java_param_map())
            that._transfer_params_to_java()
        return that
@inherit_doc
class JavaEstimator(JavaParams, Estimator):
    """
    Base class for :py:class:`Estimator`s that wrap Java/Scala
    implementations.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def _create_model(self, java_model):
        """
        Creates a model from the input Java model reference.
        """
        raise NotImplementedError()

    def _fit_java(self, dataset):
        """
        Fits a Java model to the input dataset.

        :param dataset: input dataset, which is an instance of
                        :py:class:`pyspark.sql.DataFrame`
        :param params: additional params (overwriting embedded values)
        :return: fitted Java model
        """
        # Push any Python-side param values to the JVM before fitting.
        self._transfer_params_to_java()
        return self._java_obj.fit(dataset._jdf)

    def _fit(self, dataset):
        fitted_java_model = self._fit_java(dataset)
        return self._copyValues(self._create_model(fitted_java_model))
@inherit_doc
class JavaTransformer(JavaParams, Transformer):
    """
    Base class for :py:class:`Transformer`s that wrap Java/Scala
    implementations. Subclasses should ensure they have the transformer Java object
    available as _java_obj.
    """

    __metaclass__ = ABCMeta

    def _transform(self, dataset):
        # Sync Python params to the JVM, delegate the transform, and wrap the
        # resulting Java DataFrame back into a Python DataFrame.
        self._transfer_params_to_java()
        transformed_jdf = self._java_obj.transform(dataset._jdf)
        return DataFrame(transformed_jdf, dataset.sql_ctx)
@inherit_doc
class JavaModel(JavaTransformer, Model):
    """
    Base class for :py:class:`Model`s that wrap Java/Scala
    implementations. Subclasses should inherit this class before
    param mix-ins, because this sets the UID from the Java model.
    """

    __metaclass__ = ABCMeta

    def __init__(self, java_model=None):
        """
        Initialize this instance with a Java model object.
        Subclasses should call this constructor, initialize params,
        and then call _transfer_params_from_java.

        This instance can be instantiated without specifying java_model,
        it will be assigned after that, but this scenario only used by
        :py:class:`JavaMLReader` to load models. This is a bit of a
        hack, but it is easiest since a proper fix would require
        MLReader (in pyspark.ml.util) to depend on these wrappers, but
        these wrappers depend on pyspark.ml.util (both directly and via
        other ML classes).
        """
        super(JavaModel, self).__init__(java_model)
        if java_model is None:
            return
        # SPARK-10931: This is a temporary fix to allow models to own params
        # from estimators. Eventually, these params should be in models through
        # using common base classes between estimators and models.
        self._create_params_from_java()
        self._resetUid(java_model.uid())
| {
"content_hash": "deb2e5b6a85df0e36f76c3978a16e14c",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 98,
"avg_line_length": 36.39938080495356,
"alnum_prop": 0.6064472229310198,
"repo_name": "alunarbeach/spark",
"id": "0f846fbc5b5ef32930b3501fd62de0d50b7f8237",
"size": "12542",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33886"
},
{
"name": "Batchfile",
"bytes": "24266"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10056"
},
{
"name": "Java",
"bytes": "3098468"
},
{
"name": "JavaScript",
"bytes": "141001"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "8788"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2336152"
},
{
"name": "R",
"bytes": "1081317"
},
{
"name": "Roff",
"bytes": "14732"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "23574115"
},
{
"name": "Shell",
"bytes": "155682"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import pwd
import six
import sys
import copy
import traceback
import collections
from oslo_config import cfg
from st2common import log as logging
from st2common.models.base import DictSerializableClassMixin
from st2common.util.shell import quote_unix
from st2common.constants.action import LIBS_DIR as ACTION_LIBS_DIR
from st2common.util.secrets import get_secret_parameters
from st2common.util.secrets import mask_secret_parameters
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
"ShellCommandAction",
"ShellScriptAction",
"RemoteAction",
"RemoteScriptAction",
"ResolvedActionParameters",
]
# Module-level logger for this module.
LOG = logging.getLogger(__name__)
# Username of the OS user this process runs as, looked up once at import
# time via the password database.
LOGGED_USER_USERNAME = pwd.getpwuid(os.getuid())[0]
# Flags which are passed to every sudo invocation
SUDO_COMMON_OPTIONS = [
    "-E" # we want to preserve the environment of the user which ran sudo
]
# Flags which are only passed to sudo when not running as current user and when
# -u flag is used
SUDO_DIFFERENT_USER_OPTIONS = [
    "-H" # we want $HOME to reflect the home directory of the requested / target user
]
class ShellCommandAction(object):
    """
    Represents a shell command which is executed locally — directly, via
    sudo, or via sudo as a different (requested) user.
    """
    # Shell builtin used to prefix environment variable assignments.
    EXPORT_CMD = "export"
    def __init__(
        self,
        name,
        action_exec_id,
        command,
        user,
        env_vars=None,
        sudo=False,
        timeout=None,
        cwd=None,
        sudo_password=None,
    ):
        """
        :param name: Action name.
        :param action_exec_id: Action execution id.
        :param command: Command string to execute.
        :param user: Username the command should run as.
        :param env_vars: Optional dict of environment variables to export.
        :param sudo: True to run the command under sudo.
        :param timeout: Optional execution timeout.
        :param cwd: Optional working directory for the command.
        :param sudo_password: Optional password for password-protected sudo.
        """
        self.name = name
        self.action_exec_id = action_exec_id
        self.command = command
        self.env_vars = env_vars or {}
        self.user = user
        self.sudo = sudo
        self.timeout = timeout
        self.cwd = cwd
        self.sudo_password = sudo_password
    def get_full_command_string(self):
        """
        Retrieve the full command string (including any sudo wrapping)
        which will be executed.
        :rtype: ``str``
        """
        # Note: We pass -E to sudo because we want to preserve user provided environment variables
        if self.sudo:
            command = quote_unix(self.command)
            sudo_arguments = " ".join(self._get_common_sudo_arguments())
            command = "sudo %s -- bash -c %s" % (sudo_arguments, command)
        else:
            if self.user and self.user != LOGGED_USER_USERNAME:
                # Need to use sudo to run as a different (requested) user
                user = quote_unix(self.user)
                sudo_arguments = " ".join(self._get_user_sudo_arguments(user=user))
                command = quote_unix(self.command)
                command = "sudo %s -- bash -c %s" % (sudo_arguments, command)
            else:
                command = self.command
        return command
    def get_sanitized_full_command_string(self):
        """
        Get a command string which can be used inside the log messages (if provided, sudo password
        is masked).
        :rtype: ``str``
        """
        command_string = self.get_full_command_string()
        if self.sudo_password:
            # Mask sudo password
            command_string = "echo -e '%s\n' | %s" % (
                MASKED_ATTRIBUTE_VALUE,
                command_string,
            )
        return command_string
    def get_timeout(self):
        # Execution timeout (or None for no timeout).
        return self.timeout
    def get_cwd(self):
        # Working directory the command should run in (or None).
        return self.cwd
    def _get_common_sudo_arguments(self):
        """
        Retrieve a list of flags which are passed to sudo on every invocation.
        :rtype: ``list``
        """
        flags = []
        if self.sudo_password:
            # Note: We use subprocess.Popen in local runner so we provide password via subprocess
            # stdin (using echo -e won't work when using subprocess.Popen)
            flags.append("-S")
        flags = flags + SUDO_COMMON_OPTIONS
        return flags
    def _get_user_sudo_arguments(self, user):
        """
        Retrieve a list of flags which are passed to sudo when running as a different user and "-u"
        flag is used.
        :rtype: ``list``
        """
        flags = self._get_common_sudo_arguments()
        flags += SUDO_DIFFERENT_USER_OPTIONS
        flags += ["-u", user]
        return flags
    def _get_env_vars_export_string(self):
        """
        Build an ``export K=V ...`` prefix string for the configured
        environment variables (empty string when there are none).
        :rtype: ``str``
        """
        if self.env_vars:
            env_vars = copy.copy(self.env_vars)
            # If sudo_password is provided, explicitly disable bash history to make sure password
            # is not logged, because password is provided via command line
            if self.sudo and self.sudo_password:
                env_vars["HISTFILE"] = "/dev/null"
                env_vars["HISTSIZE"] = "0"
            # Sort the dict to guarantee consistent order
            env_vars = collections.OrderedDict(sorted(env_vars.items()))
            # Environment variables could contain spaces and open us to shell
            # injection attacks. Always quote the key and the value.
            exports = " ".join(
                "%s=%s" % (quote_unix(k), quote_unix(v))
                for k, v in six.iteritems(env_vars)
            )
            shell_env_str = "%s %s" % (ShellCommandAction.EXPORT_CMD, exports)
        else:
            shell_env_str = ""
        return shell_env_str
    def _get_command_string(self, cmd, args):
        """
        Escape the command arguments and form a command string.
        :type cmd: ``str``
        :type args: ``list``
        :rtype: ``str``
        """
        if not isinstance(args, (list, tuple)):
            raise TypeError(
                "The args has a value that is not a list or a tuple"
                f" (was {type(args)})."
            )
        args = [quote_unix(arg) for arg in args]
        args = " ".join(args)
        result = "%s %s" % (cmd, args)
        return result
    def _get_error_result(self):
        """
        Prepares a structured error result based on the exception which is
        currently being handled (retrieved via ``sys.exc_info()``).
        :rtype: ``dict``
        """
        _, exc_value, exc_traceback = sys.exc_info()
        exc_value = str(exc_value)
        exc_traceback = "".join(traceback.format_tb(exc_traceback))
        result = {}
        result["failed"] = True
        result["succeeded"] = False
        result["error"] = exc_value
        result["traceback"] = exc_traceback
        return result
class ShellScriptAction(ShellCommandAction):
    """
    Shell action which runs a local script file (as opposed to a raw
    command string), optionally via sudo and/or as a different user.
    """
    def __init__(
        self,
        name,
        action_exec_id,
        script_local_path_abs,
        named_args=None,
        positional_args=None,
        env_vars=None,
        user=None,
        sudo=False,
        timeout=None,
        cwd=None,
        sudo_password=None,
    ):
        """
        :param script_local_path_abs: Absolute path to the script on the local box.
        :param named_args: Optional dict of named (name=value) script arguments.
        :param positional_args: Optional list of positional script arguments.
        """
        super(ShellScriptAction, self).__init__(
            name=name,
            action_exec_id=action_exec_id,
            command=None,
            user=user,
            env_vars=env_vars,
            sudo=sudo,
            timeout=timeout,
            cwd=cwd,
            sudo_password=sudo_password,
        )
        self.script_local_path_abs = script_local_path_abs
        self.named_args = named_args
        self.positional_args = positional_args
    def get_full_command_string(self):
        """
        Retrieve the full command string (script path + arguments, with any
        sudo wrapping) which will be executed.
        :rtype: ``str``
        """
        return self._format_command()
    def _format_command(self):
        # Build "script args..." and wrap it in sudo / bash -c the same
        # way ShellCommandAction does for raw command strings.
        script_arguments = self._get_script_arguments(
            named_args=self.named_args, positional_args=self.positional_args
        )
        if self.sudo:
            if script_arguments:
                command = quote_unix(
                    "%s %s" % (self.script_local_path_abs, script_arguments)
                )
            else:
                command = quote_unix(self.script_local_path_abs)
            sudo_arguments = " ".join(self._get_common_sudo_arguments())
            command = "sudo %s -- bash -c %s" % (sudo_arguments, command)
        else:
            if self.user and self.user != LOGGED_USER_USERNAME:
                # Need to use sudo to run as a different user
                user = quote_unix(self.user)
                if script_arguments:
                    command = quote_unix(
                        "%s %s" % (self.script_local_path_abs, script_arguments)
                    )
                else:
                    command = quote_unix(self.script_local_path_abs)
                sudo_arguments = " ".join(self._get_user_sudo_arguments(user=user))
                command = "sudo %s -- bash -c %s" % (sudo_arguments, command)
            else:
                script_path = quote_unix(self.script_local_path_abs)
                if script_arguments:
                    command = "%s %s" % (script_path, script_arguments)
                else:
                    command = script_path
        return command
    def _get_script_arguments(self, named_args=None, positional_args=None):
        """
        Build a string of named and positional arguments which are passed to the
        script.
        :param named_args: Dictionary with named arguments.
        :type named_args: ``dict``.
        :param positional_args: List with positional arguments.
        :type positional_args: ``list``.
        :rtype: ``str``
        """
        command_parts = []
        # add all named_args in the format <kwarg_op>name=value (e.g. --name=value)
        if named_args is not None:
            for (arg, value) in six.iteritems(named_args):
                # Skip arguments whose value is None or an empty string.
                if value is None or (
                    isinstance(value, (str, six.text_type)) and len(value) < 1
                ):
                    LOG.debug("Ignoring arg %s as its value is %s.", arg, value)
                    continue
                if isinstance(value, bool):
                    # Booleans act as flags: present when True, omitted when False.
                    if value is True:
                        command_parts.append(arg)
                else:
                    values = (quote_unix(arg), quote_unix(six.text_type(value)))
                    command_parts.append(six.text_type("%s=%s" % values))
        # add the positional args
        if positional_args:
            quoted_pos_args = [quote_unix(pos_arg) for pos_arg in positional_args]
            pos_args_string = " ".join(quoted_pos_args)
            command_parts.append(pos_args_string)
        return " ".join(command_parts)
class SSHCommandAction(ShellCommandAction):
    """
    Shell command which is executed on one or more remote hosts over SSH.
    """
    def __init__(
        self,
        name,
        action_exec_id,
        command,
        env_vars,
        user,
        password=None,
        pkey=None,
        hosts=None,
        parallel=True,
        sudo=False,
        timeout=None,
        cwd=None,
        passphrase=None,
        sudo_password=None,
    ):
        super(SSHCommandAction, self).__init__(
            name=name,
            action_exec_id=action_exec_id,
            command=command,
            env_vars=env_vars,
            user=user,
            sudo=sudo,
            timeout=timeout,
            cwd=cwd,
            sudo_password=sudo_password,
        )
        # SSH connection specific attributes.
        self.hosts = hosts
        self.parallel = parallel
        self.pkey = pkey
        self.passphrase = passphrase
        self.password = password
    def is_parallel(self):
        """True if the command runs on all hosts in parallel."""
        return self.parallel
    def is_sudo(self):
        """True if the remote command is executed under sudo."""
        return self.sudo
    def get_user(self):
        """Username used for the SSH connection."""
        return self.user
    def get_hosts(self):
        """Hosts the command is executed on."""
        return self.hosts
    def is_pkey_authentication(self):
        """True when private key based authentication is used."""
        return self.pkey is not None
    def get_pkey(self):
        """Private key used for authentication (if any)."""
        return self.pkey
    def get_password(self):
        """Password used for authentication (if any)."""
        return self.password
    def get_command(self):
        """Raw command string which is executed remotely."""
        return self.command
    def __str__(self):
        attributes = [
            "%s@%s(name: %s" % (self.__class__.__name__, id(self), self.name),
            "id: %s" % self.action_exec_id,
            "command: %s" % self.command,
            "user: %s" % self.user,
            "sudo: %s" % str(self.sudo),
            "parallel: %s" % str(self.parallel),
            "hosts: %s)" % str(self.hosts),
        ]
        return ", ".join(attributes)
class RemoteAction(SSHCommandAction):
    """
    SSH command action which also tracks the user it is run on behalf of.
    """
    def __init__(
        self,
        name,
        action_exec_id,
        command,
        env_vars=None,
        on_behalf_user=None,
        user=None,
        password=None,
        private_key=None,
        hosts=None,
        parallel=True,
        sudo=False,
        timeout=None,
        cwd=None,
        passphrase=None,
        sudo_password=None,
    ):
        super(RemoteAction, self).__init__(
            name=name,
            action_exec_id=action_exec_id,
            command=command,
            env_vars=env_vars,
            user=user,
            hosts=hosts,
            parallel=parallel,
            sudo=sudo,
            timeout=timeout,
            cwd=cwd,
            passphrase=passphrase,
            sudo_password=sudo_password,
        )
        self.password = password
        self.private_key = private_key
        self.passphrase = passphrase
        self.on_behalf_user = on_behalf_user  # Used for audit purposes.
        self.timeout = timeout
    def get_on_behalf_user(self):
        """Username this action is executed on behalf of (audit only)."""
        return self.on_behalf_user
    def __str__(self):
        attributes = [
            "%s@%s(name: %s" % (self.__class__.__name__, id(self), self.name),
            "id: %s" % self.action_exec_id,
            "command: %s" % self.command,
            "user: %s" % self.user,
            "on_behalf_user: %s" % self.on_behalf_user,
            "sudo: %s" % str(self.sudo),
            "parallel: %s" % str(self.parallel),
            "hosts: %s)" % str(self.hosts),
            "timeout: %s)" % str(self.timeout),
        ]
        return ", ".join(attributes)
class RemoteScriptAction(ShellScriptAction):
    """
    Script action which is uploaded to and executed on one or more remote
    hosts over SSH.
    """
    def __init__(
        self,
        name,
        action_exec_id,
        script_local_path_abs,
        script_local_libs_path_abs,
        named_args=None,
        positional_args=None,
        env_vars=None,
        on_behalf_user=None,
        user=None,
        password=None,
        private_key=None,
        remote_dir=None,
        hosts=None,
        parallel=True,
        sudo=False,
        timeout=None,
        cwd=None,
        sudo_password=None,
    ):
        """
        :param script_local_libs_path_abs: Local path to the action libs directory.
        :param remote_dir: Remote directory files are uploaded to (defaults to /tmp).
        :param on_behalf_user: Username recorded for audit purposes.
        """
        super(RemoteScriptAction, self).__init__(
            name=name,
            action_exec_id=action_exec_id,
            script_local_path_abs=script_local_path_abs,
            user=user,
            named_args=named_args,
            positional_args=positional_args,
            env_vars=env_vars,
            sudo=sudo,
            timeout=timeout,
            cwd=cwd,
            sudo_password=sudo_password,
        )
        self.script_local_libs_path_abs = script_local_libs_path_abs
        self.script_local_dir, self.script_name = os.path.split(
            self.script_local_path_abs
        )
        # Remote locations the script and its libs directory are uploaded to.
        self.remote_dir = remote_dir if remote_dir is not None else "/tmp"
        self.remote_libs_path_abs = os.path.join(self.remote_dir, ACTION_LIBS_DIR)
        self.on_behalf_user = on_behalf_user
        self.password = password
        self.private_key = private_key
        # Quote the script name in case it contains shell special characters.
        self.remote_script = os.path.join(self.remote_dir, quote_unix(self.script_name))
        self.hosts = hosts
        self.parallel = parallel
        self.command = self._format_command()
        LOG.debug("RemoteScriptAction: command to run on remote box: %s", self.command)
    def get_remote_script_abs_path(self):
        # Absolute path of the script on the remote box.
        return self.remote_script
    def get_local_script_abs_path(self):
        # Absolute path of the script on the local box.
        return self.script_local_path_abs
    def get_remote_libs_path_abs(self):
        # Absolute path of the action libs directory on the remote box.
        return self.remote_libs_path_abs
    def get_local_libs_path_abs(self):
        # Absolute path of the action libs directory on the local box.
        return self.script_local_libs_path_abs
    def get_remote_base_dir(self):
        # Base remote directory files are uploaded to.
        return self.remote_dir
    def _format_command(self):
        """
        Build the command string executed on the remote box (remote script
        path plus any named / positional arguments).
        :rtype: ``str``
        """
        script_arguments = self._get_script_arguments(
            named_args=self.named_args, positional_args=self.positional_args
        )
        if script_arguments:
            command = "%s %s" % (self.remote_script, script_arguments)
        else:
            command = self.remote_script
        return command
    def __str__(self):
        str_rep = []
        str_rep.append(
            "%s@%s(name: %s" % (self.__class__.__name__, id(self), self.name)
        )
        str_rep.append("id: %s" % self.action_exec_id)
        str_rep.append("local_script: %s" % self.script_local_path_abs)
        str_rep.append("local_libs: %s" % self.script_local_libs_path_abs)
        str_rep.append("remote_dir: %s" % self.remote_dir)
        str_rep.append("remote_libs: %s" % self.remote_libs_path_abs)
        str_rep.append("named_args: %s" % self.named_args)
        str_rep.append("positional_args: %s" % self.positional_args)
        str_rep.append("user: %s" % self.user)
        str_rep.append("on_behalf_user: %s" % self.on_behalf_user)
        str_rep.append("sudo: %s" % self.sudo)
        str_rep.append("parallel: %s" % self.parallel)
        str_rep.append("hosts: %s)" % self.hosts)
        return ", ".join(str_rep)
class ResolvedActionParameters(DictSerializableClassMixin):
    """
    Holds the resolved runner and action parameters for a particular action.
    """
    def __init__(
        self, action_db, runner_type_db, runner_parameters=None, action_parameters=None
    ):
        self._action_db = action_db
        self._runner_type_db = runner_type_db
        self._runner_parameters = runner_parameters
        self._action_parameters = action_parameters
    def mask_secrets(self, value):
        """
        Return a deep copy of *value* with the values of all secret runner
        and action parameters replaced by the masked placeholder.
        """
        result = copy.deepcopy(value)
        # Determine which parameters the schemas declare as secret.
        secret_runner_params = get_secret_parameters(
            parameters=self._runner_type_db.runner_parameters
        )
        secret_action_params = get_secret_parameters(
            parameters=self._action_db.parameters
        )
        result["runner_parameters"] = mask_secret_parameters(
            parameters=result["runner_parameters"],
            secret_parameters=secret_runner_params,
        )
        result["action_parameters"] = mask_secret_parameters(
            parameters=result["action_parameters"],
            secret_parameters=secret_action_params,
        )
        return result
    def to_serializable_dict(self, mask_secrets=False):
        """
        Serialize to a dict, optionally masking secret parameter values.
        Masking only happens when it is also enabled in the logging config.
        """
        result = {
            "runner_parameters": self._runner_parameters,
            "action_parameters": self._action_parameters,
        }
        if mask_secrets and cfg.CONF.log.mask_secrets:
            result = self.mask_secrets(value=result)
        return result
| {
"content_hash": "f117e0845f8409c6bb973abda9e18378",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 99,
"avg_line_length": 31.445378151260503,
"alnum_prop": 0.5661143773383217,
"repo_name": "StackStorm/st2",
"id": "77a110394dca6546a55cbb202ab6b0064ec63a16",
"size": "19400",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2common/st2common/models/system/action.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
from Excitation import Excitation
class ExcitationRain(Excitation):
    """
    Rain on the roof excitation.
    Marker subclass of :class:`Excitation`; adds no behaviour of its own.
    """
    pass
| {
"content_hash": "8ddd81f8303998d77e8d06bfc9c072e1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 18,
"alnum_prop": 0.6825396825396826,
"repo_name": "python-acoustics/Sea",
"id": "21c866c7dd9d4abf100dfb5d7266ab9e2d430d43",
"size": "126",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sea/model/excitations/ExcitationRain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "220852"
},
{
"name": "Shell",
"bytes": "5106"
}
],
"symlink_target": ""
} |
from mongrel2 import config
from mongrel2.config import args
import mongrel2.config.commands
from uuid import uuid4
from mongrel2.config import model
import getpass
import sys
import os
import signal
from sqlite3 import OperationalError
def try_reading(reader):
try:
cmd = reader.readline()
return cmd.split(' ')
except UnicodeDecodeError:
print "\nERROR: Sorry, PyRepl and Python hate printing to your screen: UnicodeDecodeError."
return []
def shell_command():
"""
Starts an interactive shell with readline style input so you can
work with Mongrel2 easier.
"""
try:
from pyrepl.unix_console import UnixConsole
from pyrepl.historical_reader import HistoricalReader
except:
print "You don't have PyRepl installed, shell not available."
reader = HistoricalReader(UnixConsole())
reader.ps1 = "m2> "
reader.ps2 = "..> "
reader.ps3 = "...> "
reader.ps4 = "....> "
try:
while True:
cmd = try_reading(reader)
if cmd:
try:
args.parse_and_run_command(cmd, mongrel2.config.commands)
except Exception, e:
print "ERROR:", e
except EOFError:
print "Bye."
except KeyboardInterrupt:
print "BYE!"
def help_command(**options):
    """
    Prints out help for the commands.
    m2sh help
    You can get help for one command with:
    m2sh help -for STR
    """
    if "for" in options:
        # Show help text for the single requested command.
        help_text = args.help_for_command(config.commands, options['for'])
        if help_text:
            print help_text
        else:
            # Unknown command name: print the standard error message.
            args.invalid_command_message(config.commands)
    else:
        print "Available commands:\n"
        print "\n".join(args.available_commands(config.commands))
        print "\nUse config help -for <command> to find out more."
def dump_command(db=None):
    """
    Simple dump of a config database:
    m2sh dump -db config.sqlite
    """
    print "LOADING DB: ", db
    try:
        # Fail early with a readable message if the file is missing/unreadable.
        if not (os.path.isfile(db) and os.access(db, os.R_OK)):
            raise IOError
        store = model.begin(db)
        servers = store.find(model.Server)
        # Print the full server -> host -> route hierarchy.
        for server in servers:
            print server
            for host in server.hosts:
                print "\t", host
                for route in host.routes:
                    print "\t\t", route
    except IOError:
        print "%s not readable" % db
    except OperationalError, exc:
        print "SQLite error: %s" % exc
def uuid_command(hex=False):
"""
Generates a UUID for you to use in your configurations:
m2sh uuid
m2sh uuid -hex
The -hex means to print it as a big hex number, which is
more efficient but harder to read.
"""
if hex:
print uuid4().hex
else:
print str(uuid4())
def servers_command(db=None):
    """
    Lists the servers that are configured in this setup:
    m2sh servers -db config.sqlite
    """
    if not os.path.isfile(db):
        print "ERROR: Cannot access database file %s" % db
        return
    try:
        store = model.begin(db)
        servers = store.find(model.Server)
        # Print each server followed by its hosts.
        for server in servers:
            print "-------"
            print server.name, server.default_host, server.uuid
            for host in server.hosts:
                print "\t", host.id, ':', host.name
    except OperationalError, exc:
        print "SQLite error: %s" % exc
def hosts_command(db=None, uuid="", host="", name=""):
    """
    List all the hosts in the given server identified by UUID or host.
    m2sh hosts -db config.sqlite -uuid f400bf85-4538-4f7a-8908-67e313d515c2
    m2sh hosts -db config.sqlite -host localhost
    m2sh hosts -db config.sqlite -name test
    The -host parameter is the default_host for the server.
    """
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot read database file %s" % db
        return
    try:
        store = model.begin(db)
        results = None
        # Selectors are tried in precedence order: -uuid, then -host, then -name.
        if uuid:
            results = store.find(model.Server, model.Server.uuid == unicode(uuid))
        elif host:
            results = store.find(model.Server, model.Server.default_host == unicode(host))
        elif name:
            results = store.find(model.Server, model.Server.name == unicode(name))
        else:
            print "ERROR: Must give a -host or -uuid or -name."
            return
        if results.count():
            # Only the first matching server is shown.
            server = results[0]
            hosts = store.find(model.Host, model.Host.server_id == server.id)
            for host in hosts:
                print "--------"
                print host, ":"
                for route in host.routes:
                    print "\t", route.path, ':', route.target
        else:
            print "No servers found."
    except OperationalError, exc:
        print "SQLite error: %s" % exc
def init_command(db=None):
    """
    Initializes a new config database.
    m2sh init -db config.sqlite
    It will obliterate this config.
    """
    from pkg_resources import resource_stream
    import sqlite3
    # The schema SQL shipped inside the mongrel2 package.
    sql = resource_stream('mongrel2', 'sql/config.sql').read()
    # Drop any open store so SQLite does not hold a stale handle on the file.
    if model.store:
        model.store.close()
        model.store = None
    if os.path.isfile(db) and not os.access(db, os.W_OK):
        print "Cannot access database file %s" % db
        return
    try:
        conn = sqlite3.connect(db)
        conn.executescript(sql)
        # Record the init in the commit log for other admins.
        commit_command(db=db, what="init_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "Error: %s" % exc
def load_command(db=None, config=None, clear=True):
    """
    After using init you can use this to load a config:
    m2sh load -db config.sqlite -config tests/sample_conf.py
    This will erase the previous config, but we'll make it
    safer later on.
    """
    import imp
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot access database file %s" % db
        return
    try:
        model.begin(db, clear=clear)
        # Executing the config file as a module populates the open store.
        imp.load_source('mongrel2_config_main', config)
        commit_command(db=db, what="load_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "SQLite error: %s" % exc
    except SyntaxError,exc:
        print "Syntax error: %s" % exc
def config_command(db=None, config=None, clear=True):
    """
    Effectively does an init then load of a config to get
    you started quicker:
    m2sh config -db config.sqlite -config tests/sample_conf.py
    Like the other two, this will nuke your config, but we'll
    make it safer later.
    """
    # Convenience wrapper: recreate the schema, then load the config file.
    init_command(db=db)
    load_command(db=db, config=config, clear=clear)
def commit_command(db=None, what=None, why=None):
    """
    Used to commit an event to the database for other admins to know
    what is going on with the config. The system logs quite a lot
    already for you, like your username, machine name, etc:
    m2sh commit -db test.sqlite -what mongrel2.org \
    -why "Needed to change paters."
    In future versions it will prevent you from committing as root,
    because only assholes commit from root.
    Both parameters are arbitrary, but I like to record what I did to
    different Hosts in servers.
    """
    import socket
    store = model.load_db("sqlite:" + db)
    who = unicode(getpass.getuser())
    if who == u'root':
        print "Commit from root eh? Man, you're kind of a tool."
    # Build the audit log row; user/host/tool fields are captured here.
    log = model.Log()
    log.who = who
    log.what = unicode(what)
    log.why = unicode(why)
    log.location = unicode(socket.gethostname())
    log.how = u'm2sh'
    store.add(log)
    store.commit()
def log_command(db=None, count=20):
    """
    Dumps commit logs:
    m2sh log -db test.sqlite -count 20
    m2sh log -db test.sqlite
    So you know who to blame.
    """
    store = model.load_db("sqlite:" + db)
    logs = store.find(model.Log)
    # Show at most `count` entries, ordered by when they happened.
    for log in logs.order_by(model.Log.happened_at)[0:count]:
        print log
def find_servers(db=None, uuid="", host="", name="", every=False):
    """
    Finds all the servers which match the given uuid, host or name.
    If every is true all servers in the database will be returned.
    """
    store = model.begin(db)
    servers = []
    # Selectors are tried in precedence order: -every, -uuid, -host, -name.
    if every:
        servers = store.find(model.Server)
    elif uuid:
        servers = store.find(model.Server, model.Server.uuid == unicode(uuid))
    elif host:
        servers = store.find(model.Server, model.Server.default_host == unicode(host))
    elif name:
        servers = store.find(model.Server, model.Server.name == unicode(name))
    # Ambiguous match without -every: list the candidates and return nothing
    # so the caller does not act on the wrong server.
    if servers.count() > 1 and not every:
        print "Not sure which server to run, what I found:"
        print "NAME HOST UUID"
        print "--------------"
        for server in servers:
            print server.name, server.default_host, server.uuid
        print "* Use -every to run them all."
        return []
    else:
        return servers
def start_command(db=None, uuid= "", host="", name="", sudo=False, every=False):
    """
    Does a simple start of the given server(s) identified by the uuid, host
    (default_host) parameter or the name.:
    m2sh start -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh start -db config.sqlite -host localhost
    m2sh start -db config.sqlite -name test
    m2sh start -db config.sqlite -every
    Give the -sudo options if you want it to start mongrel2 as root for you
    (must have sudo installed).
    Give the -every option if you want mongrel2 to launch all servers listed in
    the given db.
    If multiple servers match and -every is not given, m2sh will ask you which
    to start.
    """
    root_enabler = 'sudo' if sudo else ''
    servers = find_servers(db, uuid, host, name, every)
    if not servers or servers.count() == 0:
        print 'No matching servers found, nothing launched'
    else:
        for server in servers:
            print 'Launching server %s %s on port %d' % (server.name, server.uuid, server.port)
            # NOTE(review): db and server.uuid are interpolated into a shell
            # command string; a db path containing shell metacharacters would
            # be interpreted by the shell. Consider subprocess with a list.
            os.system('%s mongrel2 %s %s' % (root_enabler, db, server.uuid))
def stop_command(db=None, uuid="", host="", name="", every=False, murder=False):
    """
    Stops a running mongrel2 process according to the host, either
    gracefully (INT) or murderous (TERM):
    m2sh stop -db config.sqlite -host localhost
    m2sh stop -db config.sqlite -host localhost -murder
    m2sh stop -db config.sqlite -name test -murder
    m2sh stop -db config.sqlite -every
    You shouldn't need sudo to stop a running mongrel if you
    are also the user that owns the chroot directory or root.
    Normally mongrel2 will wait until connections die off before really
    leaving, but you can give it the -murder flag and it'll nuke it
    semi-gracefully. You can also do it again with -murder if it's waiting
    for some dead connections and you want it to just quit.
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if pid:
            # SIGTERM kills semi-gracefully (-murder); SIGINT lets mongrel2
            # drain existing connections first.
            sig = signal.SIGTERM if murder else signal.SIGINT
            os.kill(pid, sig)
def reload_command(db=None, uuid="", host="", name="", every=False):
    """
    Causes Mongrel2 to do a soft-reload which will re-read the config
    database and then attempt to load a whole new configuration without
    losing connections on the previous one:
    m2sh reload -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh reload -db config.sqlite -host localhost
    m2sh reload -db config.sqlite -name test
    m2sh reload -db config.sqlite -every
    This reload will need access to the config database from within the
    chroot for it to work, and it's not totally guaranteed to be 100%
    reliable, but if you are doing development and need to do quick changes
    then this is what you do.
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if pid:
            # SIGHUP triggers mongrel2's soft-reload behavior.
            os.kill(pid, signal.SIGHUP)
def running_command(db=None, uuid="", host="", name="", every=False):
    """
    Tells you if the given server is still running:
    m2sh running -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh running -db config.sqlite -host localhost
    m2sh running -db config.sqlite -name test
    m2sh running -db config.sqlite -every
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        # TODO: Clean this up.
        if pid:
            try:
                # Signal 0 performs an existence/permission check only.
                os.kill(pid, 0)
                print "Found server %s %s RUNNING at PID %i" % (server.name,
                                                                server.uuid,
                                                                pid)
            except OSError:
                print "Server %s %s NOT RUNNING at PID %i" % (server.name,
                                                              server.uuid,
                                                              pid)
def control_command(db=None, host="", name="", uuid=""):
"""
Start a simple control console for working with mongrel2.
This is *very* bare bones at the moment but should improve.
m2sh control -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh control -db config.sqlite -host localhost
m2sh control -db config.sqlite -name test
"""
store = model.load_db("sqlite:" + db)
import zmq
servers = find_servers(db, uuid, host, name, False)
if servers:
server = servers[0]
CTX = zmq.Context()
results = store.find(model.Setting, model.Setting.key == unicode("control_port"))
addr = results[0].value if results.count() > 1 else "ipc://run/control"
ctl = CTX.socket(zmq.REQ)
print "CONNECTING to: %s in %s" % (addr, server.chroot)
os.chdir(server.chroot)
ctl.connect(addr)
try:
while True:
cmd = raw_input("> ")
ctl.send(cmd)
print ctl.recv()
except EOFError:
ctl.close()
def get_server_pid(server):
pid_file = os.path.realpath(server.chroot + server.pid_file)
if not os.path.isfile(pid_file):
print "PID file %s not found for server %s %s" % (pid_file,
server.name,
server.uuid)
return None
else:
return int(open(pid_file, 'r').read())
def version_command():
    """
    Prints out the version of your mongrel2 binary.
    """
    print "Mongrel2/1.7.3"
| {
"content_hash": "2e04576fbc852aacb45fec5bc3d78c80",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 99,
"avg_line_length": 29.25147347740668,
"alnum_prop": 0.5945328766203237,
"repo_name": "duaneg/mongrel2",
"id": "a663175f7f23fe585b06976f7abb5da22c5f9cde",
"size": "14889",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/python/mongrel2/config/commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "2384"
},
{
"name": "C",
"bytes": "1902641"
},
{
"name": "Haxe",
"bytes": "6282"
},
{
"name": "JavaScript",
"bytes": "52799"
},
{
"name": "Lua",
"bytes": "10686"
},
{
"name": "Python",
"bytes": "111753"
},
{
"name": "Shell",
"bytes": "9909"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
# Swappable user model path (e.g. "myapp.User"); falls back to the stock
# django.contrib.auth User when AUTH_USER_MODEL is not configured.
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
    """Widen ``Organization.name`` and ``Organization.slug`` from 50 to
    100 characters (South schema migration)."""

    def forwards(self, orm):
        # Changing field 'Organization.name'
        db.alter_column('organizations_organization', 'name', self.gf('django.db.models.fields.CharField')(max_length=100))

        # Changing field 'Organization.slug'
        db.alter_column('organizations_organization', 'slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100))

    def backwards(self, orm):
        # Changing field 'Organization.name'
        db.alter_column('organizations_organization', 'name', self.gf('django.db.models.fields.CharField')(max_length=50))

        # Changing field 'Organization.slug'
        db.alter_column('organizations_organization', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50, unique=True))

    # Frozen ORM state South uses to reconstruct the models as they
    # existed at this migration.  The user-model key is built dynamically
    # so a swappable AUTH_USER_MODEL still resolves.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        '{model}'.format(model=AUTH_USER_MODEL.lower()): {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'organizations.organization': {
            'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['{model}']".format(model=AUTH_USER_MODEL), 'through': "orm['organizations.OrganizationUser']", 'symmetrical': 'False'})
        },
        'organizations.organizationowner': {
            'Meta': {'object_name': 'OrganizationOwner'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'owner'", 'unique': 'True', 'to': "orm['organizations.Organization']"}),
            'organization_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'owned_organization'", 'unique': 'True', 'to': "orm['organizations.OrganizationUser']"})
        },
        'organizations.organizationuser': {
            'Meta': {'ordering': "['organization', 'user']", 'unique_together': "(('user', 'organization'),)", 'object_name': 'OrganizationUser'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': "orm['organizations.Organization']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organization_users'", 'to': "orm['{model}']".format(model=AUTH_USER_MODEL)})
        }
    }

    complete_apps = ['organizations']
| {
"content_hash": "df5a052833a3201731fe2c8b02b8b182",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 208,
"avg_line_length": 72.09473684210526,
"alnum_prop": 0.5830048182216382,
"repo_name": "aptivate/django-organizations",
"id": "6c35daff9b0e711da4918727eaabd7358fabf958",
"size": "6873",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "organizations/migrations/0002_auto__chg_field_organization_name__chg_field_organization_slug.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4611"
},
{
"name": "Python",
"bytes": "144404"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""Determine new partition divisions using approximate percentiles.
We use a custom algorithm to calculate approximate, evenly-distributed
percentiles of arbitrarily-ordered data for any dtype in a distributed
fashion with one pass over the data. This is used to determine new
partition divisions when changing the index of a dask.dataframe. We claim
no statistical guarantees, but we use a variety of heuristics to try to
provide reliable, robust results that are "good enough" and can scale to
large number of partitions.
Our approach is similar to standard approaches such as t- and q-digest,
GK, and sampling-based algorithms, which consist of three parts:
1. **Summarize:** create summaries of subsets of data
2. **Merge:** combine summaries to make a new summary
3. **Compress:** periodically compress a summary into a smaller summary
We summarize the data in each partition by calculating several percentiles.
The value at each percentile is given a weight proportional to the length
of the partition and the differences between the current percentile and
the adjacent percentiles. Merging summaries is simply a ``merge_sorted``
of the values and their weights, which we do with a reduction tree.
Percentiles is a good choice for our case, because we are given a numpy
array of the partition's data, and percentiles is a relatively cheap
operation. Moreover, percentiles are, by definition, much less
susceptible to the underlying distribution of the data, so the weights
given to each value--even across partitions--should be comparable.
Let us describe this to a child of five. We are given many small cubes
(of equal size) with numbers on them. Split these into many piles. This
is like the original data. Let's sort and stack the cubes from one of the
piles. Next, we are given a bunch of unlabeled blocks of different sizes,
and most are much larger than the original cubes. Stack these blocks
until they're the same height as our first stack. Let's write a number on
each block of the new stack. To do this, choose the number of the cube in
the first stack that is located in the middle of an unlabeled block. We
are finished with this stack once all blocks have a number written on them.
Repeat this for all the piles of cubes. Finished already? Great! Now
take all the stacks of the larger blocks you wrote on and throw them into
a single pile. We'll be sorting these blocks next, which may be easier if
you carefully move the blocks over and organize... ah, nevermind--too late.
Okay, sort and stack all the blocks from that amazing, disorganized pile
you just made. This will be very tall, so we had better stack it sideways
on the floor like so. This will also make it easier for us to split the
stack into groups of approximately equal size, which is our final task...
This, in a nutshell, is the algorithm we deploy. The main difference
is that we don't always assign a block the number at its median (ours
fluctuates around the median). The numbers at the edges of the final
groups is what we use as divisions for repartitioning. We also need
the overall min and max, so we take the 0th and 100th percentile of
each partition, and another sample near each edge so we don't give
disproportionate weights to extreme values.
Choosing appropriate percentiles to take in each partition is where things
get interesting. The data is arbitrarily ordered, which means it may be
sorted, random, or follow some pathological distribution--who knows. We
hope all partitions are of similar length, but we ought to expect some
variation in lengths. The number of partitions may also be changing
significantly, which could affect the optimal choice of percentiles. For
improved robustness, we use both evenly-distributed and random percentiles.
If the number of partitions isn't changing, then the total number of
percentiles across all partitions scales as ``npartitions**1.5``. Although
we only have a simple compression operation (step 3 above) that combines
weights of equal values, a more sophisticated one could be added if needed,
such as for extremely large ``npartitions`` or if we find we need to
increase the sample size for each partition.
"""
import math
import numpy as np
import pandas as pd
from pandas.api.types import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
)
from tlz import merge, merge_sorted, take
from dask.base import tokenize
from dask.dataframe.core import Series
from dask.dataframe.utils import is_categorical_dtype
from dask.utils import is_cupy_type, random_state_data
def sample_percentiles(num_old, num_new, chunk_length, upsample=1.0, random_state=None):
"""Construct percentiles for a chunk for repartitioning.
Adapt the number of total percentiles calculated based on the number
of current and new partitions. Returned percentiles include equally
spaced percentiles between [0, 100], and random percentiles. See
detailed discussion below.
Parameters
----------
num_old: int
Number of partitions of the current object
num_new: int
Number of partitions of the new object
chunk_length: int
Number of rows of the partition
upsample : float
Multiplicative factor to increase the number of samples
Returns
-------
qs : numpy.ndarray of sorted percentiles between 0, 100
Constructing ordered (i.e., not hashed) partitions is hard. Calculating
approximate percentiles for generic objects in an out-of-core fashion is
also hard. Fortunately, partition boundaries don't need to be perfect
in order for partitioning to be effective, so we strive for a "good enough"
method that can scale to many partitions and is reasonably well-behaved for
a wide variety of scenarios.
Two similar approaches come to mind: (1) take a subsample of every
partition, then find the best new partitions for the combined subsamples;
and (2) calculate equally-spaced percentiles on every partition (a
relatively cheap operation), then merge the results. We do both, but
instead of random samples, we use random percentiles.
If the number of partitions isn't changing, then the ratio of fixed
percentiles to random percentiles is 2 to 1. If repartitioning goes from
a very high number of partitions to a very low number of partitions, then
we use more random percentiles, because a stochastic approach will be more
stable to potential correlations in the data that may cause a few equally-
spaced partitions to under-sample the data.
The more partitions there are, then the more total percentiles will get
calculated across all partitions. Squaring the number of partitions
approximately doubles the number of total percentiles calculated, so
num_total_percentiles ~ sqrt(num_partitions). We assume each partition
is approximately the same length. This should provide adequate resolution
and allow the number of partitions to scale.
For numeric data, one could instead use T-Digest for floats and Q-Digest
for ints to calculate approximate percentiles. Our current method works
for any dtype.
"""
# *waves hands*
random_percentage = 1 / (1 + (4 * num_new / num_old) ** 0.5)
num_percentiles = upsample * num_new * (num_old + 22) ** 0.55 / num_old
num_fixed = int(num_percentiles * (1 - random_percentage)) + 2
num_random = int(num_percentiles * random_percentage) + 2
if num_fixed + num_random + 5 >= chunk_length:
return np.linspace(0, 100, chunk_length + 1)
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
q_fixed = np.linspace(0, 100, num_fixed)
q_random = random_state.rand(num_random) * 100
q_edges = [60 / (num_fixed - 1), 100 - 60 / (num_fixed - 1)]
qs = np.concatenate([q_fixed, q_random, q_edges, [0, 100]])
qs.sort()
# Make the divisions between percentiles a little more even
qs = 0.5 * (qs[:-1] + qs[1:])
return qs
def tree_width(N, to_binary=False):
    """Pick how many groups ``merge_sorted`` should reduce N inputs into.

    The larger N is, the more inputs are merged in a single task, keeping
    every task at a comparable amount of work.
    """
    # Small reductions merge pairs; larger ones merge ~ln(N) inputs each.
    group_size = 2 if N < 32 else int(math.log(N))
    num_groups = N // group_size
    if to_binary or num_groups < 16:
        # Round down to a power of two so the reduction tree stays balanced.
        return 2 ** int(math.log(N / group_size, 2))
    return num_groups
def tree_groups(N, num_groups):
    """Split an integer N into evenly sized and spaced groups.

    >>> tree_groups(16, 6)
    [3, 2, 3, 3, 2, 3]
    """
    # Bresenham-style error accumulation spreads the oversized groups
    # evenly across the result instead of bunching them at one end.
    base, extra = divmod(N, num_groups)
    err = 2 * extra - num_groups
    sizes = []
    for _ in range(num_groups):
        if err < 0:
            sizes.append(base)
        else:
            sizes.append(base + 1)
            err -= 2 * num_groups
        err += 2 * extra
    return sizes
def create_merge_tree(func, keys, token):
    """Create a task tree that merges all the keys with a reduction function.

    Parameters
    ----------
    func: callable
        Reduction function that accepts a single list of values to reduce.
    keys: iterable
        Keys to reduce from the source dask graph.
    token: object
        Included in each key of the returned dict.

    The tree is k-ary with k chosen per level by ``tree_width``: nodes far
    from the root merge more inputs at once, which cuts the total node
    count (less scheduler overhead) while keeping tree-like parallelism.
    For N < 1e5 keys the node count is roughly ``N**0.78``; for
    1e5 < N < 2e5 it is roughly ``N**0.8``.
    """
    graph = {}
    level = 0
    width = len(keys)
    pending = iter(keys)
    while width > 1:
        next_width = tree_width(width)
        new_keys = [(token, level, i) for i in range(next_width)]
        for size, new_key in zip(tree_groups(width, next_width), new_keys):
            graph[new_key] = (func, list(take(size, pending)))
        width = next_width
        pending = iter(new_keys)
        level += 1
    return graph
def percentiles_to_weights(qs, vals, length):
    """Weigh percentile values by length and the difference between percentiles

    >>> percentiles = np.array([0., 25., 50., 90., 100.])
    >>> values = np.array([2, 3, 5, 8, 13])
    >>> length = 10
    >>> percentiles_to_weights(percentiles, values, length)
    ([2, 3, 5, 8, 13], [125.0, 250.0, 325.0, 250.0, 50.0])

    Each value's weight is half the span of percentiles surrounding it,
    scaled by the partition length, so interior values (covered on both
    sides) weigh roughly twice as much as boundary values.
    """
    if length == 0:
        return ()
    # Zero-pad the percentile gaps at both ends so boundary values only
    # get weight from their single inner neighbour.
    gaps = np.concatenate([[0.0], np.diff(qs), [0.0]])
    point_weights = 0.5 * length * (gaps[:-1] + gaps[1:])
    return vals.tolist(), point_weights.tolist()
def merge_and_compress_summaries(vals_and_weights):
    """Merge and sort percentile summaries that are already sorted.

    Each item is a tuple like ``(vals, weights)`` where vals and weights
    are lists.  We merge-sort all summaries by value; equal values are
    combined with their weights summed together.  Empty summaries are
    ignored; returns ``()`` if nothing remains.
    """
    summaries = [vw for vw in vals_and_weights if vw]
    if not summaries:
        return ()
    pairs = merge_sorted(*(zip(v, w) for v, w in summaries))
    out_vals = []
    out_weights = []
    cur_val, cur_weight = next(pairs)
    for val, weight in pairs:
        if val == cur_val:
            # Compress: same value, accumulate the weight.
            cur_weight += weight
        else:
            out_vals.append(cur_val)
            out_weights.append(cur_weight)
            cur_val, cur_weight = val, weight
    # Flush the final run (the original's trailing equality check always
    # holds at this point, so this append is unconditional).
    out_vals.append(cur_val)
    out_weights.append(cur_weight)
    return out_vals, out_weights
def process_val_weights(vals_and_weights, npartitions, dtype_info):
    """Calculate final approximate percentiles given weighted vals

    ``vals_and_weights`` is assumed to be sorted. We take a cumulative
    sum of the weights, which makes them percentile-like (their scale is
    [0, N] instead of [0, 100]). Next we find the divisions to create
    partitions of approximately equal size.

    It is possible for adjacent values of the result to be the same. Since
    these determine the divisions of the new partitions, some partitions
    may be empty. This can happen if we under-sample the data, or if there
    aren't enough unique values in the column. Increasing ``upsample``
    keyword argument in ``df.set_index`` may help.

    Parameters
    ----------
    vals_and_weights : tuple of (vals, weights) lists, or empty
        Sorted, compressed summary from ``merge_and_compress_summaries``.
    npartitions : int
        Number of output partitions; the result has ``npartitions + 1``
        division values.
    dtype_info : tuple of (dtype, info)
        Output of ``dtype_info``; used to restore categorical/datetime
        dtypes of the result.
    """
    dtype, info = dtype_info

    if not vals_and_weights:
        try:
            return np.array(None, dtype=dtype)
        except Exception:
            # dtype does not support None value so allow it to change.
            # np.float64 replaces the np.float_ alias (same type), which
            # was removed in NumPy 2.0.
            return np.array(None, dtype=np.float64)

    vals, weights = vals_and_weights
    vals = np.array(vals)
    weights = np.array(weights)

    # We want to create exactly `npartition` number of groups of `vals` that
    # are approximately the same weight and non-empty if possible.  We use a
    # simple approach (more accurate algorithms exist):
    # 1. Remove all the values with weights larger than the relative
    #    percentile width from consideration (these are `jumbo`s)
    # 2. Calculate percentiles with "interpolation=left" of percentile-like
    #    weights of the remaining values.  These are guaranteed to be unique.
    # 3. Concatenate the values from (1) and (2), sort, and return.
    #
    # We assume that all values are unique, which happens in the previous
    # step `merge_and_compress_summaries`.
    if len(vals) == npartitions + 1:
        rv = vals
    elif len(vals) < npartitions + 1:
        # The data is under-sampled
        if np.issubdtype(vals.dtype, np.number) and not is_categorical_dtype(dtype):
            # Interpolate extra divisions
            q_weights = np.cumsum(weights)
            q_target = np.linspace(q_weights[0], q_weights[-1], npartitions + 1)
            rv = np.interp(q_target, q_weights, vals)
        else:
            # Distribute the empty partitions
            duplicated_index = np.linspace(
                0, len(vals) - 1, npartitions - len(vals) + 1, dtype=int
            )
            duplicated_vals = vals[duplicated_index]
            rv = np.concatenate([vals, duplicated_vals])
            rv.sort()
    else:
        target_weight = weights.sum() / npartitions
        jumbo_mask = weights >= target_weight
        jumbo_vals = vals[jumbo_mask]

        trimmed_vals = vals[~jumbo_mask]
        trimmed_weights = weights[~jumbo_mask]
        trimmed_npartitions = npartitions - len(jumbo_vals)

        # percentile-like, but scaled by weights
        q_weights = np.cumsum(trimmed_weights)
        q_target = np.linspace(0, q_weights[-1], trimmed_npartitions + 1)

        left = np.searchsorted(q_weights, q_target, side="left")
        right = np.searchsorted(q_weights, q_target, side="right") - 1
        # stay inbounds
        np.maximum(right, 0, right)
        lower = np.minimum(left, right)
        trimmed = trimmed_vals[lower]

        rv = np.concatenate([trimmed, jumbo_vals])
        rv.sort()

    # Restore the original dtype lost when the values went through
    # plain numpy arrays above.
    if is_categorical_dtype(dtype):
        rv = pd.Categorical.from_codes(rv, info[0], info[1])
    elif is_datetime64tz_dtype(dtype):
        rv = pd.DatetimeIndex(rv).tz_localize(dtype.tz)
    elif "datetime64" in str(dtype):
        rv = pd.DatetimeIndex(rv, dtype=dtype)
    elif rv.dtype != dtype:
        rv = pd.array(rv, dtype=dtype)
    return rv
def percentiles_summary(df, num_old, num_new, upsample, state):
    """Summarize data using percentiles and derived weights.

    These summaries can be merged, compressed, and converted back into
    approximate percentiles.

    Parameters
    ----------
    df: pandas.Series
        Data to summarize
    num_old: int
        Number of partitions of the current object
    num_new: int
        Number of partitions of the new object
    upsample: float
        Scale factor to increase the number of percentiles calculated in
        each partition.  Use to improve accuracy.
    state:
        Seed used to construct the ``RandomState`` for random percentiles.

    Returns
    -------
    ``(vals, weights)`` lists (see ``percentiles_to_weights``), or ``()``
    for an empty partition.
    """
    from dask.array.dispatch import percentile_lookup as _percentile
    from dask.array.utils import array_safe

    length = len(df)
    if length == 0:
        # Empty partitions contribute nothing to the merged summary.
        return ()
    random_state = np.random.RandomState(state)
    qs = sample_percentiles(num_old, num_new, length, upsample, random_state)
    data = df
    interpolation = "linear"

    if is_categorical_dtype(data):
        # Work on the integer category codes; the categorical dtype is
        # restored later from the info captured by dtype_info.
        data = data.cat.codes
        interpolation = "nearest"
    elif is_datetime64_dtype(data.dtype) or is_integer_dtype(data.dtype):
        # Nearest-value interpolation keeps results representable in the
        # original dtype (no fractional datetimes/ints).
        interpolation = "nearest"

    # FIXME: pandas quantile doesn't work with some data types (e.g. strings).
    # We fall back to an ndarray as a workaround.
    try:
        vals = data.quantile(q=qs / 100, interpolation=interpolation).values
    except (TypeError, NotImplementedError):
        vals, _ = _percentile(array_safe(data, data.dtype), qs, interpolation)

    if (
        is_cupy_type(data)
        and interpolation == "linear"
        and np.issubdtype(data.dtype, np.integer)
    ):
        # NOTE(review): presumably cupy's linear interpolation can return
        # floats for integer input; round/astype restores the dtype.
        vals = np.round(vals).astype(data.dtype)
    if qs[0] == 0:
        # Ensure the 0th quantile is the minimum value of the data
        vals[0] = data.min()
    vals_and_weights = percentiles_to_weights(qs, vals, length)
    return vals_and_weights
def dtype_info(df):
    """Return ``(dtype, info)`` describing *df*'s dtype.

    For a categorical series, ``info`` is ``(categories, ordered)`` so
    the dtype can be reconstructed later; otherwise it is ``None``.
    """
    extra = None
    if is_categorical_dtype(df):
        cat_data = df.values
        extra = (cat_data.categories, cat_data.ordered)
    return df.dtype, extra
def partition_quantiles(df, npartitions, upsample=1.0, random_state=None):
    """Approximate quantiles of Series used for repartitioning.

    Builds a four-layer task graph (dtype capture, per-partition summaries,
    a merge/compress reduction tree, and final division computation) and
    returns it wrapped as a single-partition Series of division values.
    """
    assert isinstance(df, Series)
    # currently, only Series has quantile method
    # Index.quantile(list-like) must be pd.Series, not pd.Index
    return_type = Series

    qs = np.linspace(0, 1, npartitions + 1)
    token = tokenize(df, qs, upsample)
    if random_state is None:
        # Derive a deterministic seed from the token so repeated calls on
        # identical inputs build identical graphs.
        random_state = int(token, 16) % np.iinfo(np.int32).max
    state_data = random_state_data(df.npartitions, random_state)

    df_keys = df.__dask_keys__()

    # Layer 0: capture dtype metadata from the first partition.
    name0 = "re-quantiles-0-" + token
    dtype_dsk = {(name0, 0): (dtype_info, df_keys[0])}

    # Layer 1: one percentile summary per input partition.
    name1 = "re-quantiles-1-" + token
    val_dsk = {
        (name1, i): (
            percentiles_summary,
            key,
            df.npartitions,
            npartitions,
            upsample,
            state,
        )
        for i, (state, key) in enumerate(zip(state_data, df_keys))
    }

    # Layer 2: reduction tree merging and compressing the summaries.
    name2 = "re-quantiles-2-" + token
    merge_dsk = create_merge_tree(merge_and_compress_summaries, sorted(val_dsk), name2)
    if not merge_dsk:
        # Compress the data even if we only have one partition
        merge_dsk = {(name2, 0, 0): (merge_and_compress_summaries, [list(val_dsk)[0]])}

    # The root of the reduction tree has the highest (level, index) key.
    merged_key = max(merge_dsk)

    # Layer 3: turn the merged weighted values into final divisions.
    name3 = "re-quantiles-3-" + token
    last_dsk = {
        (name3, 0): (
            pd.Series,  # TODO: Use `type(df._meta)` when cudf adds `tolist()`
            (process_val_weights, merged_key, npartitions, (name0, 0)),
            qs,
            None,
            df.name,
        )
    }

    dsk = merge(df.dask, dtype_dsk, val_dsk, merge_dsk, last_dsk)
    new_divisions = [0.0, 1.0]
    return return_type(dsk, name3, df._meta, new_divisions)
| {
"content_hash": "39392fb02e894aa93a8217d545e90745",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 88,
"avg_line_length": 39.3029702970297,
"alnum_prop": 0.6730652962515115,
"repo_name": "jakirkham/dask",
"id": "128493b5dd69f72663061daf52b945db12faa394",
"size": "19848",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dask/dataframe/partitionquantiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jinja",
"bytes": "6086"
},
{
"name": "Python",
"bytes": "4588734"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from genericm2m.models import RelatedObjectsDescriptor
class RelatedBeverage(models.Model):
    # Explicit intermediary ("through"-style) model connecting a Food to a
    # Beverage; used by Food.related_beverages below.
    food = models.ForeignKey('Food')
    beverage = models.ForeignKey('Beverage')

    class Meta:
        # Newest relations first.
        ordering = ('-id',)
class Food(models.Model):
    name = models.CharField(max_length=255)

    # Untyped generic relations (any model on the other side).
    related = RelatedObjectsDescriptor()
    # Relations stored in the concrete RelatedBeverage model, restricted
    # to Beverage targets.
    related_beverages = RelatedObjectsDescriptor(RelatedBeverage, 'food', 'beverage')

    def __unicode__(self):
        return self.name
class Beverage(models.Model):
    # Simple model with generic relations support.
    name = models.CharField(max_length=255)
    related = RelatedObjectsDescriptor()

    def __unicode__(self):
        return self.name
class Person(models.Model):
    # Simple model with generic relations support.
    name = models.CharField(max_length=255)
    related = RelatedObjectsDescriptor()

    def __unicode__(self):
        return self.name
class Boring(models.Model):
    # Model without a RelatedObjectsDescriptor -- presumably exercises the
    # "no generic relations" case in tests.
    name = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name
class AnotherRelatedObject(models.Model):
    """Custom intermediary model for generic m2m relations.

    Both ends are generic foreign keys (content type + integer id), with
    extra metadata fields alongside the relation itself.
    """
    # The "from" side of the relation.
    parent_type = models.ForeignKey(ContentType, related_name="child_%(class)s")
    parent_id = models.IntegerField(db_index=True)
    parent = GenericForeignKey(ct_field="parent_type", fk_field="parent_id")

    # The "to" side of the relation.
    object_type = models.ForeignKey(ContentType, related_name="related_%(class)s")
    object_id = models.IntegerField(db_index=True)
    object = GenericForeignKey(ct_field="object_type", fk_field="object_id")

    # Extra per-relation metadata.
    alias = models.CharField(max_length=255, blank=True)
    description = models.TextField(blank=True)
    creation_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ('id',)
class Note(models.Model):
    content = models.TextField()
    # Generic relations stored in the custom intermediary model above.
    related = RelatedObjectsDescriptor(AnotherRelatedObject)
| {
"content_hash": "fd7bf03f890ce53d7c858baa4aed627d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 85,
"avg_line_length": 27.02777777777778,
"alnum_prop": 0.6927029804727647,
"repo_name": "wd5/jangr",
"id": "9ed3bc28c10debc4bb4a0fcb4a3d69e227f70a12",
"size": "1946",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "genericm2m/genericm2m_tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
from sigops import Signal
class TestSignalDict(unittest.TestCase):
    def test_assert_named_signals(self):
        """Creating an unnamed Signal should raise only while the
        ``Signal.assert_named_signals`` class flag is enabled."""
        # Unnamed signal is allowed by default.
        Signal(np.array(0.))
        Signal.assert_named_signals = True
        with self.assertRaises(AssertionError):
            Signal(np.array(0.))

        # So that other tests that build signals don't fail...
        Signal.assert_named_signals = False
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "ba0d357c2d29fa732d228f9c78d5ba54",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 62,
"avg_line_length": 24.61904761904762,
"alnum_prop": 0.6595744680851063,
"repo_name": "jaberg/sigops",
"id": "6b18f040c720838b66c5461240c4c83616a32c66",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigops/tests/test_signal.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29639"
}
],
"symlink_target": ""
} |
__author__ = "Rafael S. Guimarães e João Paulo de Brito Gonçalves"
import socket
import struct
from server.ClientP2P import ClientP2P
class ServerP2P(object):
"""
Class Server P2P - Work 01
"""
    def __init__(self, node):
        """Bind the UDP control socket and wire up the ring node.

        node: the ring-node object whose successor/predecessor state the
        message handlers below read and update.
        """
        self.__HOST = ""
        self.__PORT = 12345
        # Create the UDP socket on port 12345.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP protocol
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((self.__HOST, self.__PORT))
        self.conn = None
        self.stop = False
        self.node = node
        self.client_p2p = ClientP2P("127.0.0.1", self.s)
def ip2int(self, addr):
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(self, addr):
return socket.inet_ntoa(struct.pack("!I", addr))
    def joinMessage(self, msg):
        """Handle a Join request (type 0) or a Join answer (type 64).

        msg: dict with the raw datagram bytes under "data" and the
        sender address under "addr".
        """
        # First byte of the payload identifies the message type.
        typeMSG = bytearray(msg["data"])
        if int(typeMSG[0]) == 0:
            # Join request: payload is (type: uint8, node_id: uint32).
            res = struct.unpack("!BI", msg["data"])
            id_node = int(res[1])
            # Answer the Join with this node's successor/predecessor data.
            # The node inserting the newcomer then takes the newcomer as
            # its new predecessor.  (If this is the only node in the ring,
            # predecessor and successor are both itself.)
            rmsg = {
                "dest_ip_addr": msg["addr"],
                "type": 64,
                "id_node_sucessor": self.node.code,
                "ip_node_sucessor": self.node.ipAddrNode,
                "id_node_predecessor": self.node.idPredecessor,
                "ip_node_predecessor": self.node.ipAddrPredecessor,
            }
            self.client_p2p.sendJoinMsg(rmsg)
            # The newcomer becomes this node's new predecessor.
            self.node.idPredecessor = id_node
            self.node.ipAddrPredecessor = msg["addr"]
            self.node.updateScreen("Received: JOIN " + str(id_node))
        elif int(typeMSG[0]) == 64:
            # Join answer: (type, succ_id, succ_ip, pred_id, pred_ip).
            res = struct.unpack("!BIIII", msg["data"])
            id_node_sucessor = int(res[1])
            ip_node_sucessor = self.int2ip(int(res[2]))
            id_node_predecessor = int(res[3])
            ip_node_predecessor = self.int2ip(int(res[4]))
            # Adopt the successor/predecessor reported in the answer.
            self.node.idSuccessor = id_node_sucessor
            self.node.ipAddrSuccessor = ip_node_sucessor
            self.node.idPredecessor = id_node_predecessor
            self.node.ipAddrPredecessor = ip_node_predecessor
            # Send an Update (type 3) to the predecessor announcing this
            # node as its new successor in the ring.
            rmsg = {
                "dest_ip_addr": ip_node_predecessor,
                "type": 3,
                "id_src": self.node.code,
                "id_new_sucessor": self.node.code,
                "ip_new_sucessor": self.node.ipAddrNode,
            }
            self.client_p2p.sendUpdateMsg(rmsg)
            self.node.updateScreen(
                "Received: ANSWER JOIN - UPDATE TO" + str(ip_node_sucessor)
            )
    def leaveMessage(self, msg):
        """Handle a Leave message (type 1) or a Leave answer (type 65)."""
        # First byte of the payload identifies the message type.
        typeMSG = bytearray(msg["data"])
        if int(typeMSG[0]) == 1:
            # Leave: (type, leaving_id, succ_id, succ_ip, pred_id, pred_ip).
            res = struct.unpack("!BIIIII", msg["data"])
            id_node_out = int(res[1])
            id_node_sucessor = int(res[2])
            ip_node_sucessor = self.int2ip(int(res[3]))
            id_node_predecessor = int(res[4])
            ip_node_predecessor = self.int2ip(int(res[5]))
            # On a Leave, check whether this node is the successor and/or
            # predecessor of the sender:
            #  - as predecessor: point our successor at the leaver's successor.
            #  - as successor: point our predecessor at the leaver's predecessor.
            rmsg = {
                "dest_ip_addr": msg["addr"],
                "type": 65,
                "id_src_msg": self.node.code,
            }
            if (self.node.code == id_node_sucessor) and (
                self.node.code == id_node_predecessor
            ):
                # This node is both neighbours of the leaver.
                self.node.idPredecessor = id_node_predecessor
                self.node.ipAddrPredecessor = ip_node_predecessor
                self.node.idSuccessor = id_node_sucessor
                self.node.ipAddrSuccessor = ip_node_sucessor
            elif self.node.code == id_node_sucessor:
                self.node.idPredecessor = id_node_predecessor
                self.node.ipAddrPredecessor = ip_node_predecessor
            elif self.node.code == id_node_predecessor:
                self.node.idSuccessor = id_node_sucessor
                self.node.ipAddrSuccessor = ip_node_sucessor
            # Send the Leave answer (type 65).
            self.client_p2p.sendLeaveMsg(rmsg)
            self.node.updateScreen("Received: LEAVE IP_S:" + str(rmsg))
        elif int(typeMSG[0]) == 65:
            # Leave answer: (type, sender_id).
            res = struct.unpack("!BI", msg["data"])
            id_src_msg = int(res[1])
            # Compare the sender's ID against our successor and predecessor.
            if id_src_msg == self.node.idSuccessor:
                # Clear the successor slot.
                self.node.idSuccessor = None
                self.node.ipAddrSuccessor = None
            elif id_src_msg == self.node.idPredecessor:
                # Clear the predecessor slot.
                self.node.idPredecessor = None
                self.node.ipAddrPredecessor = None
            self.node.updateScreen("Received: ANSWER LEAVE " + str(id_src_msg))
    def lookupMessage(self, msg):
        """Handle LOOKUP requests (code 2) and LOOKUP answers (code 66).

        A request carries the searcher's id/ip plus the searched id; this
        node either answers directly (code 66) or forwards the request to
        its successor (code 2).  The "state" field tags which branch
        produced the message (appears to be a debugging aid).
        """
        typeMSG = bytearray(msg["data"])
        if int(typeMSG[0]) == 2:
            res = struct.unpack("!BIII", msg["data"])
            src_id_searched = int(res[1])
            src_ip_searched = self.int2ip(int(res[2]))
            id_searched = int(res[3])
            if (
                self.node.code == self.node.idSuccessor
                and self.node.code == self.node.idPredecessor
            ):
                # I am the only node in the ring: answer the lookup
                # (code 66) with my own node's data.
                rmsg = {
                    "dest_ip_addr": src_ip_searched,
                    "type": 66,
                    "id_searched": id_searched,
                    "id_sucessor_searched": self.node.code,
                    "ip_sucessor_searched": self.node.ipAddrNode,
                    "state": "A",
                }
                self.client_p2p.sendLookupMsg(rmsg)
            elif self.node.code < id_searched and self.node.idSuccessor > id_searched:
                # My id is smaller than the searched id but my successor's
                # is larger: forward the lookup to my successor (code 2).
                rmsg = {
                    "dest_ip_addr": self.node.ipAddrSuccessor,
                    "type": 2,
                    "src_id_searched": src_id_searched,
                    "src_ip_searched": src_ip_searched,
                    "id_searched": id_searched,
                    "state": "B",
                }
                self.client_p2p.sendLookupMsg(rmsg)
            elif (
                self.node.code < id_searched
                and self.node.idPredecessor > self.node.code
            ):
                # My id is smaller than the searched id and my predecessor's
                # id is larger than mine: answer the lookup (code 66).
                rmsg = {
                    "dest_ip_addr": src_ip_searched,
                    "type": 66,
                    "id_searched": id_searched,
                    "id_sucessor_searched": self.node.code,
                    "ip_sucessor_searched": self.node.ipAddrNode,
                    "state": "C",
                }
                self.client_p2p.sendLookupMsg(rmsg)
            elif self.node.code >= id_searched:
                # My id is >= the searched id: answer the lookup (code 66).
                rmsg = {
                    "dest_ip_addr": src_ip_searched,
                    "type": 66,
                    "id_searched": id_searched,
                    "id_sucessor_searched": self.node.code,
                    "ip_sucessor_searched": self.node.ipAddrNode,
                    "state": "D",
                }
                self.client_p2p.sendLookupMsg(rmsg)
            elif self.node.code < id_searched:
                # Fallback: forward the lookup to my successor (code 2).
                rmsg = {
                    "dest_ip_addr": self.node.ipAddrSuccessor,
                    "type": 2,
                    "src_id_searched": src_id_searched,
                    "src_ip_searched": src_ip_searched,
                    "id_searched": id_searched,
                    "state": "FORWADING",
                }
                self.client_p2p.sendLookupMsg(rmsg)
            self.node.updateScreen("Received: LOOKUP " + str(rmsg))
        elif int(typeMSG[0]) == 66:
            # Lookup answer
            res = struct.unpack("!BIII", msg["data"])
            id_searched = int(res[1])
            id_sucessor_searched = int(res[2])
            ip_sucessor_searched = self.int2ip(int(res[3]))
            # On receiving the lookup answer, send a Join message (code 0)
            # to the successor IP that was found.
            rmsg = {
                "dest_ip_addr": ip_sucessor_searched,
                "type": 0,
                "id_node": self.node.code,
            }
            self.client_p2p.sendJoinMsg(rmsg)
            self.node.updateScreen("Received: ANSWER LOOKUP - JOIN TO " + str(rmsg))
    def updateMessage(self, msg):
        """Handle UPDATE requests (code 3) and UPDATE answers (code 67)."""
        typeMSG = bytearray(msg["data"])
        if int(typeMSG[0]) == 3:
            res = struct.unpack("!BIII", msg["data"])
            id_src = int(res[1])  # message originator's id (parsed but unused below)
            id_new_sucessor = int(res[2])
            ip_new_sucessor = self.int2ip(int(res[3]))
            # Record the new successor announced in the header.
            self.node.idSuccessor = id_new_sucessor
            self.node.ipAddrSuccessor = ip_new_sucessor
            # Answer the Update (code 67).
            rmsg = {
                "dest_ip_addr": ip_new_sucessor,
                "type": 67,
                "id_src_msg": self.node.code,
            }
            self.client_p2p.sendUpdateMsg(rmsg)
            self.node.updateScreen("Received: UPDATE:" + str(rmsg))
        elif int(typeMSG[0]) == 67:
            # Update answer
            res = struct.unpack("!BI", msg["data"])
            id_src_msg = int(res[1])
            # The answer echoes the source node id; display both for comparison.
            self.node.updateScreen(
                "Received: ANSWER UPDATE :ID_SRC="
                + str(id_src_msg)
                + " NODE ID="
                + str(self.node.code)
            )
def run(self):
while not self.stop:
# Handle sockets
data, addr = self.s.recvfrom(1024)
self.client_p2p.sock = self.s
msg = {"addr": addr[0], "data": data}
try:
if len(data) > 0:
codeMessage = bytearray(data)
if int(codeMessage[0]) == 0:
self.joinMessage(msg)
elif int(codeMessage[0]) == 1:
self.leaveMessage(msg)
elif int(codeMessage[0]) == 2:
self.lookupMessage(msg)
elif int(codeMessage[0]) == 3:
self.updateMessage(msg)
elif int(codeMessage[0]) == 64:
self.joinMessage(msg)
elif int(codeMessage[0]) == 65:
self.leaveMessage(msg)
elif int(codeMessage[0]) == 66:
self.lookupMessage(msg)
elif int(codeMessage[0]) == 67:
self.updateMessage(msg)
else:
print("Received: Invalid code!")
except ValueError as e:
continue
| {
"content_hash": "2ed1e4afefe47a4623abfdb8482de4be",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 119,
"avg_line_length": 43.60424028268551,
"alnum_prop": 0.5103727714748785,
"repo_name": "rafaelsilvag/PyChord",
"id": "624a51fd34e6e7f05a2c96d34ae516a2c3ef1107",
"size": "12375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/ServerP2P.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26257"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
#==============================================================================
# A simple twitter client capable of persisting data to a DB
# Usage: python client.py --config=config.json
# To persist to a database a valid connection string is needed.
# Also, create a proper Tweets table. Check 'insert' method in DbConnector.
# =============================================================================
from __future__ import unicode_literals
from __future__ import print_function
import pypyodbc
import sys
import getopt
import re
import codecs
from colorama import *
from datetime import datetime
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
import json
import traceback
from unidecode import unidecode
colormap = {
0: Fore.RED,
1: Fore.GREEN,
2: Fore.BLUE,
3: Fore.CYAN,
4: Fore.MAGENTA
}
class TweetEntry(object):
    """Represents a single tweet message captured from the stream."""

    def __init__(self, user, message, hash_tags=None):
        """Store the tweet's author, text, capture time and hashtags.

        ``hash_tags`` now defaults to ``None`` instead of a mutable ``[]``
        (avoids the shared-mutable-default pitfall); a private copy is
        taken either way, so callers keep ownership of their list and the
        old call sites behave identically.
        """
        self.username = user
        self.tweet = message
        self.created = datetime.now()
        self.hashtags = list(hash_tags) if hash_tags is not None else []

    def to_string(self):
        """Return the tweet as a single ``user | text`` line.

        NOTE(review): the ascii ``encode`` calls return ``bytes`` under
        Python 3, which would break the subsequent ``replace`` with str
        arguments — this module targets Python 2 (see the ``__future__``
        imports at the top of the file).
        """
        return '{0} | {1}'.format(self.username.encode("ascii", "ignore"),
                                  self.tweet.encode("ascii", "ignore").replace('\n', ' ').replace('\r', ''))
class DbConnector(object):
    """ Helper class for managing DB access """
    def __init__(self, connection_string):
        # Connects eagerly; any pypyodbc error propagates to the caller.
        self.connect(connection_string)
    def connect(self, connection_string):
        """Open the ODBC connection and keep a long-lived cursor."""
        self.conn = pypyodbc.connect(connection_string)
        self.cursor = self.conn.cursor()
        print('Database connection established')
    def insert(self, TweetEntry):
        """Insert one tweet row into dbo.Tweets via a parameterized query.

        Hashtags are flattened to a comma-separated string.
        NOTE(review): the parameter name shadows the TweetEntry class;
        renaming it (e.g. to ``entry``) would be clearer but could break
        keyword-argument callers, so the code is left untouched here.
        """
        self.cursor.execute('''INSERT INTO [dbo].[Tweets]
           ([ScreenName]
           ,[Message]
           ,[CreatedAt]
           ,[Hashtags])
     VALUES
           (?
           ,?
           ,?
           ,?)''',(TweetEntry.username,
            TweetEntry.tweet,
            TweetEntry.created,
            ", ".join(TweetEntry.hashtags)))
        self.cursor.commit()
class Listener(StreamListener):
    """Twitter stream listener.

    Filters incoming statuses by user, term and language, prints them to
    the console with per-hashtag colors, and optionally persists them
    through a DbConnector.
    """
    def __init__(self, hash_tags, ignore_users,
            ignore_terms, accept_langs,
            persist = False, connection_string = None):
        """Copy the filter configuration and set up colors / DB access."""
        self.hashtags = list(hash_tags)
        # User filtering is case-insensitive; normalize once up front.
        self.ignored_users = [x.lower() for x in list(ignore_users)]
        self.ignored_terms = list(ignore_terms)
        self.accepted_langs = list(accept_langs)
        self.persist = persist
        self.connection_string = connection_string
        self.colorized_hashtags = {}
        self.assign_hashtag_colors()
        if self.persist:
            self.connect_db()
            print('Initialized Twitter Listener with DB')
        else:
            print('Initialized Twitter Listener without DB')
        print('======================= TWEETS =======================')
    def on_data(self, data):
        """ Must be implemented so the TwitterStream instance can call it """
        try:
            parsed = json.loads(data)
            # Keep-alive / limit notices carry no 'user' entry; skip them.
            if 'user' not in parsed:
                return True
            username = parsed['user']['screen_name']
            tweet = parsed['text']
            lang = parsed['lang']
            urls = parsed['entities']['urls']
            db_entry = TweetEntry(username, tweet, self.hashtags)
            if self.is_acceptable(username, tweet, lang):
                if self.persist:
                    self.db.insert(db_entry)
                tweet = self.expand_urls(tweet, urls)
                line = username.ljust(20) + ' | ' + tweet.rjust(20)
                clean = self.sanitize(line.replace('\r','').replace('\n',' '))
                self.print_colorized(clean)
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit can still stop the stream.
            traceback.print_exc()
    def is_acceptable(self, username, tweet, lang):
        """A tweet passes when its language, author and text are all allowed."""
        return (self.lang_ok(lang) and self.user_ok(username) and self.tweet_ok(tweet))
    def user_ok(self, username):
        """True unless the author is on the ignore list (case-insensitive)."""
        return username.lower() not in self.ignored_users
    def tweet_ok(self, tweet):
        """True unless the text contains any ignored term."""
        return not any(term in tweet for term in self.ignored_terms)
    def lang_ok(self, lang):
        """True when the tweet's language is one of the accepted ones."""
        return lang in self.accepted_langs
    def assign_hashtag_colors(self):
        """Assign each tracked hashtag a console color, cycling the palette."""
        palette_size = len(colormap)  # renamed from `max`: don't shadow the builtin
        count = 0
        for tag in self.hashtags:
            if(count == palette_size):
                count = 0
            self.colorized_hashtags[tag] = colormap[count]
            print(Style.BRIGHT + colormap[count] + 'set tag ' +
                  tag + ' to this color'
                  + Fore.RESET)
            count += 1
    def on_error(self, status):
        """Log stream errors reported by the Twitter API."""
        print(status)
    def expand_urls(self, tweet, urls):
        """Replace shortened URLs in *tweet* with their expanded form."""
        if len(urls) == 0:
            return tweet
        try:
            for entry in urls:
                if entry['url'] is None or entry['expanded_url'] is None:
                    continue
                # Case-insensitive literal match of the short URL.
                url_c = re.compile(re.escape(entry['url']), re.IGNORECASE)
                tweet = url_c.sub(entry['expanded_url'], tweet)
        except Exception:
            traceback.print_exc()
        # BUG FIX: the original returned None when an exception was raised
        # inside the loop (the except path fell off the end of the
        # function), which then crashed on_data().  Always return the
        # (possibly partially expanded) tweet text.
        return tweet
    def sanitize(self, line):
        """Transliterate non-ASCII characters so console output is safe."""
        return unidecode(line)
    def print_colorized(self, line):
        """ Colorize console output """
        try:
            for term in self.hashtags:
                term_ci = re.compile(re.escape(term), re.IGNORECASE)
                line = term_ci.sub(self.colorized_hashtags[term] + Style.BRIGHT + term + Fore.RESET, line)
            print(line)
        except Exception:
            traceback.print_exc()
    def connect_db(self):
        """ Connect with DB by using DSN info """
        self.db = DbConnector(self.connection_string)
# -- console client code --
def activate_twitter(hash_tags = [], ignore_users = [],
        ignore_terms = [], accept_langs = [],
        persist = False, connection_string = None):
    """ Connect to Twitter API and stream tweets matching hash_tags.

    CONSUMER_KEY/CONSUMER_SECRET and ACCESS_TOKEN/ACCESS_SECRET are module
    globals set by start_client() before this is called.
    NOTE(review): the mutable-list defaults are never mutated here, so the
    shared-default pitfall does not bite, but None would be safer.
    """
    auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    listener = Listener(hash_tags, ignore_users, ignore_terms, accept_langs, persist, connection_string)
    twitter_stream = Stream(auth, listener)
    # Restrict the stream to statuses containing the tracked hashtags.
    twitter_stream.filter(track =hash_tags)
def usage():
    """Print command-line usage information for this client."""
    message = 'Usage: twitter_client.py --config=[JSON_formatted_config]'
    print(message)
def start_client(_json):
    """Configure module globals from the parsed JSON config and start streaming.

    Expects the config layout used under _json['config']: 'ignore',
    'accept', 'services' (twitter + db) and 'filter' sections.
    """
    global CONSUMER_KEY
    global CONSUMER_SECRET
    global ACCESS_TOKEN
    global ACCESS_SECRET
    hash_tags = None
    connection_string = ''
    persist = False
    # load ignored users
    ignore_users = [_user for _user in _json['config']['ignore']['users']]
    ignore_terms = [_term for _term in _json['config']['ignore']['terms']]
    # accept only messages written in following languages
    accept_langs = [_lang for _lang in _json['config']['accept']['languages']]
    if(len(ignore_users) > 0):
        print('Ignoring users: {0}'.format(json.dumps(ignore_users)))
    if(len(ignore_terms)):
        print('Ignoring terms: {0}'.format(json.dumps(ignore_terms)))
    if(len(accept_langs) > 0):
        print('Accepting only languages: {0}'.format(json.dumps(accept_langs)))
    # configure twitter api access
    access = _json['config']['services']['twitter']
    CONSUMER_KEY = access['consumerKey']
    CONSUMER_SECRET = access['consumerSecret']
    ACCESS_TOKEN = access['accessToken']
    ACCESS_SECRET = access['accessSecret']
    # configure persistence (only when the db service is marked active)
    database = _json['config']['services']['db']
    if(database['active'] == True):
        persist = True
        connection_string = database['connectionstring']
    else:
        persist = False
        connection_string = None
    # configure filtering of messages; matching elsewhere is lowercase-based
    hash_tags = _json['config']['filter']
    print('Using filter: {0}'.format(json.dumps(hash_tags)))
    hash_tags = [tag.lower() for tag in hash_tags]
    try:
        activate_twitter(hash_tags, ignore_users, ignore_terms, accept_langs, persist, connection_string)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        err = sys.exc_info()[0]
        print('Error => {}'.format(err))
def main(argv):
    """Parse command-line options and launch the client.

    Options: -h/--help (usage), -c/--config=<json file>, -d (debug flag).
    """
    try:
        # colorama init (wrap=True) so ANSI colors also work on Windows.
        init(wrap=True)
        opts, args = getopt.getopt(argv, "hc:d", ["help", "config="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h","--help"):
            usage()
            sys.exit()
        elif opt == '-d':
            global _debug
            _debug = True
        elif opt in ("-c","--config"):
            print('Loading config from file {0}'.format(arg))
            # Config files are JSON, read as UTF-8.
            with codecs.open(arg,'r', encoding='utf-8') as config_file:
                _json = json.load(config_file)
            start_client(_json)
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "6f9549d0bb67800224dcff9328c9f083",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 123,
"avg_line_length": 37.932,
"alnum_prop": 0.5329537066329221,
"repo_name": "brakmic/TwitterClient",
"id": "9e0f26739408a9c0156f74b0679b909158cbcca8",
"size": "9485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TwitterClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "59"
},
{
"name": "Python",
"bytes": "9485"
}
],
"symlink_target": ""
} |
import os
import sys
import django
from django.conf import settings
from django.core.management import call_command
def runtests():
    """Configure a minimal Django settings module (once) and run the
    modeltranslation test suite, exiting non-zero on failure.

    The database backend is chosen through the ``DB`` environment
    variable: ``sqlite`` (default, in-memory), ``mysql`` or ``postgres``.
    """
    if not settings.configured:
        # Start from an in-memory SQLite configuration.
        db_config = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:',
        }
        backend = os.environ.get('DB', 'sqlite')
        if backend == 'mysql':
            db_config.update({
                'ENGINE': 'django.db.backends.mysql',
                'NAME': 'modeltranslation',
                'USER': 'root',
            })
        elif backend == 'postgres':
            db_config.update({
                'ENGINE': 'django.db.backends.postgresql_psycopg2',
                'USER': 'postgres',
                'NAME': 'modeltranslation',
            })
            # Older Django needs explicit autocommit for postgres.
            if django.VERSION < (1, 6):
                db_config['OPTIONS'] = {'autocommit': True}
        # Configure test environment
        settings.configure(
            DATABASES={'default': db_config},
            INSTALLED_APPS=(
                'django.contrib.contenttypes',
                'modeltranslation',
            ),
            ROOT_URLCONF=None,  # tests override urlconf, but it still needs to be defined
            LANGUAGES=(
                ('en', 'English'),
            ),
            MIDDLEWARE_CLASSES=(),
        )
        if django.VERSION >= (1, 7):
            django.setup()
    failures = call_command(
        'test', 'modeltranslation', interactive=False, failfast=False, verbosity=2)
    sys.exit(bool(failures))


if __name__ == '__main__':
    runtests()
| {
"content_hash": "822488e47d6fa5d87d44db91d200922f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 90,
"avg_line_length": 29.24561403508772,
"alnum_prop": 0.50749850029994,
"repo_name": "marctc/django-modeltranslation",
"id": "08e133a4661f791a73dde867981271ec8bca7a17",
"size": "1689",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4121"
},
{
"name": "JavaScript",
"bytes": "20238"
},
{
"name": "Python",
"bytes": "263100"
}
],
"symlink_target": ""
} |
from django.db import models
from datetime import datetime
class JudgeResult(models.Model):
    """One judging outcome for a submission (sid) against a problem (pid)."""
    sid = models.IntegerField()
    pid = models.IntegerField()
    username = models.CharField(max_length=50)
    result = models.CharField(max_length=50)
    time = models.IntegerField()
    memory = models.IntegerField()
    message = models.CharField(max_length=500)
    status = models.IntegerField()
    # BUG FIX: the original passed default=datetime.now() — the *call* is
    # evaluated once at import time, so every row would share the server
    # start-up timestamp as its default (and makemigrations would see a
    # fresh default every run).  Pass the callable instead.  Note that
    # auto_now_add=True already makes Django stamp this field on insert,
    # so in practice the default is only a fallback.
    submit_time = models.DateTimeField(default=datetime.now, editable=True, auto_now_add=True)
    language = models.CharField(max_length=50)

    def __str__(self):
        # e.g. "alice: Accepted"
        return self.username + ': ' + self.result
| {
"content_hash": "2a005a8d7d0824fc645f2c4c4be23ce2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 96,
"avg_line_length": 38.6875,
"alnum_prop": 0.7011308562197092,
"repo_name": "ISeaTeL/ISeaTeL_Cup_Site",
"id": "1e217e9cd182d2b999f3cb6d69cbef156b629386",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oj_judge/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6568"
},
{
"name": "CSS",
"bytes": "8504"
},
{
"name": "HTML",
"bytes": "25170"
},
{
"name": "JavaScript",
"bytes": "2431"
},
{
"name": "Perl",
"bytes": "106"
},
{
"name": "Python",
"bytes": "47589"
},
{
"name": "Shell",
"bytes": "618"
}
],
"symlink_target": ""
} |
from django.contrib import admin
# Register your models here.
| {
"content_hash": "77df2557ca40b4d929e88eab50d206db",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 21,
"alnum_prop": 0.7936507936507936,
"repo_name": "wevote/WebAppPublic",
"id": "87f7238f57a72928d210846f76b4bd2762bbca78",
"size": "153",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wevote_settings/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
} |
from django import forms
from .models import Job
from django.contrib.auth.models import User
class JobForm(forms.ModelForm):
    """ModelForm for Job postings.

    The author field is excluded from the form, so it must be set in
    code — presumably from the request's user in the view; verify there.
    """
    class Meta:
        model = Job
        exclude = ('author',)
| {
"content_hash": "419e132c243039dd0930adca4c04aab8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 19.4,
"alnum_prop": 0.6855670103092784,
"repo_name": "academichero/jobs",
"id": "d9211f4ac4553e6b34d1f0963e743ddad30340fa",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/board/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "971"
},
{
"name": "HTML",
"bytes": "1890"
},
{
"name": "Python",
"bytes": "10724"
}
],
"symlink_target": ""
} |
"""
Step implementations for block content containers
"""
from behave import given, then, when
from docx import Document
from docx.table import Table
from helpers import test_docx
# given ===================================================
@given('a document containing a table')
def given_a_document_containing_a_table(context):
    """Load the fixture .docx that already contains a table."""
    fixture_path = test_docx('blk-containing-table')
    context.document = Document(fixture_path)
@given('a paragraph')
def given_a_paragraph(context):
    """Create a fresh document holding a single empty paragraph."""
    document = Document()
    paragraph = document.add_paragraph()
    context.document = document
    context.paragraph = paragraph
# when ====================================================
@when('I add a paragraph')
def when_add_paragraph(context):
    """Append an empty paragraph and keep a reference for later steps."""
    context.p = context.document.add_paragraph()
@when('I add a table')
def when_add_table(context):
    """Append a 2x2 table to the document."""
    context.document.add_table(2, 2)
# then =====================================================
@then('I can access the table')
def then_can_access_table(context):
    """The last table in the document is a Table proxy object."""
    assert isinstance(context.document.tables[-1], Table)
@then('the new table appears in the document')
def then_new_table_appears_in_document(context):
    """The freshly added table is accessible as a Table proxy object."""
    assert isinstance(context.document.tables[-1], Table)
| {
"content_hash": "5a654c579c0f1f869ac7e8881aba754c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 66,
"avg_line_length": 24.45098039215686,
"alnum_prop": 0.6287089013632718,
"repo_name": "tanyunshi/python-docx",
"id": "1eee70cd2a200caa3cd6097c870bec485fef0d2c",
"size": "1266",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "features/steps/block.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "67188"
},
{
"name": "Makefile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "1000619"
}
],
"symlink_target": ""
} |
import os, re, csv, time
import itertools
import argparse
import cv2
import numpy as np
from progress_bar import printProgressBar
oshapeX = 640
oshapeY = 240
shapeX = 320
shapeY = 120
reverse = [0,2,1,3]
# def image_autocontrast(image):
# # img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# # minVal, maxVal, _minLoc, _maxLoc = cv2.minMaxLoc(img_gray)
# # input_range = maxVal - minVal
# # alpha = 255 / input_range
# # beta = -minVal * alpha
# # output = alpha * image + beta
# # print output.shape, output.size, output.dtype
# # print image.shape, image.size, image.dtype
# # return alpha * image + beta
# # return image
# B = 0.0
# W = 0.0
# hist, bins = np.histogram(image.flatten(),256,[0,256])
# cdf = np.cumsum(hist)
# cdf_n = cdf * hist.max() / cdf.max()
# cdf_m = np.ma.masked_less_equal(cdf, B * cdf.max())
# cdf_m = np.ma.masked_greater_equal(cdf_m, (1.0 - W) * cdf.max())
# imin = cdf_m.argmin()
# imax = cdf_m.argmax()
# tr = np.zeros(256, dtype=np.uint8)
# tr = np.zeros(256, dtype=np.uint8)
# for i in range(0, 256):
# if i < imin: tr[i] = 0
# elif i > imax: tr[i] = 255
# else: tr[i] = (i - imin) * 255 / (imax - imin)
# img_res = tr[image]
# return img_res
def adjust_gamma(image, gamma=1.0):
    """Apply gamma correction to *image* via a 256-entry lookup table.

    gamma > 1 brightens, gamma < 1 darkens, gamma == 1 is the identity.
    """
    inv_gamma = 1.0 / gamma
    # Precompute the mapping for every possible 8-bit intensity value.
    lut = np.array(
        [((level / 255.0) ** inv_gamma) * 255 for level in range(256)]
    ).astype("uint8")
    return cv2.LUT(image, lut)
def image_darken(image):
    """Return a darkened copy of *image* (gamma 0.5)."""
    return adjust_gamma(image, gamma=0.5)
def image_brighten(image):
    """Return a brightened copy of *image* (gamma 2)."""
    return adjust_gamma(image, gamma=2)
# def image_equalize(image):
# img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
# img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
# image = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
# return image
def image_flip(image):
    """Return *image* mirrored horizontally (flip around the vertical axis)."""
    return cv2.flip(image, 1)
def process_image(path, name, command, op_todo, shape=(shapeY, shapeX)):
    """Process and augment one image pair, writing results next to the originals.

    path/name are parallel lists (e.g. left/right directory and file name),
    command is the numeric label (a 'flip' op swaps labels 1 and 2 via the
    module-level `reverse` table), op_todo is a list of op pipelines, each
    pipeline a list of (op_name, op_function) tuples applied in order.
    Returns CSV rows [name0, name1, command] for the generated images.
    NOTE(review): the `shape` parameter is currently unused.
    """
    image_paths = [path[i]+name[i] for i in range(len(path))]
    aug_images = []
    # darkening
    # tmp_img = img_orig
    # tmp_img = image_darken(tmp_img)
    # cv2.imwrite(filename=path+"darken_"+name,img=tmp_img)
    # aug_images.append(["darken_"+name,command])
    # # brightening
    # tmp_img = img_orig
    # tmp_img = image_brighten(tmp_img)
    # cv2.imwrite(filename=path+"brighten_"+name,img=tmp_img)
    # aug_images.append(["brighten_"+name,command])
    for ops in op_todo:
        new_command = command
        for ind in range(len(image_paths)):
            img_orig = cv2.imread(image_paths[ind])
            new_image = img_orig
            output_prepend = ""
            for op in ops:
                # Op names accumulate in the output file name, e.g. "darken_flip_x.jpg".
                output_prepend += op[0]+"_"
                new_image = op[1](new_image)
                # A horizontal flip mirrors the label as well.
                if op[0] == 'flip':
                    new_command = reverse[command]
            cv2.imwrite(filename=path[ind]+output_prepend+name[ind],img=new_image)
        aug_images.append([output_prepend+name[0],output_prepend+name[1],new_command])
        # # do darkening and brightening
        # tmp_img = new_image
        # tmp_img = image_darken(tmp_img)
        # cv2.imwrite(filename=path+"darken_"+output_prepend+name,img=tmp_img)
        # aug_images.append(["darken_"+output_prepend+name,new_command])
        # tmp_img = new_image
        # tmp_img = image_darken(tmp_img)
        # cv2.imwrite(filename=path+"brighten_"+output_prepend+name,img=tmp_img)
        # aug_images.append(["brighten_"+output_prepend+name,new_command])
    return aug_images
def synthesize_images(set_name, op_list):
    """Synthesize data from original images (Python 2 module).

    Builds augmentation pipelines from op_list (expected order per the
    __main__ block: darken, brighten, flip), applies them to every entry
    listed in model_data/<set_name>_log.csv and appends rows for the
    generated images back to that CSV.
    """
    # Each entry of op_todo is one pipeline of ops applied in sequence.
    op_todo = [
        ([op_list[0]]),
        ([op_list[1]]),
        ([op_list[2]]),
        ([op_list[0],op_list[2]]),
        ([op_list[1],op_list[2]])
    ]
    print op_todo
    # for ind in range(len(op_list)):
    #     for item in itertools.combinations(op_list, ind+1):
    #         op_todo.append(item)
    # img_path = "data_sets/%s/data/" % (set_name)
    img_path = ["data_sets/%s/left/" % (set_name),
        "data_sets/%s/right/" % (set_name)]
    csv_file = "model_data/%s_log.csv" % (set_name)
    # If the log already mentions augmented file names, assume the set was
    # processed on an earlier run and bail out.
    with open(csv_file, 'r') as in_csv:
        for line in in_csv:
            if re.search(r"(flip|autocont|equalize|darken|brighten)", line):
                printProgressBar(1, 1)
                return
    print "Processing images..."
    # 'a+' lets us read the existing rows and append the new ones.
    with open(csv_file, 'a+') as io_csv:
        io_csv.seek(0)
        reader = csv.reader(io_csv, delimiter=',')
        attribute = next(reader, None)  # skip the header row
        entries = list(reader)
        cnt_total = len(entries)
        cnt_iter = 0
        printProgressBar(cnt_iter, cnt_total)
        for entry in entries:
            cnt_iter += 1
            printProgressBar(cnt_iter, cnt_total)
            # try:
            new_entries = process_image(img_path, [entry[0],entry[1]], int(entry[-1]), op_todo)
            writer = csv.writer(io_csv, delimiter=',')
            for new_entry in new_entries:
                writer.writerow(new_entry)
            # except:
            #     print "CSV entry error"
    time.sleep(0.1)
if __name__ == "__main__":
    # Command-line entry point: validate the data set exists, then augment it.
    parser = argparse.ArgumentParser(description="Image Processing")
    parser.add_argument(
        "set_name",
        type=str,
        help="Image folder path"
    )
    args = parser.parse_args()
    if not os.path.exists("data_sets/"+args.set_name):
        print "Image set does not exist"
        exit(1)
    if not os.path.exists("model_data/"+args.set_name+"_log.csv"):
        print "Image set data does not exist"
        exit(1)
    # (name, function) pairs; synthesize_images indexes this list by position.
    op_list = [
        # ('autocont',image_autocontrast),
        # ('equalize',image_equalize),
        ('darken',image_darken),
        ('brighten',image_brighten),
        ('flip',image_flip)
    ]
    synthesize_images(args.set_name, op_list)
print "Data set has been processed" | {
"content_hash": "40e2c20c094cece3f7a523957d7fb981",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 95,
"avg_line_length": 33.51912568306011,
"alnum_prop": 0.5730355396152592,
"repo_name": "LiuVII/Self-driving-RC-car",
"id": "486ad648685c4b6115462864ed48d14886494af4",
"size": "6134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "img_augment_tmp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2844"
},
{
"name": "C",
"bytes": "48821"
},
{
"name": "C++",
"bytes": "5636"
},
{
"name": "Makefile",
"bytes": "347"
},
{
"name": "Python",
"bytes": "190033"
},
{
"name": "Shell",
"bytes": "893"
}
],
"symlink_target": ""
} |
from openerp.osv import fields, osv
class res_partner(osv.osv):
    # Extend res.partner with per-partner stock locations.  These are
    # "property" fields, i.e. their values are company-dependent.
    _inherit = 'res.partner'
    _columns = {
        # Destination location for goods shipped to this partner.
        'property_stock_customer': fields.property(
          type='many2one',
          relation='stock.location',
          string="Customer Location",
          help="This stock location will be used, instead of the default one, as the destination location for goods you send to this partner"),
        # Source location for goods received from this partner.
        'property_stock_supplier': fields.property(
          type='many2one',
          relation='stock.location',
          string="Vendor Location",
          help="This stock location will be used, instead of the default one, as the source location for goods you receive from the current partner"),
    }
| {
"content_hash": "a09199ce591ec63bd86673f0b67e3760",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 150,
"avg_line_length": 44.875,
"alnum_prop": 0.6518105849582173,
"repo_name": "vileopratama/vitech",
"id": "917c5de1ce9c1c386d845696e7091f234c99218b",
"size": "818",
"binary": false,
"copies": "47",
"ref": "refs/heads/master",
"path": "src/addons/stock/partner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from dynd import nd
import datashape
from . import DDesc, Capabilities
from .dynd_data_descriptor import DyND_DDesc
from .stream_data_descriptor import Stream_DDesc
from ..optional_packages import tables_is_here
if tables_is_here:
import tables as tb
class HDF5_DDesc(DDesc):
    """
    A Blaze data descriptor which exposes a HDF5 dataset.

    Each operation opens the underlying PyTables file on demand; most of
    them close it again before returning, so no handle is held between
    calls (see the NOTE on __iter__/where for the exceptions).
    """
    def __init__(self, path, datapath, mode='r', filters=None):
        # path: filesystem path of the HDF5 file
        # datapath: internal node path of the dataset (e.g. '/group/arr')
        # mode: file mode used for write operations; reads always use 'r'
        # filters: stored but not used by the methods defined here
        self.path = path
        self.datapath = datapath
        self.mode = mode
        self.filters = filters
    @property
    def dshape(self):
        # This cannot be cached because the Array can change the dshape
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            odshape = datashape.from_numpy(dset.shape, dset.dtype)
        return odshape
    @property
    def capabilities(self):
        """The capabilities for the HDF5 arrays."""
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            # Only EArrays and Tables can be appended efficiently;
            # only Tables support where()-style queries.
            appendable = isinstance(dset, (tb.EArray, tb.Table))
            queryable = isinstance(dset, (tb.Table,))
        caps = Capabilities(
            # HDF5 arrays can be updated
            immutable = False,
            # HDF5 arrays are concrete
            deferred = False,
            # HDF5 arrays are persistent
            persistent = True,
            # HDF5 arrays can be appended efficiently (EArrays and Tables)
            appendable = appendable,
            # PyTables Tables can be queried efficiently
            queryable = queryable,
            remote = False,
            )
        return caps
    def dynd_arr(self):
        # Positionate at the beginning of the file
        # NOTE(review): dset[:] loads the entire dataset into memory.
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            dset = nd.array(dset[:], dtype=dset.dtype)
        return dset
    def __array__(self):
        # NumPy interop: materialize the whole dataset as an ndarray.
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            dset = dset[:]
        return dset
    def __len__(self):
        # Length of the leading dimension.
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            arrlen = len(dset)
        return arrlen
    def __getitem__(self, key):
        with tb.open_file(self.path, mode='r') as f:
            dset = f.get_node(self.datapath)
            # The returned arrays are temporary buffers,
            # so must be flagged as readonly.
            dyndarr = nd.asarray(dset[key], access='readonly')
        return DyND_DDesc(dyndarr)
    def __setitem__(self, key, value):
        # HDF5 arrays can be updated
        with tb.open_file(self.path, mode=self.mode) as f:
            dset = f.get_node(self.datapath)
            dset[key] = value
    def __iter__(self):
        # NOTE(review): the file is opened without a context manager and is
        # only closed after full exhaustion; abandoning this generator early
        # leaks the open file handle.
        f = tb.open_file(self.path, mode='r')
        dset = f.get_node(self.datapath)
        # Get rid of the leading dimension on which we iterate
        dshape = datashape.from_numpy(dset.shape[1:], dset.dtype)
        for el in dset:
            # Table rows (objects with an `nrow` attribute) need an
            # explicit slice-copy before wrapping.
            if hasattr(el, "nrow"):
                yield DyND_DDesc(nd.array(el[:], type=str(dshape)))
            else:
                yield DyND_DDesc(nd.array(el, type=str(dshape)))
        dset._v_file.close()
    def where(self, condition):
        """Iterate over values fulfilling a condition."""
        # Same open/close pattern — and the same early-exit caveat — as __iter__.
        f = tb.open_file(self.path, mode='r')
        dset = f.get_node(self.datapath)
        # Get rid of the leading dimension on which we iterate
        dshape = datashape.from_numpy(dset.shape[1:], dset.dtype)
        for el in dset.where(condition):
            yield DyND_DDesc(nd.array(el[:], type=str(dshape)))
        dset._v_file.close()
    def getattr(self, name):
        # Column access; only meaningful for compound (Table) datasets.
        with tb.open_file(self.path, mode=self.mode) as f:
            dset = f.get_node(self.datapath)
            if hasattr(dset, 'cols'):
                return DyND_DDesc(
                    nd.asarray(getattr(dset.cols, name)[:],
                               access='readonly'))
            else:
                raise IndexError("not an HDF5 compound dataset")
    def append(self, values):
        """Append a list of values."""
        shape, dtype = datashape.to_numpy(self.dshape)
        values_arr = np.array(values, dtype=dtype)
        shape_vals = values_arr.shape
        # Allow a single row to be passed without the leading dimension.
        if len(shape_vals) < len(shape):
            shape_vals = (1,) + shape_vals
        if len(shape_vals) != len(shape):
            raise ValueError("shape of values is not compatible")
        # Now, do the actual append
        with tb.open_file(self.path, mode=self.mode) as f:
            dset = f.get_node(self.datapath)
            dset.append(values_arr.reshape(shape_vals))
    def remove(self):
        """Remove the persistent storage."""
        os.unlink(self.path)
| {
"content_hash": "12ab748f164598754343f98e4c85ae66",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 74,
"avg_line_length": 35.31654676258993,
"alnum_prop": 0.575677327357914,
"repo_name": "FrancescAlted/blaze",
"id": "e75b096f538bc500600e77e86b3e2ec6dcabbec3",
"size": "4909",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blaze/datadescriptor/hdf5_data_descriptor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143372"
},
{
"name": "JavaScript",
"bytes": "56478"
},
{
"name": "Python",
"bytes": "557985"
},
{
"name": "Ruby",
"bytes": "1188"
},
{
"name": "Shell",
"bytes": "13149"
}
],
"symlink_target": ""
} |
import pytest
from flask import url_for
from comport.department.models import Extractor, Department
from bs4 import BeautifulSoup
from comport.data.models import OfficerInvolvedShootingBPD, UseOfForceIncidentBPD, CitizenComplaintBPD
from comport.data.models import OfficerInvolvedShootingIMPD, UseOfForceIncidentIMPD, CitizenComplaintIMPD, AssaultOnOfficerIMPD
from comport.data.models import PursuitSRPD
from comport.data.models import UseOfForceIncidentLMPD
from .utils import create_and_log_in_user
import datetime
@pytest.mark.usefixtures('db')
class TestPublicPages:
    def test_home_page_exists(self, testapp):
        # The root URL renders successfully (webtest asserts the status itself).
        testapp.get("/", status=200)
def test_home_page_links_to_about(self, testapp):
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/about/") is not None
def test_about_page_exists(self, testapp):
response = testapp.get("/about/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="https://www.codeforamerica.org") is not None
def test_multiple_depts_display(self, testapp):
impd = Department.create(name="I Police Department", short_name="IMPD", is_public=True)
UseOfForceIncidentIMPD.create(department_id=impd.id, opaque_id="12345abcde")
bpd = Department.create(name="B Police Department", short_name="BPD", is_public=True)
UseOfForceIncidentBPD.create(department_id=bpd.id, opaque_id="12345abcde")
lmpd = Department.create(name="LM Police Department", short_name="LMPD", is_public=False)
UseOfForceIncidentLMPD.create(department_id=lmpd.id, opaque_id="12345abcde")
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/IMPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/useofforce") is not None
assert soup.find("a", href="/department/LMPD/useofforce") is None
def test_non_public_depts_display_for_users_with_access(self, testapp):
''' Users can see links to datasets they're allowed to access on the front page
'''
impd = Department.create(name="I Police Department", short_name="IMPD", is_public=True)
UseOfForceIncidentIMPD.create(department_id=impd.id, opaque_id="12345abcde")
bpd = Department.create(name="B Police Department", short_name="BPD", is_public=False)
UseOfForceIncidentBPD.create(department_id=bpd.id, opaque_id="12345abcde")
lmpd = Department.create(name="LM Police Department", short_name="LMPD", is_public=False)
UseOfForceIncidentLMPD.create(department_id=lmpd.id, opaque_id="12345abcde")
# A non logged-in user can only see the public department
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/IMPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/useofforce") is None
assert soup.find("a", href="/department/LMPD/useofforce") is None
# A user associated with a particular department can see that department's
# available datasets when logged in
create_and_log_in_user(testapp=testapp, department=bpd, username="user1")
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/IMPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/useofforce") is not None
assert soup.find("a", href="/department/LMPD/useofforce") is None
# A user with admin access can see all departments' available datasets
create_and_log_in_user(testapp=testapp, department=impd, rolename='admin', username="user2")
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/IMPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/useofforce") is not None
assert soup.find("a", href="/department/LMPD/useofforce") is not None
# Log out and only the public department should be visible
testapp.get(url_for('public.logout')).follow()
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/IMPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/useofforce") is None
assert soup.find("a", href="/department/LMPD/useofforce") is None
def test_all_dept_links(self, testapp):
department = Department.create(name="B Police Department", short_name="BPD", is_public=True)
CitizenComplaintBPD.create(department_id=department.id, opaque_id="12345abcde")
UseOfForceIncidentBPD.create(department_id=department.id, opaque_id="23456bcdef")
OfficerInvolvedShootingBPD.create(department_id=department.id, opaque_id="34567cdefg")
SRDepartment = Department.create(name="SR Police Department", short_name="SRPD", is_public=True)
PursuitSRPD.create(department_id=SRDepartment.id, opaque_id="45678defgh")
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/BPD/complaints") is not None
assert soup.find("a", href="/department/BPD/useofforce") is not None
assert soup.find("a", href="/department/BPD/officerinvolvedshootings") is not None
assert soup.find("a", href="/department/SRPD/pursuits") is not None
def test_data_status(self, testapp):
department = Department.create(name="B Police Department", short_name="BPD", is_public=True)
CitizenComplaintBPD.create(department_id=department.id, opaque_id="12345abcde")
OfficerInvolvedShootingBPD.create(department_id=department.id, opaque_id="34567cdefg")
department.is_public_officer_involved_shootings = False
response = testapp.get("/", status=200)
soup = BeautifulSoup(response.text, "html.parser")
assert soup.find("a", href="/department/BPD/complaints") is not None
assert soup.find("a", href="/department/BPD/useofforce") is None
assert soup.find("a", href="/department/BPD/officerinvolvedshootings") is None
def test_updated_text_on_schema_pages(self, testapp):
''' The notice of the last time a dataset was updated is on all schema pages
'''
department = Department.create(name="B Police Department", short_name="BPD", is_public=True)
CitizenComplaintBPD.create(department_id=department.id, opaque_id="12345abcde")
UseOfForceIncidentBPD.create(department_id=department.id, opaque_id="23456bcdef")
OfficerInvolvedShootingBPD.create(department_id=department.id, opaque_id="34567cdefg")
SRDepartment = Department.create(name="SR Police Department", short_name="SRPD", is_public=True)
PursuitSRPD.create(department_id=SRDepartment.id, opaque_id="45678defgh")
extractor_password = 'password'
bpd_extractor, envs = Extractor.from_department_and_password(department=department, password=extractor_password)
bpd_extractor.last_contact = datetime.datetime(2012, 9, 16)
srpd_extractor, envs = Extractor.from_department_and_password(department=SRDepartment, password=extractor_password)
srpd_extractor.last_contact = datetime.datetime(2014, 11, 2)
response = testapp.get("/department/BPD/schema/complaints/")
soup = BeautifulSoup(response.text, "html.parser")
updated_span = soup.find("span", {"class": "updated"})
assert updated_span is not None
assert "Last Updated September 16, 2012" == updated_span.text
response = testapp.get("/department/BPD/schema/useofforce/")
soup = BeautifulSoup(response.text, "html.parser")
updated_span = soup.find("span", {"class": "updated"})
assert updated_span is not None
assert "Last Updated September 16, 2012" == updated_span.text
response = testapp.get("/department/BPD/schema/officerinvolvedshootings/")
soup = BeautifulSoup(response.text, "html.parser")
updated_span = soup.find("span", {"class": "updated"})
assert updated_span is not None
assert "Last Updated September 16, 2012" == updated_span.text
response = testapp.get("/department/SRPD/schema/pursuits/")
soup = BeautifulSoup(response.text, "html.parser")
updated_span = soup.find("span", {"class": "updated"})
assert updated_span is not None
assert "Last Updated November 02, 2014" == updated_span.text
| {
"content_hash": "cd64a3900aa5a9bc91364bc4ad0426a8",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 127,
"avg_line_length": 58.806666666666665,
"alnum_prop": 0.6943657181725428,
"repo_name": "codeforamerica/comport",
"id": "29c7213ffb5fd899b34fe1ff43cb3ccb484c312c",
"size": "8821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_public_pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6889"
},
{
"name": "HTML",
"bytes": "73956"
},
{
"name": "JavaScript",
"bytes": "228515"
},
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "725626"
},
{
"name": "Ruby",
"bytes": "1030"
}
],
"symlink_target": ""
} |
import os
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
    """Event handler: prints a line for each create/delete/modify event.

    Method names follow pyinotify's convention: ``process_<EVENT_NAME>`` is
    invoked for the matching inotify event type. (Python 2 syntax.)
    """

    def process_IN_CREATE(self, event):
        # Called when a file or directory is created under a watched path.
        print "Create file: %s " % os.path.join(event.path,event.name)

    def process_IN_DELETE(self, event):
        # Called when a file or directory is deleted under a watched path.
        print "Delete file: %s " % os.path.join(event.path,event.name)

    def process_IN_MODIFY(self, event):
        # Called when a file under a watched path is modified.
        print "Modify file: %s " % os.path.join(event.path,event.name)
def FSMonitor(path = '.'):
    """Watch `path` recursively and print create/delete/modify events.

    Blocks forever; interrupt with Ctrl-C (KeyboardInterrupt) to stop the
    notifier and return.
    """
    wm = pyinotify.WatchManager()
    # Only these three event types are subscribed to.
    mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_MODIFY
    notifier = pyinotify.Notifier(wm, EventHandler())
    # rec=True watches subdirectories too; auto_add=True automatically
    # watches directories created after monitoring starts.
    wm.add_watch(path, mask, rec = True, auto_add = True)
    print 'now starting monitor %s'%(path)
    while True:
        try:
            # Standard pyinotify polling loop: dispatch queued events, then
            # check for and read any new ones.
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break

if __name__ == "__main__":
    FSMonitor()
"content_hash": "585e2d59de9202f182c3937a9d9cc6b0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 28.0625,
"alnum_prop": 0.6748329621380846,
"repo_name": "lanbaba/ossync",
"id": "0da80608caa4b9d5e9cdd34c51c7bfe970e28f62",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ossync/tests/pyinotify_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110393"
}
],
"symlink_target": ""
} |
"""
generate_twitter_scoreboard.py
Use data in config.yml to generate follower-count information for a set of Twitter screennames,
and output to a JSON file
Created by Sanders Kleinfeld on Sun Feb 16 15:46:00 EST 2014
Copyright (c) 2014 Sanders Kleinfeld. All rights reserved.
"""
import json
import os
import twitter
import yaml
CONFIG = os.path.join(os.path.dirname(__file__), 'config.yml')
JSON_OUTPUT_FILE = os.path.join(os.path.dirname(__file__), 'public', 'scoreboard.json')

# Load OAuth credentials and the list of tracked Twitter users.
# A context manager guarantees the handle is closed even if parsing fails
# (the original open()/close() pair leaked the handle on error).
with open(CONFIG) as f:
    app_config = yaml.safe_load(f)

auth_creds = twitter.oauth.OAuth(app_config['oauth_token'], app_config['oauth_secret'],
                                 app_config['consumer_key'], app_config['consumer_secret'])

screen_names = [user['screen_name'] for user in app_config['twitter_users']]

tw_obj = twitter.Twitter(auth=auth_creds)
# users/lookup accepts a comma-separated list of screen names in one call.
userdata = tw_obj.users.lookup(screen_name=','.join(screen_names), timeout=1)

json_output = []  # start with an empty list
for user in userdata:
    # Match up the user record in our config with the corresponding record in
    # returned Twitter data using "screen_name" (case-insensitive, since
    # Twitter may return a different casing than the config).
    corresponding_config_user = [config_user for config_user in app_config['twitter_users'] if config_user['screen_name'].lower() == user['screen_name'].lower()][0]
    json_output.append({'screen_name': user['screen_name'],
                        'display_name': corresponding_config_user['display_name'],
                        'followers_count': user['followers_count'],
                        'avatar': user['profile_image_url']
                        })

# Sort list by follower count, in descending order
json_output.sort(key=lambda user: user['followers_count'], reverse=True)

# Write the scoreboard as pretty-printed JSON for the front end to consume.
with open(JSON_OUTPUT_FILE, 'w') as f:
    f.write(json.dumps(json_output, indent=1, sort_keys=True))
| {
"content_hash": "a3a22b1b0ccb51750ade4efaffe36d88",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 164,
"avg_line_length": 37.583333333333336,
"alnum_prop": 0.6762749445676275,
"repo_name": "sandersk/twitter-follower-scoreboard",
"id": "0806cffcd98b046ca0f6e1adc8733438a3654e41",
"size": "1844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_twitter_scoreboard.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1160"
}
],
"symlink_target": ""
} |
from app.util.conf.slave_config_loader import SlaveConfigLoader
from app.util.conf.configuration import Configuration
from test.framework.base_unit_test_case import BaseUnitTestCase
class TestSlaveConfigLoader(BaseUnitTestCase):
    """Tests that SlaveConfigLoader picks the right protocol scheme:
    'http' by default, 'https' once cert/key files appear in the config.
    """

    def _assert_protocol_scheme(self, expected_stored_protocol_scheme_value):
        """Assert that Configuration['protocol_scheme'] matches the expected value."""
        key = 'protocol_scheme'
        actual_stored_protocol_scheme_value = Configuration[key]
        self.assertEqual(expected_stored_protocol_scheme_value, actual_stored_protocol_scheme_value,
                         'The configuration value for the key "{}" was expected to be {}:{}, but was {}:{}.'.format(
                             key, type(expected_stored_protocol_scheme_value), expected_stored_protocol_scheme_value,
                             type(actual_stored_protocol_scheme_value), actual_stored_protocol_scheme_value))

    def test_configure_default_sets_protocol_scheme_to_http(self):
        # Patch ConfigFile so no real file I/O happens; the mock itself is unused here.
        self.patch('app.util.conf.base_config_loader.ConfigFile')
        config = Configuration.singleton()
        config_loader = SlaveConfigLoader()

        config_loader.configure_defaults(config)

        self._assert_protocol_scheme('http')

    def test_configure_postload_sets_protocol_scheme_to_https(self):
        # Simulate a config file that supplies HTTPS cert and key paths.
        mock_config_file = self.patch('app.util.conf.base_config_loader.ConfigFile').return_value
        mock_config_file.read_config_from_disk.return_value = {'general': {'https_cert_file': '/path/to/cert',
                                                                           'https_key_file': '/path/to/key'},
                                                               'slave': {}
                                                               }
        config = Configuration.singleton()
        config_loader = SlaveConfigLoader()

        config_loader.configure_defaults(config)
        config_loader.load_from_config_file(config, config_filename='fake_filename')
        config_loader.configure_postload(config)

        self._assert_protocol_scheme('https')
| {
"content_hash": "c61c37d09baa6b5f10e4c8c6ca136323",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 117,
"avg_line_length": 57.59090909090909,
"alnum_prop": 0.6227308602999211,
"repo_name": "box/ClusterRunner",
"id": "d530dc94896ab139d9622bc812abcde4fe18aaff",
"size": "2534",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/util/conf/test_slave_config_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "Dockerfile",
"bytes": "718"
},
{
"name": "Makefile",
"bytes": "10301"
},
{
"name": "PowerShell",
"bytes": "1467"
},
{
"name": "Python",
"bytes": "776771"
},
{
"name": "Shell",
"bytes": "545"
}
],
"symlink_target": ""
} |
"""Reliable UDP implementation using Twisted."""
import collections
from google.protobuf import message
from twisted.internet import protocol
from txrudp import packet
class ConnectionMultiplexer(
    protocol.DatagramProtocol,
    collections.MutableMapping
):

    """
    Multiplexes many virtual connections over single UDP socket.

    Behaves as a mutable mapping from remote (ip, port) tuples to their
    handling Connection objects. Handles graceful shutdown of active
    connections.
    """

    def __init__(
        self,
        connection_factory,
        public_ip,
        relaying=False,
        logger=None
    ):
        """
        Initialize a new multiplexer.

        Args:
            connection_factory: The connection factory used to
                instantiate new connections, as a
                connection.ConnectionFactory.
            public_ip: The external IPv4/IPv6 this node publishes as its
                reception address.
            relaying: If True, the multiplexer will silently forward
                packets that are not targeting this node (i.e. messages
                that have a destination IP different than `public_ip`.)
                If False, this node will drop such messages.
            logger: A logging.Logger instance to dump invalid received
                packets into; if None, dumping is disabled.
        """
        super(ConnectionMultiplexer, self).__init__()
        self.connection_factory = connection_factory
        self.public_ip = public_ip
        # Actual listening port; unknown until the transport is bound
        # (see startProtocol).
        self.port = None
        self.relaying = relaying
        self._active_connections = {}
        self._banned_ips = set()
        self._logger = logger

    def startProtocol(self):
        """Start the protocol and cache listening port."""
        super(ConnectionMultiplexer, self).startProtocol()
        self.port = self.transport.getHost().port

    def __len__(self):
        """Return the number of live connections."""
        return len(self._active_connections)

    def __getitem__(self, addr):
        """
        Return the handling connection of the given address.

        Args:
            addr: Tuple of destination address (ip, port).

        Raises:
            KeyError: No connection is handling the given address.
        """
        return self._active_connections[addr]

    def __setitem__(self, addr, con):
        """
        Register a handling connection for a given remote address.

        If a previous connection is already bound to that address,
        it is shutdown and then replaced.

        Args:
            addr: Tuple of destination address (ip, port).
            con: The connection to register, as a Connection.
        """
        prev_con = self._active_connections.get(addr)
        if prev_con is not None:
            prev_con.shutdown()
        self._active_connections[addr] = con

    def __delitem__(self, addr):
        """
        Unregister a handling connection for a given remote address.

        Note: the removed connection is not shut down here; callers are
        responsible for its lifecycle.

        Args:
            addr: Tuple of destination address (ip, port).

        Raises:
            KeyError: No connection is handling the given address.
        """
        del self._active_connections[addr]

    def __iter__(self):
        """Return iterator over the addresses of active connections."""
        return iter(self._active_connections)

    def ban_ip(self, ip_address):
        """
        Add an IP address to the ban list. No connections will be
        made to this IP and packets will be dropped.

        Args:
            ip_address: a `String` IP address (without port).
        """
        self._banned_ips.add(ip_address)

    def remove_ip_ban(self, ip_address):
        """
        Remove an IP address from the ban list.

        Args:
            ip_address: a `String` IP address (without port).
        """
        self._banned_ips.discard(ip_address)

    def datagramReceived(self, datagram, addr):
        """
        Called when a datagram is received.

        If the datagram isn't meant for us, immediately relay it.
        Otherwise, delegate handling to the appropriate connection.
        If no such connection exists, create one. Always take care
        to avoid mistaking a relay address for the original sender's
        address.

        Args:
            datagram: Datagram string received from transport layer.
            addr: Sender address, as a tuple of an IPv4/IPv6 address
                and a port, in that order. If this address is
                different from the packet's source address, the packet
                is being relayed; future outbound packets should also
                be relayed through the specified relay address.
        """
        try:
            rudp_packet = packet.Packet.from_bytes(datagram)
        except (message.DecodeError, TypeError, ValueError):
            # Undecodable datagram: log (if enabled) and drop silently.
            if self._logger is not None:
                self._logger.info(
                    'Bad packet (bad protobuf format): {0}'.format(datagram)
                )
        except packet.ValidationError:
            # Well-formed protobuf but not a valid RUDP packet: drop.
            if self._logger is not None:
                self._logger.info(
                    'Bad packet (invalid RUDP packet): {0}'.format(datagram)
                )
        else:
            # Drop packets from banned senders or relays.
            if (addr[0] in self._banned_ips or
                    rudp_packet.source_addr[0] in self._banned_ips):
                return
            if rudp_packet.dest_addr[0] != self.public_ip:
                # Not addressed to us: forward only if relaying is enabled,
                # otherwise drop.
                if self.relaying:
                    self.transport.write(datagram, rudp_packet.dest_addr)
            else:
                con = self._active_connections.get(rudp_packet.source_addr)
                # Only a SYN packet may open a new connection; non-SYN
                # packets from unknown sources are ignored.
                if con is None and rudp_packet.get_syn():
                    con = self.make_new_connection(
                        (self.public_ip, self.port),
                        rudp_packet.source_addr,
                        addr
                    )
                if con is not None:
                    con.receive_packet(rudp_packet, addr)

    def make_new_connection(self, own_addr, source_addr, relay_addr=None):
        """
        Create a new connection to handle the given address.

        The new connection is registered under `source_addr` in the
        active-connections map.

        Args:
            own_addr: Local host address, as a (ip, port) tuple.
            source_addr: Remote host address, as a (ip, port) tuple.
            relay_addr: Remote host address, as a (ip, port) tuple.

        Returns:
            A new connection.Connection
        """
        con = self.connection_factory.make_new_connection(
            self,
            own_addr,
            source_addr,
            relay_addr
        )
        self._active_connections[source_addr] = con
        return con

    def send_datagram(self, datagram, addr):
        """
        Send RUDP datagram to the given address.

        Args:
            datagram: Prepared RUDP datagram, as a string.
            addr: Tuple of destination address (ip, port).

        This is essentially a wrapper so that the transport layer is
        not exposed to the connections.
        """
        self.transport.write(datagram, addr)

    def shutdown(self):
        """Shutdown all active connections and then terminate protocol."""
        for connection in self._active_connections.values():
            connection.shutdown()
        # Some transports (e.g. in tests) may not support loseConnection.
        if hasattr(self.transport, 'loseConnection'):
            self.transport.loseConnection()
| {
"content_hash": "d8597107f7c1d03bda6986e37ad95e30",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 76,
"avg_line_length": 33.771028037383175,
"alnum_prop": 0.5804621558046216,
"repo_name": "OpenBazaar/txrudp",
"id": "7f688a2383bad8cdf31c80ddab0624393e4a1eec",
"size": "7227",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "txrudp/rudp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "162"
},
{
"name": "Protocol Buffer",
"bytes": "434"
},
{
"name": "Python",
"bytes": "98226"
}
],
"symlink_target": ""
} |
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
    """Project application settings wizard (transient model).

    Each ``module_*`` boolean installs/uninstalls the named module when the
    settings form is applied; ``group_*`` booleans toggle membership in the
    implied res.groups (standard res.config.settings conventions).
    """
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    _columns = {
        'module_sale_service': fields.boolean('Generate tasks from sale orders',
            help='This feature automatically creates project tasks from service products in sale orders. '
                 'More precisely, tasks are created for procurement lines with product of type \'Service\', '
                 'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
                 '-This installs the module sale_service.'),
        'module_pad': fields.boolean("Use integrated collaborative note pads on task",
            help='Lets the company customize which Pad installation should be used to link to new pads '
                 '(for example: http://ietherpad.com/).\n'
                 '-This installs the module pad.'),
        'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
            help='This allows you to transfer the entries under tasks defined for Project Management to '
                 'the timesheet line entries for particular date and user, with the effect of creating, '
                 'editing and deleting either ways.\n'
                 '-This installs the module project_timesheet.'),
        'module_project_issue': fields.boolean("Track issues and bugs",
            help='Provides management of issues/bugs in projects.\n'
                 '-This installs the module project_issue.'),
        'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
            help='This will set the unit of measure used in projects and tasks.\n'
                 'Changing the unit will only impact new entries.'),
        'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
            help='Provides timesheet support for the issues/bugs management in project.\n'
                 '-This installs the module project_issue_sheet.'),
        'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
            implied_group='project.group_tasks_work_on_tasks',
            help="Allows you to compute work on tasks."),
        'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
            implied_group='project.group_time_work_estimation_tasks',
            help="Allows you to compute Time Estimation on tasks."),
        'group_manage_delegation_task': fields.boolean("Allow task delegation",
            implied_group='project.group_delegate_task',
            help="Allows you to delegate tasks to other users."),
    }

    def get_default_time_unit(self, cr, uid, fields, context=None):
        # Default the working-time unit from the current user's company.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return {'time_unit': user.company_id.project_time_mode_id.id}

    def set_time_unit(self, cr, uid, ids, context=None):
        # Persist the chosen working-time unit on the user's company.
        config = self.browse(cr, uid, ids[0], context)
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        user.company_id.write({'project_time_mode_id': config.time_unit.id})

    def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
        # Time estimation and timesheets both require work logging on tasks,
        # so enabling either auto-enables group_tasks_work_on_tasks.
        if group_time_work_estimation_tasks or module_project_timesheet:
            return {'value': {'group_tasks_work_on_tasks': True}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "f249ffab46a3fa634bfdfc84ad42ac02",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 131,
"avg_line_length": 62.50877192982456,
"alnum_prop": 0.6550659556553466,
"repo_name": "cristianquaglio/odoo",
"id": "52bcf2cd36c8c37be47b23233aa160414302ec72",
"size": "4551",
"binary": false,
"copies": "226",
"ref": "refs/heads/master",
"path": "addons/project/res_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import datetime
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from gzip import GzipFile
from tempfile import NamedTemporaryFile
from optparse import make_option
from remote_fixtures.conf import settings
from remote_fixtures.utils import S3Mixin
class Command(BaseCommand, S3Mixin):
    """Management command: dump fixtures, optionally gzip them, upload to S3.

    Positional args are forwarded to Django's ``dumpdata``. Pass
    ``--nocompress`` to skip gzip compression. (Python 2 syntax.)
    """

    option_list = BaseCommand.option_list + (
        make_option(
            '--nocompress',
            # store_false: the flag DISABLES compression (default is on).
            action='store_false',
            dest='compress',
            default=True,
        ),
    )

    def get_fixture_file(self, dumpdata_args):
        """Run dumpdata into a temporary .json file and return it, rewound."""
        fixture_file = NamedTemporaryFile(suffix='.json')
        call_command('dumpdata', *dumpdata_args, stdout=fixture_file)
        fixture_file.seek(0)
        return fixture_file

    def compress_fixture_file(self, fixture_file):
        """Gzip `fixture_file` into a new temporary .json.gz file, rewound."""
        compressed_file = NamedTemporaryFile(suffix='.json.gz')
        gzip_file = GzipFile(compresslevel=9, fileobj=compressed_file)
        gzip_file.write(fixture_file.read())
        gzip_file.close()
        compressed_file.seek(0)
        return compressed_file

    def get_file_name(self, compress):
        """Build a timestamped remote file name, with .gz suffix if compressed."""
        now = datetime.utcnow()
        return 'fixture_{}.json{}'.format(
            slugify(unicode(now.isoformat())),
            '.gz' if compress else ''
        )

    def upload_file(self, fixture_file, filename):
        """Upload `fixture_file` to the S3 bucket under `filename`."""
        # get_bucket() presumably comes from S3Mixin — confirm.
        bucket = self.get_bucket()
        key = bucket.new_key(filename)
        key.set_contents_from_file(fixture_file)

    def handle(self, *args, **options):
        """Entry point: dump, optionally cache locally, compress, and upload."""
        filename = self.get_file_name(options['compress'])
        fixture_file = self.get_fixture_file(args)
        if settings.REMOTE_FIXTURES_ENABLE_CACHE:
            # Cache the uncompressed dump; cache_fixture_file/remove_gz_suffix
            # are presumably S3Mixin helpers — confirm.
            self.cache_fixture_file(fixture_file, self.remove_gz_suffix(filename))
        if options['compress']:
            fixture_file = self.compress_fixture_file(fixture_file)
        self.upload_file(fixture_file, filename)
        print 'filename: %s' % filename
| {
"content_hash": "e205e064b11cfd0cf59fce9ebb537951",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 82,
"avg_line_length": 32.523809523809526,
"alnum_prop": 0.6544655929721815,
"repo_name": "gizmag/remote-fixtures",
"id": "272061f56bb4a780d1d722f8d72625a71783091d",
"size": "2049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remote_fixtures/management/commands/push_fixtures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11804"
}
],
"symlink_target": ""
} |
import json
from slackclient import SlackClient
from geowatchutil.client.base import GeoWatchClientWebHook
class GeoWatchClientSlack(GeoWatchClientWebHook):
    """Slack client backend for GeoWatch: manages channels via the Slack
    Web API and posts via the inherited webhook machinery. (Python 2 syntax.)

    NOTE(review): several methods call ``self.check_channel_exists`` but only
    ``check_topic_exists`` is defined here — presumably the base class
    provides it, or this is a leftover rename; confirm.
    """

    # Private
    url_api = "https://slack.com/api"

    def check_topic_exists(self, channel, timeout=5, verbose=True):
        """Return True if `channel` exists (probes channels.info)."""
        exists = False
        self.join_channel(channel, verbose=verbose)
        try:
            url = "{base}/channels.info?token={authtoken}&channel={channel}".format(
                base=self.url_api,
                authtoken=self.authtoken,
                channel=channel)
            self._get(url)
            exists = True
        # NOTE(review): bare except swallows all errors, including
        # KeyboardInterrupt; any failure is treated as "does not exist".
        except:
            exists = False
        if verbose:
            if exists:
                print "Channel "+channel+" exists."
            else:
                print "Channel "+channel+" does not exist."
        return exists

    def create_channel(self, channel, shards=1, timeout=5, verbose=True):
        """Create `channel` via channels.create; False if it already exists
        or creation fails. `shards` is accepted for interface parity but unused."""
        if self.check_channel_exists(channel, timeout=timeout, verbose=verbose):
            return False
        created = False
        try:
            url = "{base}/channels.create?token={authtoken}&name={channel}".format(
                base=self.url_api,
                authtoken=self.authtoken,
                channel=channel)
            self._get(url)
            created = True
        except:
            created = False
        if verbose:
            if created:
                print "Channel "+channel+" created."
            else:
                print "Channel "+channel+" could not be created"
        return created

    def join_channel(self, channel, verbose=True):
        """No-op placeholder: bot users cannot join channels via the API."""
        if verbose:
            print "Joining channel "+channel
            print "Bots currently can't join channels. You need to invite manually with /invite @botname"
        # https://github.com/slackhq/node-slack-client/issues/26
        # self._client.api_call("channels.join", channel="channel")

    def archive_channel(self, channel, timeout=5, verbose=True):
        """Archive `channel` via channels.archive; False if missing or on error."""
        if not self.check_channel_exists(channel, timeout=timeout, verbose=verbose):
            return False
        archived = False
        try:
            url = "{base}/channels.archive?token={authtoken}&channel={channel}".format(
                base=self.url_api,
                authtoken=self.authtoken,
                channel=channel)
            self._get(url)
            archived = True
        except:
            archived = False
        if verbose:
            if archived:
                print "Channel "+channel+" archived."
            else:
                print "Channel "+channel+" could not be archived."
        return archived

    def archive_channels(self, channels, ignore_errors=True, timeout=5, verbose=False):
        """Archive several channels; stop at the first failure unless
        ignore_errors is True. Returns the last archive result."""
        archived = True
        for channel in channels:
            archived = self.archive_channel(channel, timeout=timeout, verbose=verbose)
            if (not ignore_errors) and (not archived):
                break
        return archived

    def list_channels(self, exclude_archived=True, verbose=False):
        """Return channel names via channels.list; [] if no authtoken."""
        if self.authtoken:
            url = "{base}/channels.list?token={authtoken}&exclude_archived={exclude_archived}".format(
                base=self.url_api,
                authtoken=self.authtoken,
                exclude_archived=exclude_archived)
            response = self._get(url)
            data = json.loads(response)
            if verbose:
                print response
            channels = []
            for channel in data['channels']:
                channels.append(channel['name'])
            return channels
        else:
            print "No authtoken present."
            return []

    def __init__(self, url_webhook="", authtoken=None, templates=None):
        super(GeoWatchClientSlack, self).__init__(
            backend="slack",
            url_webhook=url_webhook,
            authtoken=authtoken,
            templates=templates)
        if authtoken:
            self._client = SlackClient(authtoken)
            # NOTE(review): 'd' is assigned but never used.
            d = None
            try:
                # Resolve the bot's own identity via auth.test.
                r = json.loads(self._client.api_call("auth.test"))
                self._user_id = r[u'user_id']
                self._user_name = r[u'user']
            except:
                print "Could not initialize Slack Client user"
| {
"content_hash": "35269083ef9ab23799db5f8989c7b24b",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 102,
"avg_line_length": 32.6793893129771,
"alnum_prop": 0.5545433309974305,
"repo_name": "pjdufour/geowatch-util",
"id": "b92b8558feef789f0524fab240c9cf90c3567d69",
"size": "4281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geowatchutil/client/geowatch_client_slack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "102094"
}
],
"symlink_target": ""
} |
import pyfwk
from pyfi.entity.entity.db import EntityDB
# ------------------------------INDICES-MODEL-----------------------------#
class IndicesModel(pyfwk.Model):
    """Singleton model describing the ``indices`` database table
    (columns: id, symbol, name)."""

    model = None    # cached singleton instance
    dbase = None    # EntityDB handle
    table = None    # table name
    columns = None  # list of pyfwk.DBCol definitions

    @staticmethod
    def instance():
        """Return the process-wide singleton, creating it on first use."""
        if not IndicesModel.model:
            IndicesModel.model = IndicesModel()
        return IndicesModel.model

    def __init__(self):
        self.dbase = EntityDB.instance()
        self.table = 'indices'
        # Renamed locals (id_col, …) so the `id` builtin is not shadowed.
        id_col = pyfwk.DBCol('id', 'INTEGER PRIMARY KEY')
        symbol_col = pyfwk.DBCol('symbol', 'TEXT')
        name_col = pyfwk.DBCol('name', 'TEXT')
        self.columns = [id_col, symbol_col, name_col]
        # Validate the declared schema against the framework's expectations.
        self.validate()
# ----------------------------------MAIN----------------------------------#
def main():
    """Placeholder entry point; currently does nothing."""
    pass

if __name__ == '__main__':
    main()
| {
"content_hash": "e136ba09b23e5486587343ed9f98daba",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 24.676470588235293,
"alnum_prop": 0.4958283671036949,
"repo_name": "rlinguri/pyfi",
"id": "fb7af305c60f18fc6fa6e5e459b96ae62d1e7e27",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfi/entity/indices/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41076"
}
],
"symlink_target": ""
} |
import hashlib
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import ValidationError
from django.test.utils import override_settings
from dateutil.tz import tzutc
from mock import patch
from nose.tools import eq_, ok_
from lib.crypto.packaged import SigningError
from mkt.api.tests.test_oauth import RestOAuth
from mkt.constants import MANIFEST_CONTENT_TYPE
from mkt.files.models import FileUpload
from mkt.langpacks.models import LangPack
from mkt.langpacks.tests.test_models import UploadCreationMixin, UploadTest
from mkt.site.storage_utils import public_storage
from mkt.site.fixtures import fixture
from mkt.site.tests import TestCase
from mkt.users.models import UserProfile
class TestLangPackViewSetMixin(RestOAuth):
    """Shared fixtures and helpers for the LangPack API test cases below."""

    fixtures = fixture('user_2519')

    def setUp(self):
        super(TestLangPackViewSetMixin, self).setUp()
        self.list_url = reverse('api-v2:langpack-list')
        self.user = UserProfile.objects.get(pk=2519)

    def create_langpack(self, **kwargs):
        # Default field values for a test langpack; individual tests
        # override any of them through **kwargs.
        data = {
            'active': True,
            'version': '0.1',
            'language': 'fr',
            'fxos_version': '2.2',
        }
        data.update(kwargs)
        return LangPack.objects.create(**data)

    def check_langpack(self, langpack_data, instance=None):
        # Compare an API-serialized langpack dict against a model instance
        # (defaults to self.langpack, set up by the subclasses).
        if instance is None:
            instance = self.langpack
        eq_(instance.pk, langpack_data['uuid'])
        eq_(instance.manifest_url, langpack_data['manifest_url'])
        eq_(instance.active, langpack_data['active'])
        eq_(instance.language, langpack_data['language'])
        eq_(instance.fxos_version, langpack_data['fxos_version'])
        eq_(instance.get_language_display(), langpack_data['language_display'])
class TestLangPackViewSetBase(TestLangPackViewSetMixin):
    """Generic endpoint checks: CORS headers and URL shape."""

    def setUp(self):
        super(TestLangPackViewSetBase, self).setUp()
        self.detail_url = reverse('api-v2:langpack-detail', kwargs={'pk': 42})

    def test_cors(self):
        # Both list and detail endpoints must advertise the full verb set.
        self.assertCORS(self.anon.options(self.detail_url),
                        'get', 'delete', 'patch', 'post', 'put')
        self.assertCORS(self.anon.options(self.list_url),
                        'get', 'delete', 'patch', 'post', 'put')

    def test_no_double_slash(self):
        # Guard against URL-reversing regressions producing '...//'.
        ok_(not self.detail_url.endswith('//'))
        ok_(not self.list_url.endswith('//'))
class TestLangPackViewSetGet(TestLangPackViewSetMixin):
    """GET (list/detail) permission and filtering behavior."""

    def setUp(self):
        super(TestLangPackViewSetGet, self).setUp()
        self.langpack = self.create_langpack()
        self.detail_url = reverse('api-v2:langpack-detail',
                                  kwargs={'pk': self.langpack.pk})

    # Anonymously, you can view all active langpacks.
    # Logged in with the right permission ('LangPacks', '%') you get them
    # all if you use active=0.
    def test_list_active_anonymous(self):
        response = self.anon.get(self.list_url)
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0])

    def test_list_active_no_perm_needed(self):
        response = self.client.get(self.list_url)
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0])

    def test_list_inactive_anon(self):
        # Inactive langpacks are hidden from anonymous users, with or
        # without extra filtering parameters.
        self.create_langpack(active=False)
        response = self.anon.get(self.list_url, {'active': 'false'})
        eq_(response.status_code, 403)
        response = self.anon.get(
            self.list_url, {'active': 'false', 'fxos_version': '2.2'})
        eq_(response.status_code, 403)

    def test_list_inactive_no_perm(self):
        self.create_langpack(active=False)
        response = self.client.get(self.list_url, {'active': 'false'})
        eq_(response.status_code, 403)
        response = self.client.get(
            self.list_url, {'active': 'false', 'fxos_version': '2.2'})
        eq_(response.status_code, 403)

    def test_list_inactive_has_perm(self):
        inactive_langpack = self.create_langpack(active=False)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.get(self.list_url, {'active': 'false'})
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0],
                            instance=inactive_langpack)

    def test_list_inactive_has_perm_with_fxos_version(self):
        # fxos_version filtering should combine with the active filter.
        inactive_langpack = self.create_langpack(
            active=False, language='it', fxos_version='3.0')
        self.create_langpack(
            active=False, language='de', fxos_version='2.2')
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.get(
            self.list_url, {'active': 'false', 'fxos_version': '3.0'})
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0],
                            instance=inactive_langpack)

    def test_list_all_has_perm(self):
        # active=null returns both active and inactive langpacks,
        # ordered by creation date (most recent first here).
        inactive_langpack = self.create_langpack(
            active=False, language='it', fxos_version='3.0')
        inactive_langpack.update(created=self.days_ago(1))
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.get(self.list_url, {'active': 'null'})
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 2)
        self.check_langpack(response.json['objects'][0],
                            instance=self.langpack)
        self.check_langpack(response.json['objects'][1],
                            instance=inactive_langpack)

    def test_list_fxos_version(self):
        self.create_langpack(active=True, language='it', fxos_version='3.0')
        response = self.client.get(self.list_url, {'fxos_version': '2.2'})
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0],
                            instance=self.langpack)
        response = self.anon.get(self.list_url, {'fxos_version': '2.2'})
        eq_(response.status_code, 200)
        eq_(len(response.json['objects']), 1)
        self.check_langpack(response.json['objects'][0],
                            instance=self.langpack)

    def test_active_detail(self):
        response = self.anon.get(self.detail_url)
        eq_(response.status_code, 200)
        self.check_langpack(response.json)
        response = self.client.get(self.detail_url)
        eq_(response.status_code, 200)
        self.check_langpack(response.json)

    def test_inactive_detail_anon(self):
        self.langpack.update(active=False)
        response = self.anon.get(self.detail_url)
        eq_(response.status_code, 403)

    def test_inactive_detail_no_perm(self):
        self.langpack.update(active=False)
        response = self.client.get(self.detail_url)
        eq_(response.status_code, 403)

    def test_inactive_has_perm(self):
        self.langpack.update(active=False)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.get(self.detail_url)
        eq_(response.status_code, 200)
        self.check_langpack(response.json)
class TestLangPackViewSetCreate(TestLangPackViewSetMixin,
                                UploadCreationMixin, UploadTest):
    """POST (create) permission checks and upload validation."""

    def test_anonymous(self):
        response = self.anon.post(self.list_url)
        eq_(response.status_code, 403)

    def test_no_perms(self):
        response = self.client.post(self.list_url)
        eq_(response.status_code, 403)

    # Mock out serializer validation/saving: this test only checks that
    # the permission gate lets an admin through to a 201.
    @patch('mkt.langpacks.serializers.LangPackUploadSerializer.is_valid',
           return_value=True)
    @patch('mkt.langpacks.serializers.LangPackUploadSerializer.save',
           return_value=None)
    def test_with_perm(self, mock_save, mock_is_valid):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url)
        eq_(response.status_code, 201)

    def test_no_upload(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url)
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'This field is required.']})

    def test_upload_does_not_exist(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': 'my-non-existing-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'No upload found.']})

    def test_dont_own_the_upload(self):
        # An upload owned by someone else must behave as if absent.
        FileUpload.objects.create(uuid='my-uuid', user=None, valid=True)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'No upload found.']})

    def test_invalid_upload(self):
        FileUpload.objects.create(uuid='my-uuid', valid=False, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'Upload not valid.']})

    @patch('mkt.langpacks.models.LangPack.from_upload')
    def test_errors_returned_by_from_upload(self, mock_from_upload):
        # Model-level ValidationErrors should surface as 400 responses.
        mock_from_upload.side_effect = ValidationError('foo bar')
        FileUpload.objects.create(uuid='my-uuid', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'detail': [u'foo bar']})

    @patch('mkt.langpacks.models.sign_app')
    def test_signing_error(self, sign_app_mock):
        # Signing failures are server-side problems: 503, not 400.
        sign_app_mock.side_effect = SigningError(u'Fake signing error')
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 503)
        eq_(response.json, {u'detail': [u'Fake signing error']})

    def test_create(self):
        eq_(LangPack.objects.count(), 0)
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 201)
        eq_(LangPack.objects.count(), 1)
        langpack = LangPack.objects.get()
        # New langpacks start inactive until explicitly activated.
        eq_(langpack.active, False)
        eq_(response.data['uuid'], langpack.uuid)
        eq_(response.data['active'], langpack.active)

    def test_create_with_existing_langpack_in_db(self):
        # POSTing always creates a new langpack; it never overwrites an
        # existing one.
        self.langpack = self.create_langpack()
        eq_(LangPack.objects.count(), 1)
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.post(self.list_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 201)
        ok_(response.json['uuid'] != self.langpack.pk)
        eq_(LangPack.objects.count(), 2)
        langpack = LangPack.objects.get(pk=response.json['uuid'])
        eq_(langpack.active, False)
        # Language/version come from the langpack.zip fixture contents.
        eq_(langpack.language, 'de')
        eq_(langpack.fxos_version, '2.2')
        eq_(response.data['uuid'], langpack.uuid)
        eq_(response.data['active'], langpack.active)
class TestLangPackViewSetUpdate(TestLangPackViewSetMixin, UploadCreationMixin,
                                UploadTest):
    """PUT (full update from an upload) permission and validation checks."""

    def setUp(self):
        super(TestLangPackViewSetUpdate, self).setUp()
        self.langpack = self.create_langpack()
        self.detail_url = reverse('api-v2:langpack-detail',
                                  kwargs={'pk': self.langpack.pk})

    def test_anonymous(self):
        response = self.anon.put(self.detail_url)
        eq_(response.status_code, 403)

    def test_no_perms(self):
        response = self.client.put(self.detail_url)
        eq_(response.status_code, 403)

    # Mock out serializer validation/saving: only the permission gate is
    # under test here.
    @patch('mkt.langpacks.serializers.LangPackUploadSerializer.is_valid',
           return_value=True)
    @patch('mkt.langpacks.serializers.LangPackUploadSerializer.save',
           return_value=None)
    def test_with_perm(self, mock_save, mock_is_valid):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url)
        eq_(response.status_code, 200)

    def test_no_upload(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url)
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'This field is required.']})

    def test_upload_does_not_exist(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': 'my-non-existing-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'No upload found.']})

    def test_dont_own_the_upload(self):
        # An upload owned by someone else must behave as if absent.
        FileUpload.objects.create(uuid='my-uuid', user=None, valid=True)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'No upload found.']})

    def test_invalid_upload(self):
        FileUpload.objects.create(uuid='my-uuid', valid=False, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'upload': [u'Upload not valid.']})

    @patch('mkt.langpacks.models.LangPack.from_upload')
    def test_errors_returned_by_from_upload(self, mock_from_upload):
        mock_from_upload.side_effect = ValidationError('foo bar')
        FileUpload.objects.create(uuid='my-uuid', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': 'my-uuid'}))
        eq_(response.status_code, 400)
        eq_(response.json, {u'detail': [u'foo bar']})

    def test_update(self):
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 200)
        eq_(LangPack.objects.count(), 1)
        langpack = LangPack.objects.get()
        eq_(langpack.active, True)  # Langpack was already active.
        eq_(langpack.language, 'de')
        eq_(langpack.fxos_version, '2.2')
        eq_(response.data['uuid'], langpack.uuid)
        eq_(response.data['active'], langpack.active)

    def test_update_with_another_existing_langpack_in_db(self):
        # Updating one langpack must not touch any other.
        self.langpack = self.create_langpack()
        eq_(LangPack.objects.count(), 2)
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 200)
        eq_(LangPack.objects.count(), 2)
        langpack = LangPack.objects.get(pk=response.json['uuid'])
        eq_(langpack.active, True)
        eq_(langpack.language, 'de')
        eq_(langpack.fxos_version, '2.2')
        eq_(response.data['uuid'], langpack.uuid)
        eq_(response.data['active'], langpack.active)

    def test_update_was_inactive(self):
        # A PUT must not silently activate an inactive langpack.
        self.langpack.update(active=False)
        upload = self.upload('langpack.zip', valid=True, user=self.user)
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.put(self.detail_url, data=json.dumps({
            'upload': upload.uuid}))
        eq_(response.status_code, 200)
        eq_(LangPack.objects.count(), 1)
        langpack = LangPack.objects.get()
        eq_(langpack.active, False)
        eq_(langpack.language, 'de')
        eq_(langpack.fxos_version, '2.2')
        eq_(response.data['uuid'], langpack.uuid)
        eq_(response.data['active'], langpack.active)
class TestLangPackViewSetPartialUpdate(TestLangPackViewSetMixin):
    """PATCH: only `active` may be toggled; everything else is read-only."""

    def setUp(self):
        super(TestLangPackViewSetPartialUpdate, self).setUp()
        self.langpack = self.create_langpack()
        self.detail_url = reverse('api-v2:langpack-detail',
                                  kwargs={'pk': self.langpack.pk})

    def test_anonymous(self):
        response = self.anon.patch(self.detail_url)
        eq_(response.status_code, 403)

    def test_no_perms(self):
        response = self.client.patch(self.detail_url)
        eq_(response.status_code, 403)

    def test_with_perm(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        response = self.client.patch(self.detail_url,
                                     json.dumps({'active': False}))
        eq_(response.status_code, 200)
        eq_(response.data['active'], False)
        self.langpack.reload()
        eq_(self.langpack.pk, response.data['uuid'])
        eq_(self.langpack.active, response.data['active'])

    def test_not_allowed_fields(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        original_filename = self.langpack.filename
        response = self.client.patch(self.detail_url, json.dumps({
            'active': False,
            'filename': 'dummy-data',
            'fxos_version': 'dummy-data',
            'language': 'es',
            'modified': 'dummy-data',
            'uuid': 'dummy-data',
            'version': 'dummy-data',
        }))
        eq_(response.status_code, 400)
        eq_(response.data, {
            'language': [u'This field is read-only.'],
            'fxos_version': [u'This field is read-only.'],
            'version': [u'This field is read-only.']})
        self.langpack.reload()
        # Verify that nothing has changed.
        eq_(self.langpack.active, True)
        # Not changed either (not even exposed, so does not trigger an error)
        eq_(self.langpack.filename, original_filename)
class TestLangPackViewSetDelete(TestLangPackViewSetMixin):
    """DELETE permission checks and scoping to a single langpack."""

    def setUp(self):
        super(TestLangPackViewSetDelete, self).setUp()
        self.langpack = self.create_langpack()
        self.detail_url = reverse('api-v2:langpack-detail',
                                  kwargs={'pk': self.langpack.pk})

    def test_anonymous(self):
        response = self.anon.delete(self.detail_url)
        eq_(response.status_code, 403)

    def test_no_perms(self):
        response = self.client.delete(self.detail_url)
        eq_(response.status_code, 403)

    def test_with_perm(self):
        self.grant_permission(self.user, 'LangPacks:Admin')
        # A second langpack ensures the delete only removes its target.
        langpack_to_keep = self.create_langpack()
        eq_(LangPack.objects.count(), 2)
        response = self.client.delete(self.detail_url)
        eq_(response.status_code, 204)
        eq_(LangPack.objects.count(), 1)
        eq_(LangPack.objects.get().pk, langpack_to_keep.pk)
class TestLangPackNonAPIViews(TestCase):
    """Download and (mini-)manifest views served outside the REST API."""

    fixtures = fixture('user_2519')

    def setUp(self):
        super(TestLangPackNonAPIViews, self).setUp()
        self.fake_manifest = {
            'name': u'Fake LangPäck',
            'developer': {
                'name': 'Mozilla'
            }
        }
        self.langpack = LangPack.objects.create(
            version='0.1', active=True,
            manifest=json.dumps(self.fake_manifest))
        self.user = UserProfile.objects.get(pk=2519)
        # Give the langpack a real file on storage so downloads work.
        with public_storage.open(self.langpack.file_path, 'w') as f:
            f.write('sample data\n')

    def _expected_etag(self):
        # ETag is derived from the pk and file_version, so it changes
        # whenever a new file is uploaded for the same langpack.
        expected_etag = hashlib.sha256()
        expected_etag.update(unicode(self.langpack.pk))
        expected_etag.update(unicode(self.langpack.file_version))
        return '"%s"' % expected_etag.hexdigest()

    @override_settings(
        XSENDFILE=True,
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
    def test_download(self):
        ok_(self.langpack.download_url)
        response = self.client.get(self.langpack.download_url)
        eq_(response.status_code, 200)
        # With local storage the web server serves the file via X-Sendfile.
        eq_(response[settings.XSENDFILE_HEADER], self.langpack.file_path)
        eq_(response['Content-Type'], 'application/zip')
        eq_(response['etag'], self._expected_etag())
        self.login(self.user)
        response = self.client.get(self.langpack.download_url)
        eq_(response.status_code, 200)

    @override_settings(
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.S3BotoPrivateStorage')
    def test_download_storage(self):
        # With remote (S3) storage the view redirects to the storage URL.
        ok_(self.langpack.download_url)
        response = self.client.get(self.langpack.download_url)
        path = public_storage.url(self.langpack.file_path)
        self.assert3xx(response, path)

    def test_download_inactive(self):
        self.langpack.update(active=False)
        ok_(self.langpack.download_url)
        response = self.client.get(self.langpack.download_url)
        eq_(response.status_code, 404)
        self.login(self.user)
        response = self.client.get(self.langpack.download_url)
        eq_(response.status_code, 404)

    @override_settings(
        XSENDFILE=True,
        DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
    def test_download_inactive_has_perm(self):
        # Admins can still download inactive langpacks.
        self.langpack.update(active=False)
        self.grant_permission(self.user, 'LangPacks:Admin')
        self.login(self.user)
        ok_(self.langpack.download_url)
        response = self.client.get(self.langpack.download_url)
        eq_(response.status_code, 200)
        eq_(response[settings.XSENDFILE_HEADER], self.langpack.file_path)
        eq_(response['Content-Type'], 'application/zip')
        eq_(response['etag'], self._expected_etag())

    def test_manifest(self):
        ok_(self.langpack.manifest_url)
        response = self.client.get(self.langpack.manifest_url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], MANIFEST_CONTENT_TYPE)
        manifest_contents = json.loads(
            self.langpack.get_minifest_contents()[0])
        data = json.loads(response.content)
        eq_(data, manifest_contents)

    def test_manifest_etag(self):
        response = self.client.get(self.langpack.manifest_url)
        eq_(response.status_code, 200)
        original_etag = response['ETag']
        ok_(original_etag)
        self.assertCloseToNow(
            response['Last-Modified'],
            now=self.langpack.modified.replace(tzinfo=tzutc()))
        # Test that the etag is different if the langpack file_version
        # changes.
        self.langpack.update(file_version=42)
        self.langpack.get_minifest_contents(force=True)  # Re-generate cache.
        response = self.client.get(self.langpack.manifest_url)
        eq_(response.status_code, 200)
        new_etag = response['ETag']
        ok_(new_etag)
        ok_(original_etag != new_etag)
        # Test that the etag is different if just the minifest contents
        # change, but not the langpack instance itself.
        minifest_contents = json.loads(
            self.langpack.get_minifest_contents()[0])
        minifest_contents['name'] = 'Different Name'
        minifest_contents = json.dumps(minifest_contents)
        patch_method = 'mkt.langpacks.models.LangPack.get_minifest_contents'
        with patch(patch_method) as get_minifest_contents_mock:
            get_minifest_contents_mock.return_value = (
                minifest_contents, 'yet_another_etag')
            response = self.client.get(self.langpack.manifest_url)
            eq_(response.status_code, 200)
            yet_another_etag = response['ETag']
            ok_(yet_another_etag)
            ok_(original_etag != new_etag != yet_another_etag)

    def test_manifest_inactive(self):
        # Keep a reference to the URL before deactivating: the property
        # returns '' once the langpack is inactive.
        manifest_url = self.langpack.manifest_url
        ok_(manifest_url)
        self.langpack.update(active=False)
        # We don't return a manifest url when the langpack is inactive.
        eq_(self.langpack.manifest_url, '')
        response = self.client.get(manifest_url)
        eq_(response.status_code, 404)

    def test_manifest_inactive_has_perm(self):
        manifest_url = self.langpack.manifest_url
        ok_(manifest_url)
        self.langpack.update(active=False)
        self.grant_permission(self.user, 'LangPacks:Admin')
        self.login(self.user)
        # We don't return a manifest url when the langpack is inactive, but
        # it should still work if you have the right permission.
        eq_(self.langpack.manifest_url, '')
        response = self.client.get(manifest_url)
        eq_(response.status_code, 200)
        manifest_contents = json.loads(
            self.langpack.get_minifest_contents()[0])
        data = json.loads(response.content)
        eq_(data, manifest_contents)
| {
"content_hash": "6355f279f0ad2f67c412a6a5e067e511",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 79,
"avg_line_length": 40.502377179080824,
"alnum_prop": 0.6262080838909105,
"repo_name": "shahbaz17/zamboni",
"id": "7e424ee268c528c94793c7ec8258622ac2fd8ef5",
"size": "25582",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mkt/langpacks/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357511"
},
{
"name": "HTML",
"bytes": "2331440"
},
{
"name": "JavaScript",
"bytes": "536153"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4400945"
},
{
"name": "Shell",
"bytes": "11200"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
} |
"""
Functions for common plots
"""
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from .constants import LOG_SIZES
def _plot_svsize_density(log_svsize, ax, label=None,
                         linestyle='-', color='k'):
    """
    Plot a KDE of log-scaled SV sizes onto `ax`.

    sns.distplot does not support independent control of alpha for the
    density line and its shading, so it is invoked twice: once for the
    translucent shaded area, once for the opaque outline.

    Parameters
    ----------
    log_svsize : pd.Series or np.ndarray
        Log10-scaled SV lengths.
    ax : matplotlib Axes
    label : str, optional
        Legend label; the sample count is appended to it.
    linestyle : str, optional
    color : matplotlib color, optional
    """
    if label is not None:
        # Append the sample count, e.g. "DEL (n=1,234)".
        label = label + ' (n={0:,})'.format(log_svsize.shape[0])
    # Pass 1: shaded area only (alpha=0.2).
    ax = sns.distplot(log_svsize, hist=False,
                      kde_kws=dict(shade=True, alpha=0.2),
                      color=color,
                      ax=ax)
    # Pass 2: fully opaque density outline carrying the legend label.
    kde_kws = dict(shade=False, linewidth=2.5, linestyle=linestyle, alpha=1)
    ax = sns.distplot(log_svsize, hist=False, kde_kws=kde_kws,
                      label=label, color=color,
                      ax=ax)
def _add_log_ticks(ax, axmin, axmax, axis='x'):
    """Place log-spaced tick positions on a log10-scaled axis.

    For each decade between 10**axmin and 10**axmax, ticks are placed at
    every multiple of the decade's base (1, 2, ..., 9 times it), plus a
    final tick at 10**axmax. Positions are set in log10 units, and the
    axis limits are clamped to [axmin, axmax].
    """
    # One array of intra-decade tick values per decade, then the endpoint.
    decades = [np.arange(10 ** exp, 10 ** (exp + 1), 10 ** exp)
               for exp in range(axmin, axmax)]
    decades.append(np.array([10 ** axmax]))
    tick_values = np.concatenate(decades)
    # Convert to log10 positions, matching the log-scaled data.
    log_positions = [np.log10(value) for value in tick_values]
    if axis == 'x':
        ax.set_xticks(log_positions)
        ax.set_xlim(axmin, axmax)
    else:
        ax.set_yticks(log_positions)
        ax.set_ylim(axmin, axmax)
def plot_svsize_distro(df, hue=None, hue_order=None, ax=None,
                       hue_dict=None, palette=None,
                       xmin=1, xmax=8):
    """Plot the density of log-scaled SV sizes, optionally split by `hue`.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain a `log_svsize` column (log10-scaled SV length).
    hue : str, optional
        Column of `df` used to split the data into one density per value.
    hue_order : list, optional
        Plotting order of hue values; defaults to sorted unique values.
    ax : matplotlib Axes, optional
        Defaults to the current axes.
    hue_dict : dict, optional
        Maps hue values to legend labels.
    palette : list of colors, optional
        Defaults to seaborn's 'colorblind' palette; must have at least as
        many colors as hue values.
    xmin, xmax : int
        Power-of-ten exponent range for the x axis.

    Returns
    -------
    ax : matplotlib Axes
    """
    # Check for required columns
    if 'log_svsize' not in df.columns:
        raise Exception('Column `log_svsize` not present in dataframe')
    if hue is not None and hue not in df.columns:
        raise Exception('Hue column {0} not present in dataframe'.format(hue))
    # Set defaults
    if ax is None:
        ax = plt.gca()
    if palette is None:
        palette = sns.color_palette('colorblind')
    # If no hue specified, plot size distribution of entire dataframe
    if hue is None:
        _plot_svsize_density(df.log_svsize, ax, color=palette[0])
    # If hue column specified, plot size distribution of each set and label
    # appropriately
    else:
        hue_col = hue
        if hue_order is None:
            hue_order = sorted(df[hue_col].unique())
        if len(hue_order) > len(palette):
            raise Exception('Palette smaller than number of hue variables')
        for i, hue_val in enumerate(hue_order):
            if hue_dict is None:
                label = str(hue_val)
            else:
                label = hue_dict[hue_val]
            data = df.loc[df[hue_col] == hue_val]
            _plot_svsize_density(data.log_svsize, ax, label, color=palette[i])
    # Add legend
    l = ax.legend(frameon=True)
    l.get_frame().set_linewidth(1)
    # Remove horizontal grid lines
    ax.yaxis.grid(False)
    # Label axes
    ax.set_ylabel('Density')
    ax.set_xlabel('Log-scaled SV length')
    # Add log-scaled xtick labels: label each decade boundary (via
    # LOG_SIZES), leave the 8 intra-decade ticks unlabelled.
    _add_log_ticks(ax, xmin, xmax)
    xticklabels = []
    for i in range(xmin, xmax):
        xticklabels.append([LOG_SIZES[i]] + [''] * 8)
    xticklabels.append([LOG_SIZES[xmax]])
    xticklabels = np.concatenate(xticklabels)
    ax.set_xticklabels(xticklabels)
    return ax
def _plot_vaf_cum(data, xticks, ax, label=None,
                  color='k', linestyle='-', linewidth=2.5):
    """Plot the cumulative fraction of variants with VAF <= each tick.

    `data` must have a `vf` column of variant allele fractions; the curve
    is drawn on `ax` against log10-scaled tick positions.
    """
    total = data.shape[0]
    # Fraction of rows at or below each VAF threshold.
    cum_fracs = [data.loc[data.vf <= frac].shape[0] / total
                 for frac in xticks]
    log_positions = [np.log10(tick) for tick in xticks]
    ax.plot(log_positions, cum_fracs, label=label,
            color=color, linewidth=linewidth, linestyle=linestyle)
def _vaf_ticks(xmin=0.002, xmax=1):
    """Build log-spaced tick values from `xmin` up to and including `xmax`.

    The first (partial) decade is filled from `xmin` in steps of its
    decade base; each following decade gets ticks at every multiple of
    its base; `xmax` itself is appended last.
    """
    step = 10 ** np.floor(np.log10(xmin))
    first_max = step * 10
    # Partial first decade, starting at xmin rather than the decade base.
    segments = [np.arange(xmin, first_max, step)]
    # Full decades between the first decade boundary and xmax.
    segments += [np.arange(10 ** exp, 10 ** (exp + 1), 10 ** exp)
                 for exp in np.arange(np.log10(first_max), np.log10(xmax))]
    segments.append([xmax])
    return np.concatenate(segments)
def _vaf_ticklabels(ticks):
    """Return percent labels for `ticks`, labelling only the 1s and 5s.

    Ticks whose leading digit (value scaled into [1, 10)) is neither 1
    nor a multiple of 5 get an empty label. Percentages below 1% are
    formatted with just enough decimal places to be non-zero.
    """
    labels = []
    for tick in ticks:
        # Scale the tick by its decade so the leading digit is exposed.
        leading = tick * 10 ** np.abs(np.floor(np.log10(tick)))
        if leading != 1 and leading % 5 != 0:
            labels.append('')
            continue
        pct = tick * 100
        if pct >= 1:
            labels.append('{0:d}%'.format(int(pct)))
        else:
            # e.g. 0.5% needs one decimal place, 0.05% needs two.
            precision = int(np.ceil(np.abs(np.log10(pct))))
            labels.append('{:.{prec}f}%'.format(pct, prec=precision))
    return labels
def plot_vaf_cum(df, hue=None, hue_order=None, ax=None,
                 xmin=0.002, xmax=1,
                 hue_dict=None, palette=None):
    """Plot cumulative VAF curves (fraction of variants <= each VAF).

    Parameters
    ----------
    df : pd.DataFrame
        Must contain a `vf` column of variant allele fractions.
    hue : str, optional
        Column of `df` used to draw one curve per value.
    hue_order : list, optional
        Plotting order of hue values; defaults to sorted unique values.
    ax : matplotlib Axes, optional
        Defaults to the current axes.
    xmin, xmax : float
        VAF range covered by the log-scaled x axis.
    hue_dict : dict, optional
        Maps hue values to legend labels.
    palette : list of colors, optional
        Defaults to seaborn's 'colorblind' palette.
    """
    # Set defaults
    if ax is None:
        ax = plt.gca()
    if palette is None:
        palette = sns.color_palette('colorblind')
    # Set log-scaled ticks for cum
    xticks = _vaf_ticks(xmin, xmax)
    log_xticks = [np.log10(x) for x in xticks]
    # If no hue specified, plot size distribution of entire dataframe
    if hue is None:
        _plot_vaf_cum(df, xticks, ax, color=palette[0])
    # If hue column specified, plot size distribution of each set and label
    # appropriately
    else:
        hue_col = hue
        if hue_order is None:
            hue_order = sorted(df[hue_col].unique())
        if len(hue_order) > len(palette):
            raise Exception('Palette smaller than number of hue variables')
        for i, hue_val in enumerate(hue_order):
            if hue_dict is None:
                label = str(hue_val)
            else:
                label = hue_dict[hue_val]
            data = df.loc[df[hue_col] == hue_val]
            _plot_vaf_cum(data, xticks, ax, label, color=palette[i])
    # Set log-scaled xticks
    ax.set_xticks(log_xticks)
    ax.set_xlim(log_xticks[0], log_xticks[-1])
    xticklabels = _vaf_ticklabels(xticks)
    ax.set_xticklabels(xticklabels)
    # Set y-scale to 0%, 25%, 50%, 75%, 100%
    yticks = np.arange(0, 1.25, 0.25)
    ax.set_ylim(0, 1)
    ax.set_yticks(yticks)
    ax.set_yticklabels(['{0}%'.format(int(x * 100)) for x in yticks])
    # Add legend under curves
    l = ax.legend(frameon=True, loc='lower right')
    l.get_frame().set_linewidth(1)
    ax.set_ylabel('Cumulative percentage of variants')
    ax.set_xlabel('Variant allele frequency')
def violin_with_strip(x=None, y=None, hue=None, data=None,
                      order=None, hue_order=None, orient='v', ax=None,
                      violin_kwargs=None):
    """
    Plot stripplot with overlaying violin and box-and-whisker plot.

    Arguments
    ---------
    x, y, hue : names of variables in `data` or vector data, optional
    data : pd.DataFrame, array, or list of arrays, optional
    order, hue_order : lists, optional
        Order in which to plot the categorical levels.
    orient : 'v' | 'h'
        Currently unused; kept for interface compatibility.
    ax : matplotlib Axes
    violin_kwargs : dict, optional
        Extra keyword arguments forwarded to sns.violinplot.

    Returns
    -------
    ax : matplotlib Axes

    TODO
    ----
    * parametrize edgecolors and linewidths
    * pass dicts of violin_kwargs and strip_kwargs dicts
    """
    # Fixed: previously a mutable default argument (violin_kwargs={});
    # normalize the None sentinel to a fresh dict per call.
    if violin_kwargs is None:
        violin_kwargs = {}
    if ax is None:
        ax = plt.gca()
    # Plot the data points
    # zorder<3 required to plot beneath violinplot
    ax = sns.stripplot(x=x, y=y, hue=hue, data=data,
                       order=order, hue_order=hue_order,
                       jitter=0.2, linewidth=0.5, edgecolor='k', size=3.5,
                       split=True,
                       ax=ax, zorder=1)
    # Plot violins
    ax = sns.violinplot(x=x, y=y, hue=hue, data=data,
                        order=order, hue_order=hue_order, ax=ax,
                        **violin_kwargs)
    # Change the color of the internal box/whisker plot.
    # Axes lines alternate whisker/box per violin.
    for i, line in enumerate(ax.lines):
        # whiskers
        if i % 2 == 0:
            line.set_color('k')
            # line.set_linewidth(3)
        # box
        else:
            line.set_color('k')
            # line.set_linewidth(7)
    # Turn off violinplot fill and change the outline color
    # Variant on https://github.com/mwaskom/seaborn/issues/979
    poly_obs = False
    for collection in ax.collections:
        # PolyCollections are violins
        if isinstance(collection, mpl.collections.PolyCollection):
            r, g, b, a = collection.get_facecolor()[0]
            collection.set_facecolor((r, g, b, 0.3))
            collection.set_edgecolor('k')
            collection.set_linewidths(1.2)
            poly_obs = True
        # First n PathCollections are stripplot points
        # Subsequent PathCollections are data median
        if isinstance(collection, mpl.collections.PathCollection) and poly_obs:
            # Replace seaborn's median marker with a white dot.
            collection.set_visible(False)
            x, y = collection._offsets[0]
            ax.plot(x, y, 'ow', markersize=7, mew=1, mec='k')
    # Remove stripplot legend entries, keeping one handle per hue level.
    if hue is not None:
        legend = ax.get_legend()
        ax.legend_ = None
        texts = [text.get_text() for text in legend.texts]
        legend = ax.legend(legend.legendHandles[:2], texts,
                           frameon=True, loc='best')
        legend.get_frame().set_linewidth(1)
    return ax
| {
"content_hash": "b7458e7a0553dae5c0f965d83cc181a0",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 79,
"avg_line_length": 30.85810810810811,
"alnum_prop": 0.5733523100503612,
"repo_name": "msto/svplot",
"id": "ccae6136df4023ea9cb1124f9333a5ad4108f942",
"size": "9267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svplot/plotters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34204"
}
],
"symlink_target": ""
} |
import mock
from rally.benchmark.scenarios.ceilometer import meters
from tests.unit import test
class CeilometerMetersTestCase(test.TestCase):
    """Unit tests for the Ceilometer meters benchmark scenario."""

    def test_list_meters(self):
        # The public scenario should delegate to the atomic _list_meters
        # action exactly once, with no arguments.
        scenario = meters.CeilometerMeters()
        scenario._list_meters = mock.MagicMock()
        scenario.list_meters()
        scenario._list_meters.assert_called_once_with()
| {
"content_hash": "43270a861ed8652a3508d8a58ccf23d1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 29.916666666666668,
"alnum_prop": 0.7270194986072424,
"repo_name": "pandeyop/rally",
"id": "37fa8c37c05fad3527e6511d8d190e6db533fa61",
"size": "957",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/benchmark/scenarios/ceilometer/test_meters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46741"
},
{
"name": "Python",
"bytes": "2053831"
},
{
"name": "Shell",
"bytes": "18078"
}
],
"symlink_target": ""
} |
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os.path as op
import nibabel as nb
import numpy as np
from ..base import (traits, TraitedSpec, File, isdefined)
from .base import DipyBaseInterface
from ... import logging
IFLOGGER = logging.getLogger('interface')
class ResampleInputSpec(TraitedSpec):
    """Input specification for :class:`Resample`."""

    in_file = File(exists=True, mandatory=True,
                   desc='The input 4D diffusion-weighted image file')
    vox_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
                            desc=('specify the new voxel zooms. If no vox_size'
                                  ' is set, then isotropic regridding will '
                                  'be performed, with spacing equal to the '
                                  'smallest current zoom.'))
    # Fixed: help text previously had an unbalanced parenthesis.
    interp = traits.Int(
        1, mandatory=True, usedefault=True,
        desc=('order of the interpolator (0 = nearest, 1 = linear, etc.)'))
class ResampleOutputSpec(TraitedSpec):
    # Path of the resliced diffusion image written by the interface.
    out_file = File(exists=True)
class Resample(DipyBaseInterface):
    """
    An interface to reslicing diffusion datasets.
    See
    http://nipy.org/dipy/examples_built/reslice_datasets.html#example-reslice-datasets.
    Example
    -------
    >>> import nipype.interfaces.dipy as dipy
    >>> reslice = dipy.Resample()
    >>> reslice.inputs.in_file = 'diffusion.nii'
    >>> reslice.run() # doctest: +SKIP
    """
    input_spec = ResampleInputSpec
    output_spec = ResampleOutputSpec

    def _run_interface(self, runtime):
        order = self.inputs.interp
        vox_size = None
        # Only forward new zooms when explicitly set; otherwise the
        # resampler falls back to isotropic regridding (see input spec).
        if isdefined(self.inputs.vox_size):
            vox_size = self.inputs.vox_size
        out_file = op.abspath(self._gen_outfilename())
        resample_proxy(self.inputs.in_file, order=order,
                       new_zooms=vox_size, out_file=out_file)
        IFLOGGER.info('Resliced image saved as {i}'.format(i=out_file))
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = op.abspath(self._gen_outfilename())
        return outputs

    def _gen_outfilename(self):
        # Derive '<input>_reslice<ext>' in the working directory, treating
        # a trailing '.gz' as part of a double extension ('.nii.gz').
        fname, fext = op.splitext(op.basename(self.inputs.in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        return op.abspath('%s_reslice%s' % (fname, fext))
class DenoiseInputSpec(TraitedSpec):
    """Input specification for the :class:`Denoise` interface."""
    in_file = File(exists=True, mandatory=True,
                   desc='The input 4D diffusion-weighted image file')
    in_mask = File(exists=True, desc='brain mask')
    # Defaults to 'rician'; only used to decide the nlmeans noise model.
    noise_model = traits.Enum('rician', 'gaussian', mandatory=True,
                              usedefault=True,
                              desc=('noise distribution model'))
    signal_mask = File(desc=('mask in which the mean signal '
                             'will be computed'), exists=True)
    noise_mask = File(desc=('mask in which the standard deviation of noise '
                            'will be computed'), exists=True)
    patch_radius = traits.Int(1, desc='patch radius')
    block_radius = traits.Int(5, desc='block_radius')
    # When set, skips the per-volume SNR estimation in nlmeans_proxy.
    snr = traits.Float(desc='manually set an SNR')
class DenoiseOutputSpec(TraitedSpec):
    # Path of the denoised image written by the interface.
    out_file = File(exists=True)
class Denoise(DipyBaseInterface):
    """
    An interface to denoising diffusion datasets [Coupe2008]_.
    See
    http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans.
    .. [Coupe2008] Coupe P et al., `An Optimized Blockwise Non Local Means
      Denoising Filter for 3D Magnetic Resonance Images
      <http://dx.doi.org/10.1109%2FTMI.2007.906087>`_,
      IEEE Transactions on Medical Imaging, 27(4):425-441, 2008.
    Example
    -------
    >>> import nipype.interfaces.dipy as dipy
    >>> denoise = dipy.Denoise()
    >>> denoise.inputs.in_file = 'diffusion.nii'
    >>> denoise.run() # doctest: +SKIP
    """
    input_spec = DenoiseInputSpec
    output_spec = DenoiseOutputSpec
    def _run_interface(self, runtime):
        out_file = op.abspath(self._gen_outfilename())
        # Keyword arguments forwarded verbatim to dipy's nlmeans via
        # nlmeans_proxy; 'mask' may be overwritten below.
        settings = dict(mask=None,
                        rician=(self.inputs.noise_model == 'rician'))
        if isdefined(self.inputs.in_mask):
            settings['mask'] = nb.load(self.inputs.in_mask).get_data()
        if isdefined(self.inputs.patch_radius):
            settings['patch_radius'] = self.inputs.patch_radius
        if isdefined(self.inputs.block_radius):
            settings['block_radius'] = self.inputs.block_radius
        # snr/signal_mask/noise_mask stay None when unset so nlmeans_proxy
        # falls back to its automatic estimation.
        snr = None
        if isdefined(self.inputs.snr):
            snr = self.inputs.snr
        signal_mask = None
        if isdefined(self.inputs.signal_mask):
            signal_mask = nb.load(self.inputs.signal_mask).get_data()
        noise_mask = None
        if isdefined(self.inputs.noise_mask):
            noise_mask = nb.load(self.inputs.noise_mask).get_data()
        # Returns (out_file, per-volume SNR list); only the SNR is used here.
        _, s = nlmeans_proxy(self.inputs.in_file, settings,
                             snr=snr,
                             smask=signal_mask,
                             nmask=noise_mask,
                             out_file=out_file)
        IFLOGGER.info(('Denoised image saved as {i}, estimated '
                       'SNR={s}').format(i=out_file, s=str(s)))
        return runtime
    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = op.abspath(self._gen_outfilename())
        return outputs
    def _gen_outfilename(self):
        # Build "<input basename>_denoise<ext>", keeping double extensions
        # such as ".nii.gz" intact.
        fname, fext = op.splitext(op.basename(self.inputs.in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        return op.abspath('%s_denoise%s' % (fname, fext))
def resample_proxy(in_file, order=3, new_zooms=None, out_file=None):
    """Regrid an image to new voxel sizes using dipy's ``reslice``.

    :param in_file: path to the input image
    :param order: spline interpolation order (0 = nearest, 1 = linear, ...)
    :param new_zooms: target voxel zooms; when ``None``, isotropic regridding
        with spacing equal to the smallest current zoom is performed
    :param out_file: output path; derived from ``in_file`` when ``None``
    :return: ``(path, zooms)`` tuple. When the image already has the
        requested zooms nothing is written and ``path`` is ``in_file``.
    """
    from dipy.align.reslice import reslice
    if out_file is None:
        # Mirror the input's name, keeping double extensions (".nii.gz").
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath('./%s_reslice%s' % (fname, fext))
    img = nb.load(in_file)
    hdr = img.header.copy()
    data = img.get_data().astype(np.float32)
    affine = img.affine
    im_zooms = hdr.get_zooms()[:3]
    if new_zooms is None:
        minzoom = np.array(im_zooms).min()
        new_zooms = tuple(np.ones((3,)) * minzoom)
    if np.all(im_zooms == new_zooms):
        # Bugfix: this path previously returned a bare string while every
        # other path returns a (path, zooms) tuple, breaking any caller
        # that unpacks the result.
        return in_file, new_zooms
    data2, affine2 = reslice(data, affine, im_zooms, new_zooms, order=order)
    tmp_zooms = np.array(hdr.get_zooms())
    # Bugfix: previously all three spatial zooms were set to new_zooms[0],
    # which was wrong for anisotropic targets.
    tmp_zooms[:3] = new_zooms
    hdr.set_zooms(tuple(tmp_zooms))
    hdr.set_data_shape(data2.shape)
    hdr.set_xyzt_units('mm')
    nb.Nifti1Image(data2.astype(hdr.get_data_dtype()),
                   affine2, hdr).to_filename(out_file)
    return out_file, new_zooms
def nlmeans_proxy(in_file, settings,
                  snr=None,
                  smask=None,
                  nmask=None,
                  out_file=None):
    """
    Uses non-local means to denoise 4D datasets

    :param in_file: path to the (possibly 4D) input image
    :param settings: keyword arguments forwarded to dipy's ``nlmeans``
    :param snr: fixed SNR for all volumes; estimated per volume when None
    :param smask: signal mask (mean signal region); auto-built when None
    :param nmask: noise mask (noise std region); auto-built when None
    :param out_file: output path; derived from ``in_file`` when None
    :return: ``(out_file, snr_list)`` with one SNR value per volume
    """
    from dipy.denoise.nlmeans import nlmeans
    from scipy.ndimage.morphology import binary_erosion
    from scipy import ndimage
    if out_file is None:
        # Mirror the input's name, keeping double extensions (".nii.gz").
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath('./%s_denoise%s' % (fname, fext))
    img = nb.load(in_file)
    hdr = img.header
    data = img.get_data()
    aff = img.affine
    # Promote 3D inputs to a single-volume 4D array.
    if data.ndim < 4:
        data = data[..., np.newaxis]
    data = np.nan_to_num(data)
    if data.max() < 1.0e-4:
        raise RuntimeError('There is no signal in the image')
    # Rescale low-intensity data so the maximum is ~1000; undone before
    # writing the result (see "den /= df" below).
    df = 1.0
    if data.max() < 1000.0:
        df = 1000. / data.max()
        data *= df
    # First volume is treated as the b0 reference for mask construction.
    b0 = data[..., 0]
    if smask is None:
        # Signal mask: brightest 15% of b0 voxels.
        smask = np.zeros_like(b0)
        smask[b0 > np.percentile(b0, 85.)] = 1
    smask = binary_erosion(
        smask.astype(np.uint8), iterations=2).astype(np.uint8)
    if nmask is None:
        # Noise mask: everything outside the (largest connected) brain mask.
        nmask = np.ones_like(b0, dtype=np.uint8)
        bmask = settings['mask']
        if bmask is None:
            # Rough brain mask from b0 intensities, keeping only the
            # largest connected component.
            bmask = np.zeros_like(b0)
            bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1
            label_im, nb_labels = ndimage.label(bmask)
            sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1))
            maxidx = np.argmax(sizes)
            bmask = np.zeros_like(b0, dtype=np.uint8)
            bmask[label_im == maxidx] = 1
        nmask[bmask > 0] = 0
    else:
        # Binarize a user-supplied noise mask.
        nmask = np.squeeze(nmask)
        nmask[nmask > 0.0] = 1
        nmask[nmask < 1] = 0
        nmask = nmask.astype(bool)
    nmask = binary_erosion(nmask, iterations=1).astype(np.uint8)
    den = np.zeros_like(data)
    est_snr = True
    if snr is not None:
        # Fixed SNR: replicate for every volume, skip estimation.
        snr = [snr] * data.shape[-1]
        est_snr = False
    else:
        snr = []
    for i in range(data.shape[-1]):
        d = data[..., i]
        if est_snr:
            # Per-volume SNR = mean(signal region) / std(noise region).
            s = np.mean(d[smask > 0])
            n = np.std(d[nmask > 0])
            snr.append(s / n)
        # NOTE(review): snr[i] is passed as nlmeans' second positional
        # argument (the noise sigma) -- verify units against dipy's API.
        den[..., i] = nlmeans(d, snr[i], **settings)
    den = np.squeeze(den)
    den /= df
    nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
                   hdr).to_filename(out_file)
    return out_file, snr
| {
"content_hash": "631e0623bf213ddc76122862cd3c162d",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 87,
"avg_line_length": 32.20134228187919,
"alnum_prop": 0.5776365152146727,
"repo_name": "FCP-INDI/nipype",
"id": "143f239e6ce8e88c5d327ee4106499f720991ced",
"size": "9642",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nipype/interfaces/dipy/preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5280923"
},
{
"name": "Shell",
"bytes": "1958"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""Class advice.
This module was adapted from 'protocols.advice', part of the Python
Enterprise Application Kit (PEAK). Please notify the PEAK authors
(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
Zope-specific changes are required, so that the PEAK version of this module
can be kept in sync.
PEAK is a Python application framework that interoperates with (but does
not require) Zope 3 and Twisted. It provides tools for manipulating UML
models, object-relational persistence, aspect-oriented programming, and more.
Visit the PEAK home page at http://peak.telecommunity.com for more information.
"""
from types import FunctionType
try:
    from types import ClassType
except ImportError: #pragma NO COVER Python > 3.x
    # Python 3 removed old-style classes, so ClassType no longer exists;
    # remember which major version we are running under.
    __python3 = True
else: #pragma NO COVER Python < 3.x
    __python3 = False
import sys
def getFrameInfo(frame):
    """Return (kind,module,locals,globals) for a frame
    'kind' is one of "exec", "module", "class", "function call", or "unknown".
    """
    frame_locals = frame.f_locals
    frame_globals = frame.f_globals
    shared_namespace = frame_locals is frame_globals
    looks_like_class_body = '__module__' in frame_locals
    has_name = '__name__' in frame_globals
    names_agree = (
        looks_like_class_body and has_name
        and frame_globals['__name__'] == frame_locals['__module__'])
    module = sys.modules.get(frame_globals['__name__']) if has_name else None
    namespace_is_module = (
        module is not None and module.__dict__ is frame_globals)
    if not namespace_is_module:
        # Globals do not belong to any imported module: some kind of exec.
        kind = "exec"
    elif shared_namespace and not looks_like_class_body:
        kind = "module"
    elif names_agree and not shared_namespace:
        kind = "class"
    elif not shared_namespace:
        kind = "function call"
    else:  # pragma NO COVER
        # f_locals is f_globals yet '__module__' is set: probably
        # module-level code that defines a '__module__' variable itself.
        kind = "unknown"
    return kind, module, frame_locals, frame_globals
def addClassAdvisor(callback, depth=2):
    """Set up 'callback' to be passed the containing class upon creation
    This function is designed to be called by an "advising" function executed
    in a class suite.  The "advising" function supplies a callback that it
    wishes to have executed when the containing class is created.  The
    callback will be given one argument: the newly created containing class.
    The return value of the callback will be used in place of the class, so
    the callback should return the input if it does not wish to replace the
    class.
    The optional 'depth' argument to this function determines the number of
    frames between this function and the targeted class suite.  'depth'
    defaults to 2, since this skips this function's frame and one calling
    function frame.  If you use this function from a function called directly
    in the class suite, the default will be correct, otherwise you will need
    to determine the correct depth yourself.
    This function works by installing a special class factory function in
    place of the '__metaclass__' of the containing class.  Therefore, only
    callbacks *after* the last '__metaclass__' assignment in the containing
    class will be executed.  Be sure that classes using "advising" functions
    declare any '__metaclass__' *first*, to ensure all callbacks are run."""
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if __python3: #pragma NO COVER
        raise TypeError('Class advice impossible in Python3')
    # Grab the frame of the class suite that called the advising function.
    frame = sys._getframe(depth)
    kind, module, caller_locals, caller_globals = getFrameInfo(frame)
    # This causes a problem when zope interfaces are used from doctest.
    # In these cases, kind == "exec".
    #
    #if kind != "class":
    #    raise SyntaxError(
    #        "Advice must be in the body of a class statement"
    #    )
    previousMetaclass = caller_locals.get('__metaclass__')
    # NOTE: this branch is unreachable -- __python3 already raised above;
    # kept for fidelity with the upstream PEAK code.
    if __python3: # pragma NO COVER
        defaultMetaclass = caller_globals.get('__metaclass__', type)
    else:
        defaultMetaclass = caller_globals.get('__metaclass__', ClassType)
    def advise(name, bases, cdict):
        # Called by the class machinery in place of the real metaclass.
        if '__metaclass__' in cdict:
            del cdict['__metaclass__']
        if previousMetaclass is None:
            if bases:
                # find best metaclass or use global __metaclass__ if no bases
                meta = determineMetaclass(bases)
            else:
                meta = defaultMetaclass
        elif isClassAdvisor(previousMetaclass):
            # special case: we can't compute the "true" metaclass here,
            # so we need to invoke the previous metaclass and let it
            # figure it out for us (and apply its own advice in the process)
            meta = previousMetaclass
        else:
            meta = determineMetaclass(bases, previousMetaclass)
        newClass = meta(name,bases,cdict)
        # this lets the callback replace the class completely, if it wants to
        return callback(newClass)
    # introspection data only, not used by inner function
    advise.previousMetaclass = previousMetaclass
    advise.callback = callback
    # install the advisor
    caller_locals['__metaclass__'] = advise
def isClassAdvisor(ob):
    """True if 'ob' is a class advisor function"""
    if not isinstance(ob, FunctionType):
        return False
    return hasattr(ob, 'previousMetaclass')
def determineMetaclass(bases, explicit_mc=None):
    """Determine metaclass from 1+ bases and optional explicit __metaclass__"""
    metas = [getattr(base, '__class__', type(base)) for base in bases]
    if explicit_mc is not None:
        # The explicit metaclass is subject to the same compatibility
        # check, and may itself resolve otherwise-incompatible bases.
        metas.append(explicit_mc)
    if len(metas) == 1:
        # Only one metaclass in play: nothing to arbitrate.
        return metas[0]
    survivors = minimalBases(metas)  # minimal set of metaclasses
    if not survivors:  # pragma NO COVER
        # Everything was an old-style "classic" class (Python 2 only).
        assert(not __python3)
        return ClassType
    if len(survivors) > 1:
        # Auto-combining the metaclasses would be possible, but is not
        # supported here.
        raise TypeError("Incompatible metatypes", bases)
    return survivors[0]
def minimalBases(classes):
    """Reduce a list of base classes to its ordered minimum equivalent"""
    if not __python3: #pragma NO COVER
        # Old-style ClassType entries never constrain the result (Python 2).
        classes = [cls for cls in classes if cls is not ClassType]
    result = []
    for cls in classes:
        dominated = any(
            issubclass(other, cls) and cls is not other for other in classes)
        if not dominated:
            # cls is minimal; a repeated entry keeps its *later* position.
            if cls in result:
                result.remove(cls)
            result.append(cls)
    return result
| {
"content_hash": "b6e4a56e33b075419f1af93fde6ade6e",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 36.803108808290155,
"alnum_prop": 0.6476136843587217,
"repo_name": "hlzz/dotfiles",
"id": "ccc103d8fcf304bf84ac3bb0e709f136f2accf2b",
"size": "7752",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/ThirdParty/ZopeInterface/zope/interface/advice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
"""Model representing MAAS node/machine resource."""
import logging
import drydock_provisioner.error as errors
import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
import drydock_provisioner.drivers.node.maasdriver.models.interface as maas_interface
import drydock_provisioner.drivers.node.maasdriver.models.blockdev as maas_blockdev
import drydock_provisioner.drivers.node.maasdriver.models.volumegroup as maas_vg
from bson import BSON
LOG = logging.getLogger(__name__)
class Machine(model_base.ResourceBase):
    """Model for a single MAAS machine (node) REST resource."""
    # URL template; {resource_id} is the MAAS system_id.
    resource_url = 'machines/{resource_id}/'
    # Attributes mirrored from the MAAS API representation.
    fields = [
        'resource_id',
        'hostname',
        'power_type',
        'power_state',
        'power_parameters',
        'interfaces',
        'boot_interface',
        'memory',
        'cpu_count',
        'tag_names',
        'status_name',
        'boot_mac',
        'boot_ip',
        'owner_data',
        'block_devices',
        'volume_groups',
    ]
    # Fields serialized by to_dict() when creating/updating the resource.
    json_fields = ['hostname', 'power_type']
    def __init__(self, api_client, **kwargs):
        """Initialize the model and eagerly load sub-collections.

        :param api_client: MaaS API client instance used for all requests
        """
        super(Machine, self).__init__(api_client, **kwargs)
        # Replace generic dicts with interface collection model
        if hasattr(self, 'resource_id'):
            self.interfaces = maas_interface.Interfaces(
                api_client, system_id=self.resource_id)
            self.interfaces.refresh()
            # Block device / volume group loading is best-effort: failures
            # are logged, leaving the generic data from the constructor.
            try:
                self.block_devices = maas_blockdev.BlockDevices(
                    api_client, system_id=self.resource_id)
                self.block_devices.refresh()
            except Exception:
                self.logger.warning("Failed loading node %s block devices." %
                                    (self.resource_id))
            try:
                self.volume_groups = maas_vg.VolumeGroups(
                    api_client, system_id=self.resource_id)
                self.volume_groups.refresh()
            except Exception:
                self.logger.warning("Failed load node %s volume groups." %
                                    (self.resource_id))
        else:
            self.interfaces = None
            self.block_devices = None
            self.volume_groups = None
    def interface_for_ip(self, ip_address):
        """Find the machine interface that will respond to ip_address.
        :param ip_address: The IP address to check interfaces
        :return: The interface that responds to this IP or None
        """
        for i in self.interfaces:
            if i.responds_to_ip(ip_address):
                return i
        return None
    def get_power_params(self):
        """Load power parameters for this node from MaaS."""
        url = self.interpolate_url()
        resp = self.api_client.get(url, op='power_parameters')
        # Non-200 responses are silently ignored; power_parameters is
        # simply left unchanged.
        if resp.status_code == 200:
            self.power_parameters = resp.json()
    def reset_network_config(self):
        """Reset the node networking configuration."""
        self.logger.info("Resetting networking configuration on node %s" %
                         (self.resource_id))
        url = self.interpolate_url()
        resp = self.api_client.post(url, op='restore_networking_configuration')
        if not resp.ok:
            msg = "Error resetting network on node %s: %s - %s" \
                  % (self.resource_id, resp.status_code, resp.text)
            self.logger.error(msg)
            raise errors.DriverError(msg)
    def reset_storage_config(self):
        """Reset storage config on this machine.
        Removes all the volume groups/logical volumes and all the physical
        device partitions on this machine.
        """
        self.logger.info("Resetting storage configuration on node %s" %
                         (self.resource_id))
        # Remove volume groups first so physical partitions are free.
        if self.volume_groups is not None and self.volume_groups.len() > 0:
            for vg in self.volume_groups:
                self.logger.debug("Removing VG %s" % vg.name)
                vg.delete()
        else:
            self.logger.debug("No VGs configured on node %s" %
                              (self.resource_id))
        if self.block_devices is not None:
            for d in self.block_devices:
                if d.partitions is not None and d.partitions.len() > 0:
                    self.logger.debug(
                        "Clearing partitions on device %s" % d.name)
                    d.clear_partitions()
                else:
                    self.logger.debug(
                        "No partitions found on device %s" % d.name)
        else:
            self.logger.debug("No block devices found on node %s" %
                              (self.resource_id))
    def set_storage_layout(self,
                           layout_type='flat',
                           root_device=None,
                           root_size=None,
                           boot_size=None,
                           root_lv_size=None,
                           root_vg_name=None,
                           root_lv_name=None):
        """Set machine storage layout for the root disk.
        :param layout_type: Whether to use 'flat' (partitions) or 'lvm' for the root filesystem
        :param root_device: Name of the block device to place the root partition on
        :param root_size: Size of the root partition in bytes
        :param boot_size: Size of the boot partition in bytes
        :param root_lv_size: Size of the root logical volume in bytes for LVM layout
        :param root_vg_name: Name of the volume group with root LV
        :param root_lv_name: Name of the root LV
        :raises DriverError: when the device cannot be found or MAAS rejects
            the layout request
        """
        try:
            url = self.interpolate_url()
            self.block_devices.refresh()
            root_dev = self.block_devices.singleton({'name': root_device})
            if root_dev is None:
                msg = "Error: cannot find storage device %s to set as root device" % root_device
                self.logger.error(msg)
                raise errors.DriverError(msg)
            root_dev.set_bootable()
            data = {
                'storage_layout': layout_type,
                'root_device': root_dev.resource_id,
            }
            self.logger.debug("Setting node %s storage layout to %s" %
                              (self.hostname, layout_type))
            # Optional sizing parameters are only sent when provided.
            if root_size:
                data['root_size'] = root_size
            if boot_size:
                data['boot_size'] = boot_size
            if layout_type == 'lvm':
                if root_lv_size:
                    data['lv_size'] = root_lv_size
                if root_vg_name:
                    data['vg_name'] = root_vg_name
                if root_lv_name:
                    data['lv_name'] = root_lv_name
            resp = self.api_client.post(
                url, op='set_storage_layout', files=data)
            if not resp.ok:
                raise Exception("MAAS Error: %s - %s" % (resp.status_code,
                                                         resp.text))
        except Exception as ex:
            # All failures (local and HTTP) surface as DriverError.
            msg = "Error: failed configuring node %s storage layout: %s" % (
                self.resource_id, str(ex))
            self.logger.error(msg)
            raise errors.DriverError(msg)
    def release(self, erase_disk=False):
        """Release a node so it can be redeployed.
        :param erase_disk: If true, the local disks on the machine will be quick wiped
        :raises DriverError: when MAAS rejects the release request
        """
        url = self.interpolate_url()
        options = {'erase': erase_disk}
        resp = self.api_client.post(url, op='release', files=options)
        if not resp.ok:
            brief_msg = ("Error releasing node, received HTTP %s from MaaS" %
                         resp.status_code)
            self.logger.error(brief_msg)
            self.logger.debug("MaaS response: %s" % resp.text)
            raise errors.DriverError(brief_msg)
    def commission(self, debug=False):
        """Start the MaaS commissioning process.
        :param debug: If true, enable ssh on the node and leave it power up after commission
        :raises DriverError: when MAAS rejects the commission request
        """
        url = self.interpolate_url()
        # If we want to debug this node commissioning, enable SSH
        # after commissioning and leave the node powered up
        options = {'enable_ssh': '1' if debug else '0'}
        resp = self.api_client.post(url, op='commission', files=options)
        # Need to sort out how to handle exceptions
        if not resp.ok:
            self.logger.error(
                "Error commissioning node, received HTTP %s from MaaS" %
                resp.status_code)
            self.logger.debug("MaaS response: %s" % resp.text)
            raise errors.DriverError(
                "Error commissioning node, received HTTP %s from MaaS" %
                resp.status_code)
    def deploy(self, user_data=None, platform=None, kernel=None):
        """Start the MaaS deployment process.
        :param user_data: cloud-init user data
        :param platform: Which image to install
        :param kernel: Which kernel to enable
        :raises DriverError: when MAAS rejects the deploy request
        """
        deploy_options = {}
        if user_data is not None:
            deploy_options['user_data'] = user_data
        if platform is not None:
            deploy_options['distro_series'] = platform
        if kernel is not None:
            deploy_options['hwe_kernel'] = kernel
        url = self.interpolate_url()
        resp = self.api_client.post(
            url,
            op='deploy',
            files=deploy_options if len(deploy_options) > 0 else None)
        if not resp.ok:
            self.logger.error(
                "Error deploying node, received HTTP %s from MaaS" %
                resp.status_code)
            self.logger.debug("MaaS response: %s" % resp.text)
            raise errors.DriverError(
                "Error deploying node, received HTTP %s from MaaS" %
                resp.status_code)
    def get_network_interface(self, iface_name):
        """Return the named interface model, or None when not loaded/found."""
        if self.interfaces is not None:
            iface = self.interfaces.singleton({'name': iface_name})
            return iface
    def get_details(self):
        """Fetch the BSON-encoded hardware details for this node.

        Returns the decoded dict on HTTP 200; returns None otherwise.
        """
        url = self.interpolate_url()
        resp = self.api_client.get(url, op='details')
        if resp.status_code == 200:
            detail_config = BSON.decode(resp.content)
            return detail_config
    def set_owner_data(self, key, value):
        """Add/update/remove node owner data.
        If the machine is not currently allocated to a user
        it cannot have owner data
        :param key: Key of the owner data
        :param value: Value of the owner data. If None, the key is removed
        :raises DriverError: when MAAS rejects the request
        """
        url = self.interpolate_url()
        resp = self.api_client.post(
            url, op='set_owner_data', files={key: value})
        if resp.status_code != 200:
            self.logger.error(
                "Error setting node metadata, received HTTP %s from MaaS" %
                resp.status_code)
            self.logger.debug("MaaS response: %s" % resp.text)
            raise errors.DriverError(
                "Error setting node metadata, received HTTP %s from MaaS" %
                resp.status_code)
    def to_dict(self):
        """Serialize this resource instance into a dict.
        The dict format matches the
        MAAS representation of the resource
        """
        data_dict = {}
        for f in self.json_fields:
            if getattr(self, f, None) is not None:
                # MAAS calls the identifier 'system_id', not 'resource_id'.
                if f == 'resource_id':
                    data_dict['system_id'] = getattr(self, f)
                else:
                    data_dict[f] = getattr(self, f)
        return data_dict
    @classmethod
    def from_dict(cls, api_client, obj_dict):
        """Create a instance of this resource class based on a dict.
        Dict format matches MaaS type attributes
        Customized for Machine due to use of system_id instead of id
        as resource key
        :param api_client: Instance of api_client.MaasRequestFactory for accessing MaaS API
        :param obj_dict: Python dict as parsed from MaaS API JSON representing this resource type
        """
        refined_dict = {k: obj_dict.get(k, None) for k in cls.fields}
        if 'system_id' in obj_dict.keys():
            refined_dict['resource_id'] = obj_dict.get('system_id')
        # Capture the boot interface MAC to allow for node id of VMs
        if 'boot_interface' in obj_dict.keys():
            if isinstance(obj_dict['boot_interface'], dict):
                refined_dict['boot_mac'] = obj_dict['boot_interface'][
                    'mac_address']
                # First link's IP (when present) becomes boot_ip.
                if len(obj_dict['boot_interface']['links']) > 0:
                    refined_dict['boot_ip'] = obj_dict['boot_interface'][
                        'links'][0].get('ip_address', None)
        i = cls(api_client, **refined_dict)
        return i
class Machines(model_base.ResourceCollectionBase):
    """Collection model for MAAS machine resources."""
    collection_url = 'machines/'
    collection_resource = Machine
    def __init__(self, api_client, **kwargs):
        """:param api_client: MaaS API client used for all requests."""
        super(Machines, self).__init__(api_client)
    # Add the OOB power parameters to each machine instance
    def collect_power_params(self):
        """Fetch and attach power parameters for every cached machine."""
        for k, v in self.resources.items():
            v.get_power_params()
    def acquire_node(self, node_name):
        """Acquire a commissioned node for deployment.
        :param node_name: The hostname of a node to acquire
        :return: the acquired Machine instance
        :raises DriverError: node missing, not 'Ready', or MAAS error
        """
        self.refresh()
        node = self.singleton({'hostname': node_name})
        if node is None:
            self.logger.info("Node %s not found" % (node_name))
            raise errors.DriverError("Node %s not found" % (node_name))
        # Only nodes in 'Ready' state can be allocated.
        if node.status_name != 'Ready':
            self.logger.info(
                "Node %s status '%s' does not allow deployment, should be 'Ready'."
                % (node_name, node.status_name))
            raise errors.DriverError(
                "Node %s status '%s' does not allow deployment, should be 'Ready'."
                % (node_name, node.status_name))
        url = self.interpolate_url()
        resp = self.api_client.post(
            url, op='allocate', files={'system_id': node.resource_id})
        if not resp.ok:
            self.logger.error(
                "Error acquiring node, MaaS returned %s" % resp.status_code)
            self.logger.debug("MaaS response: %s" % resp.text)
            raise errors.DriverError(
                "Error acquiring node, MaaS returned %s" % resp.status_code)
        return node
    def identify_baremetal_node(self, node_model, update_name=True):
        """Find MaaS node resource matching Drydock BaremetalNode.
        Search all the defined MaaS Machines and attempt to match
        one against the provided Drydock BaremetalNode model. Update
        the MaaS instance with the correct hostname
        :param node_model: Instance of objects.node.BaremetalNode to search MaaS for matching resource
        :param update_name: Whether Drydock should update the MaaS resource name to match the Drydock design
        :return: the matching Machine, or None when no match is found
        """
        maas_node = None
        if node_model.oob_type == 'ipmi':
            # IPMI nodes are matched by their OOB (BMC) IP address.
            node_oob_network = node_model.oob_parameters['network']
            node_oob_ip = node_model.get_network_address(node_oob_network)
            if node_oob_ip is None:
                self.logger.warn("Node model missing OOB IP address")
                raise ValueError('Node model missing OOB IP address')
            try:
                self.collect_power_params()
                maas_node = self.singleton({
                    'power_params.power_address':
                    node_oob_ip
                })
            except ValueError:
                self.logger.warn(
                    "Error locating matching MaaS resource for OOB IP %s" %
                    (node_oob_ip))
                return None
        else:
            # Use boot_mac for node's not using IPMI
            node_boot_mac = node_model.boot_mac
            if node_boot_mac is not None:
                maas_node = self.singleton({'boot_mac': node_model.boot_mac})
        if maas_node is None:
            self.logger.info(
                "Could not locate node %s in MaaS" % node_model.name)
            return None
        self.logger.debug("Found MaaS resource %s matching Node %s" %
                          (maas_node.resource_id, node_model.get_id()))
        if maas_node.hostname != node_model.name and update_name:
            maas_node.hostname = node_model.name
            maas_node.update()
            self.logger.debug("Updated MaaS resource %s hostname to %s" %
                              (maas_node.resource_id, node_model.name))
        return maas_node
    def query(self, query):
        """Custom query method to deal with complex fields."""
        result = list(self.resources.values())
        for (k, v) in query.items():
            # 'power_params.<field>' matches inside each machine's
            # power_parameters dict; len('power_params.') == 13.
            if k.startswith('power_params.'):
                field = k[13:]
                result = [
                    i for i in result if str(
                        getattr(i, 'power_parameters', {}).get(field, None)) ==
                    str(v)
                ]
            else:
                result = [
                    i for i in result if str(getattr(i, k, None)) == str(v)
                ]
        return result
    def add(self, res):
        """Create a new resource in this collection in MaaS.
        Customize as Machine resources use 'system_id' instead of 'id'
        :param res: A instance of the Machine model
        :return: the same instance with resource_id populated
        :raises DriverError: when MAAS does not return HTTP 200
        """
        data_dict = res.to_dict()
        url = self.interpolate_url()
        resp = self.api_client.post(url, files=data_dict)
        if resp.status_code == 200:
            resp_json = resp.json()
            res.set_resource_id(resp_json.get('system_id'))
            return res
        raise errors.DriverError(
            "Failed updating MAAS url %s - return code %s" %
            (url, resp.status_code))
| {
"content_hash": "ee2a89e3a1b24014654450b5e2edbd2e",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 108,
"avg_line_length": 36.31578947368421,
"alnum_prop": 0.5527313266443701,
"repo_name": "att-comdev/drydock",
"id": "d8177ecce1e7b21eb384dd22cf028e08b8607dea",
"size": "18559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drydock_provisioner/drivers/node/maasdriver/models/machine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2663"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "902228"
},
{
"name": "Shell",
"bytes": "15107"
},
{
"name": "Smarty",
"bytes": "2147"
}
],
"symlink_target": ""
} |
import io

try:
    from setuptools import setup, Command
except ImportError:
    from distutils.core import setup, Command
class RunTest(Command):
    """``python setup.py test``: run the test suite in a subprocess."""
    # No command-line options for this command.
    user_options = []
    def initialize_options(self):
        # Nothing to initialize.
        pass
    def finalize_options(self):
        # Nothing to validate.
        pass
    def run(self):
        # Execute the test module with the current interpreter and
        # propagate its exit status as this command's exit status.
        import sys
        import subprocess
        errno = subprocess.call([sys.executable, 'tests/test_whatstyle.py'])
        raise SystemExit(errno)
# Long description for PyPI. Read with an explicit encoding so the build
# does not depend on the platform locale; io.open works on both Python 2.7
# and 3.x, matching the classifiers below.
with io.open('README.rst', encoding='utf-8') as f:
    readme = f.read()
setup(
    name='whatstyle',
    version='0.1.9',
    url='https://github.com/mikr/whatstyle',
    license='MIT',
    author='Michael Krause',
    author_email='michael@krause-software.com',
    description='whatstyle finds a code format style that fits given source files.',
    long_description=readme,
    py_modules=['whatstyle'],
    cmdclass={'test': RunTest},
    zip_safe=False,
    platforms='any',
    keywords='formatter beautifier clang-format yapf tidy indent astyle uncrustify '
             'scalariform scalafmt rfmt rustfmt',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Utilities',
    ],
    entry_points={
        'console_scripts': [
            'whatstyle = whatstyle:main'
        ]
    },
)
| {
"content_hash": "d9d4eb86d84147bc60582c91b947fa4e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 84,
"avg_line_length": 29.56923076923077,
"alnum_prop": 0.6071800208116546,
"repo_name": "mikr/whatstyle",
"id": "0979ee541607e00090285edbd6c0fa365a513151",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "172049"
},
{
"name": "C++",
"bytes": "68976"
},
{
"name": "HTML",
"bytes": "15118"
},
{
"name": "Java",
"bytes": "38402"
},
{
"name": "JavaScript",
"bytes": "7969"
},
{
"name": "Objective-C",
"bytes": "80282"
},
{
"name": "Python",
"bytes": "346712"
},
{
"name": "R",
"bytes": "1396"
},
{
"name": "Rust",
"bytes": "26539"
},
{
"name": "Scala",
"bytes": "8208"
},
{
"name": "Shell",
"bytes": "499"
}
],
"symlink_target": ""
} |
# Configuration values for the suggestion index builder.
# Text encoding for the data files below.
encoding="UTF-8"
# NOTE(review): presumably the number of suggestions kept per prefix --
# confirm against the consumer of this config.
topN=3
# Feature flags (1 = enabled, 0 = disabled, presumably; verify in consumer).
ifFullText=1
ifPinYin=1
ifJianPin=1
ifMergeSampleText=1
# Input corpus and generated index/mapping file paths.
FileInput="data/sample.input"
FileOutput="data/release/sample.suggidx"
FileOutputItemSet="data/release/sample.suggidx.itemset"
FileTag2tid="data/sample.tag2tid"
FilePrefix2tid="data/sample.prefix2tid"
| {
"content_hash": "649e6d4b9702a3578ae18d059801f2d0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 19,
"alnum_prop": 0.8175438596491228,
"repo_name": "MingdaMingda/TO050001-TinySuggestion",
"id": "73c481c78d8d986084409ed8eba59d4112a99987",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index/bin/sugg_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "442"
},
{
"name": "CSS",
"bytes": "1327"
},
{
"name": "HTML",
"bytes": "669"
},
{
"name": "JavaScript",
"bytes": "1091"
},
{
"name": "Python",
"bytes": "13076"
},
{
"name": "Shell",
"bytes": "727"
}
],
"symlink_target": ""
} |
# Package version. NOTE(review): stored as a float; packaging convention is
# a string (e.g. '1.8'), and floats cannot distinguish 1.10 from 1.1 --
# confirm no caller relies on the numeric type before changing.
__version__ = 1.8
# Re-export the public API from the ABVD submodule.
from .ABVD import ABVDatabase, Downloader, Parser, Record
from .ABVD import DeadLanguageError, InvalidLanguageError
| {
"content_hash": "14cbe114733d3dc8ad56c3f40aaf8f9a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 57,
"avg_line_length": 27.2,
"alnum_prop": 0.7867647058823529,
"repo_name": "SimonGreenhill/ABVDGet",
"id": "3f69d9d108a46889fae27619a695c1f99fbb184d",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abvdget/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "197"
},
{
"name": "Python",
"bytes": "27711"
}
],
"symlink_target": ""
} |
import pytest
from hatchling.version.source.code import CodeSource
def test_no_path(isolation):
    """Omitting the `path` option must be rejected with a ValueError."""
    src = CodeSource(str(isolation), {})

    with pytest.raises(ValueError, match='option `path` must be specified'):
        src.get_version_data()
def test_path_not_string(isolation):
    """A non-string `path` option must be rejected with a TypeError."""
    src = CodeSource(str(isolation), {'path': 1})

    with pytest.raises(TypeError, match='option `path` must be a string'):
        src.get_version_data()
def test_path_nonexistent(isolation):
    """Pointing `path` at a file that does not exist must raise OSError."""
    src = CodeSource(str(isolation), {'path': 'a/b'})

    with pytest.raises(OSError, match='file does not exist: a/b'):
        src.get_version_data()
def test_expression_not_string(temp_dir):
    """A non-string `expression` option must be rejected with a TypeError."""
    src = CodeSource(str(temp_dir), {'path': 'a/b', 'expression': 23})
    target = temp_dir / 'a' / 'b'
    target.ensure_parent_dir_exists()
    target.touch()

    with pytest.raises(TypeError, match='option `expression` must be a string'):
        src.get_version_data()
def test_match_default_expression(temp_dir, helpers):
    """The default expression picks up a plain `__version__` assignment."""
    src = CodeSource(str(temp_dir), {'path': 'a/b'})
    target = temp_dir / 'a' / 'b'
    target.ensure_parent_dir_exists()
    target.write_text('__version__ = "0.0.1"')

    with temp_dir.as_cwd():
        assert src.get_version_data()['version'] == '0.0.1'
def test_match_custom_expression_basic(temp_dir):
    """A custom variable name can serve as the version expression."""
    src = CodeSource(str(temp_dir), {'path': 'a/b', 'expression': 'VER'})
    target = temp_dir / 'a' / 'b'
    target.ensure_parent_dir_exists()
    target.write_text('VER = "0.0.1"')

    with temp_dir.as_cwd():
        assert src.get_version_data()['version'] == '0.0.1'
def test_match_custom_expression_complex(temp_dir, helpers):
    """The expression may call a function defined in the target file."""
    src = CodeSource(str(temp_dir), {'path': 'a/b', 'expression': 'foo()'})
    target = temp_dir / 'a' / 'b'
    target.ensure_parent_dir_exists()
    target.write_text(
        helpers.dedent(
            """
            __version_info__ = (1, 0, 0, 1, 'dev0')
            def foo():
                return '.'.join(str(part) for part in __version_info__)
            """
        )
    )

    with temp_dir.as_cwd():
        assert src.get_version_data()['version'] == '1.0.0.1.dev0'
| {
"content_hash": "f6639821cc68992d22a567eec396fd04",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 28.98701298701299,
"alnum_prop": 0.6093189964157706,
"repo_name": "ofek/hatch",
"id": "9e0ebd7143eb6540518e6728107b14d3a97bad3d",
"size": "2232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/backend/version/source/test_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1112263"
}
],
"symlink_target": ""
} |
class Queue(object):
    """
    Queue backed by two lists for amortised O(1) operations:

    - `_front` holds elements with the *next element to pop at the end*,
      so ``pop`` is a cheap ``list.pop()``.
    - `_back` accumulates newly queued elements in arrival order and is
      reversed into `_front` only when `_front` runs empty.
    """
    def __init__(self, elems):
        # Copy the input so that later pops do not mutate the caller's
        # list (the original stored a reference and drained it in place).
        self._front = list(elems)
        self._back = []
    def unshift(self, elem):
        """Enqueue `elem`; it is served after everything already queued."""
        self._back.append(elem)
    def _flip(self):
        # Move the backlog into the front, reversed so the oldest queued
        # element ends up at the end of `_front` (i.e. next to pop).
        if len(self._front) != 0:
            raise ValueError("shouldn't flip a queue with a non-empty front")
        self._front = self._back[-1::-1]
        self._back = []
    def pop(self):
        """Remove and return the next element; raise ValueError if empty."""
        if len(self._front) == 0:
            self._flip()
        if len(self._front) == 0:
            raise ValueError("can't pop empty Queue")
        return self._front.pop()
    def __repr__(self):
        # Newest element first, next-to-pop last (reversed back, then front).
        elems = self._back[-1::-1] + self._front
        return repr(elems)
| {
"content_hash": "26b1573c48c113e0523adc3bf92d6626",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 25.8,
"alnum_prop": 0.4883720930232558,
"repo_name": "mattfenwick/DataStructures",
"id": "b5a51452ebb863970b482f1d294d4ca1e2f24e67",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22948"
},
{
"name": "Python",
"bytes": "25672"
}
],
"symlink_target": ""
} |
"""This bot will move pages out of redirected categories.
The bot will look for categories that are marked with a category redirect
template, take the first parameter of the template as the target of the
redirect, and move all pages and subcategories of the category there. It
also changes hard redirects into soft redirects, and fixes double redirects.
A log is written under <userpage>/category_redirect_log. Only category pages
that haven't been edited for a certain cooldown period (currently 7 days)
are taken into account.
-delay:# Set a number of days. If the category was edited more recently
           than the given number of days ago, ignore it. Default is 7.
-tiny Only loops over Category:Non-empty_category_redirects and
moves all images, pages and categories in redirect categories
to the target category.
Usage:
python pwb.py category_redirect [options]
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import re
import sys
import time
from datetime import timedelta
import pywikibot
from pywikibot import i18n, pagegenerators, config
# Python 2/3 compatibility: on Python 3 the C-accelerated pickle lives in
# the plain `pickle` module, so alias it under the Python 2 name.
if sys.version_info[0] > 2:
    import pickle as cPickle
else:
    import cPickle
class CategoryRedirectBot(pywikibot.Bot):

    """Page category update bot."""

    def __init__(self, **kwargs):
        """Constructor."""
        self.availableOptions.update({
            'tiny': False, # use Non-empty category redirects only
            'delay': 7, # cool down delay in days
        })
        super(CategoryRedirectBot, self).__init__(**kwargs)
        self.cooldown = self.getOption('delay')
        self.site = pywikibot.Site()
        self.catprefix = self.site.namespace(14) + ":"
        self.log_text = []
        self.edit_requests = []
        self.problems = []
        self.template_list = []
        self.cat = None
        self.log_page = pywikibot.Page(self.site,
                                       u"User:%(user)s/category redirect log"
                                       % {'user': self.site.username()})
        # Localization:
        # Category that contains all redirected category pages
        self.cat_redirect_cat = {
            'commons': "Category:Category redirects",
            'meta': 'Category:Maintenance of categories/Soft redirected categories',
            'ar': u"تصنيف:تحويلات تصنيفات ويكيبيديا",
            'cs': 'Kategorie:Údržba:Zastaralé kategorie',
            'da': "Kategori:Omdirigeringskategorier",
            'en': "Category:Wikipedia soft redirected categories",
            'es': "Categoría:Wikipedia:Categorías redirigidas",
            'fa': u"رده:ردههای منتقلشده",
            'hi': 'श्रेणी:विकिपीडिया श्रेणी अनुप्रेषित',
            'hu': "Kategória:Kategóriaátirányítások",
            'ja': "Category:移行中のカテゴリ",
            'no': "Kategori:Wikipedia omdirigertekategorier",
            'pl': "Kategoria:Przekierowania kategorii",
            'pt': "Categoria:!Redirecionamentos de categorias",
            'ru': "Категория:Википедия:Категории-дубликаты",
            'sco': "Category:Wikipaedia soft redirectit categories",
            'simple': "Category:Category redirects",
            'sh': u"Kategorija:Preusmjerene kategorije Wikipedije",
            'sr': 'Категорија:Википедијине меко преусмерене категорије',
            'vi': u"Thể loại:Thể loại đổi hướng",
            'zh': u"Category:已重定向的分类",
            'ro': 'Categorie:Categorii de redirecționare',
        }
        # Category that contains non-empty redirected category pages
        self.tiny_cat_redirect_cat = 'Q8099903'
        self.move_comment = 'category_redirect-change-category'
        self.redir_comment = 'category_redirect-add-template'
        self.dbl_redir_comment = 'category_redirect-fix-double'
        self.maint_comment = 'category_redirect-comment'
        self.edit_request_text = i18n.twtranslate(
            self.site, 'category_redirect-edit-request') + u'\n~~~~'
        self.edit_request_item = i18n.twtranslate(
            self.site, 'category_redirect-edit-request-item')

    def get_cat(self):
        """Specify the category page."""
        if self.getOption('tiny'):
            self.cat = self.site.page_from_repository(
                self.tiny_cat_redirect_cat)
        else:
            cat_title = pywikibot.translate(self.site, self.cat_redirect_cat)
            if cat_title:
                self.cat = pywikibot.Category(pywikibot.Link(cat_title,
                                                             self.site))
        return self.cat is not None

    def move_contents(self, oldCatTitle, newCatTitle, editSummary):
        """The worker function that moves pages out of oldCat into newCat."""
        while True:
            try:
                oldCat = pywikibot.Category(self.site,
                                            self.catprefix + oldCatTitle)
                newCat = pywikibot.Category(self.site,
                                            self.catprefix + newCatTitle)
                param = {
                    'oldCatLink': oldCat.title(),
                    'oldCatTitle': oldCatTitle,
                    'newCatLink': newCat.title(),
                    'newCatTitle': newCatTitle,
                }
                summary = editSummary % param
                # Move articles
                found, moved = 0, 0
                for article in oldCat.members():
                    found += 1
                    changed = article.change_category(oldCat, newCat,
                                                      summary=summary)
                    if changed:
                        moved += 1
                # pass 2: look for template doc pages
                for item in pywikibot.data.api.ListGenerator(
                        "categorymembers", cmtitle=oldCat.title(),
                        cmprop="title|sortkey", cmnamespace="10",
                        cmlimit="max"):
                    doc = pywikibot.Page(pywikibot.Link(item['title'] +
                                                        "/doc", self.site))
                    try:
                        doc.get()
                    except pywikibot.Error:
                        continue
                    changed = doc.change_category(oldCat, newCat,
                                                  summary=summary)
                    if changed:
                        moved += 1
                if found:
                    pywikibot.output(u"%s: %s found, %s moved"
                                     % (oldCat.title(), found, moved))
                return (found, moved)
            except pywikibot.ServerError:
                pywikibot.output(u"Server error: retrying in 5 seconds...")
                time.sleep(5)
                continue
            except KeyboardInterrupt:
                raise
            except Exception:
                return (None, None)

    def readyToEdit(self, cat):
        """Return True if cat not edited during cooldown period, else False."""
        today = pywikibot.Timestamp.now()
        deadline = today + timedelta(days=-self.cooldown)
        if cat.editTime() is None:
            raise RuntimeError
        return (deadline > cat.editTime())

    def get_log_text(self):
        """Rotate log text and return the most recent text."""
        LOG_SIZE = 7  # Number of items to keep in active log
        try:
            log_text = self.log_page.get()
        except pywikibot.NoPage:
            log_text = u""
        log_items = {}
        header = None
        for line in log_text.splitlines():
            if line.startswith("==") and line.endswith("=="):
                header = line[2:-2].strip()
            if header is not None:
                log_items.setdefault(header, [])
                log_items[header].append(line)
        if len(log_items) < LOG_SIZE:
            return log_text
        # sort by keys and keep the first (LOG_SIZE-1) values
        keep = [text for (key, text) in
                sorted(log_items.items(), reverse=True)[:LOG_SIZE - 1]]
        log_text = "\n".join("\n".join(line for line in text) for text in keep)
        # get permalink to older logs
        history = list(self.log_page.revisions(total=LOG_SIZE))
        # get the id of the newest log being archived
        rotate_revid = history[-1].revid
        # append permalink
        log_text += ("\n\n'''[%s Older logs]'''"
                     % self.log_page.permalink(oldid=rotate_revid))
        return log_text

    def check_hard_redirect(self):
        """
        Check for hard-redirected categories.

        Check categories that are not already marked with an appropriate
        softredirect template.
        """
        pywikibot.output("Checking hard-redirect category pages.")
        comment = i18n.twtranslate(self.site, self.redir_comment)
        # generator yields all hard redirect pages in namespace 14
        for page in self.site.allpages(namespace=14, filterredir=True,
                                       content=True):
            if page.isCategoryRedirect():
                # this is already a soft-redirect, so skip it (for now)
                continue
            try:
                target = page.getRedirectTarget()
            except pywikibot.CircularRedirect:
                target = page
                self.problems.append(u"# %s is a self-linked redirect"
                                     % page.title(asLink=True, textlink=True))
            except RuntimeError:
                # race condition: someone else removed the redirect while we
                # were checking for it
                continue
            if target.is_categorypage():
                # this is a hard-redirect to a category page
                newtext = (u"{{%(template)s|%(cat)s}}"
                           % {'cat': target.title(withNamespace=False),
                              'template': self.template_list[0]})
                try:
                    page.text = newtext
                    page.save(comment)
                    self.log_text.append(u"* Added {{tl|%s}} to %s"
                                         % (self.template_list[0],
                                            page.title(asLink=True,
                                                       textlink=True)))
                except pywikibot.Error:
                    self.log_text.append(u"* Failed to add {{tl|%s}} to %s"
                                         % (self.template_list[0],
                                            page.title(asLink=True,
                                                       textlink=True)))
            else:
                self.problems.append(u"# %s is a hard redirect to %s"
                                     % (page.title(asLink=True, textlink=True),
                                        target.title(asLink=True, textlink=True)))

    def run(self):
        """Run the bot."""
        # validate L10N
        self.template_list = self.site.category_redirects()
        if not self.template_list:
            pywikibot.warning(u"No redirect templates defined for %s"
                              % self.site)
            return
        if not self.get_cat():
            pywikibot.warning(u"No redirect category found for %s" % self.site)
            return
        user = self.site.user() # invokes login()
        newredirs = []
        localtime = time.localtime()
        today = '%04d-%02d-%02d' % localtime[:3]
        edit_request_page = pywikibot.Page(
            self.site, u"User:%s/category edit requests" % user)
        datafile = pywikibot.config.datafilepath("%s-catmovebot-data"
                                                 % self.site.dbName())
        try:
            with open(datafile, "rb") as inp:
                record = cPickle.load(inp)
        except IOError:
            record = {}
        if record:
            with open(datafile + ".bak", "wb") as f:
                cPickle.dump(record, f, protocol=config.pickle_protocol)
        # regex to match soft category redirects
        # TODO: enhance and use textlib._MultiTemplateMatchBuilder
        # note that any templates containing optional "category:" are
        # incorrect and will be fixed by the bot
        template_regex = re.compile(
            r"""{{\s*(?:%(prefix)s\s*:\s*)? # optional "template:"
            (?:%(template)s)\s*\| # catredir template name
            (\s*%(catns)s\s*:\s*)? # optional "category:"
            ([^|}]+) # redirect target cat
            (?:\|[^|}]*)*}} # optional arguments 2+, ignored
            """ % {'prefix': self.site.namespace(10).lower(),
                   'template': "|".join(item.replace(" ", "[ _]+")
                                        for item in self.template_list),
                   'catns': self.site.namespace(14)},
            re.I | re.X)
        self.check_hard_redirect()
        comment = i18n.twtranslate(self.site, self.move_comment)
        counts = {}
        nonemptypages = []
        redircat = self.cat
        pywikibot.output(u"\nChecking %d category redirect pages"
                         % redircat.categoryinfo['subcats'])
        catpages = set()
        for cat in redircat.subcategories():
            catpages.add(cat)
            cat_title = cat.title(withNamespace=False)
            if "category redirect" in cat_title:
                self.log_text.append(u"* Ignoring %s"
                                     % cat.title(asLink=True, textlink=True))
                continue
            if hasattr(cat, "_catinfo"):
                # skip empty categories that don't return a "categoryinfo" key
                catdata = cat.categoryinfo
                if "size" in catdata and int(catdata['size']):
                    # save those categories that have contents
                    nonemptypages.append(cat)
            if cat_title not in record:
                # make sure every redirect has a record entry
                record[cat_title] = {today: None}
                try:
                    newredirs.append("*# %s -> %s"
                                     % (cat.title(asLink=True, textlink=True),
                                        cat.getCategoryRedirectTarget().title(
                                            asLink=True, textlink=True)))
                except pywikibot.Error:
                    pass
                # do a null edit on cat
                try:
                    cat.save()
                except Exception:
                    pass
        # delete record entries for non-existent categories.
        # NOTE: iterate over a snapshot of the keys -- deleting from the
        # dict while iterating its live keys view raises RuntimeError on
        # Python 3 ("dictionary changed size during iteration").
        for cat_name in list(record.keys()):
            if pywikibot.Category(self.site,
                                  self.catprefix + cat_name) not in catpages:
                del record[cat_name]
        pywikibot.output(u"\nMoving pages out of %s redirected categories."
                         % len(nonemptypages))
        for cat in pagegenerators.PreloadingGenerator(nonemptypages):
            try:
                if not cat.isCategoryRedirect():
                    self.log_text.append(u"* False positive: %s"
                                         % cat.title(asLink=True,
                                                     textlink=True))
                    continue
            except pywikibot.Error:
                self.log_text.append(u"* Could not load %s; ignoring"
                                     % cat.title(asLink=True, textlink=True))
                continue
            cat_title = cat.title(withNamespace=False)
            if not self.readyToEdit(cat):
                counts[cat_title] = None
                self.log_text.append(u"* Skipping %s; in cooldown period."
                                     % cat.title(asLink=True, textlink=True))
                continue
            dest = cat.getCategoryRedirectTarget()
            if not dest.exists():
                self.problems.append("# %s redirects to %s"
                                     % (cat.title(asLink=True, textlink=True),
                                        dest.title(asLink=True, textlink=True)))
                # do a null edit on cat to update any special redirect
                # categories this wiki might maintain
                try:
                    cat.save()
                except Exception:
                    pass
                continue
            if dest.isCategoryRedirect():
                double = dest.getCategoryRedirectTarget()
                if double == dest or double == cat:
                    self.log_text.append(u"* Redirect loop from %s"
                                         % dest.title(asLink=True,
                                                      textlink=True))
                    # do a null edit on cat
                    try:
                        cat.save()
                    except Exception:
                        pass
                else:
                    self.log_text.append(
                        u"* Fixed double-redirect: %s -> %s -> %s"
                        % (cat.title(asLink=True, textlink=True),
                           dest.title(asLink=True, textlink=True),
                           double.title(asLink=True, textlink=True)))
                    oldtext = cat.text
                    # remove the old redirect from the old text,
                    # leaving behind any non-redirect text
                    oldtext = template_regex.sub("", oldtext)
                    newtext = (u"{{%(redirtemp)s|%(ncat)s}}"
                               % {'redirtemp': self.template_list[0],
                                  'ncat': double.title(withNamespace=False)})
                    newtext = newtext + oldtext.strip()
                    try:
                        cat.text = newtext
                        cat.save(i18n.twtranslate(self.site,
                                                  self.dbl_redir_comment))
                    except pywikibot.Error as e:
                        self.log_text.append("** Failed: %s" % e)
                continue
            found, moved = self.move_contents(cat_title,
                                              dest.title(withNamespace=False),
                                              editSummary=comment)
            if found is None:
                self.log_text.append(
                    u"* [[:%s%s]]: error in move_contents"
                    % (self.catprefix, cat_title))
            elif found:
                record[cat_title][today] = found
                self.log_text.append(
                    u"* [[:%s%s]]: %d found, %d moved"
                    % (self.catprefix, cat_title, found, moved))
            counts[cat_title] = found
            # do a null edit on cat
            try:
                cat.save()
            except Exception:
                pass
        with open(datafile, "wb") as f:
            cPickle.dump(record, f, protocol=config.pickle_protocol)
        self.log_text.sort()
        self.problems.sort()
        newredirs.sort()
        comment = i18n.twtranslate(self.site, self.maint_comment)
        self.log_page.text = (u"\n== %i-%02i-%02iT%02i:%02i:%02iZ ==\n"
                              % time.gmtime()[:6] +
                              u'\n'.join(self.log_text) +
                              u'\n* New redirects since last report:\n' +
                              u'\n'.join(newredirs) +
                              u'\n' + u'\n'.join(self.problems) +
                              u'\n' + self.get_log_text())
        self.log_page.save(comment)
        if self.edit_requests:
            edit_request_page.text = (self.edit_request_text
                                      % {'itemlist': u"\n" + u"\n".join(
                                          (self.edit_request_item % item)
                                          for item in self.edit_requests)})
            edit_request_page.save(comment)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    options = {}
    for arg in pywikibot.handle_args(args):
        if arg.startswith('-delay:'):
            # '-delay:<n>' carries an integer value after the colon.
            name, _, value = arg[1:].partition(':')
            options[name] = int(value)
        else:
            # Every remaining option is a simple boolean flag (e.g. -tiny).
            options[arg[1:]] = True
    CategoryRedirectBot(**options).run()
# Script entry point when invoked directly (e.g. `python pwb.py category_redirect`).
if __name__ == "__main__":
    main()
| {
"content_hash": "6abdc15fa777ab6975cdf1b775c7648e",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 84,
"avg_line_length": 43.19166666666667,
"alnum_prop": 0.49546594636311014,
"repo_name": "magul/pywikibot-core",
"id": "db7543dc65fa3e43a26e0fee3b74bd49039a511b",
"size": "21026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/category_redirect.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4538707"
}
],
"symlink_target": ""
} |
"""Support for the Netatmo binary sensors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA, BinarySensorDevice)
from homeassistant.const import CONF_TIMEOUT
from homeassistant.helpers import config_validation as cv
from . import CameraData, NETATMO_AUTH
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
# Sensors exposed by the indoor "Welcome" camera (device type NACamera).
WELCOME_SENSOR_TYPES = {
    "Someone known": "motion",
    "Someone unknown": "motion",
    "Motion": "motion",
}
# Sensors exposed by the outdoor "Presence" camera (device type NOC).
PRESENCE_SENSOR_TYPES = {
    "Outdoor motion": "motion",
    "Outdoor human": "motion",
    "Outdoor animal": "motion",
    "Outdoor vehicle": "motion"
}
# Sensors exposed by Netatmo door/window "tag" modules.
TAG_SENSOR_TYPES = {
    "Tag Vibration": "vibration",
    "Tag Open": "opening"
}
# Configuration keys accepted by this platform.
CONF_HOME = 'home'
CONF_CAMERAS = 'cameras'
CONF_WELCOME_SENSORS = 'welcome_sensors'
CONF_PRESENCE_SENSORS = 'presence_sensors'
CONF_TAG_SENSORS = 'tag_sensors'
# Default event timeout; presumably seconds -- TODO confirm against pyatmo.
DEFAULT_TIMEOUT = 90
# Schema: all options are optional; sensor lists default to every known type.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CAMERAS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_HOME): cv.string,
    vol.Optional(CONF_PRESENCE_SENSORS, default=list(PRESENCE_SENSOR_TYPES)):
        vol.All(cv.ensure_list, [vol.In(PRESENCE_SENSOR_TYPES)]),
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    vol.Optional(CONF_WELCOME_SENSORS, default=list(WELCOME_SENSOR_TYPES)):
        vol.All(cv.ensure_list, [vol.In(WELCOME_SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the access to Netatmo binary sensor.

    Creates welcome/presence sensors per configured camera and tag
    (door/window) sensors per module attached to each camera.
    """
    home = config.get(CONF_HOME)
    timeout = config.get(CONF_TIMEOUT)
    if timeout is None:
        timeout = DEFAULT_TIMEOUT

    import pyatmo
    try:
        data = CameraData(hass, NETATMO_AUTH, home)
        if not data.get_camera_names():
            return None
    except pyatmo.NoDevice:
        return None

    welcome_sensors = config.get(
        CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
    presence_sensors = config.get(
        CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
    tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)

    def _camera_excluded(camera_name):
        # An empty CONF_CAMERAS list means "include all cameras".
        return (CONF_CAMERAS in config and
                config[CONF_CAMERAS] != [] and
                camera_name not in config[CONF_CAMERAS])

    for camera_name in data.get_camera_names():
        camera_type = data.get_camera_type(camera=camera_name, home=home)
        if camera_type in ('NACamera', 'NOC') and \
                _camera_excluded(camera_name):
            # Matches the original behavior: an excluded Welcome/Presence
            # camera gets no entities at all, including its tag modules.
            continue
        if camera_type == 'NACamera':
            for variable in welcome_sensors:
                # module_name is explicitly None for camera-level sensors
                # (previously a stale module name could leak in from the
                # tag loop of an earlier camera iteration).
                add_entities([NetatmoBinarySensor(
                    data, camera_name, None, home, timeout,
                    camera_type, variable)], True)
        if camera_type == 'NOC':
            for variable in presence_sensors:
                add_entities([NetatmoBinarySensor(
                    data, camera_name, None, home, timeout,
                    camera_type, variable)], True)
        # Tag (door/window) modules attached to this camera; tags carry no
        # camera type of their own.
        for module_name in data.get_module_names(camera_name):
            for variable in tag_sensors:
                add_entities([NetatmoBinarySensor(
                    data, camera_name, module_name, home, timeout,
                    None, variable)], True)
class NetatmoBinarySensor(BinarySensorDevice):
    """Represent a single binary sensor in a Netatmo Camera device."""
    def __init__(self, data, camera_name, module_name, home,
                 timeout, camera_type, sensor):
        """Set up for access to the Netatmo camera events.

        data: shared CameraData wrapper around the pyatmo API.
        camera_name / module_name: device identifiers (module_name is only
            set for tag modules).
        home: optional home name used as a name prefix.
        timeout: event freshness window passed to the pyatmo queries
            (presumably seconds -- TODO confirm).
        camera_type: 'NACamera', 'NOC', or None for tag modules.
        sensor: key into one of the *_SENSOR_TYPES dicts.
        """
        self._data = data
        self._camera_name = camera_name
        self._module_name = module_name
        self._home = home
        self._timeout = timeout
        # Entity display name: "<home> / <camera>[ / <module>] <sensor>".
        if home:
            self._name = '{} / {}'.format(home, camera_name)
        else:
            self._name = camera_name
        if module_name:
            self._name += ' / ' + module_name
        self._sensor_name = sensor
        self._name += ' ' + sensor
        self._cameratype = camera_type
        self._state = None
    @property
    def name(self):
        """Return the name of the Netatmo device and this sensor."""
        return self._name
    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        # The camera type selects which sensor-type table applies; tag
        # modules (camera_type None) fall through to TAG_SENSOR_TYPES.
        if self._cameratype == 'NACamera':
            return WELCOME_SENSOR_TYPES.get(self._sensor_name)
        if self._cameratype == 'NOC':
            return PRESENCE_SENSOR_TYPES.get(self._sensor_name)
        return TAG_SENSOR_TYPES.get(self._sensor_name)
    @property
    def is_on(self):
        """Return true if binary sensor is on."""
        return self._state
    def update(self):
        """Request an update from the Netatmo API."""
        self._data.update()
        self._data.update_event()
        # Welcome (indoor) camera events.
        if self._cameratype == 'NACamera':
            if self._sensor_name == "Someone known":
                self._state =\
                    self._data.camera_data.someoneKnownSeen(
                        self._home, self._camera_name, self._timeout)
            elif self._sensor_name == "Someone unknown":
                self._state =\
                    self._data.camera_data.someoneUnknownSeen(
                        self._home, self._camera_name, self._timeout)
            elif self._sensor_name == "Motion":
                self._state =\
                    self._data.camera_data.motionDetected(
                        self._home, self._camera_name, self._timeout)
        # Presence (outdoor) camera events.
        elif self._cameratype == 'NOC':
            if self._sensor_name == "Outdoor motion":
                self._state =\
                    self._data.camera_data.outdoormotionDetected(
                        self._home, self._camera_name, self._timeout)
            elif self._sensor_name == "Outdoor human":
                self._state =\
                    self._data.camera_data.humanDetected(
                        self._home, self._camera_name, self._timeout)
            elif self._sensor_name == "Outdoor animal":
                self._state =\
                    self._data.camera_data.animalDetected(
                        self._home, self._camera_name, self._timeout)
            elif self._sensor_name == "Outdoor vehicle":
                self._state =\
                    self._data.camera_data.carDetected(
                        self._home, self._camera_name, self._timeout)
        # Tag module events: checked independently of camera type, since
        # tag entities are created with camera_type None.
        if self._sensor_name == "Tag Vibration":
            self._state =\
                self._data.camera_data.moduleMotionDetected(
                    self._home, self._module_name, self._camera_name,
                    self._timeout)
        elif self._sensor_name == "Tag Open":
            self._state =\
                self._data.camera_data.moduleOpened(
                    self._home, self._module_name, self._camera_name,
                    self._timeout)
| {
"content_hash": "fbfe72c5123f06e01ee34b9bdd51551b",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 77,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.5771878072763028,
"repo_name": "MartinHjelmare/home-assistant",
"id": "f282faf82c87aa19131ebbbf5721477c0948a875",
"size": "7119",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15222591"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import unittest
from dblite.query import SQLBuilder
class SQLBuilderTest(unittest.TestCase):
    """Unit tests for SQLBuilder's mongo-like query to SQL translation."""
    def test_wrong_params(self):
        """Non-dict params ('' or None) fall back to an unfiltered SELECT."""
        self.assertEqual(SQLBuilder('test', params='').select(), 'SELECT rowid,* FROM test;')
        self.assertEqual(SQLBuilder('test', params=None).select(), 'SELECT rowid,* FROM test;')
    def test_empty_params(self):
        """An empty dict produces a SELECT without a WHERE clause."""
        self.assertEqual(SQLBuilder('test', {}).select(), 'SELECT rowid,* FROM test;')
    def test_simple_request(self):
        """A single field/value pair becomes a single equality condition."""
        self.assertEqual(
            SQLBuilder('test', {'f1': 'v1'}).select(),
            "SELECT rowid,* FROM test WHERE f1='v1';",)
    def test_logical_and_in_simple_form(self):
        """Multiple keys in a flat dict are implicitly joined with AND."""
        self.assertEqual(
            SQLBuilder('test', {'f1': 'v1', 'f2': 'v2'}).select(),
            "SELECT rowid,* FROM test WHERE f1='v1' AND f2='v2';")
    def test_simple_request_with_logical_opers(self):
        """Explicit $and/$or operators, with dict or list operands."""
        self.assertEqual(
            SQLBuilder('test', {'$and': {'f1': 'v1', 'f2': 'v2', 'f3': 2}}).select(),
            "SELECT rowid,* FROM test WHERE (f1='v1') AND (f2='v2') AND (f3=2);",)
        self.assertEqual(
            SQLBuilder('test', {'$or': {'f1': 'v1', 'f2': 'v2', 'f3': 2}}).select(),
            "SELECT rowid,* FROM test WHERE (f1='v1') OR (f2='v2') OR (f3=2);",)
        self.assertEqual(
            SQLBuilder('test', {'$or': [{'f1': 'v1'}, {'f1': 'v1'},],}).select(),
            "SELECT rowid,* FROM test WHERE (f1='v1') OR (f1='v1');",)
    def test_escapting_quotes(self):
        """Values containing quotes are wrapped in the opposite quote kind."""
        self.assertEqual(
            SQLBuilder('test', {'f1': 'value = "Value"' }).select(),
            'SELECT rowid,* FROM test WHERE f1=\'value = "Value"\';',)
        self.assertEqual(
            SQLBuilder('test', {'f1': "value = 'Value'" }).select(),
            'SELECT rowid,* FROM test WHERE f1="value = \'Value\'";',)
    def test_wrong_request_with_logical_opers(self):
        """Unknown operator names or non-dict operands raise RuntimeError."""
        self.assertRaises(RuntimeError, SQLBuilder('t', {})._logical, '$and2', {'f1': 'v1'},)
        self.assertRaises(RuntimeError, SQLBuilder('t', {})._logical, '$and', 1,)
    def test_like_syntax(self):
        """A value of the form /pattern/ is translated to a LIKE condition."""
        self.assertEqual(
            SQLBuilder('test', {'f1': '/search pattern/'}).select(),
            "SELECT rowid,* FROM test WHERE f1 LIKE 'search pattern';",)
    def test_regexp_syntax(self):
        """A value of the form r/pattern/ is translated to a REGEXP condition."""
        self.assertEqual(
            SQLBuilder('test', {'f1': 'r/search pattern/'}).select(),
            "SELECT rowid,* FROM test WHERE f1 REGEXP 'search pattern';",)
    def test_none_value(self):
        """A None value is translated to an ISNULL test."""
        self.assertEqual(
            SQLBuilder('test', {'f1': None}).select(),
            'SELECT rowid,* FROM test WHERE f1 ISNULL;',)
    def test_orderby(self):
        """$orderby maps {field: -1|1} to ORDER BY field DESC|ASC."""
        self.assertEqual(
            SQLBuilder('t', {'f1': '/search pattern/', '$orderby': {'f1': -1}}).select(),
            "SELECT rowid,* FROM t WHERE f1 LIKE 'search pattern' ORDER BY f1 DESC;",)
        self.assertEqual(
            SQLBuilder('t', {'f1': '/search pattern/', '$orderby': {'f1': 1}}).select(),
            "SELECT rowid,* FROM t WHERE f1 LIKE 'search pattern' ORDER BY f1 ASC;",)
        self.assertEqual(
            SQLBuilder('t', {'$orderby': {'f1': -1, 'f2': 1}}).select(),
            "SELECT rowid,* FROM t ORDER BY f1 DESC,f2 ASC;",
        )
    def test_wrong_orderby(self):
        """Non-dict $orderby operands or unknown modifiers raise RuntimeError."""
        self.assertRaises(RuntimeError, SQLBuilder('t', {})._modifier, '$orderby', ['f1', 1])
        self.assertRaises(RuntimeError, SQLBuilder('t', {})._modifier, '$orderby2', ['f1', 1])
    def test_offset(self):
        """LIMIT/OFFSET are appended when passed to select()."""
        self.assertEqual(
            SQLBuilder('t', {}).select(offset=10, limit=10),
            "SELECT rowid,* FROM t LIMIT 10 OFFSET 10;",
        )
        self.assertEqual(
            SQLBuilder('t', {}).select(limit=10),
            "SELECT rowid,* FROM t LIMIT 10;",
        )
| {
"content_hash": "da3959f3899d1ed00d058cb43d23e9b1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 95,
"avg_line_length": 36.783018867924525,
"alnum_prop": 0.5314183123877917,
"repo_name": "ownport/scrapy-dblite",
"id": "35beb94697540c58b1e3278754c67559e50bccb4",
"size": "3899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_query.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Python",
"bytes": "47073"
},
{
"name": "Shell",
"bytes": "478"
}
],
"symlink_target": ""
} |
from . import AWSObject
from . import AWSProperty
from .validators import boolean
from .validators import double
class CostTypes(AWSProperty):
    """``CostTypes`` property of ``AWS::Budgets::Budget``.

    Optional boolean flags selecting which cost components the budget
    tracks. All fields are optional.
    """
    props = {
        'IncludeCredit': (boolean, False),
        'IncludeDiscount': (boolean, False),
        'IncludeOtherSubscription': (boolean, False),
        'IncludeRecurring': (boolean, False),
        'IncludeRefund': (boolean, False),
        'IncludeSubscription': (boolean, False),
        'IncludeSupport': (boolean, False),
        'IncludeTax': (boolean, False),
        'IncludeUpfront': (boolean, False),
        'UseAmortized': (boolean, False),
        'UseBlended': (boolean, False),
    }
class Spend(AWSProperty):
    """``Spend`` property: a cost amount plus its unit (e.g. USD, GB)."""
    props = {
        'Amount': (double, True),
        'Unit': (basestring, True),
    }
class TimePeriod(AWSProperty):
    """``TimePeriod`` property: optional start/end bounds of the budget."""
    props = {
        'End': (basestring, False),
        'Start': (basestring, False),
    }
class BudgetData(AWSProperty):
    """``BudgetData`` property: the budget definition itself.

    `BudgetType` and `TimeUnit` are required by CloudFormation.
    """
    props = {
        'BudgetLimit': (Spend, False),
        'BudgetName': (basestring, False),
        'BudgetType': (basestring, True),
        'CostFilters': (dict, False),
        'CostTypes': (CostTypes, False),
        'PlannedBudgetLimits': (dict, False),
        'TimePeriod': (TimePeriod, False),
        'TimeUnit': (basestring, True),
    }
class Notification(AWSProperty):
    """``Notification`` property: a threshold condition on the budget."""
    props = {
        'ComparisonOperator': (basestring, True),
        'NotificationType': (basestring, True),
        'Threshold': (double, True),
        'ThresholdType': (basestring, False),
    }
class Subscriber(AWSProperty):
    """``Subscriber`` property: where to deliver a budget notification."""
    props = {
        'Address': (basestring, True),
        'SubscriptionType': (basestring, True),
    }
class NotificationWithSubscribers(AWSProperty):
    """Pairs one ``Notification`` with the list of its ``Subscriber``s."""
    props = {
        'Notification': (Notification, True),
        'Subscribers': ([Subscriber], True),
    }
class Budget(AWSObject):
    """Top-level ``AWS::Budgets::Budget`` resource."""
    resource_type = "AWS::Budgets::Budget"
    props = {
        'Budget': (BudgetData, True),
        'NotificationsWithSubscribers':
            ([NotificationWithSubscribers], False),
    }
| {
"content_hash": "aa5f9e0f67c5370e9c5546cbad04278d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 53,
"avg_line_length": 25.5375,
"alnum_prop": 0.5976505139500734,
"repo_name": "ikben/troposphere",
"id": "402224fc833f5cfcb89aee925e8cdf78484b8a85",
"size": "2255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troposphere/budgets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1555"
},
{
"name": "Python",
"bytes": "790849"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""Library for testing DistributionStrategy descendants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import optimizer
class _TestException(Exception):
pass
# May be the argument to either distribution.call_for_each_tower() or
# get_tower_context().merge_call()
def _raise_exception_fn(_=None):
  """Unconditionally raises _TestException; ignores its single argument."""
  raise _TestException()
# Must be the argument to a distribution.call_for_each_tower() call, calls a
# get_tower_context().merge_call() that raises an exception.
def _merge_raises_fn():
  """Tower fn whose merge_call target raises _TestException."""
  distribution_strategy_context.get_tower_context().merge_call(
      _raise_exception_fn)
# Must be the argument to a get_tower_context().merge_call() call, calls
# dist.call_for_each_tower() with a function that raises an exception.
def _call_raises_fn(dist):
  """Merge fn that runs a raising tower fn via call_for_each_tower."""
  dist.call_for_each_tower(_raise_exception_fn)
# Must be the argument to a distribution.call_for_each_tower() call,
# calls a get_tower_context().merge_call() that calls a
# call_for_each_tower() that raises an exception.
def _merge_call_raises_fn():
  """Tower fn whose merge_call target itself spawns a raising tower fn."""
  distribution_strategy_context.get_tower_context().merge_call(_call_raises_fn)
# Must be the argument to a get_tower_context().merge_call() call, calls
# dist.call_for_each_tower() with a function that calls a
# get_tower_context().merge_call() that raises an exception.
def _call_merge_raises_fn(dist):
  """Merge fn that runs tower fns whose own merge_call raises."""
  dist.call_for_each_tower(_merge_raises_fn)
# Must be the argument to a distribution.call_for_each_tower() call, calls a
# get_tower_context().merge_call() that calls a call_for_each_tower() that
# calls a get_tower_context().merge_call() that raises an exception.
def _merge_call_merge_raises_fn():
  """Tower fn exercising the deepest call/merge nesting before raising."""
  distribution_strategy_context.get_tower_context().merge_call(
      _call_merge_raises_fn)
class DistributionTestBase(test.TestCase):
  """Some tests that should work with any DistributionStrategy."""

  def _test_minimize_loss_eager(self, d):
    """Eagerly trains a 1-unit Dense layer under `d`; asserts loss shrinks.

    Args:
      d: the DistributionStrategy under test.
    """
    with d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        # TODO(josh11b): What if this constant was instead a captured
        # value? Would it need to be a value that has been passed
        # through d.broadcast()?
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      # TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a
      # common `implicit_grad` function and put it in DistributionStrategy.
      grad_fn = backprop.implicit_grad(loss)
      grad_fn = optimizer.get_filtered_grad_fn(grad_fn)

      def update(v, g):
        # Plain SGD step with a fixed 0.2 learning rate.
        return v.assign_sub(0.2 * g)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_tower(grad_fn, one, run_concurrently=l.built)

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          # control_dependencies irrelevant but harmless in eager execution
          with ops.control_dependencies([fetched]):
            g = d.reduce(
                variable_scope.VariableAggregation.SUM, g, destinations=v)
            with ops.control_dependencies(d.unwrap(d.update(v, update, g))):
              after_list.append(d.read_var(v))
        return before_list, after_list

      for i in range(10):
        b, a = step()

        if i == 0:
          before, = b  # pylint: disable=unbalanced-tuple-unpacking
        after, = a  # pylint: disable=unbalanced-tuple-unpacking

      error_before = abs(before.numpy() - 1)
      error_after = abs(after.numpy() - 1)
      # Error should go down
      self.assertLess(error_after, error_before)

  def _test_minimize_loss_graph(self, d, soft_placement=False):
    """Graph-mode analogue of `_test_minimize_loss_eager`.

    Args:
      d: the DistributionStrategy under test.
      soft_placement: forwarded to ConfigProto.allow_soft_placement.
    """
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = soft_placement
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with context.graph_mode(), \
        ops.Graph().as_default(), \
        self.test_session(config=config) as sess, \
        d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        # TODO(josh11b): What if this constant was instead a captured
        # value? Would it need to be a value that has been passed
        # through d.broadcast()?
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      grad_fn = backprop.implicit_grad(loss)

      def update(v, g):
        # Plain SGD step with a fixed 0.2 learning rate.
        return v.assign_sub(0.2 * g)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_tower(grad_fn, one)

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            g = d.reduce(
                variable_scope.VariableAggregation.SUM, g, destinations=v)
            with ops.control_dependencies(d.unwrap(d.update(v, update, g))):
              after_list.append(d.read_var(v))
        return before_list, after_list

      # Build the step graph once, then run it repeatedly in the session.
      before_out, after_out = step()
      variables.global_variables_initializer().run()
      for i in range(10):
        b, a = sess.run((before_out, after_out))
        if i == 0:
          before, = b
        after, = a

      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)

  def _test_map_reduce(self, d, in_graph=None):
    """Checks d.map + d.reduce compute sum(2 * i for i in range(10)).

    Args:
      d: the DistributionStrategy under test.
      in_graph: unused in this body; accepted for call-site compatibility.
    """
    with d.scope():
      map_in = [constant_op.constant(i) for i in range(10)]
      map_out = d.map(map_in, lambda x, y: x * y, 2)
      observed = d.reduce(variable_scope.VariableAggregation.SUM, map_out)
      expected = 90  # 2 * (0 + 1 + ... + 9)
      self.assertEqual(expected, observed.numpy())

  def _test_device_index(self, d):
    """Asserts every worker-device index is visited exactly once."""
    with d.scope():
      expected_devices = [False] * len(d.worker_devices)

      def mark_devices_fn(device_id):
        # Each tower must receive a distinct, in-range device index.
        self.assertLess(device_id, len(d.worker_devices))
        self.assertFalse(expected_devices[device_id])
        expected_devices[device_id] = True

      d.call_for_each_tower(mark_devices_fn, d.worker_device_index)
      self.assertAllEqual(expected_devices, [True] * len(d.worker_devices))

  def _test_tower_id(self, d):
    """Asserts each tower reports a distinct tower_id covering all devices."""
    with d.scope():
      expected_devices = [False] * len(d.worker_devices)

      def mark_devices_fn():
        tower_id = distribution_strategy_context.get_tower_context().tower_id
        self.assertLess(tower_id, len(d.worker_devices))
        self.assertFalse(expected_devices[tower_id])
        expected_devices[tower_id] = True

      d.call_for_each_tower(mark_devices_fn)
      self.assertAllEqual(expected_devices, [True] * len(d.worker_devices))

  def _test_call_and_merge_exceptions(self, dist):
    """Asserts _TestException escapes every call/merge nesting combination."""
    with dist.scope():
      with self.assertRaises(_TestException):
        dist.call_for_each_tower(_raise_exception_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_tower(_merge_raises_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_tower(_merge_call_raises_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_tower(_merge_call_merge_raises_fn)
| {
"content_hash": "a43c5b5449437b38f57da71d051e1cee",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 79,
"avg_line_length": 37.50925925925926,
"alnum_prop": 0.6584793878054801,
"repo_name": "ZhangXinNan/tensorflow",
"id": "371b97ba96a826194a6469ba63e485fc67639585",
"size": "8791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/strategy_test_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "327005"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46648068"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6978"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "830576"
},
{
"name": "Jupyter Notebook",
"bytes": "2632421"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "51309"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40046802"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "455624"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
:module: mom.os.path
:synopsis: Directory walking, listing, and path sanitizing functions.
Functions
---------
.. autofunction:: get_dir_walker
.. autofunction:: walk
.. autofunction:: listdir
.. autofunction:: list_directories
.. autofunction:: list_files
.. autofunction:: absolute_path
.. autofunction:: real_absolute_path
.. autofunction:: parent_dir_path
"""
from __future__ import absolute_import
import functools
import os
from mom import builtins
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
__all__ = [
"absolute_path",
"get_dir_walker",
"list_directories",
"list_files",
"listdir",
"parent_dir_path",
"real_absolute_path",
"walk",
]
def get_dir_walker(recursive, topdown=True, followlinks=False):
  """
  Returns a directory-walking callable.

  :param recursive:
    ``True`` produces a recursive walker; ``False`` produces a walker that
    yields only the top-level directory listing.
  :param topdown:
    Please see the documentation for :func:`os.walk`
  :param followlinks:
    Please see the documentation for :func:`os.walk`
  :returns:
    A walker function with the same yield signature as :func:`os.walk`.
  """
  if not recursive:
    def walker(path, topdown=topdown, followlinks=followlinks):
      """Yields only the first (top-level) tuple of the walk."""
      yield builtins.next(os.walk(path,
                                  topdown=topdown,
                                  followlinks=followlinks))
    return walker
  return functools.partial(os.walk,
                           topdown=topdown,
                           followlinks=followlinks)
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):
  """
  Walks a directory tree, optionally non-recursively.  Works exactly like
  :func:`os.walk`, only adding the `recursive` argument.

  :param dir_pathname:
    The directory to traverse.
  :param recursive:
    ``True`` for walking recursively through the directory tree;
    ``False`` otherwise.
  :param topdown:
    Please see the documentation for :func:`os.walk`
  :param followlinks:
    Please see the documentation for :func:`os.walk`
  """
  walker = get_dir_walker(recursive, topdown, followlinks)
  for walk_tuple in walker(dir_pathname):
    yield walk_tuple
def listdir(dir_pathname,
            recursive=True,
            topdown=True,
            followlinks=False):
  """
  Enlists all items (directories first, then files, per visited root)
  using their absolute paths, optionally non-recursively.

  :param dir_pathname:
    The directory to traverse.
  :param recursive:
    ``True`` (default) for walking recursively through the directory tree;
    ``False`` otherwise.
  :param topdown:
    Please see the documentation for :func:`os.walk`
  :param followlinks:
    Please see the documentation for :func:`os.walk`
  """
  for root, dir_names, file_names in walk(dir_pathname, recursive,
                                          topdown, followlinks):
    # Directories are yielded before files for each root, matching os.walk's
    # tuple order.
    for entry_name in dir_names + file_names:
      yield absolute_path(os.path.join(root, entry_name))
def list_directories(dir_pathname, recursive=True, topdown=True,
                     followlinks=False):
  """
  Enlists all the directories using their absolute paths within the
  specified directory, optionally non-recursively.

  :param dir_pathname:
    The directory to traverse.
  :param recursive:
    ``True`` (default) for walking recursively through the directory
    tree; ``False`` otherwise.
  :param topdown:
    Please see the documentation for :func:`os.walk`
  :param followlinks:
    Please see the documentation for :func:`os.walk`
  """
  walk_results = walk(dir_pathname, recursive, topdown, followlinks)
  for root, dir_names, _unused_file_names in walk_results:
    for name in dir_names:
      yield absolute_path(os.path.join(root, name))
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
  """
  Enlists all the files using their absolute paths within the
  specified directory, optionally recursively.

  :param dir_pathname:
    The directory to traverse.
  :param recursive:
    ``True`` for walking recursively through the directory tree;
    ``False`` otherwise.
  :param topdown:
    Please see the documentation for :func:`os.walk`
  :param followlinks:
    Please see the documentation for :func:`os.walk`
  """
  walk_results = walk(dir_pathname, recursive, topdown, followlinks)
  for root, _unused_dir_names, file_names in walk_results:
    for name in file_names:
      yield absolute_path(os.path.join(root, name))
def absolute_path(path):
  """
  Returns the normalized absolute path for the given path.

  :param path:
    Path for which the absolute normalized path will be found.
  :returns:
    Absolute normalized path.
  """
  normalized = os.path.normpath(path)
  return os.path.abspath(normalized)
def real_absolute_path(path):
  """
  Returns the real (symlink-resolved) absolute normalized path.

  :param path:
    Path for which the real absolute normalized path will be found.
  :returns:
    Real absolute normalized path.
  """
  absolute = absolute_path(path)
  return os.path.realpath(absolute)
def parent_dir_path(path):
  """
  Returns the absolute, normalized path of the parent directory.

  :param path:
    Path for which the parent directory will be obtained.
  :returns:
    Parent directory path.
  """
  parent = os.path.dirname(path)
  return absolute_path(parent)
| {
"content_hash": "39b73306f3943819f1307114fff0e2ae",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 80,
"avg_line_length": 28.306451612903224,
"alnum_prop": 0.6628679962013295,
"repo_name": "gorakhargosh/mom",
"id": "d892a7ddb56d44b1d49fc25665decbc2d2d56e1a",
"size": "5970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mom/os/path.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "626298"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
} |
from requests.auth import HTTPBasicAuth
import requests
class PostRequestFactory():
    """
    An Encapsulation for building GET/PUT/POST Post requests.

    Subclasses implement get/post/put; the static `create` factory picks
    the subclass matching a node's team number.
    """

    def get(self):
        raise NotImplementedError('`get()` must be implemented.')

    def post(self):
        raise NotImplementedError('`post()` must be implemented.')

    def put(self):
        raise NotImplementedError('`put()` must be implemented.')

    # Static Factory
    @staticmethod
    def create(node):
        """Returns the request wrapper for `node`, keyed on team_number.

        Raises NotImplementedError for nodes with no known factory.
        """
        # elif chain (instead of independent ifs) makes the dispatch
        # mutually exclusive and keeps the final `else` covering every
        # unmatched team number.
        if node.team_number == 5:
            return SocshizzlePostRequest(node)
        elif node.team_number == 8:
            return Team8PostRequest(node)
        elif node.team_number == 9:
            return HindlebookPostRequest(node)
        else:
            raise NotImplementedError('node `%s` does not have a corresponding factory.' % node.host_name)
class HindlebookPostRequest(PostRequestFactory):
    """
    Hindlebook specific GET/PUT/POST Post requests.
    """

    def __init__(self, node):
        self.node = node
        # Base endpoint.  Per-request URLs are derived from this without
        # mutating it, so one instance can safely issue multiple requests.
        # (The previous implementation appended the post id to self.url on
        # every call, so a second call targeted ".../id1/id2".)
        self.url = "%s/api/post" % node.host
        self.auth = HTTPBasicAuth(node.our_username, node.our_password)

    def _post_url(self, post_id):
        """Builds the URL for one post without mutating self.url."""
        return "%s/%s" % (self.url, post_id)

    def get(self, post_id):
        """GETs a single post by id."""
        return requests.get(url=self._post_url(post_id), auth=self.auth)

    def post(self, post_id, serializedPost):
        """POSTs (creates) a post with the given id."""
        return requests.post(url=self._post_url(post_id),
                             data=serializedPost, auth=self.auth)

    def put(self, post_id, serializedPost):
        """PUTs (updates) a post with the given id."""
        return requests.put(url=self._post_url(post_id),
                            data=serializedPost, auth=self.auth)
class Team8PostRequest(PostRequestFactory):
"""
Team 8 specific GET/PUT/POST Post requests
"""
def __init__(self, node):
self.node = node
self.url = "%s/post" % node.host
def get(self, post_id, requester_uuid="YourAuthSucks"):
self.auth = (requester_uuid+":"+self.node.our_username,self.node.our_password)
self.url = self.url + "/%s" % post_id
return requests.get(url=self.url, auth=self.auth)
def post(self, post_id, serializedPost, requester_uuid="YourAuthSucks"):
self.auth = (requester_uuid+":"+self.node.our_username,self.node.our_password)
self.url = self.url + "/%s" % post_id
return requests.post(url=self.url, data=serializedPost, auth=self.auth)
def put(self, post_id, serializedPost, requester_uuid="YourAuthSucks"):
self.auth = (requester_uuid+":"+self.node.our_username,self.node.our_password)
self.url = self.url + "/%s" % post_id
return requests.put(url=self.url, data=serializedPost, auth=self.auth)
class SocshizzlePostRequest(PostRequestFactory):
    """
    Socshizzle specific GET/PUT/POST Post requests.
    """

    def __init__(self, node):
        self.node = node
        # Base endpoint.  Per-request URLs are derived from this without
        # mutating it, so one instance can safely issue multiple requests.
        # (The previous implementation appended the post id to self.url on
        # every call, so a second call targeted ".../id1/id2".)
        self.url = "%s/post" % node.host
        self.auth = HTTPBasicAuth(node.our_username, node.our_password)

    def _post_url(self, post_id):
        """Builds the URL for one post without mutating self.url."""
        return "%s/%s" % (self.url, post_id)

    def get(self, post_id):
        """GETs a single post by id."""
        return requests.get(url=self._post_url(post_id), auth=self.auth)

    def post(self, post_id, serializedPost):
        """POSTs (creates) a post with the given id."""
        return requests.post(url=self._post_url(post_id),
                             data=serializedPost, auth=self.auth)

    def put(self, post_id, serializedPost):
        """PUTs (updates) a post with the given id."""
        return requests.put(url=self._post_url(post_id),
                            data=serializedPost, auth=self.auth)
| {
"content_hash": "ceb23db8aa1d2d6856e4d6265b281cb9",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 106,
"avg_line_length": 34.93877551020408,
"alnum_prop": 0.6352219626168224,
"repo_name": "Roshack/cmput410-project",
"id": "282bf22f802a56c18bbf028537b089ef102176cc",
"size": "3424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DistributedSocialNetworking/api/requests/post_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36523"
},
{
"name": "HTML",
"bytes": "29835"
},
{
"name": "JavaScript",
"bytes": "89893"
},
{
"name": "Python",
"bytes": "166628"
}
],
"symlink_target": ""
} |
import random
import boto3
from moto.events import mock_events
from botocore.exceptions import ClientError
from nose.tools import assert_raises
# Fixture rules: two schedule-based rules and one event-pattern rule.
RULES = [
    {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
    {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
    {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'}
]

# Fixture targets keyed by target id.  Each entry's 'Rules' lists the rule
# names the target is attached to; every target is attached to 'test1' plus
# one of 'test2'/'test3'.
TARGETS = {
    'test-target-1': {
        'Id': 'test-target-1',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1',
        'Rules': ['test1', 'test2']
    },
    'test-target-2': {
        'Id': 'test-target-2',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2',
        'Rules': ['test1', 'test3']
    },
    'test-target-3': {
        'Id': 'test-target-3',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-3',
        'Rules': ['test1', 'test2']
    },
    'test-target-4': {
        'Id': 'test-target-4',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4',
        'Rules': ['test1', 'test3']
    },
    'test-target-5': {
        'Id': 'test-target-5',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5',
        'Rules': ['test1', 'test2']
    },
    'test-target-6': {
        'Id': 'test-target-6',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6',
        'Rules': ['test1', 'test3']
    }
}
def get_random_rule():
    """Returns one of the fixture RULES, chosen uniformly at random."""
    # random.choice is the idiomatic equivalent of indexing with
    # random.randint(0, len(RULES) - 1).
    return random.choice(RULES)
def generate_environment():
    """Creates an events client pre-populated with the RULES/TARGETS fixtures."""
    client = boto3.client('events', 'us-west-2')

    for rule in RULES:
        client.put_rule(
            Name=rule['Name'],
            ScheduleExpression=rule.get('ScheduleExpression', ''),
            EventPattern=rule.get('EventPattern', '')
        )
        # Attach every fixture target whose 'Rules' list mentions this rule.
        targets = [
            {'Id': target_id, 'Arn': TARGETS[target_id]['Arn']}
            for target_id in TARGETS
            if rule['Name'] in TARGETS[target_id].get('Rules')
        ]
        client.put_targets(Rule=rule['Name'], Targets=targets)

    return client
@mock_events
def test_list_rules():
    # NOTE(review): this function is shadowed by a second `test_list_rules`
    # defined later in this module, so this weaker version never runs under
    # test collection.  Consider removing it or renaming one of the two.
    client = generate_environment()

    response = client.list_rules()

    assert(response is not None)
    assert(len(response['Rules']) > 0)
@mock_events
def test_describe_rule():
    """describe_rule returns the rule's name and a generated ARN."""
    client = generate_environment()
    name = get_random_rule()['Name']

    described = client.describe_rule(Name=name)

    assert(described is not None)
    assert(described.get('Name') == name)
    assert(described.get('Arn') is not None)
@mock_events
def test_enable_disable_rule():
    """Rules start ENABLED and toggle via disable_rule/enable_rule."""
    rule_name = get_random_rule()['Name']
    client = generate_environment()

    def current_state():
        return client.describe_rule(Name=rule_name)['State']

    # Rules should start out enabled in these tests.
    assert(current_state() == 'ENABLED')

    client.disable_rule(Name=rule_name)
    assert(current_state() == 'DISABLED')

    client.enable_rule(Name=rule_name)
    assert(current_state() == 'ENABLED')
@mock_events
def test_list_rule_names_by_target():
    """Each target ARN maps back to exactly its fixture rule names."""
    client = generate_environment()

    for target in (TARGETS['test-target-1'], TARGETS['test-target-2']):
        listed = client.list_rule_names_by_target(TargetArn=target['Arn'])
        assert(len(listed['RuleNames']) == len(target['Rules']))
        for rule_name in listed['RuleNames']:
            assert(rule_name in target['Rules'])
@mock_events
def test_list_rules():
    """Every rule created by generate_environment is listed."""
    client = generate_environment()

    listed = client.list_rules()['Rules']
    assert(len(listed) == len(RULES))
@mock_events
def test_delete_rule():
    """Deleting one rule shrinks the rule list by exactly one."""
    client = generate_environment()

    client.delete_rule(Name=RULES[0]['Name'])

    remaining = client.list_rules()['Rules']
    assert(len(remaining) == len(RULES) - 1)
@mock_events
def test_list_targets_by_rule():
    """list_targets_by_rule returns every target attached to the rule."""
    rule_name = get_random_rule()['Name']
    client = generate_environment()

    found = client.list_targets_by_rule(Rule=rule_name)['Targets']
    expected = [target_id for target_id in TARGETS
                if rule_name in TARGETS[target_id].get('Rules')]
    assert(len(found) == len(expected))
@mock_events
def test_remove_targets():
    """Removing one target id reduces the rule's target count by one."""
    rule_name = get_random_rule()['Name']
    client = generate_environment()

    before = client.list_targets_by_rule(Rule=rule_name)['Targets']
    # The fixtures attach at least one target to every rule.
    assert(len(before) > 0)

    client.remove_targets(Rule=rule_name, Ids=[before[0]['Id']])

    after = client.list_targets_by_rule(Rule=rule_name)['Targets']
    assert(len(after) == len(before) - 1)
@mock_events
def test_permissions():
    """put_permission adds bus policy statements; remove_permission deletes by sid."""
    client = boto3.client('events', 'eu-central-1')

    client.put_permission(Action='PutEvents', Principal='111111111111',
                          StatementId='Account1')
    client.put_permission(Action='PutEvents', Principal='222222222222',
                          StatementId='Account2')

    statements = client.describe_event_bus()['Policy']['Statement']
    assert len(statements) == 2

    client.remove_permission(StatementId='Account2')

    statements = client.describe_event_bus()['Policy']['Statement']
    assert len(statements) == 1
    assert statements[0]['Sid'] == 'Account1'
@mock_events
def test_put_events():
    """put_events accepts a single entry but rejects an oversized batch."""
    client = boto3.client('events', 'eu-central-1')

    event = {
        "Source": "com.mycompany.myapp",
        "Detail": '{"key1": "value3", "key2": "value4"}',
        "Resources": ["resource1", "resource2"],
        "DetailType": "myDetailType"
    }

    # Boto3 would error if it didn't return 200 OK
    client.put_events(Entries=[event])

    # A 20-entry batch is expected to be rejected (presumably exceeding the
    # per-call entry limit).
    with assert_raises(ClientError):
        client.put_events(Entries=[event] * 20)
| {
"content_hash": "6c23dd015cd9989b780819ec12e6b0c2",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 95,
"avg_line_length": 28.807692307692307,
"alnum_prop": 0.6253337783711616,
"repo_name": "dbfr3qs/moto",
"id": "e839bde5b1f76df2a9510489a9fcb49e82d01b7a",
"size": "5992",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_events/test_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1457"
},
{
"name": "Python",
"bytes": "4188972"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
import random
from panoptes.pocs.dome import AbstractDome
class Dome(AbstractDome):
    """Simulator for a Dome controller.

    Tracks a single internal state string; no hardware is involved.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Simulated shutter state: 'open', 'closed', 'unknown', or
        # 'disconnected' before the first connect().
        self._state = 'disconnected'

    @property
    def is_open(self):
        """True when the simulated shutter is open."""
        return self._state == 'open'

    @property
    def is_closed(self):
        """True when the simulated shutter is closed."""
        return self._state == 'closed'

    @property
    def status(self):
        """Connection flag plus the raw simulated state string."""
        return {'connected': self.is_connected, 'open': self._state}

    def connect(self):
        """Marks the dome connected; first connect picks a random state."""
        if self.is_connected:
            return self.is_connected
        self._is_connected = True
        # Pick a random initial state.
        self._state = random.choice(['open', 'closed', 'unknown'])
        return self.is_connected

    def disconnect(self):
        """Marks the dome disconnected; always reports success."""
        self._is_connected = False
        return True

    def open(self):
        """Opens the simulated shutter."""
        self._state = 'open'
        return self.is_open

    def close(self):
        """Closes the simulated shutter."""
        self._state = 'closed'
        return self.is_closed
| {
"content_hash": "f463c6759af036d4703ec9269c2efdbf",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 23.69047619047619,
"alnum_prop": 0.5809045226130654,
"repo_name": "panoptes/POCS",
"id": "91a9bc1ac7b67d4bf6d96a56ef8eebf8c2e2c2d6",
"size": "995",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/panoptes/pocs/dome/simulator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5689"
},
{
"name": "JavaScript",
"bytes": "18198"
},
{
"name": "Python",
"bytes": "837393"
},
{
"name": "Shell",
"bytes": "9960"
}
],
"symlink_target": ""
} |
from src.base.test_cases import TestCases
class UglyNumTestCases(TestCases):
    """Test cases for the Ugly Number II problem (q264)."""

    def __init__(self):
        super(UglyNumTestCases, self).__init__()
        # NOTE(review): argument order appears to be (label, n, expected) --
        # the 10th ugly number is 12 and the 20th is 36 -- confirm against
        # TestCases.__add_test_case__.
        self.__add_test_case__('Test 1', 10, 12)
        self.__add_test_case__('Test 2', 20, 36) | {
"content_hash": "334843878d1c5757480e6c170ff2037c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 48,
"avg_line_length": 31.125,
"alnum_prop": 0.6144578313253012,
"repo_name": "hychrisli/PyAlgorithms",
"id": "b9acd334661bc416cce8d23c66747be2a8e37478",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/part2/q264_test_ugly_num_ii.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "201747"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.